1 From: Vasu Dev <vasu.dev@linux.intel.com>
2 Subject: Incremental Open-FCoE for Beta6
3 References: bnc#438954
4
5     Incremental Open-FCoE update for Beta6: moves the common FCoE code into libfcoe, adds a pluggable fcoe transport layer (fc_transport_fcoe) with fcoe_sw as the default software transport, and removes fcoe_def.h.
6
7 Signed-off-by: Vasu Dev <vasu.dev@linux.intel.com>
8 Acked-by: Hannes Reinecke <hare@suse.de>
9 ---
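The hunks below add a small pluggable fcoe transport layer: an offload driver
registers a struct fcoe_transport (fcoe_transport_register/unregister), and
fcoe_load_transport_driver() can pull such a module in by the alias
"fcoe-pci-0xVVVV-0xDDDD". A minimal registration sketch follows for reference
only; it is inferred from the usage in fc_transport_fcoe.c, the struct layout
in scsi/fc_transport_fcoe.h is not shown in this patch, and the example_fcoe_*
names and PCI ids are made-up placeholders, not part of the update.

    #include <linux/module.h>
    #include <linux/netdevice.h>
    #include <scsi/fc_transport_fcoe.h>

    /* hypothetical hooks: a real offload driver would set up and tear down
     * its FCoE instance on the given netdev here */
    static int example_fcoe_create(struct net_device *netdev)
    {
            return 0;
    }

    static int example_fcoe_destroy(struct net_device *netdev)
    {
            return 0;
    }

    /* vendor/device are compared against the netdev's PCI parent in
     * fcoe_transport_match(); the ids below are placeholders */
    static struct fcoe_transport example_fcoe_transport = {
            .name    = "example_fcoe",
            .create  = example_fcoe_create,
            .destroy = example_fcoe_destroy,
            .vendor  = 0x8086,
            .device  = 0x10fb,
    };

    static int __init example_fcoe_init(void)
    {
            return fcoe_transport_register(&example_fcoe_transport);
    }

    static void __exit example_fcoe_exit(void)
    {
            fcoe_transport_unregister(&example_fcoe_transport);
    }

    module_init(example_fcoe_init);
    module_exit(example_fcoe_exit);
    MODULE_LICENSE("GPL");
    /* lets fcoe_load_transport_driver() find this module via request_module() */
    MODULE_ALIAS("fcoe-pci-0x8086-0x10fb");

With such a module available, fcoe_transport_lookup() selects it for matching
netdevs; otherwise the default software transport (fcoe_sw) is used.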
10 diff --git a/drivers/scsi/Kconfig b/drivers/scsi/Kconfig
11 index 6f38b13..4922958 100644
12 --- a/drivers/scsi/Kconfig
13 +++ b/drivers/scsi/Kconfig
14 @@ -328,19 +328,6 @@ menuconfig SCSI_LOWLEVEL
15
16 if SCSI_LOWLEVEL && SCSI
17
18 -config LIBFC
19 - tristate "LibFC module"
20 - depends on SCSI && SCSI_FC_ATTRS
21 - ---help---
22 - Fibre Channel library module
23 -
24 -config FCOE
25 - tristate "FCoE module"
26 - depends on SCSI && SCSI_FC_ATTRS
27 - select LIBFC
28 - ---help---
29 - Fibre Channel over Ethernet module
30 -
31 config ISCSI_TCP
32 tristate "iSCSI Initiator over TCP/IP"
33 depends on SCSI && INET
34 @@ -616,6 +603,20 @@ config SCSI_FLASHPOINT
35 substantial, so users of MultiMaster Host Adapters may not
36 wish to include it.
37
38 +config LIBFC
39 + tristate "LibFC module"
40 + depends on SCSI && SCSI_FC_ATTRS
41 + ---help---
42 + Fibre Channel library module
43 +
44 +config FCOE
45 + tristate "FCoE module"
46 + depends on SCSI
47 + select LIBFC
48 + ---help---
49 + Fibre Channel over Ethernet module
50 +
51 +
52 config SCSI_DMX3191D
53 tristate "DMX3191D SCSI support"
54 depends on PCI && SCSI
55 diff --git a/drivers/scsi/fcoe/fc_transport_fcoe.c b/drivers/scsi/fcoe/fc_transport_fcoe.c
56 index ff207b2..bf7fe6f 100644
57 --- a/drivers/scsi/fcoe/fc_transport_fcoe.c
58 +++ b/drivers/scsi/fcoe/fc_transport_fcoe.c
59 @@ -17,356 +17,430 @@
60 * Maintained at www.Open-FCoE.org
61 */
62
63 -#include <linux/module.h>
64 -#include <linux/version.h>
65 -#include <linux/kernel.h>
66 -#include <linux/kthread.h>
67 -#include <linux/spinlock.h>
68 -#include <linux/cpu.h>
69 -#include <linux/netdevice.h>
70 -#include <linux/etherdevice.h>
71 -#include <linux/ethtool.h>
72 -#include <linux/if_ether.h>
73 -#include <linux/fs.h>
74 -#include <linux/sysfs.h>
75 -#include <linux/ctype.h>
76 -
77 -#include <scsi/libfc/libfc.h>
78 -
79 -#include "fcoe_def.h"
80 -
81 -MODULE_AUTHOR("Open-FCoE.org");
82 -MODULE_DESCRIPTION("FCoE");
83 -MODULE_LICENSE("GPL");
84 -MODULE_VERSION("1.0.3");
85 +#include <linux/pci.h>
86 +#include <scsi/libfcoe.h>
87 +#include <scsi/fc_transport_fcoe.h>
88
89 -/*
90 - * Static functions and variables definations
91 - */
92 -#ifdef CONFIG_HOTPLUG_CPU
93 -static int fcoe_cpu_callback(struct notifier_block *, ulong, void *);
94 -#endif /* CONFIG_HOTPLUG_CPU */
95 -static int fcoe_device_notification(struct notifier_block *, ulong, void *);
96 -static void fcoe_dev_setup(void);
97 -static void fcoe_dev_cleanup(void);
98 -
99 -#ifdef CONFIG_HOTPLUG_CPU
100 -static struct notifier_block fcoe_cpu_notifier = {
101 - .notifier_call = fcoe_cpu_callback,
102 +/* internal fcoe transport */
103 +struct fcoe_transport_internal {
104 + struct fcoe_transport *t;
105 + struct net_device *netdev;
106 + struct list_head list;
107 };
108 -#endif /* CONFIG_HOTPLUG_CPU */
109
110 -/*
111 - * notification function from net device
112 - */
113 -static struct notifier_block fcoe_notifier = {
114 - .notifier_call = fcoe_device_notification,
115 -};
116 +/* fcoe transports list and its lock */
117 +static LIST_HEAD(fcoe_transports);
118 +static DEFINE_MUTEX(fcoe_transports_lock);
119
120 -#ifdef CONFIG_HOTPLUG_CPU
121 -/*
122 - * create percpu stats block
123 - * called by cpu add/remove notifier
124 - */
125 -static void fcoe_create_percpu_data(int cpu)
126 +/**
127 + * fcoe_transport_default - returns ptr to the default transport fcoe_sw
128 + **/
129 +struct fcoe_transport *fcoe_transport_default(void)
130 {
131 - struct fc_lport *lp;
132 - struct fcoe_softc *fc;
133 -
134 - write_lock_bh(&fcoe_hostlist_lock);
135 - list_for_each_entry(fc, &fcoe_hostlist, list) {
136 - lp = fc->lp;
137 - if (lp->dev_stats[cpu] == NULL)
138 - lp->dev_stats[cpu] = kzalloc(sizeof(struct fcoe_dev_stats),
139 - GFP_KERNEL);
140 - }
141 - write_unlock_bh(&fcoe_hostlist_lock);
142 + return &fcoe_sw_transport;
143 }
144
145 -/*
146 - * destroy percpu stats block
147 - * called by cpu add/remove notifier
148 - */
149 -static void fcoe_destroy_percpu_data(int cpu)
150 +/**
151 + * fcoe_transport_pcidev - get the pci dev from a netdev
152 + * @netdev: the netdev that the pci dev will be retrieved from
153 + *
154 + * Returns: NULL or the corresponding pci_dev
155 + **/
156 +struct pci_dev *fcoe_transport_pcidev(const struct net_device *netdev)
157 {
158 - struct fc_lport *lp;
159 - struct fcoe_softc *fc;
160 -
161 - write_lock_bh(&fcoe_hostlist_lock);
162 - list_for_each_entry(fc, &fcoe_hostlist, list) {
163 - lp = fc->lp;
164 - kfree(lp->dev_stats[cpu]);
165 - lp->dev_stats[cpu] = NULL;
166 - }
167 - write_unlock_bh(&fcoe_hostlist_lock);
168 + if (!netdev->dev.parent)
169 + return NULL;
170 + return to_pci_dev(netdev->dev.parent);
171 }
172
173 -/*
174 - * Get notified when a cpu comes on/off. Be hotplug friendly.
175 - */
176 -static int fcoe_cpu_callback(struct notifier_block *nfb, unsigned long action,
177 - void *hcpu)
178 +/**
179 + * fcoe_transport_device_lookup - check whether a netdev is managed by
180 + * the given transport
181 + * @t: the fcoe transport to search
182 + * @netdev: the netdev to look up
183 + *
184 + * Walks the transport's device list looking for an entry that matches
185 + * the given netdev.
186 + *
187 + * Returns: the matching internal device entry, or NULL if not found
188 + **/
189 +static struct fcoe_transport_internal *fcoe_transport_device_lookup(
190 + struct fcoe_transport *t, struct net_device *netdev)
191 {
192 - unsigned int cpu = (unsigned long)hcpu;
193 -
194 - switch (action) {
195 - case CPU_ONLINE:
196 - fcoe_create_percpu_data(cpu);
197 - break;
198 - case CPU_DEAD:
199 - fcoe_destroy_percpu_data(cpu);
200 - break;
201 - default:
202 - break;
203 + struct fcoe_transport_internal *ti;
204 +
205 + /* check if this device is already managed by the transport */
206 + mutex_lock(&t->devlock);
207 + list_for_each_entry(ti, &t->devlist, list) {
208 + if (ti->netdev == netdev) {
209 + mutex_unlock(&t->devlock);
210 + return ti;
211 + }
212 }
213 - return NOTIFY_OK;
214 + mutex_unlock(&t->devlock);
215 + return NULL;
216 }
217 -#endif /* CONFIG_HOTPLUG_CPU */
218 -
219 -/*
220 - * function to setup link change notification interface
221 - */
222 -static void fcoe_dev_setup(void)
223 +/**
224 + * fcoe_transport_device_add - assign a transport to a device
225 + * @t: the fcoe transport that will manage the netdev
226 + * @netdev: the netdev the transport is to be attached to
227 + *
228 + * Adds the netdev to the given transport's device list.
229 + *
230 + * Returns: 0 for success
231 + **/
232 +static int fcoe_transport_device_add(struct fcoe_transport *t,
233 + struct net_device *netdev)
234 {
235 - /*
236 - * here setup a interface specific wd time to
237 - * monitor the link state
238 - */
239 - register_netdevice_notifier(&fcoe_notifier);
240 + struct fcoe_transport_internal *ti;
241 +
242 + ti = fcoe_transport_device_lookup(t, netdev);
243 + if (ti) {
244 + printk(KERN_DEBUG "fcoe_transport_device_add:"
245 + "device %s is already added to transport %s\n",
246 + netdev->name, t->name);
247 + return -EEXIST;
248 + }
249 + /* allocate an internal struct to host the netdev and the list */
250 + ti = kzalloc(sizeof(*ti), GFP_KERNEL);
251 + if (!ti)
252 + return -ENOMEM;
253 +
254 + ti->t = t;
255 + ti->netdev = netdev;
256 + INIT_LIST_HEAD(&ti->list);
257 + dev_hold(ti->netdev);
258 +
259 + mutex_lock(&t->devlock);
260 + list_add(&ti->list, &t->devlist);
261 + mutex_unlock(&t->devlock);
262 +
263 + printk(KERN_DEBUG "fcoe_transport_device_add:"
264 + "device %s added to transport %s\n",
265 + netdev->name, t->name);
266 +
267 + return 0;
268 }
269
270 -/*
271 - * function to cleanup link change notification interface
272 - */
273 -static void fcoe_dev_cleanup(void)
274 +/**
275 + * fcoe_transport_device_remove - remove a device from its transport
276 + * @netdev: the netdev to be detached from the transport
277 + *
278 + * this removes the device from the transport so the given transport will
279 + * not manage this device any more
280 + *
281 + * Returns: 0 for success
282 + **/
283 +static int fcoe_transport_device_remove(struct fcoe_transport *t,
284 + struct net_device *netdev)
285 {
286 - unregister_netdevice_notifier(&fcoe_notifier);
287 + struct fcoe_transport_internal *ti;
288 +
289 + ti = fcoe_transport_device_lookup(t, netdev);
290 + if (!ti) {
291 + printk(KERN_DEBUG "fcoe_transport_device_remove:"
292 + "device %s is not managed by transport %s\n",
293 + netdev->name, t->name);
294 + return -ENODEV;
295 + }
296 + mutex_lock(&t->devlock);
297 + list_del(&ti->list);
298 + mutex_unlock(&t->devlock);
299 + printk(KERN_DEBUG "fcoe_transport_device_remove:"
300 + "device %s removed from transport %s\n",
301 + netdev->name, t->name);
302 + dev_put(ti->netdev);
303 + kfree(ti);
304 + return 0;
305 }
306
307 -/*
308 - * This function is called by the ethernet driver
309 - * this is called in case of link change event
310 - */
311 -static int fcoe_device_notification(struct notifier_block *notifier,
312 - ulong event, void *ptr)
313 +/**
314 + * fcoe_transport_device_remove_all - remove all from transport devlist
315 + *
316 + * this removes all devices from the given transport's device list, so the
317 + * transport no longer manages any of them
318 + *
319 + * Returns: none
320 + **/
321 +static void fcoe_transport_device_remove_all(struct fcoe_transport *t)
322 {
323 - struct fc_lport *lp = NULL;
324 - struct net_device *real_dev = ptr;
325 - struct fcoe_softc *fc;
326 - struct fcoe_dev_stats *stats;
327 - u16 new_status;
328 - u32 mfs;
329 - int rc = NOTIFY_OK;
330 -
331 - read_lock(&fcoe_hostlist_lock);
332 - list_for_each_entry(fc, &fcoe_hostlist, list) {
333 - if (fc->real_dev == real_dev) {
334 - lp = fc->lp;
335 - break;
336 - }
337 - }
338 - read_unlock(&fcoe_hostlist_lock);
339 - if (lp == NULL) {
340 - rc = NOTIFY_DONE;
341 - goto out;
342 - }
343 + struct fcoe_transport_internal *ti, *tmp;
344
345 - new_status = lp->link_status;
346 - switch (event) {
347 - case NETDEV_DOWN:
348 - case NETDEV_GOING_DOWN:
349 - new_status &= ~FC_LINK_UP;
350 - break;
351 - case NETDEV_UP:
352 - case NETDEV_CHANGE:
353 - new_status &= ~FC_LINK_UP;
354 - if (!fcoe_link_ok(lp))
355 - new_status |= FC_LINK_UP;
356 - break;
357 - case NETDEV_CHANGEMTU:
358 - mfs = fc->real_dev->mtu -
359 - (sizeof(struct fcoe_hdr) +
360 - sizeof(struct fcoe_crc_eof));
361 - if (fc->user_mfs && fc->user_mfs < mfs)
362 - mfs = fc->user_mfs;
363 - if (mfs >= FC_MIN_MAX_FRAME)
364 - fc_set_mfs(lp, mfs);
365 - new_status &= ~FC_LINK_UP;
366 - if (!fcoe_link_ok(lp))
367 - new_status |= FC_LINK_UP;
368 - break;
369 - case NETDEV_REGISTER:
370 - break;
371 - default:
372 - FC_DBG("unknown event %ld call", event);
373 - }
374 - if (lp->link_status != new_status) {
375 - if ((new_status & FC_LINK_UP) == FC_LINK_UP)
376 - fc_linkup(lp);
377 - else {
378 - stats = lp->dev_stats[smp_processor_id()];
379 - if (stats)
380 - stats->LinkFailureCount++;
381 - fc_linkdown(lp);
382 - fcoe_clean_pending_queue(lp);
383 - }
384 + mutex_lock(&t->devlock);
385 + list_for_each_entry_safe(ti, tmp, &t->devlist, list) {
386 + list_del(&ti->list);
387 + kfree(ti);
388 }
389 -out:
390 - return rc;
391 + mutex_unlock(&t->devlock);
392 }
393
394 -static void trimstr(char *str, int len)
395 +/**
396 + * fcoe_transport_match - check if a transport matches the hw of a netdev
397 + * @t: the fcoe transport
398 + * @netdev: the netdev to be checked against the transport
399 + *
400 + * This function checks whether the given transport wants to manage the
401 + * input netdev. If the transport implements the match function, it is
402 + * called; otherwise the pci vendor and device ids are compared.
403 + *
404 + * Returns: true if they match
405 + **/
406 +static bool fcoe_transport_match(struct fcoe_transport *t,
407 + struct net_device *netdev)
408 {
409 - char *cp = str + len;
410 - while (--cp >= str && *cp == '\n')
411 - *cp = '\0';
412 + /* match transport by vendor and device id */
413 + struct pci_dev *pci;
414 +
415 + pci = fcoe_transport_pcidev(netdev);
416 +
417 + if (pci) {
418 + printk(KERN_DEBUG "fcoe_transport_match:"
419 + "%s:%x:%x -- %s:%x:%x\n",
420 + t->name, t->vendor, t->device,
421 + netdev->name, pci->vendor, pci->device);
422 +
423 + /* if transport supports match */
424 + if (t->match)
425 + return t->match(netdev);
426 +
427 + /* else just compare the vendor and device id: pci only */
428 + return (t->vendor == pci->vendor) && (t->device == pci->device);
429 + }
430 + return false;
431 }
432
433 -static int fcoe_destroy(const char *buffer, struct kernel_param *kp)
434 +/**
435 + * fcoe_transport_lookup - find the fcoe transport that manages a netdev
436 + * @netdev: the netdev whose transport is to be looked up
437 + *
438 + * This compares the parent device (pci) vendor and device id against each
439 + * registered transport.
440 + *
441 + * Returns: the matching transport, or the default sw transport (fcoe_sw)
442 + * if no other transport matches
443 + **/
444 +static struct fcoe_transport *fcoe_transport_lookup(
445 + struct net_device *netdev)
446 {
447 - struct net_device *netdev;
448 - char ifname[IFNAMSIZ + 2];
449 - int rc = -ENODEV;
450 -
451 - strlcpy(ifname, buffer, IFNAMSIZ);
452 - trimstr(ifname, strlen(ifname));
453 - netdev = dev_get_by_name(&init_net, ifname);
454 - if (netdev) {
455 - rc = fcoe_destroy_interface(netdev);
456 - dev_put(netdev);
457 + struct fcoe_transport *t;
458 +
459 + mutex_lock(&fcoe_transports_lock);
460 + list_for_each_entry(t, &fcoe_transports, list) {
461 + if (fcoe_transport_match(t, netdev)) {
462 + mutex_unlock(&fcoe_transports_lock);
463 + return t;
464 + }
465 }
466 - return rc;
467 + mutex_unlock(&fcoe_transports_lock);
468 +
469 + printk(KERN_DEBUG "fcoe_transport_lookup:"
470 + "use default transport for %s\n", netdev->name);
471 + return fcoe_transport_default();
472 }
473
474 -static int fcoe_create(const char *buffer, struct kernel_param *kp)
475 +/**
476 + * fcoe_transport_register - adds a fcoe transport to the fcoe transports list
477 + * @t: ptr to the fcoe transport to be added
478 + *
479 + * Returns: 0 for success
480 + **/
481 +int fcoe_transport_register(struct fcoe_transport *t)
482 {
483 - struct net_device *netdev;
484 - char ifname[IFNAMSIZ + 2];
485 - int rc = -ENODEV;
486 -
487 - strlcpy(ifname, buffer, IFNAMSIZ);
488 - trimstr(ifname, strlen(ifname));
489 - netdev = dev_get_by_name(&init_net, ifname);
490 - if (netdev) {
491 - rc = fcoe_create_interface(netdev);
492 - dev_put(netdev);
493 + struct fcoe_transport *tt;
494 +
495 + /* TODO - add fcoe_transport specific initialization here */
496 + mutex_lock(&fcoe_transports_lock);
497 + list_for_each_entry(tt, &fcoe_transports, list) {
498 + if (tt == t) {
499 + mutex_unlock(&fcoe_transports_lock);
500 + return -EEXIST;
501 + }
502 }
503 - return rc;
504 + list_add_tail(&t->list, &fcoe_transports);
505 + mutex_unlock(&fcoe_transports_lock);
506 +
507 + mutex_init(&t->devlock);
508 + INIT_LIST_HEAD(&t->devlist);
509 +
510 + printk(KERN_DEBUG "fcoe_transport_register:%s\n", t->name);
511 +
512 + return 0;
513 }
514 +EXPORT_SYMBOL_GPL(fcoe_transport_register);
515
516 -module_param_call(create, fcoe_create, NULL, NULL, S_IWUSR);
517 -__MODULE_PARM_TYPE(create, "string");
518 -MODULE_PARM_DESC(create, "Create fcoe port using net device passed in.");
519 -module_param_call(destroy, fcoe_destroy, NULL, NULL, S_IWUSR);
520 -__MODULE_PARM_TYPE(destroy, "string");
521 -MODULE_PARM_DESC(destroy, "Destroy fcoe port");
522 +/**
523 + * fcoe_transport_unregister - remove the transport from the fcoe transports list
524 + * @t: ptr to the fcoe transport to be removed
525 + *
526 + * Returns: 0 for success
527 + **/
528 +int fcoe_transport_unregister(struct fcoe_transport *t)
529 +{
530 + struct fcoe_transport *tt, *tmp;
531 +
532 + mutex_lock(&fcoe_transports_lock);
533 + list_for_each_entry_safe(tt, tmp, &fcoe_transports, list) {
534 + if (tt == t) {
535 + list_del(&t->list);
536 + mutex_unlock(&fcoe_transports_lock);
537 + fcoe_transport_device_remove_all(t);
538 + printk(KERN_DEBUG "fcoe_transport_unregister:%s\n",
539 + t->name);
540 + return 0;
541 + }
542 + }
543 + mutex_unlock(&fcoe_transports_lock);
544 + return -ENODEV;
545 +}
546 +EXPORT_SYMBOL_GPL(fcoe_transport_unregister);
547
548 /*
549 - * Initialization routine
550 - * 1. Will create fc transport software structure
551 - * 2. initialize the link list of port information structure
552 - */
553 -static int __init fcoe_init(void)
554 + * fcoe_load_transport_driver - load an offload driver by alias name
555 + * @netdev: the target net device
556 + *
557 + * Requests an offload driver module as the fcoe transport; if that fails, it
558 + * falls back to using the SW HBA (fcoe_sw) as its transport
559 + *
560 + * TODO -
561 + * 1. supports only PCI devices
562 + * 2. needs a fix for VLAN and bonding
563 + * 3. pure hw fcoe hba may not have netdev
564 + *
565 + * Returns: 0 for success
566 + **/
567 +int fcoe_load_transport_driver(struct net_device *netdev)
568 {
569 - int cpu;
570 - struct fcoe_percpu_s *p;
571 -
572 - rwlock_init(&fcoe_hostlist_lock);
573 -
574 -#ifdef CONFIG_HOTPLUG_CPU
575 - register_cpu_notifier(&fcoe_cpu_notifier);
576 -#endif /* CONFIG_HOTPLUG_CPU */
577 -
578 - /*
579 - * initialize per CPU interrupt thread
580 - */
581 - for_each_online_cpu(cpu) {
582 - p = kzalloc(sizeof(struct fcoe_percpu_s), GFP_KERNEL);
583 - if (p) {
584 - p->thread = kthread_create(fcoe_percpu_receive_thread,
585 - (void *)p,
586 - "fcoethread/%d", cpu);
587 -
588 - /*
589 - * if there is no error then bind the thread to the cpu
590 - * initialize the semaphore and skb queue head
591 - */
592 - if (likely(!IS_ERR(p->thread))) {
593 - p->cpu = cpu;
594 - fcoe_percpu[cpu] = p;
595 - skb_queue_head_init(&p->fcoe_rx_list);
596 - kthread_bind(p->thread, cpu);
597 - wake_up_process(p->thread);
598 - } else {
599 - fcoe_percpu[cpu] = NULL;
600 - kfree(p);
601 -
602 - }
603 - }
604 + struct pci_dev *pci;
605 + struct device *dev = netdev->dev.parent;
606 +
607 + if (fcoe_transport_lookup(netdev)) {
608 + /* a transport is already attached to this device */
609 + printk(KERN_DEBUG "fcoe: already loaded transport for %s\n",
610 + netdev->name);
611 + return -EEXIST;
612 }
613
614 - /*
615 - * setup link change notification
616 - */
617 - fcoe_dev_setup();
618 + pci = to_pci_dev(dev);
619 + if (dev->bus != &pci_bus_type) {
620 + printk(KERN_DEBUG "fcoe: supports only PCI devices\n");
621 + return -ENODEV;
622 + }
623 + printk(KERN_DEBUG "fcoe: loading driver fcoe-pci-0x%04x-0x%04x\n",
624 + pci->vendor, pci->device);
625 +
626 + return request_module("fcoe-pci-0x%04x-0x%04x",
627 + pci->vendor, pci->device);
628
629 - init_timer(&fcoe_timer);
630 - fcoe_timer.data = 0;
631 - fcoe_timer.function = fcoe_watchdog;
632 - fcoe_timer.expires = (jiffies + (10 * HZ));
633 - add_timer(&fcoe_timer);
634 +}
635 +EXPORT_SYMBOL_GPL(fcoe_load_transport_driver);
636
637 - if (fcoe_sw_init() != 0) {
638 - FC_DBG("fail to attach fc transport");
639 - return -1;
640 +/**
641 + * fcoe_transport_attach - load transport to fcoe
642 + * @netdev: the netdev the transport to be attached to
643 + *
644 + * This will look for existing offload driver, if not found, it falls back to
645 + * the default sw hba (fcoe_sw) as its fcoe transport.
646 + *
647 + * Returns: 0 for success
648 + **/
649 +int fcoe_transport_attach(struct net_device *netdev)
650 +{
651 + struct fcoe_transport *t;
652 +
653 + /* find the corresponding transport */
654 + t = fcoe_transport_lookup(netdev);
655 + if (!t) {
656 + printk(KERN_DEBUG "fcoe_transport_attach"
657 + ":no transport found for %s\n",
658 + netdev->name);
659 + return -ENODEV;
660 }
661 + /* add to the transport */
662 + if (fcoe_transport_device_add(t, netdev)) {
663 + printk(KERN_DEBUG "fcoe_transport_attach"
664 + ":failed to add %s to transport %s\n",
665 + netdev->name, t->name);
666 + return -EIO;
667 + }
668 + /* transport create function */
669 + if (t->create)
670 + t->create(netdev);
671
672 + printk(KERN_DEBUG "fcoe_transport_attach:transport %s for %s\n",
673 + t->name, netdev->name);
674 return 0;
675 }
676 -module_init(fcoe_init);
677 +EXPORT_SYMBOL_GPL(fcoe_transport_attach);
678
679 -static void __exit fcoe_exit(void)
680 +/**
681 + * fcoe_transport_release - unload transport from fcoe
682 + * @netdev: the net device on which fcoe is to be released
683 + *
684 + * Returns: 0 for success
685 + **/
686 +int fcoe_transport_release(struct net_device *netdev)
687 {
688 - u32 idx;
689 - struct fcoe_softc *fc, *tmp;
690 - struct fcoe_percpu_s *p;
691 - struct sk_buff *skb;
692 -
693 - /*
694 - * Stop all call back interfaces
695 - */
696 -#ifdef CONFIG_HOTPLUG_CPU
697 - unregister_cpu_notifier(&fcoe_cpu_notifier);
698 -#endif /* CONFIG_HOTPLUG_CPU */
699 - fcoe_dev_cleanup();
700 -
701 - /*
702 - * stop timer
703 - */
704 - del_timer_sync(&fcoe_timer);
705 -
706 - /*
707 - * assuming that at this time there will be no
708 - * ioctl in prograss, therefore we do not need to lock the
709 - * list.
710 - */
711 - list_for_each_entry_safe(fc, tmp, &fcoe_hostlist, list)
712 - fcoe_destroy_interface(fc->real_dev);
713 -
714 - for (idx = 0; idx < NR_CPUS; idx++) {
715 - if (fcoe_percpu[idx]) {
716 - kthread_stop(fcoe_percpu[idx]->thread);
717 - p = fcoe_percpu[idx];
718 - spin_lock_bh(&p->fcoe_rx_list.lock);
719 - while ((skb = __skb_dequeue(&p->fcoe_rx_list)) != NULL)
720 - kfree_skb(skb);
721 - spin_unlock_bh(&p->fcoe_rx_list.lock);
722 - if (fcoe_percpu[idx]->crc_eof_page)
723 - put_page(fcoe_percpu[idx]->crc_eof_page);
724 - kfree(fcoe_percpu[idx]);
725 - }
726 + struct fcoe_transport *t;
727 +
728 + /* find the corresponding transport */
729 + t = fcoe_transport_lookup(netdev);
730 + if (!t) {
731 + printk(KERN_DEBUG "fcoe_transport_release:"
732 + "no transport found for %s\n",
733 + netdev->name);
734 + return -ENODEV;
735 }
736 + /* remove the device from the transport */
737 + if (fcoe_transport_device_remove(t, netdev)) {
738 + printk(KERN_DEBUG "fcoe_transport_release:"
739 + "failed to remove %s from transport %s\n",
740 + netdev->name, t->name);
741 + return -EIO;
742 + }
743 + /* transport destroy function */
744 + if (t->destroy)
745 + t->destroy(netdev);
746 +
747 + printk(KERN_DEBUG "fcoe_transport_release:"
748 + "device %s detached from transport %s\n",
749 + netdev->name, t->name);
750 +
751 + return 0;
752 +}
753 +EXPORT_SYMBOL_GPL(fcoe_transport_release);
754 +
755 +/**
756 + * fcoe_transport_init - initializes fcoe transport layer
757 + *
758 + * This prepares the fcoe transport layer for use
759 + *
760 + * Returns: 0 for success
761 + **/
762 +int __init fcoe_transport_init(void)
763 +{
764 + INIT_LIST_HEAD(&fcoe_transports);
765 + mutex_init(&fcoe_transports_lock);
766 + return 0;
767 +}
768
769 - fcoe_sw_exit();
770 +/**
771 + * fcoe_transport_exit - cleans up the fcoe transport layer
772 + * This cleans up the fcoe transport layer, removing every transport on the
773 + * list; note that the transport destroy func is not called here.
774 + *
775 + * Returns: 0 for success
776 + **/
777 +int __exit fcoe_transport_exit(void)
778 +{
779 + struct fcoe_transport *t, *tmp;
780 +
781 + mutex_lock(&fcoe_transports_lock);
782 + list_for_each_entry_safe(t, tmp, &fcoe_transports, list) {
783 + list_del(&t->list);
784 + mutex_unlock(&fcoe_transports_lock);
785 + fcoe_transport_device_remove_all(t);
786 + mutex_lock(&fcoe_transports_lock);
787 + }
788 + mutex_unlock(&fcoe_transports_lock);
789 + return 0;
790 }
791 -module_exit(fcoe_exit);
792 diff --git a/drivers/scsi/fcoe/fcoe_def.h b/drivers/scsi/fcoe/fcoe_def.h
793 deleted file mode 100644
794 index b00e14b..0000000
795 --- a/drivers/scsi/fcoe/fcoe_def.h
796 +++ /dev/null
797 @@ -1,92 +0,0 @@
798 -/*
799 - * Copyright(c) 2007 - 2008 Intel Corporation. All rights reserved.
800 - *
801 - * This program is free software; you can redistribute it and/or modify it
802 - * under the terms and conditions of the GNU General Public License,
803 - * version 2, as published by the Free Software Foundation.
804 - *
805 - * This program is distributed in the hope it will be useful, but WITHOUT
806 - * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
807 - * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
808 - * more details.
809 - *
810 - * You should have received a copy of the GNU General Public License along with
811 - * this program; if not, write to the Free Software Foundation, Inc.,
812 - * 51 Franklin St - Fifth Floor, Boston, MA 02110-1301 USA.
813 - *
814 - * Maintained at www.Open-FCoE.org
815 - */
816 -
817 -#ifndef _FCOE_DEF_H_
818 -#define _FCOE_DEF_H_
819 -
820 -#include <linux/etherdevice.h>
821 -#include <linux/if_ether.h>
822 -
823 -#include <scsi/libfc/libfc.h>
824 -
825 -#include <scsi/fc/fc_fcoe.h>
826 -
827 -#define FCOE_DRIVER_NAME "fcoe" /* driver name for ioctls */
828 -#define FCOE_DRIVER_VENDOR "Open-FC.org" /* vendor name for ioctls */
829 -
830 -#define FCOE_MIN_FRAME 36
831 -#define FCOE_WORD_TO_BYTE 4
832 -
833 -/*
834 - * this is the main common structure across all instance of fcoe driver.
835 - * There is one to one mapping between hba struct and ethernet nic.
836 - * list of hbas contains pointer to the hba struct, these structures are
837 - * stored in this array using there corresponding if_index.
838 - */
839 -
840 -struct fcoe_percpu_s {
841 - int cpu;
842 - struct task_struct *thread;
843 - struct sk_buff_head fcoe_rx_list;
844 - struct page *crc_eof_page;
845 - int crc_eof_offset;
846 -};
847 -
848 -extern struct timer_list fcoe_timer;
849 -extern rwlock_t fcoe_hostlist_lock;
850 -extern struct list_head fcoe_hostlist;
851 -extern struct fcoe_percpu_s *fcoe_percpu[];
852 -
853 -struct fcoe_softc {
854 - struct list_head list;
855 - struct fc_lport *lp;
856 - struct net_device *real_dev;
857 - struct net_device *phys_dev; /* device with ethtool_ops */
858 - struct packet_type fcoe_packet_type;
859 - struct sk_buff_head fcoe_pending_queue;
860 - u16 user_mfs; /* configured max frame size */
861 -
862 - u8 dest_addr[ETH_ALEN];
863 - u8 ctl_src_addr[ETH_ALEN];
864 - u8 data_src_addr[ETH_ALEN];
865 - /*
866 - * fcoe protocol address learning related stuff
867 - */
868 - u16 flogi_oxid;
869 - u8 flogi_progress;
870 - u8 address_mode;
871 -};
872 -
873 -int fcoe_percpu_receive_thread(void *arg);
874 -
875 -/*
876 - * HBA transport ops prototypes
877 - */
878 -void fcoe_clean_pending_queue(struct fc_lport *fd);
879 -void fcoe_watchdog(ulong vp);
880 -int fcoe_destroy_interface(struct net_device *);
881 -int fcoe_create_interface(struct net_device *);
882 -int fcoe_xmit(struct fc_lport *, struct fc_frame *);
883 -int fcoe_rcv(struct sk_buff *, struct net_device *,
884 - struct packet_type *, struct net_device *);
885 -int fcoe_link_ok(struct fc_lport *);
886 -
887 -int __init fcoe_sw_init(void);
888 -void __exit fcoe_sw_exit(void);
889 -#endif /* _FCOE_DEF_H_ */
890 diff --git a/drivers/scsi/fcoe/fcoe_sw.c b/drivers/scsi/fcoe/fcoe_sw.c
891 index d7ceb1b..33aebe5 100644
892 --- a/drivers/scsi/fcoe/fcoe_sw.c
893 +++ b/drivers/scsi/fcoe/fcoe_sw.c
894 @@ -17,19 +17,14 @@
895 * Maintained at www.Open-FCoE.org
896 */
897
898 -/*
899 - * FCOE protocol file
900 - */
901 -
902 #include <linux/module.h>
903 #include <linux/version.h>
904 #include <linux/kernel.h>
905 +#include <linux/pci.h>
906 #include <linux/init.h>
907 #include <linux/spinlock.h>
908 #include <linux/netdevice.h>
909 #include <linux/etherdevice.h>
910 -#include <linux/ethtool.h>
911 -#include <linux/if_ether.h>
912 #include <linux/if_vlan.h>
913 #include <net/rtnetlink.h>
914
915 @@ -39,36 +34,25 @@
916 #include <scsi/scsi_transport.h>
917 #include <scsi/scsi_transport_fc.h>
918
919 -#include <scsi/libfc/libfc.h>
920 -
921 -#include <scsi/fc/fc_fcoe.h>
922 -#include "fcoe_def.h"
923 +#include <scsi/libfc.h>
924 +#include <scsi/libfcoe.h>
925 +#include <scsi/fc_transport_fcoe.h>
926
927 -#define FCOE_VERSION "0.1"
928 +#define FCOE_SW_VERSION "0.1"
929 +#define FCOE_SW_NAME "fcoesw"
930 +#define FCOE_SW_VENDOR "Open-FCoE.org"
931
932 #define FCOE_MAX_LUN 255
933 #define FCOE_MAX_FCP_TARGET 256
934
935 #define FCOE_MAX_OUTSTANDING_COMMANDS 1024
936
937 -#define FCOE_MIN_XID 0x0004
938 -#define FCOE_MAX_XID 0x07ef
939 +#define FCOE_MIN_XID 0x0001 /* the min xid supported by fcoe_sw */
940 +#define FCOE_MAX_XID 0x07ef /* the max xid supported by fcoe_sw */
941
942 -LIST_HEAD(fcoe_hostlist);
943 -DEFINE_RWLOCK(fcoe_hostlist_lock);
944 -DEFINE_TIMER(fcoe_timer, NULL, 0, 0);
945 -struct fcoe_percpu_s *fcoe_percpu[NR_CPUS];
946 -
947 -static struct scsi_transport_template *fcoe_transport_template;
948 -
949 -static int fcoe_reset(struct Scsi_Host *shost)
950 -{
951 - struct fc_lport *lport = shost_priv(shost);
952 - fc_lport_reset(lport);
953 - return 0;
954 -}
955 +static struct scsi_transport_template *scsi_transport_fcoe_sw;
956
957 -struct fc_function_template fcoe_transport_function = {
958 +struct fc_function_template fcoe_sw_transport_function = {
959 .show_host_node_name = 1,
960 .show_host_port_name = 1,
961 .show_host_supported_classes = 1,
962 @@ -101,60 +85,10 @@ struct fc_function_template fcoe_transport_function = {
963 .terminate_rport_io = fc_rport_terminate_io,
964 };
965
966 -static struct fcoe_softc *fcoe_find_fc_lport(const struct net_device *netdev)
967 -{
968 - struct fcoe_softc *fc;
969 -
970 - read_lock(&fcoe_hostlist_lock);
971 - list_for_each_entry(fc, &fcoe_hostlist, list) {
972 - if (fc->real_dev == netdev) {
973 - read_unlock(&fcoe_hostlist_lock);
974 - return fc;
975 - }
976 - }
977 - read_unlock(&fcoe_hostlist_lock);
978 - return NULL;
979 -}
980 -
981 -/*
982 - * Convert 48-bit IEEE MAC address to 64-bit FC WWN.
983 - */
984 -static u64 fcoe_wwn_from_mac(unsigned char mac[MAX_ADDR_LEN],
985 - unsigned int scheme, unsigned int port)
986 -{
987 - u64 wwn;
988 - u64 host_mac;
989 -
990 - /* The MAC is in NO, so flip only the low 48 bits */
991 - host_mac = ((u64) mac[0] << 40) |
992 - ((u64) mac[1] << 32) |
993 - ((u64) mac[2] << 24) |
994 - ((u64) mac[3] << 16) |
995 - ((u64) mac[4] << 8) |
996 - (u64) mac[5];
997 -
998 - WARN_ON(host_mac >= (1ULL << 48));
999 - wwn = host_mac | ((u64) scheme << 60);
1000 - switch (scheme) {
1001 - case 1:
1002 - WARN_ON(port != 0);
1003 - break;
1004 - case 2:
1005 - WARN_ON(port >= 0xfff);
1006 - wwn |= (u64) port << 48;
1007 - break;
1008 - default:
1009 - WARN_ON(1);
1010 - break;
1011 - }
1012 -
1013 - return wwn;
1014 -}
1015 -
1016 -static struct scsi_host_template fcoe_driver_template = {
1017 +static struct scsi_host_template fcoe_sw_shost_template = {
1018 .module = THIS_MODULE,
1019 .name = "FCoE Driver",
1020 - .proc_name = FCOE_DRIVER_NAME,
1021 + .proc_name = FCOE_SW_NAME,
1022 .queuecommand = fc_queuecommand,
1023 .eh_abort_handler = fc_eh_abort,
1024 .eh_device_reset_handler = fc_eh_device_reset,
1025 @@ -170,138 +104,18 @@ static struct scsi_host_template fcoe_driver_template = {
1026 .max_sectors = 0xffff,
1027 };
1028
1029 -int fcoe_destroy_interface(struct net_device *netdev)
1030 -{
1031 - int cpu, idx;
1032 - struct fcoe_percpu_s *pp;
1033 - struct fcoe_softc *fc;
1034 - struct fcoe_rcv_info *fr;
1035 - struct sk_buff_head *list;
1036 - struct sk_buff *skb, *next;
1037 - struct sk_buff *head;
1038 - struct fc_lport *lp;
1039 - u8 flogi_maddr[ETH_ALEN];
1040 -
1041 - fc = fcoe_find_fc_lport(netdev);
1042 - if (!fc)
1043 - return -ENODEV;
1044 -
1045 - lp = fc->lp;
1046 -
1047 - /* Remove the instance from fcoe's list */
1048 - write_lock_bh(&fcoe_hostlist_lock);
1049 - list_del(&fc->list);
1050 - write_unlock_bh(&fcoe_hostlist_lock);
1051 -
1052 - /* Don't listen for Ethernet packets anymore */
1053 - dev_remove_pack(&fc->fcoe_packet_type);
1054 -
1055 - /* Detach from the scsi-ml */
1056 - fc_remove_host(lp->host);
1057 - scsi_remove_host(lp->host);
1058 -
1059 - /* Cleanup the fc_lport */
1060 - fc_lport_destroy(lp);
1061 - fc_fcp_destroy(lp);
1062 - if (lp->emp)
1063 - fc_exch_mgr_free(lp->emp);
1064 -
1065 - /* Delete secondary MAC addresses */
1066 - rtnl_lock();
1067 - memcpy(flogi_maddr, (u8[6]) FC_FCOE_FLOGI_MAC, ETH_ALEN);
1068 - dev_unicast_delete(fc->real_dev, flogi_maddr, ETH_ALEN);
1069 - if (compare_ether_addr(fc->data_src_addr, (u8[6]) { 0 }))
1070 - dev_unicast_delete(fc->real_dev, fc->data_src_addr, ETH_ALEN);
1071 - rtnl_unlock();
1072 -
1073 - /* Free the per-CPU revieve threads */
1074 - for (idx = 0; idx < NR_CPUS; idx++) {
1075 - if (fcoe_percpu[idx]) {
1076 - pp = fcoe_percpu[idx];
1077 - spin_lock_bh(&pp->fcoe_rx_list.lock);
1078 - list = &pp->fcoe_rx_list;
1079 - head = list->next;
1080 - for (skb = head; skb != (struct sk_buff *)list;
1081 - skb = next) {
1082 - next = skb->next;
1083 - fr = fcoe_dev_from_skb(skb);
1084 - if (fr->fr_dev == fc->lp) {
1085 - __skb_unlink(skb, list);
1086 - kfree_skb(skb);
1087 - }
1088 - }
1089 - spin_unlock_bh(&pp->fcoe_rx_list.lock);
1090 - }
1091 - }
1092 -
1093 - /* Free existing skbs */
1094 - fcoe_clean_pending_queue(lp);
1095 -
1096 - /* Free memory used by statistical counters */
1097 - for_each_online_cpu(cpu)
1098 - kfree(lp->dev_stats[cpu]);
1099 -
1100 - /* Release the net_device and Scsi_Host */
1101 - dev_put(fc->real_dev);
1102 - scsi_host_put(lp->host);
1103 - return 0;
1104 -}
1105 -
1106 /*
1107 - * Return zero if link is OK for use by FCoE.
1108 - * Any permanently-disqualifying conditions have been previously checked.
1109 - * This also updates the speed setting, which may change with link for 100/1000.
1110 + * fcoe_sw_lport_config - sets up the fc_lport
1111 + * @lp: ptr to the fc_lport
1112 + *
1113 + * Sets default lport parameters and allocates the per-cpu statistics.
1114 + * Returns: 0 for success
1115 *
1116 - * This function should probably be checking for PAUSE support at some point
1117 - * in the future. Currently Per-priority-pause is not determinable using
1118 - * ethtool, so we shouldn't be restrictive until that problem is resolved.
1119 */
1120 -int fcoe_link_ok(struct fc_lport *lp)
1121 -{
1122 - struct fcoe_softc *fc = (struct fcoe_softc *)lp->drv_priv;
1123 - struct net_device *dev = fc->real_dev;
1124 - struct ethtool_cmd ecmd = { ETHTOOL_GSET };
1125 - int rc = 0;
1126 -
1127 - if ((dev->flags & IFF_UP) && netif_carrier_ok(dev)) {
1128 - dev = fc->phys_dev;
1129 - if (dev->ethtool_ops->get_settings) {
1130 - dev->ethtool_ops->get_settings(dev, &ecmd);
1131 - lp->link_supported_speeds &=
1132 - ~(FC_PORTSPEED_1GBIT | FC_PORTSPEED_10GBIT);
1133 - if (ecmd.supported & (SUPPORTED_1000baseT_Half |
1134 - SUPPORTED_1000baseT_Full))
1135 - lp->link_supported_speeds |= FC_PORTSPEED_1GBIT;
1136 - if (ecmd.supported & SUPPORTED_10000baseT_Full)
1137 - lp->link_supported_speeds |=
1138 - FC_PORTSPEED_10GBIT;
1139 - if (ecmd.speed == SPEED_1000)
1140 - lp->link_speed = FC_PORTSPEED_1GBIT;
1141 - if (ecmd.speed == SPEED_10000)
1142 - lp->link_speed = FC_PORTSPEED_10GBIT;
1143 - }
1144 - } else
1145 - rc = -1;
1146 -
1147 - return rc;
1148 -}
1149 -
1150 -static struct libfc_function_template fcoe_libfc_fcn_templ = {
1151 - .frame_send = fcoe_xmit,
1152 -};
1153 -
1154 -static int lport_config(struct fc_lport *lp, struct Scsi_Host *shost)
1155 +static int fcoe_sw_lport_config(struct fc_lport *lp)
1156 {
1157 int i = 0;
1158
1159 - lp->host = shost;
1160 - lp->drv_priv = (void *)(lp + 1);
1161 -
1162 - lp->emp = fc_exch_mgr_alloc(lp, FC_CLASS_3,
1163 - FCOE_MIN_XID, FCOE_MAX_XID);
1164 - if (!lp->emp)
1165 - return -ENOMEM;
1166 -
1167 lp->link_status = 0;
1168 lp->max_retry_count = 3;
1169 lp->e_d_tov = 2 * 1000; /* FC-FS default */
1170 @@ -316,25 +130,39 @@ static int lport_config(struct fc_lport *lp, struct Scsi_Host *shost)
1171 lp->dev_stats[i] = kzalloc(sizeof(struct fcoe_dev_stats),
1172 GFP_KERNEL);
1173
1174 - /* Finish fc_lport configuration */
1175 + /* lport fc_lport related configuration */
1176 fc_lport_config(lp);
1177
1178 return 0;
1179 }
1180
1181 -static int net_config(struct fc_lport *lp)
1182 +/*
1183 + * fcoe_sw_netdev_config - sets up fcoe_softc for lport and network
1184 + * related properties
1185 + * @lp : ptr to the fc_lport
1186 + * @netdev : ptr to the associated netdevice struct
1187 + *
1188 + * Must be called after fcoe_sw_lport_config() as it will use lport mutex
1189 + *
1190 + * Returns : 0 for success
1191 + *
1192 + */
1193 +static int fcoe_sw_netdev_config(struct fc_lport *lp, struct net_device *netdev)
1194 {
1195 u32 mfs;
1196 u64 wwnn, wwpn;
1197 - struct net_device *net_dev;
1198 - struct fcoe_softc *fc = (struct fcoe_softc *)lp->drv_priv;
1199 + struct fcoe_softc *fc;
1200 u8 flogi_maddr[ETH_ALEN];
1201
1202 + /* Setup lport private data to point to fcoe softc */
1203 + fc = lport_priv(lp);
1204 + fc->lp = lp;
1205 + fc->real_dev = netdev;
1206 + fc->phys_dev = netdev;
1207 +
1208 /* Require support for get_pauseparam ethtool op. */
1209 - net_dev = fc->real_dev;
1210 - if (net_dev->priv_flags & IFF_802_1Q_VLAN)
1211 - net_dev = vlan_dev_real_dev(net_dev);
1212 - fc->phys_dev = net_dev;
1213 + if (netdev->priv_flags & IFF_802_1Q_VLAN)
1214 + fc->phys_dev = vlan_dev_real_dev(netdev);
1215
1216 /* Do not support for bonding device */
1217 if ((fc->real_dev->priv_flags & IFF_MASTER_ALB) ||
1218 @@ -356,6 +184,7 @@ static int net_config(struct fc_lport *lp)
1219 if (!fcoe_link_ok(lp))
1220 lp->link_status |= FC_LINK_UP;
1221
1222 + /* offload features support */
1223 if (fc->real_dev->features & NETIF_F_SG)
1224 lp->sg_supp = 1;
1225
1226 @@ -394,96 +223,210 @@ static int net_config(struct fc_lport *lp)
1227 return 0;
1228 }
1229
1230 -static void shost_config(struct fc_lport *lp)
1231 +/*
1232 + * fcoe_sw_shost_config - sets up fc_lport->host
1233 + * @lp : ptr to the fc_lport
1234 + * @shost : ptr to the associated scsi host
1235 + * @dev : device associated to scsi host
1236 + *
1237 + * Must be called after fcoe_sw_lport_config() and fcoe_sw_netdev_config()
1238 + *
1239 + * Returns : 0 for success
1240 + *
1241 + */
1242 +static int fcoe_sw_shost_config(struct fc_lport *lp, struct Scsi_Host *shost,
1243 + struct device *dev)
1244 {
1245 + int rc = 0;
1246 +
1247 + /* lport scsi host config */
1248 + lp->host = shost;
1249 +
1250 lp->host->max_lun = FCOE_MAX_LUN;
1251 lp->host->max_id = FCOE_MAX_FCP_TARGET;
1252 lp->host->max_channel = 0;
1253 - lp->host->transportt = fcoe_transport_template;
1254 + lp->host->transportt = scsi_transport_fcoe_sw;
1255 +
1256 + /* add the new host to the SCSI-ml */
1257 + rc = scsi_add_host(lp->host, dev);
1258 + if (rc) {
1259 + FC_DBG("fcoe_sw_shost_config:error on scsi_add_host\n");
1260 + return rc;
1261 + }
1262 + sprintf(fc_host_symbolic_name(lp->host), "%s v%s over %s",
1263 + FCOE_SW_NAME, FCOE_SW_VERSION,
1264 + fcoe_netdev(lp)->name);
1265 +
1266 + return 0;
1267 }
1268
1269 -static int libfc_config(struct fc_lport *lp)
1270 +/*
1271 + * fcoe_sw_em_config - allocates em for this lport
1272 + * @lp: the port that the em is to be allocated for
1273 + *
1274 + * Returns : 0 on success
1275 + */
1276 +static inline int fcoe_sw_em_config(struct fc_lport *lp)
1277 {
1278 - /* Set the function pointers set by the LLDD */
1279 - memcpy(&lp->tt, &fcoe_libfc_fcn_templ,
1280 - sizeof(struct libfc_function_template));
1281 + BUG_ON(lp->emp);
1282
1283 - if (fc_fcp_init(lp))
1284 + lp->emp = fc_exch_mgr_alloc(lp, FC_CLASS_3,
1285 + FCOE_MIN_XID, FCOE_MAX_XID);
1286 + if (!lp->emp)
1287 return -ENOMEM;
1288 - fc_exch_init(lp);
1289 - fc_lport_init(lp);
1290 - fc_rport_init(lp);
1291 - fc_disc_init(lp);
1292
1293 return 0;
1294 }
1295
1296 /*
1297 - * This function creates the fcoe interface
1298 - * create struct fcdev which is a shared structure between opefc
1299 - * and transport level protocol.
1300 + * fcoe_sw_destroy - FCoE software HBA tear-down function
1301 + * @netdev: ptr to the associated net_device
1302 + *
1303 + * Returns: 0 for success
1304 */
1305 -int fcoe_create_interface(struct net_device *netdev)
1306 +static int fcoe_sw_destroy(struct net_device *netdev)
1307 {
1308 + int cpu;
1309 + struct fc_lport *lp = NULL;
1310 + struct fcoe_softc *fc;
1311 + u8 flogi_maddr[ETH_ALEN];
1312 +
1313 + BUG_ON(!netdev);
1314 +
1315 + printk(KERN_DEBUG "fcoe_sw_destroy:interface on %s\n",
1316 + netdev->name);
1317 +
1318 + lp = fcoe_hostlist_lookup(netdev);
1319 + if (!lp)
1320 + return -ENODEV;
1321 +
1322 + fc = fcoe_softc(lp);
1323 +
1324 + /* Remove the instance from fcoe's list */
1325 + fcoe_hostlist_remove(lp);
1326 +
1327 + /* Don't listen for Ethernet packets anymore */
1328 + dev_remove_pack(&fc->fcoe_packet_type);
1329 +
1330 + /* Cleanup the fc_lport */
1331 + fc_lport_destroy(lp);
1332 + fc_fcp_destroy(lp);
1333 +
1334 + /* Detach from the scsi-ml */
1335 + fc_remove_host(lp->host);
1336 + scsi_remove_host(lp->host);
1337 +
1338 + /* There are no more rports or I/O, free the EM */
1339 + if (lp->emp)
1340 + fc_exch_mgr_free(lp->emp);
1341 +
1342 + /* Delete secondary MAC addresses */
1343 + rtnl_lock();
1344 + memcpy(flogi_maddr, (u8[6]) FC_FCOE_FLOGI_MAC, ETH_ALEN);
1345 + dev_unicast_delete(fc->real_dev, flogi_maddr, ETH_ALEN);
1346 + if (compare_ether_addr(fc->data_src_addr, (u8[6]) { 0 }))
1347 + dev_unicast_delete(fc->real_dev, fc->data_src_addr, ETH_ALEN);
1348 + rtnl_unlock();
1349 +
1350 + /* Free the per-CPU receive threads */
1351 + fcoe_percpu_clean(lp);
1352 +
1353 + /* Free existing skbs */
1354 + fcoe_clean_pending_queue(lp);
1355 +
1356 + /* Free memory used by statistical counters */
1357 + for_each_online_cpu(cpu)
1358 + kfree(lp->dev_stats[cpu]);
1359 +
1360 + /* Release the net_device and Scsi_Host */
1361 + dev_put(fc->real_dev);
1362 + scsi_host_put(lp->host);
1363 +
1364 + return 0;
1365 +}
1366 +
1367 +static struct libfc_function_template fcoe_sw_libfc_fcn_templ = {
1368 + .frame_send = fcoe_xmit,
1369 +};
1370 +
1371 +/*
1372 + * fcoe_sw_create - this function creates the fcoe interface
1373 + * @netdev: pointer the associated netdevice
1374 + *
1375 + * Creates fc_lport struct and scsi_host for lport, configures lport
1376 + * and starts fabric login.
1377 + *
1378 + * Returns : 0 on success
1379 + */
1380 +static int fcoe_sw_create(struct net_device *netdev)
1381 +{
1382 + int rc;
1383 struct fc_lport *lp = NULL;
1384 struct fcoe_softc *fc;
1385 struct Scsi_Host *shost;
1386 - int rc = 0;
1387
1388 - if (fcoe_find_fc_lport(netdev) != NULL)
1389 + BUG_ON(!netdev);
1390 +
1391 + printk(KERN_DEBUG "fcoe_sw_create:interface on %s\n",
1392 + netdev->name);
1393 +
1394 + lp = fcoe_hostlist_lookup(netdev);
1395 + if (lp)
1396 return -EEXIST;
1397
1398 - shost = scsi_host_alloc(&fcoe_driver_template,
1399 - sizeof(struct fc_lport) +
1400 + shost = fcoe_host_alloc(&fcoe_sw_shost_template,
1401 sizeof(struct fcoe_softc));
1402 -
1403 if (!shost) {
1404 FC_DBG("Could not allocate host structure\n");
1405 return -ENOMEM;
1406 }
1407 -
1408 lp = shost_priv(shost);
1409 - rc = lport_config(lp, shost);
1410 - if (rc)
1411 - goto out_host_put;
1412 -
1413 - /* Configure the fcoe_softc */
1414 - fc = (struct fcoe_softc *)lp->drv_priv;
1415 - fc->lp = lp;
1416 - fc->real_dev = netdev;
1417 - shost_config(lp);
1418 + fc = lport_priv(lp);
1419
1420 + /* configure fc_lport, e.g., em */
1421 + rc = fcoe_sw_lport_config(lp);
1422 + if (rc) {
1423 + FC_DBG("Could not configure lport\n");
1424 + goto out_host_put;
1425 + }
1426
1427 - /* Add the new host to the SCSI-ml */
1428 - rc = scsi_add_host(lp->host, NULL);
1429 + /* configure lport network properties */
1430 + rc = fcoe_sw_netdev_config(lp, netdev);
1431 if (rc) {
1432 - FC_DBG("error on scsi_add_host\n");
1433 - goto out_lp_destroy;
1434 + FC_DBG("Could not configure netdev for lport\n");
1435 + goto out_host_put;
1436 }
1437
1438 - sprintf(fc_host_symbolic_name(lp->host), "%s v%s over %s",
1439 - FCOE_DRIVER_NAME, FCOE_VERSION,
1440 - netdev->name);
1441 + /* configure lport scsi host properties */
1442 + rc = fcoe_sw_shost_config(lp, shost, &netdev->dev);
1443 + if (rc) {
1444 + FC_DBG("Could not configure shost for lport\n");
1445 + goto out_host_put;
1446 + }
1447
1448 - /* Configure netdev and networking properties of the lp */
1449 - rc = net_config(lp);
1450 - if (rc)
1451 - goto out_lp_destroy;
1452 + /* lport exch manager allocation */
1453 + rc = fcoe_sw_em_config(lp);
1454 + if (rc) {
1455 + FC_DBG("Could not configure em for lport\n");
1456 + goto out_host_put;
1457 + }
1458
1459 /* Initialize the library */
1460 - rc = libfc_config(lp);
1461 - if (rc)
1462 + rc = fcoe_libfc_config(lp, &fcoe_sw_libfc_fcn_templ);
1463 + if (rc) {
1464 + FC_DBG("Could not configure libfc for lport!\n");
1465 goto out_lp_destroy;
1466 + }
1467
1468 - write_lock_bh(&fcoe_hostlist_lock);
1469 - list_add_tail(&fc->list, &fcoe_hostlist);
1470 - write_unlock_bh(&fcoe_hostlist_lock);
1471 + /* add to lports list */
1472 + fcoe_hostlist_add(lp);
1473
1474 lp->boot_time = jiffies;
1475
1476 fc_fabric_login(lp);
1477
1478 dev_hold(netdev);
1479 +
1480 return rc;
1481
1482 out_lp_destroy:
1483 @@ -493,28 +436,55 @@ out_host_put:
1484 return rc;
1485 }
1486
1487 -void fcoe_clean_pending_queue(struct fc_lport *lp)
1488 +/*
1489 + * fcoe_sw_match - the fcoe sw transport match function
1490 + *
1491 + * Returns : false always
1492 + */
1493 +static bool fcoe_sw_match(struct net_device *netdev)
1494 {
1495 - struct fcoe_softc *fc = lp->drv_priv;
1496 - struct sk_buff *skb;
1497 -
1498 - spin_lock_bh(&fc->fcoe_pending_queue.lock);
1499 - while ((skb = __skb_dequeue(&fc->fcoe_pending_queue)) != NULL) {
1500 - spin_unlock_bh(&fc->fcoe_pending_queue.lock);
1501 - kfree_skb(skb);
1502 - spin_lock_bh(&fc->fcoe_pending_queue.lock);
1503 - }
1504 - spin_unlock_bh(&fc->fcoe_pending_queue.lock);
1505 + /* FIXME - for sw transport, always return false */
1506 + return false;
1507 }
1508
1509 +/* the sw hba fcoe transport */
1510 +struct fcoe_transport fcoe_sw_transport = {
1511 + .name = "fcoesw",
1512 + .create = fcoe_sw_create,
1513 + .destroy = fcoe_sw_destroy,
1514 + .match = fcoe_sw_match,
1515 + .vendor = 0x0,
1516 + .device = 0xffff,
1517 +};
1518 +
1519 +/*
1520 + * fcoe_sw_init - registers fcoe_sw_transport
1521 + *
1522 + * Returns : 0 on success
1523 + */
1524 int __init fcoe_sw_init(void)
1525 {
1526 - fcoe_transport_template =
1527 - fc_attach_transport(&fcoe_transport_function);
1528 - return fcoe_transport_template ? 0 : -1;
1529 + /* attach to scsi transport */
1530 + scsi_transport_fcoe_sw =
1531 + fc_attach_transport(&fcoe_sw_transport_function);
1532 + if (!scsi_transport_fcoe_sw) {
1533 + printk(KERN_ERR "fcoe_sw_init:fc_attach_transport() failed\n");
1534 + return -ENODEV;
1535 + }
1536 + /* register sw transport */
1537 + fcoe_transport_register(&fcoe_sw_transport);
1538 + return 0;
1539 }
1540
1541 -void __exit fcoe_sw_exit(void)
1542 +/*
1543 + * fcoe_sw_exit - unregisters fcoe_sw_transport
1544 + *
1545 + * Returns : 0 on success
1546 + */
1547 +int __exit fcoe_sw_exit(void)
1548 {
1549 - fc_release_transport(fcoe_transport_template);
1550 + /* detach the transport */
1551 + fc_release_transport(scsi_transport_fcoe_sw);
1552 + fcoe_transport_unregister(&fcoe_sw_transport);
1553 + return 0;
1554 }
1555 diff --git a/drivers/scsi/fcoe/libfcoe.c b/drivers/scsi/fcoe/libfcoe.c
1556 index 45a7d6f..de29ccd 100644
1557 --- a/drivers/scsi/fcoe/libfcoe.c
1558 +++ b/drivers/scsi/fcoe/libfcoe.c
1559 @@ -17,10 +17,6 @@
1560 * Maintained at www.Open-FCoE.org
1561 */
1562
1563 -/*
1564 - * FCOE protocol file
1565 - */
1566 -
1567 #include <linux/module.h>
1568 #include <linux/version.h>
1569 #include <linux/kernel.h>
1570 @@ -28,9 +24,15 @@
1571 #include <linux/skbuff.h>
1572 #include <linux/netdevice.h>
1573 #include <linux/etherdevice.h>
1574 +#include <linux/ethtool.h>
1575 #include <linux/if_ether.h>
1576 +#include <linux/if_vlan.h>
1577 #include <linux/kthread.h>
1578 #include <linux/crc32.h>
1579 +#include <linux/cpu.h>
1580 +#include <linux/fs.h>
1581 +#include <linux/sysfs.h>
1582 +#include <linux/ctype.h>
1583 #include <scsi/scsi_tcq.h>
1584 #include <scsi/scsicam.h>
1585 #include <scsi/scsi_transport.h>
1586 @@ -39,11 +41,10 @@
1587
1588 #include <scsi/fc/fc_encaps.h>
1589
1590 -#include <scsi/libfc/libfc.h>
1591 -#include <scsi/libfc/fc_frame.h>
1592 -
1593 -#include <scsi/fc/fc_fcoe.h>
1594 -#include "fcoe_def.h"
1595 +#include <scsi/libfc.h>
1596 +#include <scsi/fc_frame.h>
1597 +#include <scsi/libfcoe.h>
1598 +#include <scsi/fc_transport_fcoe.h>
1599
1600 static int debug_fcoe;
1601
1602 @@ -53,18 +54,129 @@ static int debug_fcoe;
1603 #define FCOE_GW_ADDR_MODE 0x00
1604 #define FCOE_FCOUI_ADDR_MODE 0x01
1605
1606 +#define FCOE_WORD_TO_BYTE 4
1607 +
1608 +MODULE_AUTHOR("Open-FCoE.org");
1609 +MODULE_DESCRIPTION("FCoE");
1610 +MODULE_LICENSE("GPL");
1611 +MODULE_VERSION("1.0.4");
1612 +
1613 +/* fcoe host list */
1614 +LIST_HEAD(fcoe_hostlist);
1615 +DEFINE_RWLOCK(fcoe_hostlist_lock);
1616 +DEFINE_TIMER(fcoe_timer, NULL, 0, 0);
1617 +struct fcoe_percpu_s *fcoe_percpu[NR_CPUS];
1618 +
1619 +
1620 /* Function Prototyes */
1621 static int fcoe_check_wait_queue(struct fc_lport *);
1622 static void fcoe_insert_wait_queue_head(struct fc_lport *, struct sk_buff *);
1623 static void fcoe_insert_wait_queue(struct fc_lport *, struct sk_buff *);
1624 static void fcoe_recv_flogi(struct fcoe_softc *, struct fc_frame *, u8 *);
1625 +#ifdef CONFIG_HOTPLUG_CPU
1626 +static int fcoe_cpu_callback(struct notifier_block *, ulong, void *);
1627 +#endif /* CONFIG_HOTPLUG_CPU */
1628 +static int fcoe_device_notification(struct notifier_block *, ulong, void *);
1629 +static void fcoe_dev_setup(void);
1630 +static void fcoe_dev_cleanup(void);
1631 +
1632 +/* notification function from net device */
1633 +static struct notifier_block fcoe_notifier = {
1634 + .notifier_call = fcoe_device_notification,
1635 +};
1636 +
1637 +
1638 +#ifdef CONFIG_HOTPLUG_CPU
1639 +static struct notifier_block fcoe_cpu_notifier = {
1640 + .notifier_call = fcoe_cpu_callback,
1641 +};
1642 +
1643 +/**
1644 + * fcoe_create_percpu_data - creates the associated cpu data
1645 + * @cpu: index for the cpu where fcoe cpu data will be created
1646 + *
1647 + * creates the percpu stats block, called from the cpu add notifier
1648 + *
1649 + * Returns: none
1650 + **/
1651 +static void fcoe_create_percpu_data(int cpu)
1652 +{
1653 + struct fc_lport *lp;
1654 + struct fcoe_softc *fc;
1655
1656 -/*
1657 - * this is the fcoe receive function
1658 - * called by NET_RX_SOFTIRQ
1659 - * this function will receive the packet and
1660 - * build fc frame and pass it up
1661 - */
1662 + write_lock_bh(&fcoe_hostlist_lock);
1663 + list_for_each_entry(fc, &fcoe_hostlist, list) {
1664 + lp = fc->lp;
1665 + if (lp->dev_stats[cpu] == NULL)
1666 + lp->dev_stats[cpu] =
1667 + kzalloc(sizeof(struct fcoe_dev_stats),
1668 + GFP_KERNEL);
1669 + }
1670 + write_unlock_bh(&fcoe_hostlist_lock);
1671 +}
1672 +
1673 +/**
1674 + * fcoe_destroy_percpu_data - destroys the associated cpu data
1675 + * @cpu: index for the cpu where fcoe cpu data will be destroyed
1676 + *
1677 + * destroy percpu stats block called by cpu add/remove notifier
1678 + *
1679 + * Returns: none
1680 + **/
1681 +static void fcoe_destroy_percpu_data(int cpu)
1682 +{
1683 + struct fc_lport *lp;
1684 + struct fcoe_softc *fc;
1685 +
1686 + write_lock_bh(&fcoe_hostlist_lock);
1687 + list_for_each_entry(fc, &fcoe_hostlist, list) {
1688 + lp = fc->lp;
1689 + kfree(lp->dev_stats[cpu]);
1690 + lp->dev_stats[cpu] = NULL;
1691 + }
1692 + write_unlock_bh(&fcoe_hostlist_lock);
1693 +}
1694 +
1695 +/**
1696 + * fcoe_cpu_callback - fcoe cpu hotplug event callback
1697 + * @nfb: callback data block
1698 + * @action: event triggering the callback
1699 + * @hcpu: index for the cpu of this event
1700 + *
1701 + * this creates or destroys per cpu data for fcoe
1702 + *
1703 + * Returns NOTIFY_OK always.
1704 + **/
1705 +static int fcoe_cpu_callback(struct notifier_block *nfb, unsigned long action,
1706 + void *hcpu)
1707 +{
1708 + unsigned int cpu = (unsigned long)hcpu;
1709 +
1710 + switch (action) {
1711 + case CPU_ONLINE:
1712 + fcoe_create_percpu_data(cpu);
1713 + break;
1714 + case CPU_DEAD:
1715 + fcoe_destroy_percpu_data(cpu);
1716 + break;
1717 + default:
1718 + break;
1719 + }
1720 + return NOTIFY_OK;
1721 +}
1722 +#endif /* CONFIG_HOTPLUG_CPU */
1723 +
1724 +/**
1725 + * fcoe_rcv - this is the fcoe receive function called by NET_RX_SOFTIRQ
1726 + * @skb: the receive skb
1727 + * @dev: associated net device
1728 + * @ptype: context
1729 + * @olddev: last device
1730 + *
1731 + * this function will receive the packet and build fc frame and pass it up
1732 + *
1733 + * Returns: 0 for success
1734 + **/
1735 int fcoe_rcv(struct sk_buff *skb, struct net_device *dev,
1736 struct packet_type *ptype, struct net_device *olddev)
1737 {
1738 @@ -142,7 +254,14 @@ err2:
1739 kfree_skb(skb);
1740 return -1;
1741 }
1742 +EXPORT_SYMBOL_GPL(fcoe_rcv);
1743
1744 +/**
1745 + * fcoe_start_io - pass to netdev to start xmit for fcoe
1746 + * @skb: the skb to be xmitted
1747 + *
1748 + * Returns: 0 for success
1749 + **/
1750 static inline int fcoe_start_io(struct sk_buff *skb)
1751 {
1752 int rc;
1753 @@ -155,6 +274,13 @@ static inline int fcoe_start_io(struct sk_buff *skb)
1754 return 0;
1755 }
1756
1757 +/**
1758 + * fcoe_get_paged_crc_eof - in case we need to alloc a page for crc_eof
1759 + * @skb: the skb to be xmitted
1760 + * @tlen: total len
1761 + *
1762 + * Returns: 0 for success
1763 + **/
1764 static int fcoe_get_paged_crc_eof(struct sk_buff *skb, int tlen)
1765 {
1766 struct fcoe_percpu_s *fps;
1767 @@ -191,12 +317,53 @@ static int fcoe_get_paged_crc_eof(struct sk_buff *skb, int tlen)
1768 return 0;
1769 }
1770
1771 -/*
1772 - * this is the frame xmit routine
1773 - */
1774 +/**
1775 + * fcoe_fc_crc - calculates FC CRC in this fcoe skb
1776 + * @fp: the fc_frame containing data to be checksummed
1777 + *
1778 + * This uses crc32() to calculate the crc for fc frame
1779 + * Return : 32 bit crc
1780 + *
1781 + **/
1782 +u32 fcoe_fc_crc(struct fc_frame *fp)
1783 +{
1784 + struct sk_buff *skb = fp_skb(fp);
1785 + struct skb_frag_struct *frag;
1786 + unsigned char *data;
1787 + unsigned long off, len, clen;
1788 + u32 crc;
1789 + unsigned i;
1790 +
1791 + crc = crc32(~0, skb->data, skb_headlen(skb));
1792 +
1793 + for (i = 0; i < skb_shinfo(skb)->nr_frags; i++) {
1794 + frag = &skb_shinfo(skb)->frags[i];
1795 + off = frag->page_offset;
1796 + len = frag->size;
1797 + while (len > 0) {
1798 + clen = min(len, PAGE_SIZE - (off & ~PAGE_MASK));
1799 + data = kmap_atomic(frag->page + (off >> PAGE_SHIFT),
1800 + KM_SKB_DATA_SOFTIRQ);
1801 + crc = crc32(crc, data + (off & ~PAGE_MASK), clen);
1802 + kunmap_atomic(data, KM_SKB_DATA_SOFTIRQ);
1803 + off += clen;
1804 + len -= clen;
1805 + }
1806 + }
1807 + return crc;
1808 +}
1809 +EXPORT_SYMBOL_GPL(fcoe_fc_crc);
1810 +
1811 +/**
1812 + * fcoe_xmit - FCoE frame transmit function
1813 + * @lp: the associated local port
1814 + * @fp: the fc_frame to be transmitted
1815 + *
1816 + * Return : 0 for success
1817 + *
1818 + **/
1819 int fcoe_xmit(struct fc_lport *lp, struct fc_frame *fp)
1820 {
1821 - int indx;
1822 int wlen, rc = 0;
1823 u32 crc;
1824 struct ethhdr *eh;
1825 @@ -206,15 +373,15 @@ int fcoe_xmit(struct fc_lport *lp, struct fc_frame *fp)
1826 struct fc_frame_header *fh;
1827 unsigned int hlen; /* header length implies the version */
1828 unsigned int tlen; /* trailer length */
1829 + unsigned int elen; /* eth header, may include vlan */
1830 int flogi_in_progress = 0;
1831 struct fcoe_softc *fc;
1832 - void *data;
1833 u8 sof, eof;
1834 struct fcoe_hdr *hp;
1835
1836 WARN_ON((fr_len(fp) % sizeof(u32)) != 0);
1837
1838 - fc = (struct fcoe_softc *)lp->drv_priv;
1839 + fc = fcoe_softc(lp);
1840 /*
1841 * if it is a flogi then we need to learn gw-addr
1842 * and my own fcid
1843 @@ -243,45 +410,24 @@ int fcoe_xmit(struct fc_lport *lp, struct fc_frame *fp)
1844 sof = fr_sof(fp);
1845 eof = fr_eof(fp);
1846
1847 - crc = ~0;
1848 - crc = crc32(crc, skb->data, skb_headlen(skb));
1849 -
1850 - for (indx = 0; indx < skb_shinfo(skb)->nr_frags; indx++) {
1851 - skb_frag_t *frag = &skb_shinfo(skb)->frags[indx];
1852 - unsigned long off = frag->page_offset;
1853 - unsigned long len = frag->size;
1854 -
1855 - while (len > 0) {
1856 - unsigned long clen;
1857 -
1858 - clen = min(len, PAGE_SIZE - (off & ~PAGE_MASK));
1859 - data = kmap_atomic(frag->page + (off >> PAGE_SHIFT),
1860 - KM_SKB_DATA_SOFTIRQ);
1861 - crc = crc32(crc, data + (off & ~PAGE_MASK),
1862 - clen);
1863 - kunmap_atomic(data, KM_SKB_DATA_SOFTIRQ);
1864 - off += clen;
1865 - len -= clen;
1866 - }
1867 - }
1868 -
1869 - /*
1870 - * Get header and trailer lengths.
1871 - * This is temporary code until we get rid of the old protocol.
1872 - * Both versions have essentially the same trailer layout but T11
1873 - * has padding afterwards.
1874 - */
1875 + elen = (fc->real_dev->priv_flags & IFF_802_1Q_VLAN) ?
1876 + sizeof(struct vlan_ethhdr) : sizeof(struct ethhdr);
1877 hlen = sizeof(struct fcoe_hdr);
1878 tlen = sizeof(struct fcoe_crc_eof);
1879 + wlen = (skb->len - tlen + sizeof(crc)) / FCOE_WORD_TO_BYTE;
1880
1881 - /*
1882 - * copy fc crc and eof to the skb buff
1883 - * Use utility buffer in the fc_frame part of the sk_buff for the
1884 - * trailer.
1885 - * We don't do a get_page for this frag, since that page may not be
1886 - * managed that way. So that skb_free() doesn't do that either, we
1887 - * setup the destructor to remove this frag.
1888 - */
1889 + /* crc offload */
1890 + if (likely(lp->crc_offload)) {
1891 + skb->ip_summed = CHECKSUM_COMPLETE;
1892 + skb->csum_start = skb_headroom(skb);
1893 + skb->csum_offset = skb->len;
1894 + crc = 0;
1895 + } else {
1896 + skb->ip_summed = CHECKSUM_NONE;
1897 + crc = fcoe_fc_crc(fp);
1898 + }
1899 +
1900 + /* copy fc crc and eof to the skb buff */
1901 if (skb_is_nonlinear(skb)) {
1902 skb_frag_t *frag;
1903 if (fcoe_get_paged_crc_eof(skb, tlen)) {
1904 @@ -295,22 +441,27 @@ int fcoe_xmit(struct fc_lport *lp, struct fc_frame *fp)
1905 cp = (struct fcoe_crc_eof *)skb_put(skb, tlen);
1906 }
1907
1908 + memset(cp, 0, sizeof(*cp));
1909 cp->fcoe_eof = eof;
1910 cp->fcoe_crc32 = cpu_to_le32(~crc);
1911 - if (tlen == sizeof(*cp))
1912 - memset(cp->fcoe_resvd, 0, sizeof(cp->fcoe_resvd));
1913 - wlen = (skb->len - tlen + sizeof(crc)) / FCOE_WORD_TO_BYTE;
1914
1915 if (skb_is_nonlinear(skb)) {
1916 kunmap_atomic(cp, KM_SKB_DATA_SOFTIRQ);
1917 cp = NULL;
1918 }
1919
1920 - /*
1921 - * Fill in the control structures
1922 - */
1923 - skb->ip_summed = CHECKSUM_NONE;
1924 - eh = (struct ethhdr *)skb_push(skb, hlen + sizeof(struct ethhdr));
1925 + /* adjust skb network/transport offsets to match mac/fcoe/fc */
1926 + skb_push(skb, elen + hlen);
1927 + skb_reset_mac_header(skb);
1928 + skb_set_network_header(skb, elen);
1929 + skb_set_transport_header(skb, elen + hlen);
1930 + skb->mac_len = elen;
1931 + skb->protocol = htons(ETH_P_FCOE);
1932 + skb->dev = fc->real_dev;
1933 +
1934 + /* fill up mac and fcoe headers */
1935 + eh = eth_hdr(skb);
1936 + eh->h_proto = htons(ETH_P_FCOE);
1937 if (fc->address_mode == FCOE_FCOUI_ADDR_MODE)
1938 fc_fcoe_set_mac(eh->h_dest, fh->fh_d_id);
1939 else
1940 @@ -322,24 +473,20 @@ int fcoe_xmit(struct fc_lport *lp, struct fc_frame *fp)
1941 else
1942 memcpy(eh->h_source, fc->data_src_addr, ETH_ALEN);
1943
1944 - eh->h_proto = htons(ETH_P_FCOE);
1945 - skb->protocol = htons(ETH_P_802_3);
1946 - skb_reset_mac_header(skb);
1947 - skb_reset_network_header(skb);
1948 -
1949 - hp = (struct fcoe_hdr *)(eh + 1);
1950 + hp = (struct fcoe_hdr *)skb_network_header(skb);
1951 memset(hp, 0, sizeof(*hp));
1952 if (FC_FCOE_VER)
1953 FC_FCOE_ENCAPS_VER(hp, FC_FCOE_VER);
1954 hp->fcoe_sof = sof;
1955
1956 + /* update tx stats: regardless if LLD fails */
1957 stats = lp->dev_stats[smp_processor_id()];
1958 if (stats) {
1959 stats->TxFrames++;
1960 stats->TxWords += wlen;
1961 }
1962 - skb->dev = fc->real_dev;
1963
1964 + /* send down to lld */
1965 fr_dev(fp) = lp;
1966 if (fc->fcoe_pending_queue.qlen)
1967 rc = fcoe_check_wait_queue(lp);
1968 @@ -355,7 +502,15 @@ int fcoe_xmit(struct fc_lport *lp, struct fc_frame *fp)
1969
1970 return 0;
1971 }
1972 +EXPORT_SYMBOL_GPL(fcoe_xmit);
1973
1974 +/*
1975 + * fcoe_percpu_receive_thread - recv thread per cpu
1976 + * @arg: ptr to the fcoe per cpu struct
1977 + *
1978 + * Return: 0 for success
1979 + *
1980 + */
1981 int fcoe_percpu_receive_thread(void *arg)
1982 {
1983 struct fcoe_percpu_s *p = arg;
1984 @@ -368,7 +523,6 @@ int fcoe_percpu_receive_thread(void *arg)
1985 struct fc_frame_header *fh;
1986 struct sk_buff *skb;
1987 struct fcoe_crc_eof *cp;
1988 - enum fc_sof sof;
1989 struct fc_frame *fp;
1990 u8 *mac = NULL;
1991 struct fcoe_softc *fc;
1992 @@ -411,7 +565,7 @@ int fcoe_percpu_receive_thread(void *arg)
1993 /*
1994 * Save source MAC address before discarding header.
1995 */
1996 - fc = lp->drv_priv;
1997 + fc = lport_priv(lp);
1998 if (unlikely(fc->flogi_progress))
1999 mac = eth_hdr(skb)->h_source;
2000
2001 @@ -422,7 +576,6 @@ int fcoe_percpu_receive_thread(void *arg)
2002 * Check the header and pull it off.
2003 */
2004 hlen = sizeof(struct fcoe_hdr);
2005 -
2006 hp = (struct fcoe_hdr *)skb->data;
2007 if (unlikely(FC_FCOE_DECAPS_VER(hp) != FC_FCOE_VER)) {
2008 if (stats) {
2009 @@ -434,11 +587,10 @@ int fcoe_percpu_receive_thread(void *arg)
2010 kfree_skb(skb);
2011 continue;
2012 }
2013 - sof = hp->fcoe_sof;
2014 - skb_pull(skb, sizeof(*hp));
2015 - fr_len = skb->len - sizeof(struct fcoe_crc_eof);
2016 - skb_trim(skb, fr_len);
2017 + skb_pull(skb, sizeof(struct fcoe_hdr));
2018 tlen = sizeof(struct fcoe_crc_eof);
2019 + fr_len = skb->len - tlen;
2020 + skb_trim(skb, fr_len);
2021
2022 if (unlikely(fr_len > skb->len)) {
2023 if (stats) {
2024 @@ -456,47 +608,61 @@ int fcoe_percpu_receive_thread(void *arg)
2025 stats->RxWords += fr_len / FCOE_WORD_TO_BYTE;
2026 }
2027
2028 - fp = (struct fc_frame *) skb;
2029 - fc_frame_init(fp);
2030 + fp = (struct fc_frame *)skb;
2031 cp = (struct fcoe_crc_eof *)(skb->data + fr_len);
2032 + fc_frame_init(fp);
2033 fr_eof(fp) = cp->fcoe_eof;
2034 - fr_sof(fp) = sof;
2035 + fr_sof(fp) = hp->fcoe_sof;
2036 fr_dev(fp) = lp;
2037
2038 /*
2039 - * Check the CRC here, unless it's solicited data for SCSI.
2040 - * In that case, the SCSI layer can check it during the copy,
2041 - * and it'll be more cache-efficient.
2042 + * We only check the CRC here if no offload is available and the
2043 + * frame is not solicited FCP data; for solicited data the FCP
2044 + * layer checks the CRC during the copy.
2045 */
2046 + if (lp->crc_offload)
2047 + fr_flags(fp) &= ~FCPHF_CRC_UNCHECKED;
2048 + else
2049 + fr_flags(fp) |= FCPHF_CRC_UNCHECKED;
2050 +
2051 fh = fc_frame_header_get(fp);
2052 if (fh->fh_r_ctl == FC_RCTL_DD_SOL_DATA &&
2053 fh->fh_type == FC_TYPE_FCP) {
2054 - fr_flags(fp) |= FCPHF_CRC_UNCHECKED;
2055 fc_exch_recv(lp, lp->emp, fp);
2056 - } else if (le32_to_cpu(cp->fcoe_crc32) ==
2057 - ~crc32(~0, skb->data, fr_len)) {
2058 - if (unlikely(fc->flogi_progress))
2059 - fcoe_recv_flogi(fc, fp, mac);
2060 - fc_exch_recv(lp, lp->emp, fp);
2061 - } else {
2062 - if (debug_fcoe ||
2063 - (stats && stats->InvalidCRCCount < 5)) {
2064 - printk(KERN_WARNING \
2065 - "fcoe: dropping frame with CRC error");
2066 - }
2067 - if (stats) {
2068 + continue;
2069 + }
2070 + if (fr_flags(fp) & FCPHF_CRC_UNCHECKED) {
2071 + if (le32_to_cpu(cp->fcoe_crc32) !=
2072 + ~crc32(~0, skb->data, fr_len)) {
2073 + if (debug_fcoe || stats->InvalidCRCCount < 5)
2074 + printk(KERN_WARNING "fcoe: dropping "
2075 + "frame with CRC error\n");
2076 stats->InvalidCRCCount++;
2077 stats->ErrorFrames++;
2078 + fc_frame_free(fp);
2079 + continue;
2080 }
2081 - fc_frame_free(fp);
2082 + fr_flags(fp) &= ~FCPHF_CRC_UNCHECKED;
2083 }
2084 + /* non flogi and non data exchanges are handled here */
2085 + if (unlikely(fc->flogi_progress))
2086 + fcoe_recv_flogi(fc, fp, mac);
2087 + fc_exch_recv(lp, lp->emp, fp);
2088 }
2089 return 0;
2090 }
2091
2092 -/*
2093 - * Snoop potential response to FLOGI or even incoming FLOGI.
2094 - */
2095 +/**
2096 + * fcoe_recv_flogi - flogi receive function
2097 + * @fc: associated fcoe_softc
2098 + * @fp: the received frame
2099 + * @sa: the source address of this flogi
2100 + *
2101 + * This is responsible for parsing the flogi response and setting the
2102 + * corresponding mac address for the initiator, either OUI based or GW based.
2103 + *
2104 + * Returns: none
2105 + **/
2106 static void fcoe_recv_flogi(struct fcoe_softc *fc, struct fc_frame *fp, u8 *sa)
2107 {
2108 struct fc_frame_header *fh;
2109 @@ -543,6 +709,16 @@ static void fcoe_recv_flogi(struct fcoe_softc *fc, struct fc_frame *fp, u8 *sa)
2110 }
2111 }
2112
2113 +/**
2114 + * fcoe_watchdog - fcoe timer callback
2115 + * @vp: the timer callback argument
2116 + *
2117 + * This checks the pending queue length for fcoe and puts fcoe into a paused
2118 + * state if FCOE_MAX_QUEUE_DEPTH is reached. This is done for every fc_lport
2119 + * on the fcoe_hostlist.
2120 + *
2121 + * Returns: none
2122 + **/
2123 void fcoe_watchdog(ulong vp)
2124 {
2125 struct fc_lport *lp;
2126 @@ -567,27 +743,23 @@ void fcoe_watchdog(ulong vp)
2127 add_timer(&fcoe_timer);
2128 }
2129
2130 -/*
2131 - * the wait_queue is used when the skb transmit fails. skb will go
2132 - * in the wait_queue which will be emptied by the time function OR
2133 - * by the next skb transmit.
2134 - *
2135 - */
2136
2137 -/*
2138 - * Function name : fcoe_check_wait_queue()
2139 +/**
2140 + * fcoe_check_wait_queue - attempt to drain the fcoe pending xmit queue
2141 + * @lp: the fc_lport whose fcoe_pending_queue is to be drained
2142 + *
2143 *
2144 - * Return Values : 0 or error
2145 + * This empties the wait_queue: it dequeues skbs from the head of the
2146 + * queue and calls fcoe_start_io() for each one. If every skb is
2147 + * transmitted it returns 0; if an error occurs the skb is restored to
2148 + * the wait_queue and transmission is retried later.
2149 *
2150 - * Description : empties the wait_queue
2151 - * dequeue the head of the wait_queue queue and
2152 - * calls fcoe_start_io() for each packet
2153 - * if all skb have been transmitted, return 0
2154 - * if a error occurs, then restore wait_queue and try again
2155 - * later
2156 + * The wait_queue is used when the skb transmit fails. skb will go
2157 + * in the wait_queue which will be emptied by the time function OR
2158 + * by the next skb transmit.
2159 *
2160 - */
2161 -
2162 + * Returns: the remaining fcoe_pending_queue length
2163 + **/
2164 static int fcoe_check_wait_queue(struct fc_lport *lp)
2165 {
2166 int rc, unpause = 0;
2167 @@ -595,7 +767,7 @@ static int fcoe_check_wait_queue(struct fc_lport *lp)
2168 struct sk_buff *skb;
2169 struct fcoe_softc *fc;
2170
2171 - fc = (struct fcoe_softc *)lp->drv_priv;
2172 + fc = fcoe_softc(lp);
2173 spin_lock_bh(&fc->fcoe_pending_queue.lock);
2174
2175 /*
2176 @@ -622,24 +794,714 @@ static int fcoe_check_wait_queue(struct fc_lport *lp)
2177 return fc->fcoe_pending_queue.qlen;
2178 }
2179
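The kernel-doc above describes a drain-and-requeue pattern: dequeue skbs, hand them to fcoe_start_io(), and on the first failure put the skb back at the head of the queue and retry later. A minimal sketch of that pattern over a plain ring buffer; start_io() here is a hypothetical stand-in for the real transmit, not part of the patch:

#include <stdio.h>

#define QLEN 8

struct pkt { int id; };

static struct pkt *queue[QLEN];
static int head, count;

static void enqueue_tail(struct pkt *p)
{
	queue[(head + count) % QLEN] = p;
	count++;
}

/* Hypothetical transmit: fails for odd ids to exercise the requeue path. */
static int start_io(struct pkt *p)
{
	return (p->id & 1) ? -1 : 0;
}

/* Drain the queue; on failure put the packet back at the head and stop. */
static void check_wait_queue(void)
{
	while (count) {
		struct pkt *p = queue[head];

		head = (head + 1) % QLEN;
		count--;
		if (start_io(p)) {
			head = (head + QLEN - 1) % QLEN;
			queue[head] = p;
			count++;
			printf("pkt %d failed, %d left queued for retry\n",
			       p->id, count);
			return;
		}
		printf("sent pkt %d\n", p->id);
	}
}

int main(void)
{
	struct pkt pkts[4] = { {0}, {2}, {1}, {4} };

	for (int i = 0; i < 4; i++)
		enqueue_tail(&pkts[i]);
	check_wait_queue();	/* sends 0 and 2, then stops at 1 */
	check_wait_queue();	/* retries 1, fails again, 4 stays queued */
	return 0;
}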
2180 +/**
2181 + * fcoe_insert_wait_queue_head - puts the skb at the head of the fcoe pending queue
2182 + * @lp: the fc_lport for this skb
2183 + * @skb: the associated skb to be xmitted
2184 + *
2185 + * Returns: none
2186 + **/
2187 static void fcoe_insert_wait_queue_head(struct fc_lport *lp,
2188 struct sk_buff *skb)
2189 {
2190 struct fcoe_softc *fc;
2191
2192 - fc = (struct fcoe_softc *)lp->drv_priv;
2193 + fc = fcoe_softc(lp);
2194 spin_lock_bh(&fc->fcoe_pending_queue.lock);
2195 __skb_queue_head(&fc->fcoe_pending_queue, skb);
2196 spin_unlock_bh(&fc->fcoe_pending_queue.lock);
2197 }
2198
2199 +/**
2200 + * fcoe_insert_wait_queue - puts the skb at the tail of the fcoe pending queue
2201 + * @lp: the fc_lport for this skb
2202 + * @skb: the associated skb to be xmitted
2203 + *
2204 + * Returns: none
2205 + **/
2206 static void fcoe_insert_wait_queue(struct fc_lport *lp,
2207 struct sk_buff *skb)
2208 {
2209 struct fcoe_softc *fc;
2210
2211 - fc = (struct fcoe_softc *)lp->drv_priv;
2212 + fc = fcoe_softc(lp);
2213 spin_lock_bh(&fc->fcoe_pending_queue.lock);
2214 __skb_queue_tail(&fc->fcoe_pending_queue, skb);
2215 spin_unlock_bh(&fc->fcoe_pending_queue.lock);
2216 }
2217 +
2218 +/**
2219 + * fcoe_dev_setup - setup link change notification interface
2220 + *
2221 + **/
2222 +static void fcoe_dev_setup(void)
2223 +{
2224 + /*
2225 + * register a netdevice notifier so that link state
2226 + * changes on the interface are reported to fcoe
2227 + */
2228 + register_netdevice_notifier(&fcoe_notifier);
2229 +}
2230 +
2231 +/**
2232 + * fcoe_dev_cleanup - cleanup link change notification interface
2233 + **/
2234 +static void fcoe_dev_cleanup(void)
2235 +{
2236 + unregister_netdevice_notifier(&fcoe_notifier);
2237 +}
2238 +
2239 +/**
2240 + * fcoe_device_notification - netdev event notification callback
2241 + * @notifier: context of the notification
2242 + * @event: type of event
2243 + * @ptr: ptr to the net_device the event occurred on
2244 + *
2245 + * This function is called by the ethernet driver in case of a link change event
2246 + *
2247 + * Returns: NOTIFY_OK, or NOTIFY_DONE if no matching fcoe interface is found
2248 + **/
2249 +static int fcoe_device_notification(struct notifier_block *notifier,
2250 + ulong event, void *ptr)
2251 +{
2252 + struct fc_lport *lp = NULL;
2253 + struct net_device *real_dev = ptr;
2254 + struct fcoe_softc *fc;
2255 + struct fcoe_dev_stats *stats;
2256 + u16 new_status;
2257 + u32 mfs;
2258 + int rc = NOTIFY_OK;
2259 +
2260 + read_lock(&fcoe_hostlist_lock);
2261 + list_for_each_entry(fc, &fcoe_hostlist, list) {
2262 + if (fc->real_dev == real_dev) {
2263 + lp = fc->lp;
2264 + break;
2265 + }
2266 + }
2267 + read_unlock(&fcoe_hostlist_lock);
2268 + if (lp == NULL) {
2269 + rc = NOTIFY_DONE;
2270 + goto out;
2271 + }
2272 +
2273 + new_status = lp->link_status;
2274 + switch (event) {
2275 + case NETDEV_DOWN:
2276 + case NETDEV_GOING_DOWN:
2277 + new_status &= ~FC_LINK_UP;
2278 + break;
2279 + case NETDEV_UP:
2280 + case NETDEV_CHANGE:
2281 + new_status &= ~FC_LINK_UP;
2282 + if (!fcoe_link_ok(lp))
2283 + new_status |= FC_LINK_UP;
2284 + break;
2285 + case NETDEV_CHANGEMTU:
2286 + mfs = fc->real_dev->mtu -
2287 + (sizeof(struct fcoe_hdr) +
2288 + sizeof(struct fcoe_crc_eof));
2289 + if (fc->user_mfs && fc->user_mfs < mfs)
2290 + mfs = fc->user_mfs;
2291 + if (mfs >= FC_MIN_MAX_FRAME)
2292 + fc_set_mfs(lp, mfs);
2293 + new_status &= ~FC_LINK_UP;
2294 + if (!fcoe_link_ok(lp))
2295 + new_status |= FC_LINK_UP;
2296 + break;
2297 + case NETDEV_REGISTER:
2298 + break;
2299 + default:
2300 + FC_DBG("unknown event %ld call", event);
2301 + }
2302 + if (lp->link_status != new_status) {
2303 + if ((new_status & FC_LINK_UP) == FC_LINK_UP)
2304 + fc_linkup(lp);
2305 + else {
2306 + stats = lp->dev_stats[smp_processor_id()];
2307 + if (stats)
2308 + stats->LinkFailureCount++;
2309 + fc_linkdown(lp);
2310 + fcoe_clean_pending_queue(lp);
2311 + }
2312 + }
2313 +out:
2314 + return rc;
2315 +}
2316 +
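On NETDEV_CHANGEMTU the maximum FC frame size is re-derived from the new MTU by subtracting the FCoE encapsulation header and the CRC/EOF trailer, clamped by any user-configured MFS and accepted only above FC_MIN_MAX_FRAME. A sketch of that arithmetic; the struct layouts and the 256-byte minimum are assumptions that mirror the kernel definitions, and fcoe_mfs_from_mtu() is an illustrative helper, not a function from the patch:

#include <stdio.h>
#include <stdint.h>

/* Assumed layouts, mirroring the kernel's FCoE encapsulation structs. */
struct fcoe_hdr {
	uint8_t fcoe_ver;
	uint8_t fcoe_resvd[12];
	uint8_t fcoe_sof;
};

struct fcoe_crc_eof {
	uint32_t fcoe_crc32;
	uint8_t fcoe_eof;
	uint8_t fcoe_resvd[3];
};

#define FC_MIN_MAX_FRAME 256	/* assumed minimum acceptable MFS */

static unsigned int fcoe_mfs_from_mtu(unsigned int mtu, unsigned int user_mfs)
{
	unsigned int mfs = mtu - (sizeof(struct fcoe_hdr) +
				  sizeof(struct fcoe_crc_eof));

	if (user_mfs && user_mfs < mfs)
		mfs = user_mfs;
	return (mfs >= FC_MIN_MAX_FRAME) ? mfs : 0;	/* 0: keep the old MFS */
}

int main(void)
{
	printf("mtu 2500 -> mfs %u\n", fcoe_mfs_from_mtu(2500, 0));
	printf("mtu 9000, user mfs 2136 -> mfs %u\n",
	       fcoe_mfs_from_mtu(9000, 2136));
	return 0;
}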
2317 +/**
2318 + * fcoe_if_to_netdev - parse a name buffer to get netdev
2319 + * @buffer: incoming buffer containing the interface name, possibly
2320 + *          followed by trailing newlines
2321 + *
2322 + * Returns: NULL or ptr to the net_device
2323 + **/
2324 +static struct net_device *fcoe_if_to_netdev(const char *buffer)
2325 +{
2326 + char *cp;
2327 + char ifname[IFNAMSIZ + 2];
2328 +
2329 + if (buffer) {
2330 + strlcpy(ifname, buffer, IFNAMSIZ);
2331 + cp = ifname + strlen(ifname);
2332 + while (--cp >= ifname && *cp == '\n')
2333 + *cp = '\0';
2334 + return dev_get_by_name(&init_net, ifname);
2335 + }
2336 + return NULL;
2337 +}
2338 +
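fcoe_if_to_netdev() copies the sysfs buffer into a fixed IFNAMSIZ array and strips trailing newlines before the dev_get_by_name() lookup. The same parsing in user space, with IFNAMSIZ assumed to be the usual 16 and strncpy used in place of the kernel's strlcpy; parse_ifname() is an illustrative helper:

#include <stdio.h>
#include <string.h>

#define IFNAMSIZ 16

/* Copy at most IFNAMSIZ-1 chars and strip trailing newlines, as the sysfs handler does. */
static void parse_ifname(const char *buffer, char *ifname)
{
	char *cp;

	strncpy(ifname, buffer, IFNAMSIZ - 1);
	ifname[IFNAMSIZ - 1] = '\0';
	cp = ifname + strlen(ifname);
	while (--cp >= ifname && *cp == '\n')
		*cp = '\0';
}

int main(void)
{
	char ifname[IFNAMSIZ + 2];

	parse_ifname("eth2\n\n", ifname);
	printf("parsed: '%s'\n", ifname);
	return 0;
}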
2339 +/**
2340 + * fcoe_netdev_to_module_owner - finds out the NIC driver module of the netdev
2341 + * @netdev: the target netdev
2342 + *
2343 + * Returns: ptr to the struct module, NULL for failure
2344 + **/
2345 +static struct module *fcoe_netdev_to_module_owner(
2346 + const struct net_device *netdev)
2347 +{
2348 + struct device *dev;
2349 +
2350 + if (!netdev)
2351 + return NULL;
2352 +
2353 + dev = netdev->dev.parent;
2354 + if (!dev)
2355 + return NULL;
2356 +
2357 + if (!dev->driver)
2358 + return NULL;
2359 +
2360 + return dev->driver->owner;
2361 +}
2362 +
2363 +/**
2364 + * fcoe_ethdrv_get - holds the nic driver module by try_module_get() for
2365 + * the corresponding netdev.
2366 + * @netdev: the target netdev
2367 + *
2368 + * Returns: the try_module_get() result, or -ENODEV if no owner module is found
2369 + **/
2370 +static int fcoe_ethdrv_get(const struct net_device *netdev)
2371 +{
2372 + struct module *owner;
2373 +
2374 + owner = fcoe_netdev_to_module_owner(netdev);
2375 + if (owner) {
2376 + printk(KERN_DEBUG "fcoe: hold driver module %s for %s\n",
2377 + owner->name, netdev->name);
2378 + return try_module_get(owner);
2379 + }
2380 + return -ENODEV;
2381 +}
2382 +
2383 +/**
2384 + * fcoe_ethdrv_put - releases the nic driver module by module_put() for
2385 + * the corresponding netdev.
2386 + * @netdev: the target netdev
2387 + *
2388 + * Returns: 0 for success, -ENODEV if no owner module is found
2389 + **/
2390 +static int fcoe_ethdrv_put(const struct net_device *netdev)
2391 +{
2392 + struct module *owner;
2393 +
2394 + owner = fcoe_netdev_to_module_owner(netdev);
2395 + if (owner) {
2396 + printk(KERN_DEBUG "fcoe: release driver module %s for %s\n",
2397 + owner->name, netdev->name);
2398 + module_put(owner);
2399 + return 0;
2400 + }
2401 + return -ENODEV;
2402 +}
2403 +
2404 +/**
2405 + * fcoe_destroy - handles the destroy call from sysfs
2406 + * @buffer: expected to be an Ethernet interface name
2407 + * @kp: associated kernel param
2408 + *
2409 + * Returns: 0 for success
2410 + **/
2411 +static int fcoe_destroy(const char *buffer, struct kernel_param *kp)
2412 +{
2413 + int rc;
2414 + struct net_device *netdev;
2415 +
2416 + netdev = fcoe_if_to_netdev(buffer);
2417 + if (!netdev) {
2418 + rc = -ENODEV;
2419 + goto out_nodev;
2420 + }
2421 + /* look for existing lport */
2422 + if (!fcoe_hostlist_lookup(netdev)) {
2423 + rc = -ENODEV;
2424 + goto out_putdev;
2425 + }
2426 + /* pass to transport */
2427 + rc = fcoe_transport_release(netdev);
2428 + if (rc) {
2429 + printk(KERN_ERR "fcoe: fcoe_transport_release(%s) failed\n",
2430 + netdev->name);
2431 + rc = -EIO;
2432 + goto out_putdev;
2433 + }
2434 + fcoe_ethdrv_put(netdev);
2435 + rc = 0;
2436 +out_putdev:
2437 + dev_put(netdev);
2438 +out_nodev:
2439 + return rc;
2440 +}
2441 +
2442 +/**
2443 + * fcoe_create - handles the create call from sysfs
2444 + * @buffer: expected to be an Ethernet interface name
2445 + * @kp: associated kernel param
2446 + *
2447 + * Returns: 0 for success
2448 + **/
2449 +static int fcoe_create(const char *buffer, struct kernel_param *kp)
2450 +{
2451 + int rc;
2452 + struct net_device *netdev;
2453 +
2454 + netdev = fcoe_if_to_netdev(buffer);
2455 + if (!netdev) {
2456 + rc = -ENODEV;
2457 + goto out_nodev;
2458 + }
2459 + /* look for existing lport */
2460 + if (fcoe_hostlist_lookup(netdev)) {
2461 + rc = -EEXIST;
2462 + goto out_putdev;
2463 + }
2464 + fcoe_ethdrv_get(netdev);
2465 +
2466 + /* pass to transport */
2467 + rc = fcoe_transport_attach(netdev);
2468 + if (rc) {
2469 + printk(KERN_ERR "fcoe: fcoe_transport_attach(%s) failed\n",
2470 + netdev->name);
2471 + fcoe_ethdrv_put(netdev);
2472 + rc = -EIO;
2473 + goto out_putdev;
2474 + }
2475 + rc = 0;
2476 +out_putdev:
2477 + dev_put(netdev);
2478 +out_nodev:
2479 + return rc;
2480 +}
2481 +
2482 +module_param_call(create, fcoe_create, NULL, NULL, S_IWUSR);
2483 +__MODULE_PARM_TYPE(create, "string");
2484 +MODULE_PARM_DESC(create, "Create fcoe port using net device passed in.");
2485 +module_param_call(destroy, fcoe_destroy, NULL, NULL, S_IWUSR);
2486 +__MODULE_PARM_TYPE(destroy, "string");
2487 +MODULE_PARM_DESC(destroy, "Destroy fcoe port");
2488 +
2489 +/*
2490 + * fcoe_link_ok - check if link is ok for the fc_lport
2491 + * @lp: ptr to the fc_lport
2492 + *
2493 + * Any permanently-disqualifying conditions have been previously checked.
2494 + * This also updates the speed setting, which may change with link for 100/1000.
2495 + *
2496 + * This function should probably be checking for PAUSE support at some point
2497 + * in the future. Currently Per-priority-pause is not determinable using
2498 + * ethtool, so we shouldn't be restrictive until that problem is resolved.
2499 + *
2500 + * Returns: 0 if link is OK for use by FCoE.
2501 + *
2502 + */
2503 +int fcoe_link_ok(struct fc_lport *lp)
2504 +{
2505 + struct fcoe_softc *fc = fcoe_softc(lp);
2506 + struct net_device *dev = fc->real_dev;
2507 + struct ethtool_cmd ecmd = { ETHTOOL_GSET };
2508 + int rc = 0;
2509 +
2510 + if ((dev->flags & IFF_UP) && netif_carrier_ok(dev)) {
2511 + dev = fc->phys_dev;
2512 + if (dev->ethtool_ops->get_settings) {
2513 + dev->ethtool_ops->get_settings(dev, &ecmd);
2514 + lp->link_supported_speeds &=
2515 + ~(FC_PORTSPEED_1GBIT | FC_PORTSPEED_10GBIT);
2516 + if (ecmd.supported & (SUPPORTED_1000baseT_Half |
2517 + SUPPORTED_1000baseT_Full))
2518 + lp->link_supported_speeds |= FC_PORTSPEED_1GBIT;
2519 + if (ecmd.supported & SUPPORTED_10000baseT_Full)
2520 + lp->link_supported_speeds |=
2521 + FC_PORTSPEED_10GBIT;
2522 + if (ecmd.speed == SPEED_1000)
2523 + lp->link_speed = FC_PORTSPEED_1GBIT;
2524 + if (ecmd.speed == SPEED_10000)
2525 + lp->link_speed = FC_PORTSPEED_10GBIT;
2526 + }
2527 + } else
2528 + rc = -1;
2529 +
2530 + return rc;
2531 +}
2532 +EXPORT_SYMBOL_GPL(fcoe_link_ok);
2533 +
2534 +/*
2535 + * fcoe_percpu_clean - frees skb of the corresponding lport from the per
2536 + * cpu queue.
2537 + * @lp: the fc_lport
2538 + */
2539 +void fcoe_percpu_clean(struct fc_lport *lp)
2540 +{
2541 + int idx;
2542 + struct fcoe_percpu_s *pp;
2543 + struct fcoe_rcv_info *fr;
2544 + struct sk_buff_head *list;
2545 + struct sk_buff *skb, *next;
2546 + struct sk_buff *head;
2547 +
2548 + for (idx = 0; idx < NR_CPUS; idx++) {
2549 + if (fcoe_percpu[idx]) {
2550 + pp = fcoe_percpu[idx];
2551 + spin_lock_bh(&pp->fcoe_rx_list.lock);
2552 + list = &pp->fcoe_rx_list;
2553 + head = list->next;
2554 + for (skb = head; skb != (struct sk_buff *)list;
2555 + skb = next) {
2556 + next = skb->next;
2557 + fr = fcoe_dev_from_skb(skb);
2558 + if (fr->fr_dev == lp) {
2559 + __skb_unlink(skb, list);
2560 + kfree_skb(skb);
2561 + }
2562 + }
2563 + spin_unlock_bh(&pp->fcoe_rx_list.lock);
2564 + }
2565 + }
2566 +}
2567 +EXPORT_SYMBOL_GPL(fcoe_percpu_clean);
2568 +
2569 +/**
2570 + * fcoe_clean_pending_queue - dequeue skb and free it
2571 + * @lp: the corresponding fc_lport
2572 + *
2573 + * Returns: none
2574 + **/
2575 +void fcoe_clean_pending_queue(struct fc_lport *lp)
2576 +{
2577 + struct fcoe_softc *fc = lport_priv(lp);
2578 + struct sk_buff *skb;
2579 +
2580 + spin_lock_bh(&fc->fcoe_pending_queue.lock);
2581 + while ((skb = __skb_dequeue(&fc->fcoe_pending_queue)) != NULL) {
2582 + spin_unlock_bh(&fc->fcoe_pending_queue.lock);
2583 + kfree_skb(skb);
2584 + spin_lock_bh(&fc->fcoe_pending_queue.lock);
2585 + }
2586 + spin_unlock_bh(&fc->fcoe_pending_queue.lock);
2587 +}
2588 +EXPORT_SYMBOL_GPL(fcoe_clean_pending_queue);
2589 +
2590 +/**
2591 + * libfc_host_alloc - allocate a Scsi_Host with room for the fc_lport
2592 + * @sht: ptr to the scsi host template
2593 + * @priv_size: size of private data after fc_lport
2594 + *
2595 + * Returns: ptr to Scsi_Host
2596 + * TODO - to libfc?
2597 + */
2598 +static inline struct Scsi_Host *libfc_host_alloc(
2599 + struct scsi_host_template *sht, int priv_size)
2600 +{
2601 + return scsi_host_alloc(sht, sizeof(struct fc_lport) + priv_size);
2602 +}
2603 +
2604 +/**
2605 + * fcoe_host_alloc - allocate a Scsi_Host with room for the fcoe_softc
2606 + * @sht: ptr to the scsi host template
2607 + * @priv_size: size of private data after fc_lport
2608 + *
2609 + * Returns: ptr to Scsi_Host
2610 + */
2611 +struct Scsi_Host *fcoe_host_alloc(struct scsi_host_template *sht, int priv_size)
2612 +{
2613 + return libfc_host_alloc(sht, sizeof(struct fcoe_softc) + priv_size);
2614 +}
2615 +EXPORT_SYMBOL_GPL(fcoe_host_alloc);
2616 +
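libfc_host_alloc() reserves room for the fc_lport immediately after the Scsi_Host, and fcoe_host_alloc() grows that again by a fcoe_softc, so each layer's private data sits at a fixed offset behind the previous one; that stacking is what accessors like lport_priv() and fcoe_softc() resolve. A plain-C sketch of the same layering, with placeholder structs whose only relevant property is their size:

#include <stdio.h>
#include <stdlib.h>

/* Placeholder layer structs; only their sizes and ordering matter here. */
struct scsi_host  { char base[64]; };
struct fc_lport   { char lport[128]; };
struct fcoe_softc { char softc[96]; };

static void *host_alloc(size_t priv_size)
{
	return calloc(1, sizeof(struct scsi_host) + priv_size);
}

/* libfc_host_alloc(): the lport lives right after the host */
static void *libfc_host_alloc_sketch(size_t priv_size)
{
	return host_alloc(sizeof(struct fc_lport) + priv_size);
}

/* fcoe_host_alloc(): the fcoe_softc lives right after the lport */
static void *fcoe_host_alloc_sketch(size_t priv_size)
{
	return libfc_host_alloc_sketch(sizeof(struct fcoe_softc) + priv_size);
}

int main(void)
{
	char *host = fcoe_host_alloc_sketch(0);
	struct fc_lport *lp = (struct fc_lport *)(host + sizeof(struct scsi_host));
	struct fcoe_softc *fc = (struct fcoe_softc *)((char *)lp + sizeof(struct fc_lport));

	printf("host %p, lport at +%zu, softc at +%zu\n", (void *)host,
	       (size_t)((char *)lp - host), (size_t)((char *)fc - host));
	free(host);
	return 0;
}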
2617 +/*
2618 + * fcoe_reset - resets the fcoe
2619 + * @shost: shost the reset is from
2620 + *
2621 + * Returns: always 0
2622 + */
2623 +int fcoe_reset(struct Scsi_Host *shost)
2624 +{
2625 + struct fc_lport *lport = shost_priv(shost);
2626 + fc_lport_reset(lport);
2627 + return 0;
2628 +}
2629 +EXPORT_SYMBOL_GPL(fcoe_reset);
2630 +
2631 +/*
2632 + * fcoe_wwn_from_mac - converts 48-bit IEEE MAC address to 64-bit FC WWN.
2633 + * @mac: mac address
2634 + * @scheme: the WWN naming scheme (NAA) to use
2635 + * @port: port number, used only by scheme 2
2636 + *
2637 + * Returns: u64 fc world wide name
2638 + */
2639 +u64 fcoe_wwn_from_mac(unsigned char mac[MAX_ADDR_LEN],
2640 + unsigned int scheme, unsigned int port)
2641 +{
2642 + u64 wwn;
2643 + u64 host_mac;
2644 +
2645 + /* The MAC is in network order; pack it into the low 48 bits */
2646 + host_mac = ((u64) mac[0] << 40) |
2647 + ((u64) mac[1] << 32) |
2648 + ((u64) mac[2] << 24) |
2649 + ((u64) mac[3] << 16) |
2650 + ((u64) mac[4] << 8) |
2651 + (u64) mac[5];
2652 +
2653 + WARN_ON(host_mac >= (1ULL << 48));
2654 + wwn = host_mac | ((u64) scheme << 60);
2655 + switch (scheme) {
2656 + case 1:
2657 + WARN_ON(port != 0);
2658 + break;
2659 + case 2:
2660 + WARN_ON(port >= 0xfff);
2661 + wwn |= (u64) port << 48;
2662 + break;
2663 + default:
2664 + WARN_ON(1);
2665 + break;
2666 + }
2667 +
2668 + return wwn;
2669 +}
2670 +EXPORT_SYMBOL_GPL(fcoe_wwn_from_mac);
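fcoe_wwn_from_mac() packs the 48-bit MAC into the low bits of the WWN, puts the naming scheme in the top nibble, and for scheme 2 adds a 12-bit port number at bit 48. The same construction as a standalone sketch; wwn_from_mac() is an illustrative helper, not the patch's exported symbol:

#include <stdio.h>
#include <stdint.h>

static uint64_t wwn_from_mac(const uint8_t mac[6], unsigned int scheme,
			     unsigned int port)
{
	uint64_t wwn = ((uint64_t)mac[0] << 40) | ((uint64_t)mac[1] << 32) |
		       ((uint64_t)mac[2] << 24) | ((uint64_t)mac[3] << 16) |
		       ((uint64_t)mac[4] << 8)  |  (uint64_t)mac[5];

	wwn |= (uint64_t)scheme << 60;			/* naming scheme in the top nibble */
	if (scheme == 2)
		wwn |= (uint64_t)(port & 0xfff) << 48;	/* 12-bit port for scheme 2 */
	return wwn;
}

int main(void)
{
	uint8_t mac[6] = { 0x00, 0x1b, 0x21, 0x12, 0x34, 0x56 };

	printf("scheme 1: 0x%016llx\n",
	       (unsigned long long)wwn_from_mac(mac, 1, 0));
	printf("scheme 2, port 1: 0x%016llx\n",
	       (unsigned long long)wwn_from_mac(mac, 2, 1));
	return 0;
}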
2671 +/*
2672 + * fcoe_hostlist_lookup_softc - find the corresponding fcoe_softc by a given device
2673 + * @dev: this is currently a ptr to net_device
2674 + *
2675 + * Returns: NULL or the located fcoe_softc
2676 + */
2677 +static struct fcoe_softc *fcoe_hostlist_lookup_softc(
2678 + const struct net_device *dev)
2679 +{
2680 + struct fcoe_softc *fc;
2681 +
2682 + read_lock(&fcoe_hostlist_lock);
2683 + list_for_each_entry(fc, &fcoe_hostlist, list) {
2684 + if (fc->real_dev == dev) {
2685 + read_unlock(&fcoe_hostlist_lock);
2686 + return fc;
2687 + }
2688 + }
2689 + read_unlock(&fcoe_hostlist_lock);
2690 + return NULL;
2691 +}
2692 +
2693 +/*
2694 + * fcoe_hostlist_lookup - find the corresponding lport by netdev
2695 + * @netdev: ptr to net_device
2696 + *
2697 + * Returns: the associated fc_lport, or NULL if not found
2698 + */
2699 +struct fc_lport *fcoe_hostlist_lookup(const struct net_device *netdev)
2700 +{
2701 + struct fcoe_softc *fc;
2702 +
2703 + fc = fcoe_hostlist_lookup_softc(netdev);
2704 +
2705 + return (fc) ? fc->lp : NULL;
2706 +}
2707 +EXPORT_SYMBOL_GPL(fcoe_hostlist_lookup);
2708 +
2709 +/*
2710 + * fcoe_hostlist_add - add a lport to lports list
2711 + * @lp: ptr to the fc_lport to be added
2712 + *
2713 + * Returns: 0 for success
2714 + */
2715 +int fcoe_hostlist_add(const struct fc_lport *lp)
2716 +{
2717 + struct fcoe_softc *fc;
2718 +
2719 + fc = fcoe_hostlist_lookup_softc(fcoe_netdev(lp));
2720 + if (!fc) {
2721 + fc = fcoe_softc(lp);
2722 + write_lock_bh(&fcoe_hostlist_lock);
2723 + list_add_tail(&fc->list, &fcoe_hostlist);
2724 + write_unlock_bh(&fcoe_hostlist_lock);
2725 + }
2726 + return 0;
2727 +}
2728 +EXPORT_SYMBOL_GPL(fcoe_hostlist_add);
2729 +
2730 +/*
2731 + * fcoe_hostlist_remove - remove a lport from lports list
2732 + * @lp: ptr to the fc_lport to be removed
2733 + *
2734 + * Returns: 0 for success
2735 + */
2736 +int fcoe_hostlist_remove(const struct fc_lport *lp)
2737 +{
2738 + struct fcoe_softc *fc;
2739 +
2740 + fc = fcoe_hostlist_lookup_softc(fcoe_netdev(lp));
2741 + BUG_ON(!fc);
2742 + write_lock_bh(&fcoe_hostlist_lock);
2743 + list_del(&fc->list);
2744 + write_unlock_bh(&fcoe_hostlist_lock);
2745 +
2746 + return 0;
2747 +}
2748 +EXPORT_SYMBOL_GPL(fcoe_hostlist_remove);
2749 +
2750 +/**
2751 + * fcoe_libfc_config - sets up libfc related properties for lport
2752 + * @lp: ptr to the fc_lport
2753 + * @tt: libfc function template
2754 + *
2755 + * Returns : 0 for success
2756 + **/
2757 +int fcoe_libfc_config(struct fc_lport *lp, struct libfc_function_template *tt)
2758 +{
2759 + /* Set the function pointers set by the LLDD */
2760 + memcpy(&lp->tt, tt, sizeof(*tt));
2761 + if (fc_fcp_init(lp))
2762 + return -ENOMEM;
2763 + fc_exch_init(lp);
2764 + fc_elsct_init(lp);
2765 + fc_lport_init(lp);
2766 + fc_rport_init(lp);
2767 + fc_disc_init(lp);
2768 +
2769 + return 0;
2770 +}
2771 +EXPORT_SYMBOL_GPL(fcoe_libfc_config);
2772 +
2773 +/**
2774 + * fcoe_init - fcoe module loading initialization
2775 + *
2776 + * Initialization routine
2777 + * 1. Will create fc transport software structure
2778 + * 2. initialize the linked list of port information structures
2779 + *
2780 + * Returns 0 on success, negative on failure
2781 + **/
2782 +static int __init fcoe_init(void)
2783 +{
2784 + int cpu;
2785 + struct fcoe_percpu_s *p;
2786 +
2787 +
2788 + INIT_LIST_HEAD(&fcoe_hostlist);
2789 + rwlock_init(&fcoe_hostlist_lock);
2790 +
2791 +#ifdef CONFIG_HOTPLUG_CPU
2792 + register_cpu_notifier(&fcoe_cpu_notifier);
2793 +#endif /* CONFIG_HOTPLUG_CPU */
2794 +
2795 + /*
2796 + * initialize per CPU interrupt thread
2797 + */
2798 + for_each_online_cpu(cpu) {
2799 + p = kzalloc(sizeof(struct fcoe_percpu_s), GFP_KERNEL);
2800 + if (p) {
2801 + p->thread = kthread_create(fcoe_percpu_receive_thread,
2802 + (void *)p,
2803 + "fcoethread/%d", cpu);
2804 +
2805 + /*
2806 + * if there is no error then bind the thread to the cpu
2807 + * and initialize the skb queue head
2808 + */
2809 + if (likely(!IS_ERR(p->thread))) {
2810 + p->cpu = cpu;
2811 + fcoe_percpu[cpu] = p;
2812 + skb_queue_head_init(&p->fcoe_rx_list);
2813 + kthread_bind(p->thread, cpu);
2814 + wake_up_process(p->thread);
2815 + } else {
2816 + fcoe_percpu[cpu] = NULL;
2817 + kfree(p);
2818 +
2819 + }
2820 + }
2821 + }
2822 +
2823 + /*
2824 + * setup link change notification
2825 + */
2826 + fcoe_dev_setup();
2827 +
2828 + init_timer(&fcoe_timer);
2829 + fcoe_timer.data = 0;
2830 + fcoe_timer.function = fcoe_watchdog;
2831 + fcoe_timer.expires = (jiffies + (10 * HZ));
2832 + add_timer(&fcoe_timer);
2833 +
2834 + /* initialize the fcoe transport */
2835 + fcoe_transport_init();
2836 +
2837 + fcoe_sw_init();
2838 +
2839 + return 0;
2840 +}
2841 +module_init(fcoe_init);
2842 +
2843 +/**
2844 + * fcoe_exit - fcoe module unloading cleanup
2845 + *
2846 + * Returns: none
2847 + **/
2848 +static void __exit fcoe_exit(void)
2849 +{
2850 + u32 idx;
2851 + struct fcoe_softc *fc, *tmp;
2852 + struct fcoe_percpu_s *p;
2853 + struct sk_buff *skb;
2854 +
2855 + /*
2856 + * Stop all call back interfaces
2857 + */
2858 +#ifdef CONFIG_HOTPLUG_CPU
2859 + unregister_cpu_notifier(&fcoe_cpu_notifier);
2860 +#endif /* CONFIG_HOTPLUG_CPU */
2861 + fcoe_dev_cleanup();
2862 +
2863 + /*
2864 + * stop timer
2865 + */
2866 + del_timer_sync(&fcoe_timer);
2867 +
2868 + /* releases the associated fcoe transport for each lport */
2869 + list_for_each_entry_safe(fc, tmp, &fcoe_hostlist, list)
2870 + fcoe_transport_release(fc->real_dev);
2871 +
2872 + for (idx = 0; idx < NR_CPUS; idx++) {
2873 + if (fcoe_percpu[idx]) {
2874 + kthread_stop(fcoe_percpu[idx]->thread);
2875 + p = fcoe_percpu[idx];
2876 + spin_lock_bh(&p->fcoe_rx_list.lock);
2877 + while ((skb = __skb_dequeue(&p->fcoe_rx_list)) != NULL)
2878 + kfree_skb(skb);
2879 + spin_unlock_bh(&p->fcoe_rx_list.lock);
2880 + if (fcoe_percpu[idx]->crc_eof_page)
2881 + put_page(fcoe_percpu[idx]->crc_eof_page);
2882 + kfree(fcoe_percpu[idx]);
2883 + }
2884 + }
2885 +
2886 + /* remove sw transport */
2887 + fcoe_sw_exit();
2888 +
2889 + /* detach the transport */
2890 + fcoe_transport_exit();
2891 +}
2892 +module_exit(fcoe_exit);
2893 diff --git a/drivers/scsi/libfc/Makefile b/drivers/scsi/libfc/Makefile
2894 index e6d4086..55f982d 100644
2895 --- a/drivers/scsi/libfc/Makefile
2896 +++ b/drivers/scsi/libfc/Makefile
2897 @@ -5,6 +5,7 @@ obj-$(CONFIG_LIBFC) += libfc.o
2898 libfc-objs := \
2899 fc_disc.o \
2900 fc_exch.o \
2901 + fc_elsct.o \
2902 fc_frame.o \
2903 fc_lport.o \
2904 fc_rport.o \
2905 diff --git a/drivers/scsi/libfc/fc_disc.c b/drivers/scsi/libfc/fc_disc.c
2906 index d50f1a5..aee2f9c 100644
2907 --- a/drivers/scsi/libfc/fc_disc.c
2908 +++ b/drivers/scsi/libfc/fc_disc.c
2909 @@ -30,11 +30,13 @@
2910
2911 #include <scsi/fc/fc_gs.h>
2912
2913 -#include <scsi/libfc/libfc.h>
2914 +#include <scsi/libfc.h>
2915
2916 #define FC_DISC_RETRY_LIMIT 3 /* max retries */
2917 #define FC_DISC_RETRY_DELAY 500UL /* (msecs) delay */
2918
2919 +#define FC_DISC_DELAY 3
2920 +
2921 static int fc_disc_debug;
2922
2923 #define FC_DEBUG_DISC(fmt...) \
2924 @@ -43,26 +45,182 @@ static int fc_disc_debug;
2925 FC_DBG(fmt); \
2926 } while (0)
2927
2928 -static void fc_disc_gpn_ft_req(struct fc_lport *);
2929 +static struct mutex disc_list_lock;
2930 +static struct list_head disc_list;
2931 +
2932 +struct fc_disc {
2933 + unsigned char retry_count;
2934 + unsigned char delay;
2935 + unsigned char pending;
2936 + unsigned char requested;
2937 + unsigned short seq_count;
2938 + unsigned char buf_len;
2939 + enum fc_disc_event event;
2940 +
2941 + void (*disc_callback)(struct fc_lport *,
2942 + enum fc_disc_event);
2943 +
2944 + struct list_head rports;
2945 + struct fc_lport *lport;
2946 + struct mutex disc_mutex;
2947 + struct fc_gpn_ft_resp partial_buf; /* partial name buffer */
2948 + struct delayed_work disc_work;
2949 +
2950 + struct list_head list;
2951 +};
2952 +
2953 +static void fc_disc_gpn_ft_req(struct fc_disc *);
2954 static void fc_disc_gpn_ft_resp(struct fc_seq *, struct fc_frame *, void *);
2955 -static int fc_disc_new_target(struct fc_lport *, struct fc_rport *,
2956 +static int fc_disc_new_target(struct fc_disc *, struct fc_rport *,
2957 struct fc_rport_identifiers *);
2958 -static void fc_disc_del_target(struct fc_lport *, struct fc_rport *);
2959 -static void fc_disc_done(struct fc_lport *);
2960 -static void fc_disc_error(struct fc_lport *, struct fc_frame *);
2961 +static void fc_disc_del_target(struct fc_disc *, struct fc_rport *);
2962 +static void fc_disc_done(struct fc_disc *);
2963 static void fc_disc_timeout(struct work_struct *);
2964 -static void fc_disc_single(struct fc_lport *, struct fc_disc_port *);
2965 -static int fc_disc_restart(struct fc_lport *);
2966 +static void fc_disc_single(struct fc_disc *, struct fc_disc_port *);
2967 +static void fc_disc_restart(struct fc_disc *);
2968 +
2969 +/**
2970 + * fc_disc_lookup_rport - lookup a remote port by port_id
2971 + * @lport: Fibre Channel host port instance
2972 + * @port_id: remote port port_id to match
2973 + */
2974 +struct fc_rport *fc_disc_lookup_rport(const struct fc_lport *lport,
2975 + u32 port_id)
2976 +{
2977 + struct fc_disc *disc;
2978 + struct fc_rport *rport, *found = NULL;
2979 + struct fc_rport_libfc_priv *rdata;
2980 + int disc_found = 0;
2981 +
2982 + mutex_lock(&disc_list_lock);
2983 + list_for_each_entry(disc, &disc_list, list) {
2984 + if (disc->lport == lport) {
2985 + list_for_each_entry(rdata, &disc->rports, peers) {
2986 + rport = PRIV_TO_RPORT(rdata);
2987 + if (rport->port_id == port_id) {
2988 + disc_found = 1;
2989 + found = rport;
2990 + get_device(&found->dev);
2991 + break;
2992 + }
2993 + }
2994 + }
2995 + }
2996 + mutex_unlock(&disc_list_lock);
2997 +
2998 + if (!disc_found) {
2999 + FC_DEBUG_DISC("The rport (%6x) for lport (%6x) "
3000 + "is not maintained by the discovery layer\n",
3001 + port_id, fc_host_port_id(lport->host));
3002 + found = NULL;
3003 + }
3004 +
3005 + return found;
3006 +}
3007 +
3008 +/**
3009 + * fc_disc_alloc - Allocate a discovery work object
3010 + * @lport: The FC lport associated with the discovery job
3011 + */
3012 +static inline struct fc_disc *fc_disc_alloc(struct fc_lport *lport)
3013 +{
3014 + struct fc_disc *disc;
3015 +
3016 + disc = kzalloc(sizeof(struct fc_disc), GFP_KERNEL);
3017 + INIT_LIST_HEAD(&disc->list);
3018 + INIT_DELAYED_WORK(&disc->disc_work, fc_disc_timeout);
3019 + mutex_init(&disc->disc_mutex);
3020 + INIT_LIST_HEAD(&disc->rports);
3021 +
3022 + disc->lport = lport;
3023 + disc->delay = FC_DISC_DELAY;
3024 + disc->event = DISC_EV_NONE;
3025 +
3026 + mutex_lock(&disc_list_lock);
3027 + list_add_tail(&disc->list, &disc_list);
3028 + mutex_unlock(&disc_list_lock);
3029 +
3030 + return disc;
3031 +}
3032 +
3033 +/**
3034 + * fc_disc_stop_rports - delete all the remote ports associated with the lport
3035 + * @disc: The discovery job to stop rports on
3036 + *
3037 + * Locking Note: This function expects that the lport mutex is locked before
3038 + * calling it.
3039 + */
3040 +void fc_disc_stop_rports(struct fc_disc *disc)
3041 +{
3042 + struct fc_lport *lport;
3043 + struct fc_rport *rport;
3044 + struct fc_rport_libfc_priv *rdata, *next;
3045 +
3046 + lport = disc->lport;
3047 +
3048 + mutex_lock(&disc->disc_mutex);
3049 + list_for_each_entry_safe(rdata, next, &disc->rports, peers) {
3050 + rport = PRIV_TO_RPORT(rdata);
3051 + list_del(&rdata->peers);
3052 + lport->tt.rport_logoff(rport);
3053 + }
3054 +
3055 + mutex_unlock(&disc->disc_mutex);
3056 +}
3057 +
3058 +/**
3059 + * fc_disc_rport_event - Event handler for rport events
3060 + * @lport: The lport which is receiving the event
3061 + * @rport: The rport which the event has occurred on
3062 + * @event: The event that occurred
3063 + *
3064 + * Locking Note: The rport lock should not be held when calling
3065 + * this function.
3066 + */
3067 +static void fc_disc_rport_event(struct fc_lport *lport,
3068 + struct fc_rport *rport,
3069 + enum fc_lport_event event)
3070 +{
3071 + struct fc_rport_libfc_priv *rdata = rport->dd_data;
3072 + struct fc_disc *disc;
3073 + int found = 0;
3074 +
3075 + FC_DEBUG_DISC("Received a %d event for port (%6x)\n", event,
3076 + rport->port_id);
3077 +
3078 + if (event == RPORT_EV_CREATED) {
3079 + mutex_lock(&disc_list_lock);
3080 + list_for_each_entry(disc, &disc_list, list) {
3081 + if (disc->lport == lport) {
3082 + found = 1;
3083 + mutex_lock(&disc->disc_mutex);
3084 + list_add_tail(&rdata->peers, &disc->rports);
3085 + mutex_unlock(&disc->disc_mutex);
3086 + }
3087 + }
3088 + mutex_unlock(&disc_list_lock);
3089 + }
3090 +
3091 + if (!found)
3092 + FC_DEBUG_DISC("The rport (%6x) is not maintained "
3093 + "by the discovery layer\n", rport->port_id);
3094 +}
3095
3096 /**
3097 * fc_disc_recv_rscn_req - Handle Registered State Change Notification (RSCN)
3098 * @sp: Current sequence of the RSCN exchange
3099 * @fp: RSCN Frame
3100 * @lport: Fibre Channel host port instance
3101 + *
3102 + * Locking Note: This function expects that the disc_mutex is locked
3103 + * before it is called.
3104 */
3105 static void fc_disc_recv_rscn_req(struct fc_seq *sp, struct fc_frame *fp,
3106 - struct fc_lport *lport)
3107 + struct fc_disc *disc)
3108 {
3109 + struct fc_lport *lport;
3110 + struct fc_rport *rport;
3111 + struct fc_rport_libfc_priv *rdata;
3112 struct fc_els_rscn *rp;
3113 struct fc_els_rscn_page *pp;
3114 struct fc_seq_els_data rjt_data;
3115 @@ -70,9 +228,14 @@ static void fc_disc_recv_rscn_req(struct fc_seq *sp, struct fc_frame *fp,
3116 int redisc = 0;
3117 enum fc_els_rscn_ev_qual ev_qual;
3118 enum fc_els_rscn_addr_fmt fmt;
3119 - LIST_HEAD(disc_list);
3120 + LIST_HEAD(disc_ports);
3121 struct fc_disc_port *dp, *next;
3122
3123 + lport = disc->lport;
3124 +
3125 + FC_DEBUG_DISC("Received an RSCN event on port (%6x)\n",
3126 + fc_host_port_id(lport->host));
3127 +
3128 rp = fc_frame_payload_get(fp, sizeof(*rp));
3129
3130 if (!rp || rp->rscn_page_len != sizeof(*pp))
3131 @@ -106,7 +269,7 @@ static void fc_disc_recv_rscn_req(struct fc_seq *sp, struct fc_frame *fp,
3132 dp->ids.port_name = -1;
3133 dp->ids.node_name = -1;
3134 dp->ids.roles = FC_RPORT_ROLE_UNKNOWN;
3135 - list_add_tail(&dp->peers, &disc_list);
3136 + list_add_tail(&dp->peers, &disc_ports);
3137 break;
3138 case ELS_ADDR_FMT_AREA:
3139 case ELS_ADDR_FMT_DOM:
3140 @@ -120,18 +283,20 @@ static void fc_disc_recv_rscn_req(struct fc_seq *sp, struct fc_frame *fp,
3141 lport->tt.seq_els_rsp_send(sp, ELS_LS_ACC, NULL);
3142 if (redisc) {
3143 FC_DEBUG_DISC("RSCN received: rediscovering\n");
3144 - list_for_each_entry_safe(dp, next, &disc_list, peers) {
3145 - list_del(&dp->peers);
3146 - kfree(dp);
3147 - }
3148 - fc_disc_restart(lport);
3149 + fc_disc_restart(disc);
3150 } else {
3151 FC_DEBUG_DISC("RSCN received: not rediscovering. "
3152 "redisc %d state %d in_prog %d\n",
3153 - redisc, lport->state, lport->disc_pending);
3154 - list_for_each_entry_safe(dp, next, &disc_list, peers) {
3155 + redisc, lport->state, disc->pending);
3156 + list_for_each_entry_safe(dp, next, &disc_ports, peers) {
3157 list_del(&dp->peers);
3158 - fc_disc_single(lport, dp);
3159 + rport = lport->tt.rport_lookup(lport, dp->ids.port_id);
3160 + if (rport) {
3161 + rdata = RPORT_TO_PRIV(rport);
3162 + list_del(&rdata->peers);
3163 + lport->tt.rport_logoff(rport);
3164 + }
3165 + fc_disc_single(disc, dp);
3166 }
3167 }
3168 fc_frame_free(fp);
3169 @@ -149,16 +314,39 @@ reject:
3170 * @sp: Current sequence of the request exchange
3171 * @fp: The frame
3172 * @lport: The FC local port
3173 + *
3174 + * Locking Note: This function is called from the EM and will lock
3175 + * the disc_mutex before calling the handler for the
3176 + * request.
3177 */
3178 static void fc_disc_recv_req(struct fc_seq *sp, struct fc_frame *fp,
3179 struct fc_lport *lport)
3180 {
3181 u8 op;
3182 + struct fc_disc *disc;
3183 + int found = 0;
3184 +
3185 + mutex_lock(&disc_list_lock);
3186 + list_for_each_entry(disc, &disc_list, list) {
3187 + if (disc->lport == lport) {
3188 + found = 1;
3189 + break;
3190 + }
3191 + }
3192 + mutex_unlock(&disc_list_lock);
3193 +
3194 + if (!found) {
3195 + FC_DBG("Received a request for an lport not managed "
3196 + "by the discovery engine\n");
3197 + return;
3198 + }
3199
3200 op = fc_frame_payload_op(fp);
3201 switch (op) {
3202 case ELS_RSCN:
3203 - fc_disc_recv_rscn_req(sp, fp, lport);
3204 + mutex_lock(&disc->disc_mutex);
3205 + fc_disc_recv_rscn_req(sp, fp, disc);
3206 + mutex_unlock(&disc->disc_mutex);
3207 break;
3208 default:
3209 FC_DBG("Received an unsupported request. opcode (%x)\n", op);
3210 @@ -168,16 +356,30 @@ static void fc_disc_recv_req(struct fc_seq *sp, struct fc_frame *fp,
3211
3212 /**
3213 * fc_disc_restart - Restart discovery
3214 - * @lport: FC local port
3215 + * @disc: FC discovery context
3216 + *
3217 + * Locking Note: This function expects that the disc mutex
3218 + * is already locked.
3219 */
3220 -static int fc_disc_restart(struct fc_lport *lport)
3221 +static void fc_disc_restart(struct fc_disc *disc)
3222 {
3223 - if (!lport->disc_requested && !lport->disc_pending) {
3224 - schedule_delayed_work(&lport->disc_work,
3225 - msecs_to_jiffies(lport->disc_delay * 1000));
3226 + struct fc_rport *rport;
3227 + struct fc_rport_libfc_priv *rdata, *next;
3228 + struct fc_lport *lport = disc->lport;
3229 +
3230 + FC_DEBUG_DISC("Restarting discovery for port (%6x)\n",
3231 + fc_host_port_id(lport->host));
3232 +
3233 + list_for_each_entry_safe(rdata, next, &disc->rports, peers) {
3234 + rport = PRIV_TO_RPORT(rdata);
3235 + FC_DEBUG_DISC("list_del(%6x)\n", rport->port_id);
3236 + list_del(&rdata->peers);
3237 + lport->tt.rport_logoff(rport);
3238 }
3239 - lport->disc_requested = 1;
3240 - return 0;
3241 +
3242 + disc->requested = 1;
3243 + if (!disc->pending)
3244 + fc_disc_gpn_ft_req(disc);
3245 }
3246
3247 /**
3248 @@ -186,29 +388,58 @@ static int fc_disc_restart(struct fc_lport *lport)
3249 *
3250 * Returns non-zero if discovery cannot be started.
3251 */
3252 -static int fc_disc_start(struct fc_lport *lport)
3253 +static void fc_disc_start(void (*disc_callback)(struct fc_lport *,
3254 + enum fc_disc_event),
3255 + struct fc_lport *lport)
3256 {
3257 struct fc_rport *rport;
3258 - int error;
3259 struct fc_rport_identifiers ids;
3260 + struct fc_disc *disc;
3261 + int found = 0;
3262 +
3263 + mutex_lock(&disc_list_lock);
3264 + list_for_each_entry(disc, &disc_list, list) {
3265 + if (disc->lport == lport) {
3266 + found = 1;
3267 + break;
3268 + }
3269 + }
3270 + mutex_unlock(&disc_list_lock);
3271 +
3272 + if (!found) {
3273 + FC_DEBUG_DISC("No existing discovery job, "
3274 + "creating one for lport (%6x)\n",
3275 + fc_host_port_id(lport->host));
3276 + disc = fc_disc_alloc(lport);
3277 + } else
3278 + FC_DEBUG_DISC("Found an existing discovery job "
3279 + "for lport (%6x)\n",
3280 + fc_host_port_id(lport->host));
3281 +
3282 + /*
3283 + * At this point we may have a new disc job or an existing
3284 + * one. Either way, let's lock when we make changes to it
3285 + * and send the GPN_FT request.
3286 + */
3287 + mutex_lock(&disc->disc_mutex);
3288 +
3289 + disc->disc_callback = disc_callback;
3290
3291 /*
3292 * If not ready, or already running discovery, just set request flag.
3293 */
3294 - if (!fc_lport_test_ready(lport) || lport->disc_pending) {
3295 - lport->disc_requested = 1;
3296 + disc->requested = 1;
3297
3298 - return 0;
3299 + if (disc->pending) {
3300 + mutex_unlock(&disc->disc_mutex);
3301 + return;
3302 }
3303 - lport->disc_pending = 1;
3304 - lport->disc_requested = 0;
3305 - lport->disc_retry_count = 0;
3306
3307 /*
3308 * Handle point-to-point mode as a simple discovery
3309 - * of the remote port.
3310 + * of the remote port. Yucky, yucky, yuck, yuck!
3311 */
3312 - rport = lport->ptp_rp;
3313 + rport = disc->lport->ptp_rp;
3314 if (rport) {
3315 ids.port_id = rport->port_id;
3316 ids.port_name = rport->port_name;
3317 @@ -216,32 +447,16 @@ static int fc_disc_start(struct fc_lport *lport)
3318 ids.roles = FC_RPORT_ROLE_UNKNOWN;
3319 get_device(&rport->dev);
3320
3321 - error = fc_disc_new_target(lport, rport, &ids);
3322 + if (!fc_disc_new_target(disc, rport, &ids)) {
3323 + disc->event = DISC_EV_SUCCESS;
3324 + fc_disc_done(disc);
3325 + }
3326 put_device(&rport->dev);
3327 - if (!error)
3328 - fc_disc_done(lport);
3329 } else {
3330 - fc_disc_gpn_ft_req(lport); /* get ports by FC-4 type */
3331 - error = 0;
3332 + fc_disc_gpn_ft_req(disc); /* get ports by FC-4 type */
3333 }
3334 - return error;
3335 -}
3336 -
3337 -/**
3338 - * fc_disc_retry - Retry discovery
3339 - * @lport: FC local port
3340 - */
3341 -static void fc_disc_retry(struct fc_lport *lport)
3342 -{
3343 - unsigned long delay = FC_DISC_RETRY_DELAY;
3344
3345 - if (!lport->disc_retry_count)
3346 - delay /= 4; /* timeout faster first time */
3347 - if (lport->disc_retry_count++ < FC_DISC_RETRY_LIMIT)
3348 - schedule_delayed_work(&lport->disc_work,
3349 - msecs_to_jiffies(delay));
3350 - else
3351 - fc_disc_done(lport);
3352 + mutex_unlock(&disc->disc_mutex);
3353 }
3354
3355 /**
3356 @@ -249,11 +464,15 @@ static void fc_disc_retry(struct fc_lport *lport)
3357 * @lport: FC local port
3358 * @rport: The previous FC remote port (NULL if new remote port)
3359 * @ids: Identifiers for the new FC remote port
3360 + *
3361 + * Locking Note: This function expects that the disc_mutex is locked
3362 + * before it is called.
3363 */
3364 -static int fc_disc_new_target(struct fc_lport *lport,
3365 +static int fc_disc_new_target(struct fc_disc *disc,
3366 struct fc_rport *rport,
3367 struct fc_rport_identifiers *ids)
3368 {
3369 + struct fc_lport *lport = disc->lport;
3370 struct fc_rport_libfc_priv *rp;
3371 int error = 0;
3372
3373 @@ -272,7 +491,7 @@ static int fc_disc_new_target(struct fc_lport *lport,
3374 * assigned the same FCID. This should be rare.
3375 * Delete the old one and fall thru to re-create.
3376 */
3377 - fc_disc_del_target(lport, rport);
3378 + fc_disc_del_target(disc, rport);
3379 rport = NULL;
3380 }
3381 }
3382 @@ -295,7 +514,7 @@ static int fc_disc_new_target(struct fc_lport *lport,
3383 }
3384 if (rport) {
3385 rp = rport->dd_data;
3386 - rp->event_callback = lport->tt.event_callback;
3387 + rp->event_callback = fc_disc_rport_event;
3388 rp->rp_state = RPORT_ST_INIT;
3389 lport->tt.rport_login(rport);
3390 }
3391 @@ -305,89 +524,111 @@ static int fc_disc_new_target(struct fc_lport *lport,
3392
3393 /**
3394 * fc_disc_del_target - Delete a target
3395 - * @lport: FC local port
3396 + * @disc: FC discovery context
3397 * @rport: The remote port to be removed
3398 */
3399 -static void fc_disc_del_target(struct fc_lport *lport, struct fc_rport *rport)
3400 +static void fc_disc_del_target(struct fc_disc *disc, struct fc_rport *rport)
3401 {
3402 - lport->tt.rport_stop(rport);
3403 + struct fc_lport *lport = disc->lport;
3404 + struct fc_rport_libfc_priv *rdata = RPORT_TO_PRIV(rport);
3405 + list_del(&rdata->peers);
3406 + lport->tt.rport_logoff(rport);
3407 }
3408
3409 /**
3410 * fc_disc_done - Discovery has been completed
3411 - * @lport: FC local port
3412 + * @disc: FC discovery context
3413 */
3414 -static void fc_disc_done(struct fc_lport *lport)
3415 +static void fc_disc_done(struct fc_disc *disc)
3416 {
3417 - lport->disc_done = 1;
3418 - lport->disc_pending = 0;
3419 - if (lport->disc_requested)
3420 - lport->tt.disc_start(lport);
3421 + struct fc_lport *lport = disc->lport;
3422 +
3423 + FC_DEBUG_DISC("Discovery complete for port (%6x)\n",
3424 + fc_host_port_id(lport->host));
3425 +
3426 + disc->disc_callback(lport, disc->event);
3427 + disc->event = DISC_EV_NONE;
3428 +
3429 + if (disc->requested)
3430 + fc_disc_gpn_ft_req(disc);
3431 + else
3432 + disc->pending = 0;
3433 }
3434
3435 /**
3436 - * fc_disc_gpn_ft_req - Send Get Port Names by FC-4 type (GPN_FT) request
3437 - * @lport: FC local port
3438 + * fc_disc_error - Handle error on dNS request
3439 + * @disc: FC discovery context
3440 + * @fp: The frame pointer
3441 */
3442 -static void fc_disc_gpn_ft_req(struct fc_lport *lport)
3443 +static void fc_disc_error(struct fc_disc *disc, struct fc_frame *fp)
3444 {
3445 - struct fc_frame *fp;
3446 - struct fc_seq *sp = NULL;
3447 - struct req {
3448 - struct fc_ct_hdr ct;
3449 - struct fc_ns_gid_ft gid;
3450 - } *rp;
3451 - int error = 0;
3452 -
3453 - lport->disc_buf_len = 0;
3454 - lport->disc_seq_count = 0;
3455 - fp = fc_frame_alloc(lport, sizeof(*rp));
3456 - if (!fp) {
3457 - error = ENOMEM;
3458 - } else {
3459 - rp = fc_frame_payload_get(fp, sizeof(*rp));
3460 - fc_fill_dns_hdr(lport, &rp->ct, FC_NS_GPN_FT, sizeof(rp->gid));
3461 - rp->gid.fn_fc4_type = FC_TYPE_FCP;
3462 -
3463 - WARN_ON(!fc_lport_test_ready(lport));
3464 -
3465 - fc_frame_setup(fp, FC_RCTL_DD_UNSOL_CTL, FC_TYPE_CT);
3466 - sp = lport->tt.exch_seq_send(lport, fp,
3467 - fc_disc_gpn_ft_resp, NULL,
3468 - lport, lport->e_d_tov,
3469 - fc_host_port_id(lport->host),
3470 - FC_FID_DIR_SERV,
3471 - FC_FC_SEQ_INIT | FC_FC_END_SEQ);
3472 + struct fc_lport *lport = disc->lport;
3473 + unsigned long delay = 0;
3474 + if (fc_disc_debug)
3475 + FC_DBG("Error %ld, retries %d/%d\n",
3476 + PTR_ERR(fp), disc->retry_count,
3477 + FC_DISC_RETRY_LIMIT);
3478 +
3479 + if (!fp || PTR_ERR(fp) == -FC_EX_TIMEOUT) {
3480 + /*
3481 + * Memory allocation failure, or the exchange timed out,
3482 + * retry after delay.
3483 + */
3484 + if (disc->retry_count < FC_DISC_RETRY_LIMIT) {
3485 + /* go ahead and retry */
3486 + if (!fp)
3487 + delay = msecs_to_jiffies(500);
3488 + else {
3489 + delay = jiffies +
3490 + msecs_to_jiffies(lport->e_d_tov);
3491 +
3492 + /* timeout faster first time */
3493 + if (!disc->retry_count)
3494 + delay /= 4;
3495 + }
3496 + disc->retry_count++;
3497 + schedule_delayed_work(&disc->disc_work,
3498 + delay);
3499 + } else {
3500 + /* exceeded retries */
3501 + disc->event = DISC_EV_FAILED;
3502 + fc_disc_done(disc);
3503 + }
3504 }
3505 - if (error || !sp)
3506 - fc_disc_retry(lport);
3507 }
3508
3509 /**
3510 - * fc_disc_error - Handle error on dNS request
3511 - * @lport: FC local port
3512 - * @fp: The frame pointer
3513 + * fc_disc_gpn_ft_req - Send Get Port Names by FC-4 type (GPN_FT) request
3514 + * @disc: FC discovery context
3515 + *
3516 + * Locking Note: This function expects that the disc_mutex is locked
3517 + * before it is called.
3518 */
3519 -static void fc_disc_error(struct fc_lport *lport, struct fc_frame *fp)
3520 +static void fc_disc_gpn_ft_req(struct fc_disc *disc)
3521 {
3522 - long err = PTR_ERR(fp);
3523 + struct fc_frame *fp;
3524 + struct fc_lport *lport = disc->lport;
3525
3526 - FC_DEBUG_DISC("Error %ld, retries %d/%d\n", PTR_ERR(fp),
3527 - lport->retry_count, FC_DISC_RETRY_LIMIT);
3528 + WARN_ON(!fc_lport_test_ready(lport));
3529
3530 - switch (err) {
3531 - case -FC_EX_TIMEOUT:
3532 - if (lport->disc_retry_count++ < FC_DISC_RETRY_LIMIT) {
3533 - fc_disc_gpn_ft_req(lport);
3534 - } else {
3535 - fc_disc_done(lport);
3536 - }
3537 - break;
3538 - default:
3539 - FC_DBG("Error code %ld not supported\n", err);
3540 - fc_disc_done(lport);
3541 - break;
3542 - }
3543 + disc->pending = 1;
3544 + disc->requested = 0;
3545 +
3546 + disc->buf_len = 0;
3547 + disc->seq_count = 0;
3548 + fp = fc_frame_alloc(lport,
3549 + sizeof(struct fc_ct_hdr) +
3550 + sizeof(struct fc_ns_gid_ft));
3551 + if (!fp)
3552 + goto err;
3553 +
3554 + if (lport->tt.elsct_send(lport, NULL, fp,
3555 + FC_NS_GPN_FT,
3556 + fc_disc_gpn_ft_resp,
3557 + disc, lport->e_d_tov))
3558 + return;
3559 +err:
3560 + fc_disc_error(disc, fp);
3561 }
3562
3563 /**
3564 @@ -396,8 +637,9 @@ static void fc_disc_error(struct fc_lport *lport, struct fc_frame *fp)
3565 * @buf: GPN_FT response buffer
3566 * @len: size of response buffer
3567 */
3568 -static int fc_disc_gpn_ft_parse(struct fc_lport *lport, void *buf, size_t len)
3569 +static int fc_disc_gpn_ft_parse(struct fc_disc *disc, void *buf, size_t len)
3570 {
3571 + struct fc_lport *lport;
3572 struct fc_gpn_ft_resp *np;
3573 char *bp;
3574 size_t plen;
3575 @@ -407,13 +649,15 @@ static int fc_disc_gpn_ft_parse(struct fc_lport *lport, void *buf, size_t len)
3576 struct fc_rport *rport;
3577 struct fc_rport_libfc_priv *rdata;
3578
3579 + lport = disc->lport;
3580 +
3581 /*
3582 * Handle partial name record left over from previous call.
3583 */
3584 bp = buf;
3585 plen = len;
3586 np = (struct fc_gpn_ft_resp *)bp;
3587 - tlen = lport->disc_buf_len;
3588 + tlen = disc->buf_len;
3589 if (tlen) {
3590 WARN_ON(tlen >= sizeof(*np));
3591 plen = sizeof(*np) - tlen;
3592 @@ -421,7 +665,7 @@ static int fc_disc_gpn_ft_parse(struct fc_lport *lport, void *buf, size_t len)
3593 WARN_ON(plen >= sizeof(*np));
3594 if (plen > len)
3595 plen = len;
3596 - np = &lport->disc_buf;
3597 + np = &disc->partial_buf;
3598 memcpy((char *)np + tlen, bp, plen);
3599
3600 /*
3601 @@ -431,9 +675,9 @@ static int fc_disc_gpn_ft_parse(struct fc_lport *lport, void *buf, size_t len)
3602 bp -= tlen;
3603 len += tlen;
3604 plen += tlen;
3605 - lport->disc_buf_len = (unsigned char) plen;
3606 + disc->buf_len = (unsigned char) plen;
3607 if (plen == sizeof(*np))
3608 - lport->disc_buf_len = 0;
3609 + disc->buf_len = 0;
3610 }
3611
3612 /*
3613 @@ -455,7 +699,7 @@ static int fc_disc_gpn_ft_parse(struct fc_lport *lport, void *buf, size_t len)
3614 rport = fc_rport_rogue_create(&dp);
3615 if (rport) {
3616 rdata = rport->dd_data;
3617 - rdata->event_callback = lport->tt.event_callback;
3618 + rdata->event_callback = fc_disc_rport_event;
3619 rdata->local_port = lport;
3620 lport->tt.rport_login(rport);
3621 } else
3622 @@ -465,7 +709,8 @@ static int fc_disc_gpn_ft_parse(struct fc_lport *lport, void *buf, size_t len)
3623 }
3624
3625 if (np->fp_flags & FC_NS_FID_LAST) {
3626 - fc_disc_done(lport);
3627 + disc->event = DISC_EV_SUCCESS;
3628 + fc_disc_done(disc);
3629 len = 0;
3630 break;
3631 }
3632 @@ -479,11 +724,15 @@ static int fc_disc_gpn_ft_parse(struct fc_lport *lport, void *buf, size_t len)
3633 * Save any partial record at the end of the buffer for next time.
3634 */
3635 if (error == 0 && len > 0 && len < sizeof(*np)) {
3636 - if (np != &lport->disc_buf)
3637 - memcpy(&lport->disc_buf, np, len);
3638 - lport->disc_buf_len = (unsigned char) len;
3639 + if (np != &disc->partial_buf) {
3640 + FC_DEBUG_DISC("Partial buffer remains "
3641 + "for discovery by (%6x)\n",
3642 + fc_host_port_id(lport->host));
3643 + memcpy(&disc->partial_buf, np, len);
3644 + }
3645 + disc->buf_len = (unsigned char) len;
3646 } else {
3647 - lport->disc_buf_len = 0;
3648 + disc->buf_len = 0;
3649 }
3650 return error;
3651 }
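The GPN_FT parser above keeps any incomplete name record left at the end of one response frame in disc->partial_buf and completes it from the start of the next frame. A sketch of that reassembly over a byte stream; the 16-byte record size is assumed to match struct fc_gpn_ft_resp, and parse_chunk()/handle_record() are illustrative helpers only:

#include <stdio.h>
#include <string.h>

#define REC_SIZE 16	/* assumed sizeof(struct fc_gpn_ft_resp) */

static unsigned char partial[REC_SIZE];
static size_t partial_len;

static void handle_record(const unsigned char *rec)
{
	printf("record: flags=%02x fid=%02x%02x%02x\n",
	       rec[0], rec[1], rec[2], rec[3]);
}

/* Feed one response frame's worth of records, buffering a trailing partial one. */
static void parse_chunk(const unsigned char *buf, size_t len)
{
	if (partial_len) {
		size_t need = REC_SIZE - partial_len;

		if (need > len)
			need = len;
		memcpy(partial + partial_len, buf, need);
		partial_len += need;
		buf += need;
		len -= need;
		if (partial_len < REC_SIZE)
			return;
		handle_record(partial);
		partial_len = 0;
	}
	while (len >= REC_SIZE) {
		handle_record(buf);
		buf += REC_SIZE;
		len -= REC_SIZE;
	}
	if (len) {
		memcpy(partial, buf, len);	/* save leftover for the next frame */
		partial_len = len;
	}
}

int main(void)
{
	unsigned char stream[40] = { 0 };

	stream[16] = 0x80;		/* FC_NS_FID_LAST-style flag on the second record */
	parse_chunk(stream, 24);	/* one full record plus 8 leftover bytes */
	parse_chunk(stream + 24, 16);	/* completes record two, buffers the start of record three */
	return 0;
}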
3652 @@ -493,14 +742,13 @@ static int fc_disc_gpn_ft_parse(struct fc_lport *lport, void *buf, size_t len)
3653 */
3654 static void fc_disc_timeout(struct work_struct *work)
3655 {
3656 - struct fc_lport *lport;
3657 -
3658 - lport = container_of(work, struct fc_lport, disc_work.work);
3659 -
3660 - if (lport->disc_pending)
3661 - fc_disc_gpn_ft_req(lport);
3662 - else
3663 - lport->tt.disc_start(lport);
3664 + struct fc_disc *disc = container_of(work,
3665 + struct fc_disc,
3666 + disc_work.work);
3667 + mutex_lock(&disc->disc_mutex);
3668 + if (disc->requested && !disc->pending)
3669 + fc_disc_gpn_ft_req(disc);
3670 + mutex_unlock(&disc->disc_mutex);
3671 }
3672
3673 /**
3674 @@ -509,12 +757,13 @@ static void fc_disc_timeout(struct work_struct *work)
3675 * @fp: response frame
3676 * @lp_arg: Fibre Channel host port instance
3677 *
3678 - * The response may be in multiple frames
3679 + * Locking Note: This function expects that the disc_mutex is locked
3680 + * before it is called.
3681 */
3682 static void fc_disc_gpn_ft_resp(struct fc_seq *sp, struct fc_frame *fp,
3683 - void *lp_arg)
3684 + void *disc_arg)
3685 {
3686 - struct fc_lport *lport = lp_arg;
3687 + struct fc_disc *disc = disc_arg;
3688 struct fc_ct_hdr *cp;
3689 struct fc_frame_header *fh;
3690 unsigned int seq_cnt;
3691 @@ -522,8 +771,11 @@ static void fc_disc_gpn_ft_resp(struct fc_seq *sp, struct fc_frame *fp,
3692 unsigned int len;
3693 int error;
3694
3695 + FC_DEBUG_DISC("Received a GPN_FT response on port (%6x)\n",
3696 + fc_host_port_id(disc->lport->host));
3697 +
3698 if (IS_ERR(fp)) {
3699 - fc_disc_error(lport, fp);
3700 + fc_disc_error(disc, fp);
3701 return;
3702 }
3703
3704 @@ -532,9 +784,9 @@ static void fc_disc_gpn_ft_resp(struct fc_seq *sp, struct fc_frame *fp,
3705 len = fr_len(fp) - sizeof(*fh);
3706 seq_cnt = ntohs(fh->fh_seq_cnt);
3707 if (fr_sof(fp) == FC_SOF_I3 && seq_cnt == 0 &&
3708 - lport->disc_seq_count == 0) {
3709 + disc->seq_count == 0) {
3710 cp = fc_frame_payload_get(fp, sizeof(*cp));
3711 - if (cp == NULL) {
3712 + if (!cp) {
3713 FC_DBG("GPN_FT response too short, len %d\n",
3714 fr_len(fp));
3715 } else if (ntohs(cp->ct_cmd) == FC_FS_ACC) {
3716 @@ -548,25 +800,26 @@ static void fc_disc_gpn_ft_resp(struct fc_seq *sp, struct fc_frame *fp,
3717 FC_DBG("GPN_FT rejected reason %x exp %x "
3718 "(check zoning)\n", cp->ct_reason,
3719 cp->ct_explan);
3720 - fc_disc_done(lport);
3721 + disc->event = DISC_EV_FAILED;
3722 + fc_disc_done(disc);
3723 } else {
3724 FC_DBG("GPN_FT unexpected response code %x\n",
3725 ntohs(cp->ct_cmd));
3726 }
3727 } else if (fr_sof(fp) == FC_SOF_N3 &&
3728 - seq_cnt == lport->disc_seq_count) {
3729 + seq_cnt == disc->seq_count) {
3730 buf = fh + 1;
3731 } else {
3732 FC_DBG("GPN_FT unexpected frame - out of sequence? "
3733 "seq_cnt %x expected %x sof %x eof %x\n",
3734 - seq_cnt, lport->disc_seq_count, fr_sof(fp), fr_eof(fp));
3735 + seq_cnt, disc->seq_count, fr_sof(fp), fr_eof(fp));
3736 }
3737 if (buf) {
3738 - error = fc_disc_gpn_ft_parse(lport, buf, len);
3739 + error = fc_disc_gpn_ft_parse(disc, buf, len);
3740 if (error)
3741 - fc_disc_retry(lport);
3742 + fc_disc_error(disc, fp);
3743 else
3744 - lport->disc_seq_count++;
3745 + disc->seq_count++;
3746 }
3747 fc_frame_free(fp);
3748 }
3749 @@ -576,27 +829,31 @@ static void fc_disc_gpn_ft_resp(struct fc_seq *sp, struct fc_frame *fp,
3750 * @lport: FC local port
3751 * @dp: The port to rediscover
3752 *
3753 - * This could be from an RSCN that reported a change for the target.
3754 + * Locking Note: This function expects that the disc_mutex is locked
3755 + * before it is called.
3756 */
3757 -static void fc_disc_single(struct fc_lport *lport, struct fc_disc_port *dp)
3758 +static void fc_disc_single(struct fc_disc *disc, struct fc_disc_port *dp)
3759 {
3760 + struct fc_lport *lport;
3761 struct fc_rport *rport;
3762 struct fc_rport *new_rport;
3763 struct fc_rport_libfc_priv *rdata;
3764
3765 + lport = disc->lport;
3766 +
3767 if (dp->ids.port_id == fc_host_port_id(lport->host))
3768 goto out;
3769
3770 rport = lport->tt.rport_lookup(lport, dp->ids.port_id);
3771 if (rport) {
3772 - fc_disc_del_target(lport, rport);
3773 + fc_disc_del_target(disc, rport);
3774 put_device(&rport->dev); /* hold from lookup */
3775 }
3776
3777 new_rport = fc_rport_rogue_create(dp);
3778 if (new_rport) {
3779 rdata = new_rport->dd_data;
3780 - rdata->event_callback = lport->tt.event_callback;
3781 + rdata->event_callback = fc_disc_rport_event;
3782 kfree(dp);
3783 lport->tt.rport_login(new_rport);
3784 }
3785 @@ -606,19 +863,70 @@ out:
3786 }
3787
3788 /**
3789 + * fc_disc_stop - Stop discovery for a given lport
3790 + * @lport: The lport that discovery should stop for
3791 + */
3792 +void fc_disc_stop(struct fc_lport *lport)
3793 +{
3794 + struct fc_disc *disc, *next;
3795 +
3796 + mutex_lock(&disc_list_lock);
3797 + list_for_each_entry_safe(disc, next, &disc_list, list) {
3798 + if (disc->lport == lport) {
3799 + cancel_delayed_work_sync(&disc->disc_work);
3800 + fc_disc_stop_rports(disc);
3801 + }
3802 + }
3803 + mutex_unlock(&disc_list_lock);
3804 +}
3805 +
3806 +/**
3807 + * fc_disc_stop_final - Stop discovery for a given lport
3808 + * @lport: The lport that discovery should stop for
3809 + *
3810 + * This function will block until discovery has been
3811 + * completely stopped and all rports have been deleted.
3812 + */
3813 +void fc_disc_stop_final(struct fc_lport *lport)
3814 +{
3815 + struct fc_disc *disc, *next;
3816 + fc_disc_stop(lport);
3817 + lport->tt.rport_flush_queue();
3818 +
3819 + mutex_lock(&disc_list_lock);
3820 + list_for_each_entry_safe(disc, next, &disc_list, list) {
3821 + if (disc->lport == lport) {
3822 + list_del(&disc->list);
3823 + kfree(disc);
3824 + }
3825 + }
3826 + mutex_unlock(&disc_list_lock);
3827 +}
3828 +
3829 +/**
3830 * fc_disc_init - Initialize the discovery block
3831 * @lport: FC local port
3832 */
3833 int fc_disc_init(struct fc_lport *lport)
3834 {
3835 - INIT_DELAYED_WORK(&lport->disc_work, fc_disc_timeout);
3836 + INIT_LIST_HEAD(&disc_list);
3837 + mutex_init(&disc_list_lock);
3838
3839 if (!lport->tt.disc_start)
3840 lport->tt.disc_start = fc_disc_start;
3841
3842 + if (!lport->tt.disc_stop)
3843 + lport->tt.disc_stop = fc_disc_stop;
3844 +
3845 + if (!lport->tt.disc_stop_final)
3846 + lport->tt.disc_stop_final = fc_disc_stop_final;
3847 +
3848 if (!lport->tt.disc_recv_req)
3849 lport->tt.disc_recv_req = fc_disc_recv_req;
3850
3851 + if (!lport->tt.rport_lookup)
3852 + lport->tt.rport_lookup = fc_disc_lookup_rport;
3853 +
3854 return 0;
3855 }
3856 EXPORT_SYMBOL(fc_disc_init);
3857 diff --git a/drivers/scsi/libfc/fc_elsct.c b/drivers/scsi/libfc/fc_elsct.c
3858 new file mode 100644
3859 index 0000000..dd47fe6
3860 --- /dev/null
3861 +++ b/drivers/scsi/libfc/fc_elsct.c
3862 @@ -0,0 +1,71 @@
3863 +/*
3864 + * Copyright(c) 2008 Intel Corporation. All rights reserved.
3865 + *
3866 + * This program is free software; you can redistribute it and/or modify it
3867 + * under the terms and conditions of the GNU General Public License,
3868 + * version 2, as published by the Free Software Foundation.
3869 + *
3870 + * This program is distributed in the hope it will be useful, but WITHOUT
3871 + * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
3872 + * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
3873 + * more details.
3874 + *
3875 + * You should have received a copy of the GNU General Public License along with
3876 + * this program; if not, write to the Free Software Foundation, Inc.,
3877 + * 51 Franklin St - Fifth Floor, Boston, MA 02110-1301 USA.
3878 + *
3879 + * Maintained at www.Open-FCoE.org
3880 + */
3881 +
3882 +/*
3883 + * Provide interface to send ELS/CT FC frames
3884 + */
3885 +
3886 +#include <asm/unaligned.h>
3887 +#include <scsi/fc/fc_gs.h>
3888 +#include <scsi/fc/fc_ns.h>
3889 +#include <scsi/fc/fc_els.h>
3890 +#include <scsi/libfc.h>
3891 +#include <scsi/fc_encode.h>
3892 +
3893 +/*
3894 + * fc_elsct_send - sends ELS/CT frame
3895 + */
3896 +static struct fc_seq *fc_elsct_send(struct fc_lport *lport,
3897 + struct fc_rport *rport,
3898 + struct fc_frame *fp,
3899 + unsigned int op,
3900 + void (*resp)(struct fc_seq *,
3901 + struct fc_frame *fp,
3902 + void *arg),
3903 + void *arg, u32 timer_msec)
3904 +{
3905 + enum fc_rctl r_ctl;
3906 + u32 did;
3907 + enum fc_fh_type fh_type;
3908 + int rc;
3909 +
3910 + /* ELS requests */
3911 + if ((op >= ELS_LS_RJT) && (op <= ELS_AUTH_ELS))
3912 + rc = fc_els_fill(lport, rport, fp, op, &r_ctl, &did, &fh_type);
3913 + else
3914 + /* CT requests */
3915 + rc = fc_ct_fill(lport, fp, op, &r_ctl, &did, &fh_type);
3916 +
3917 + if (rc)
3918 + return NULL;
3919 +
3920 + fc_fill_fc_hdr(fp, r_ctl, did, fc_host_port_id(lport->host), fh_type,
3921 + FC_FC_FIRST_SEQ | FC_FC_END_SEQ | FC_FC_SEQ_INIT, 0);
3922 +
3923 + return lport->tt.exch_seq_send(lport, fp, resp, NULL, arg, timer_msec);
3924 +}
3925 +
3926 +int fc_elsct_init(struct fc_lport *lport)
3927 +{
3928 + if (!lport->tt.elsct_send)
3929 + lport->tt.elsct_send = fc_elsct_send;
3930 +
3931 + return 0;
3932 +}
3933 +EXPORT_SYMBOL(fc_elsct_init);
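
Note on the new fc_elsct.c above: fc_elsct_send() picks the encoder purely from the opcode range — opcodes inside the ELS window are filled by fc_els_fill(), everything else is treated as a CT (name server) request by fc_ct_fill() — then stamps the FC header and hands the frame to exch_seq_send(). A minimal userspace sketch of that dispatch rule follows; the numeric bounds and sample opcodes are illustrative stand-ins for the constants from <scsi/fc/fc_els.h> and <scsi/fc/fc_ns.h>, not values taken from this patch.

#include <stdio.h>

#define ELS_RANGE_LO 0x01	/* assumed stand-in for ELS_LS_RJT */
#define ELS_RANGE_HI 0x90	/* assumed stand-in for ELS_AUTH_ELS */

/* Mirror of the range test used to choose fc_els_fill() vs fc_ct_fill(). */
static const char *classify(unsigned int op)
{
	return (op >= ELS_RANGE_LO && op <= ELS_RANGE_HI) ? "ELS" : "CT";
}

int main(void)
{
	unsigned int ops[] = { 0x03, 0x0217 };	/* e.g. PLOGI, RFT_ID */
	unsigned int i;

	for (i = 0; i < sizeof(ops) / sizeof(ops[0]); i++)
		printf("op 0x%x -> %s fill\n", ops[i], classify(ops[i]));
	return 0;
}
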
3934 diff --git a/drivers/scsi/libfc/fc_exch.c b/drivers/scsi/libfc/fc_exch.c
3935 index 67c5bad..12a1196 100644
3936 --- a/drivers/scsi/libfc/fc_exch.c
3937 +++ b/drivers/scsi/libfc/fc_exch.c
3938 @@ -29,7 +29,8 @@
3939
3940 #include <scsi/fc/fc_fc2.h>
3941
3942 -#include <scsi/libfc/libfc.h>
3943 +#include <scsi/libfc.h>
3944 +#include <scsi/fc_encode.h>
3945
3946 #define FC_DEF_R_A_TOV (10 * 1000) /* resource allocation timeout */
3947
3948 @@ -60,66 +61,6 @@ static struct kmem_cache *fc_em_cachep; /* cache for exchanges */
3949 */
3950
3951 /*
3952 - * Sequence.
3953 - */
3954 -struct fc_seq {
3955 - u8 id; /* seq ID */
3956 - u16 ssb_stat; /* status flags for sequence status block */
3957 - u16 cnt; /* frames sent so far on sequence */
3958 - u32 f_ctl; /* F_CTL flags for frames */
3959 - u32 rec_data; /* FC-4 value for REC */
3960 -};
3961 -
3962 -struct fc_exch;
3963 -
3964 -#define FC_EX_DONE (1 << 0) /* ep is completed */
3965 -#define FC_EX_RST_CLEANUP (1 << 1) /* reset is forcing completion */
3966 -
3967 -/*
3968 - * Exchange.
3969 - *
3970 - * Locking notes: The ex_lock protects changes to the following fields:
3971 - * esb_stat, f_ctl, seq.ssb_stat, seq.f_ctl.
3972 - * seq_id
3973 - * sequence allocation
3974 - *
3975 - * If the em_lock and ex_lock must be taken at the same time, the
3976 - * em_lock must be taken before the ex_lock.
3977 - */
3978 -struct fc_exch {
3979 - struct fc_exch_mgr *em; /* exchange manager */
3980 - u32 state; /* internal driver state */
3981 - u16 xid; /* our exchange ID */
3982 - struct list_head ex_list; /* free or busy list linkage */
3983 - spinlock_t ex_lock; /* lock covering exchange state */
3984 - atomic_t ex_refcnt; /* reference counter */
3985 - struct delayed_work timeout_work; /* timer for upper level protocols */
3986 - struct fc_lport *lp; /* fc device instance */
3987 - u16 oxid; /* originator's exchange ID */
3988 - u16 rxid; /* responder's exchange ID */
3989 - u32 oid; /* originator's FCID */
3990 - u32 sid; /* source FCID */
3991 - u32 did; /* destination FCID */
3992 - u32 esb_stat; /* exchange status for ESB */
3993 - u32 r_a_tov; /* r_a_tov from rport (msec) */
3994 - u8 seq_id; /* next sequence ID to use */
3995 - u32 f_ctl; /* F_CTL flags for sequences */
3996 - u8 fh_type; /* frame type */
3997 - enum fc_class class; /* class of service */
3998 - struct fc_seq seq; /* single sequence */
3999 - /*
4000 - * Handler for responses to this current exchange.
4001 - */
4002 - void (*resp)(struct fc_seq *, struct fc_frame *, void *);
4003 - void (*destructor)(struct fc_seq *, void *);
4004 - /*
4005 - * arg is passed as void pointer to exchange
4006 - * resp and destructor handlers
4007 - */
4008 - void *arg;
4009 -};
4010 -
4011 -/*
4012 * Exchange manager.
4013 *
4014 * This structure is the center for creating exchanges and sequences.
4015 @@ -131,6 +72,8 @@ struct fc_exch_mgr {
4016 u16 last_xid; /* last allocated exchange ID */
4017 u16 min_xid; /* min exchange ID */
4018 u16 max_xid; /* max exchange ID */
4019 + u16 max_read; /* max exchange ID for read */
4020 + u16 last_read; /* last xid allocated for read */
4021 u32 total_exches; /* total allocated exchanges */
4022 struct list_head ex_list; /* allocated exchanges list */
4023 struct fc_lport *lp; /* fc device instance */
4024 @@ -151,14 +94,12 @@ struct fc_exch_mgr {
4025 } stats;
4026 struct fc_exch **exches; /* for exch pointers indexed by xid */
4027 };
4028 -
4029 #define fc_seq_exch(sp) container_of(sp, struct fc_exch, seq)
4030 -#define fc_exch_next_xid(mp, id) ((id == mp->max_xid) ? mp->min_xid : id + 1)
4031
4032 static void fc_exch_rrq(struct fc_exch *);
4033 static void fc_seq_ls_acc(struct fc_seq *);
4034 static void fc_seq_ls_rjt(struct fc_seq *, enum fc_els_rjt_reason,
4035 - enum fc_els_rjt_explan);
4036 + enum fc_els_rjt_explan);
4037 static void fc_exch_els_rec(struct fc_seq *, struct fc_frame *);
4038 static void fc_exch_els_rrq(struct fc_seq *, struct fc_frame *);
4039 static struct fc_seq *fc_seq_start_next_locked(struct fc_seq *sp);
4040 @@ -274,34 +215,57 @@ static void fc_exch_hold(struct fc_exch *ep)
4041 }
4042
4043 /*
4044 - * Fill in frame header.
4045 - *
4046 - * The following fields are the responsibility of this routine:
4047 - * d_id, s_id, df_ctl, oxid, rxid, cs_ctl, seq_id
4048 - *
4049 - * The following fields are handled by the caller.
4050 - * r_ctl, type, f_ctl, seq_cnt, parm_offset
4051 - *
4052 - * That should be a complete list.
4053 - *
4054 - * We may be the originator or responder to the sequence.
4055 + * setup fc hdr by initializing a few more FC header fields and sof/eof.
4056 + * Fields initialized by this func:
4057 + * - fh_ox_id, fh_rx_id, fh_seq_id, fh_seq_cnt
4058 + * - sof and eof
4059 */
4060 -static void fc_seq_fill_hdr(struct fc_seq *sp, struct fc_frame *fp)
4061 +static void fc_exch_setup_hdr(struct fc_exch *ep, struct fc_frame *fp,
4062 + u32 f_ctl)
4063 {
4064 struct fc_frame_header *fh = fc_frame_header_get(fp);
4065 - struct fc_exch *ep;
4066 + u16 fill;
4067
4068 - ep = fc_seq_exch(sp);
4069 + fr_sof(fp) = ep->class;
4070 + if (ep->seq.cnt)
4071 + fr_sof(fp) = fc_sof_normal(ep->class);
4072 +
4073 + if (f_ctl & FC_FC_END_SEQ) {
4074 + fr_eof(fp) = FC_EOF_T;
4075 + if (fc_sof_needs_ack(ep->class))
4076 + fr_eof(fp) = FC_EOF_N;
4077 + /*
4078 + * Form f_ctl.
4079 + * The number of fill bytes to make the length a 4-byte
4080 + * multiple is the low order 2-bits of the f_ctl.
4081 + * The fill itself will have been cleared by the frame
4082 + * allocation.
4083 + * After this, the length will be even, as expected by
4084 + * the transport.
4085 + */
4086 + fill = fr_len(fp) & 3;
4087 + if (fill) {
4088 + fill = 4 - fill;
4089 + /* TODO, this may be a problem with fragmented skb */
4090 + skb_put(fp_skb(fp), fill);
4091 + hton24(fh->fh_f_ctl, f_ctl | fill);
4092 + }
4093 + } else {
4094 + WARN_ON(fr_len(fp) % 4 != 0); /* no pad to non last frame */
4095 + fr_eof(fp) = FC_EOF_N;
4096 + }
4097
4098 - hton24(fh->fh_s_id, ep->sid);
4099 - hton24(fh->fh_d_id, ep->did);
4100 + /*
4101 + * Initialize remaining fh fields
4102 + * from fc_fill_fc_hdr
4103 + */
4104 fh->fh_ox_id = htons(ep->oxid);
4105 fh->fh_rx_id = htons(ep->rxid);
4106 - fh->fh_seq_id = sp->id;
4107 - fh->fh_cs_ctl = 0;
4108 - fh->fh_df_ctl = 0;
4109 + fh->fh_seq_id = ep->seq.id;
4110 + fh->fh_seq_cnt = htons(ep->seq.cnt);
4111 }
4112
4113 +
4114 /*
4115 * Release a reference to an exchange.
4116 * If the refcnt goes to zero and the exchange is complete, it is freed.
4117 @@ -432,8 +396,9 @@ int fc_seq_exch_abort(const struct fc_seq *req_sp, unsigned int timer_msec)
4118 */
4119 fp = fc_frame_alloc(ep->lp, 0);
4120 if (fp) {
4121 - fc_frame_setup(fp, FC_RCTL_BA_ABTS, FC_TYPE_BLS);
4122 - error = fc_seq_send(ep->lp, sp, fp, FC_FC_END_SEQ);
4123 + fc_fill_fc_hdr(fp, FC_RCTL_BA_ABTS, ep->did, ep->sid,
4124 + FC_TYPE_BLS, FC_FC_END_SEQ | FC_FC_SEQ_INIT, 0);
4125 + error = fc_seq_send(ep->lp, sp, fp);
4126 } else
4127 error = -ENOBUFS;
4128 return error;
4129 @@ -508,36 +473,66 @@ static struct fc_seq *fc_seq_alloc(struct fc_exch *ep, u8 seq_id)
4130 }
4131
4132 /*
4133 - * Allocate an exchange.
4134 + * fc_em_alloc_xid - returns an xid based on request type
4135 + * @mp : ptr to the exchange manager
4136 + * @fp : ptr to the associated frame
4137 *
4138 - * if xid is supplied zero then assign next free exchange ID
4139 - * from exchange manager, otherwise use supplied xid.
4140 - * Returns with exch lock held.
4141 + * check the associated fc_fsp_pkt to get the SCSI command type and
4142 + * direction, and decide which xid range this exchange ID
4143 + * will be allocated from.
4144 + *
4145 + * Returns : 0 or a valid xid
4146 */
4147 -struct fc_exch *fc_exch_alloc(struct fc_exch_mgr *mp, u16 xid)
4148 +static u16 fc_em_alloc_xid(struct fc_exch_mgr *mp, const struct fc_frame *fp)
4149 {
4150 + u16 xid, min, max;
4151 + u16 *plast;
4152 struct fc_exch *ep = NULL;
4153 - u16 min_xid, max_xid;
4154
4155 - min_xid = mp->min_xid;
4156 - max_xid = mp->max_xid;
4157 - /*
4158 - * if xid is supplied then verify its xid range
4159 - */
4160 - if (xid) {
4161 - if (unlikely((xid < min_xid) || (xid > max_xid))) {
4162 - FC_DBG("Invalid xid 0x:%x\n", xid);
4163 - goto out;
4164 - }
4165 - if (unlikely(mp->exches[xid - min_xid] != NULL)) {
4166 - FC_DBG("xid 0x:%x is already in use\n", xid);
4167 - goto out;
4168 + if (mp->max_read) {
4169 + if (fc_frame_is_read(fp)) {
4170 + min = mp->min_xid;
4171 + max = mp->max_read;
4172 + plast = &mp->last_read;
4173 + } else {
4174 + min = mp->max_read + 1;
4175 + max = mp->max_xid;
4176 + plast = &mp->last_xid;
4177 }
4178 + } else {
4179 + min = mp->min_xid;
4180 + max = mp->max_xid;
4181 + plast = &mp->last_xid;
4182 }
4183 + xid = *plast;
4184 + do {
4185 + xid = (xid == max) ? min : xid + 1;
4186 + ep = mp->exches[xid - mp->min_xid];
4187 + } while ((ep != NULL) && (xid != *plast));
4188
4189 - /*
4190 - * Allocate new exchange
4191 - */
4192 + if (unlikely(ep))
4193 + xid = 0;
4194 + else
4195 + *plast = xid;
4196 +
4197 + return xid;
4198 +}
4199 +
4200 +/*
4201 + * fc_exch_alloc - allocate an exchange.
4202 + * @mp : ptr to the exchange manager
4203 + * @xid: input xid
4204 + *
4205 + * if xid is supplied zero then assign next free exchange ID
4206 + * if the supplied xid is zero, assign the next free exchange ID
4207 + * from the exchange manager, otherwise use the supplied xid.
4208 + */
4209 +struct fc_exch *fc_exch_alloc(struct fc_exch_mgr *mp,
4210 + struct fc_frame *fp, u16 xid)
4211 +{
4212 + struct fc_exch *ep = NULL;
4213 +
4214 + /* allocate memory for exchange */
4215 ep = mempool_alloc(mp->ep_pool, GFP_ATOMIC);
4216 if (!ep) {
4217 atomic_inc(&mp->stats.no_free_exch);
4218 @@ -546,40 +541,26 @@ struct fc_exch *fc_exch_alloc(struct fc_exch_mgr *mp, u16 xid)
4219 memset(ep, 0, sizeof(*ep));
4220
4221 spin_lock_bh(&mp->em_lock);
4222 -
4223 - /*
4224 - * if xid is zero then assign next free exchange ID
4225 - */
4226 + /* alloc xid if input xid 0 */
4227 if (!xid) {
4228 - xid = fc_exch_next_xid(mp, mp->last_xid);
4229 - /*
4230 - * find next free xid using linear search
4231 - */
4232 - while (mp->exches[xid - min_xid] != NULL) {
4233 - if (xid == mp->last_xid)
4234 - break;
4235 - xid = fc_exch_next_xid(mp, xid);
4236 - }
4237 -
4238 - if (unlikely(mp->exches[xid - min_xid] != NULL))
4239 + /* alloc a new xid */
4240 + xid = fc_em_alloc_xid(mp, fp);
4241 + if (!xid) {
4242 + printk(KERN_ERR "fc_em_alloc_xid() failed\n");
4243 goto err;
4244 - mp->last_xid = xid;
4245 + }
4246 }
4247
4248 - /* lport lock ? */
4249 - if (mp->lp->state == LPORT_ST_RESET)
4250 - goto err; /* don't add new ep during local port reset */
4251 -
4252 fc_exch_hold(ep); /* hold for exch in mp */
4253 spin_lock_init(&ep->ex_lock);
4254 /*
4255 * Hold exch lock for caller to prevent fc_exch_reset()
4256 - * from releasing exch while fc_exch_alloc() caller is
4257 + * from releasing exch while fc_exch_alloc() caller is
4258 * still working on exch.
4259 */
4260 spin_lock_bh(&ep->ex_lock);
4261
4262 - mp->exches[xid - min_xid] = ep;
4263 + mp->exches[xid - mp->min_xid] = ep;
4264 list_add_tail(&ep->ex_list, &mp->ex_list);
4265 fc_seq_alloc(ep, ep->seq_id++);
4266 mp->total_exches++;
4267 @@ -874,55 +855,17 @@ struct fc_seq *fc_seq_start_next(struct fc_seq *sp)
4268 }
4269 EXPORT_SYMBOL(fc_seq_start_next);
4270
4271 -int fc_seq_send(struct fc_lport *lp, struct fc_seq *sp,
4272 - struct fc_frame *fp, u32 f_ctl)
4273 +int fc_seq_send(struct fc_lport *lp, struct fc_seq *sp, struct fc_frame *fp)
4274 {
4275 struct fc_exch *ep;
4276 - struct fc_frame_header *fh;
4277 - enum fc_class class;
4278 - u16 fill = 0;
4279 + struct fc_frame_header *fh = fc_frame_header_get(fp);
4280 int error;
4281
4282 ep = fc_seq_exch(sp);
4283 WARN_ON((ep->esb_stat & ESB_ST_SEQ_INIT) != ESB_ST_SEQ_INIT);
4284
4285 - fc_seq_fill_hdr(sp, fp);
4286 - fh = fc_frame_header_get(fp);
4287 - class = ep->class;
4288 - fr_sof(fp) = class;
4289 - if (sp->cnt)
4290 - fr_sof(fp) = fc_sof_normal(class);
4291 -
4292 - if (f_ctl & FC_FC_END_SEQ) {
4293 - fr_eof(fp) = FC_EOF_T;
4294 - if (fc_sof_needs_ack(class))
4295 - fr_eof(fp) = FC_EOF_N;
4296 - /*
4297 - * Form f_ctl.
4298 - * The number of fill bytes to make the length a 4-byte
4299 - * multiple is the low order 2-bits of the f_ctl.
4300 - * The fill itself will have been cleared by the frame
4301 - * allocation.
4302 - * After this, the length will be even, as expected by
4303 - * the transport. Don't include the fill in the f_ctl
4304 - * saved in the sequence.
4305 - */
4306 - fill = fr_len(fp) & 3;
4307 - if (fill) {
4308 - fill = 4 - fill;
4309 - /* TODO, this may be a problem with fragmented skb */
4310 - skb_put(fp_skb(fp), fill);
4311 - }
4312 - f_ctl |= sp->f_ctl | ep->f_ctl;
4313 - } else {
4314 - WARN_ON(fr_len(fp) % 4 != 0); /* no pad to non last frame */
4315 - f_ctl |= sp->f_ctl | ep->f_ctl;
4316 - f_ctl &= ~FC_FC_SEQ_INIT;
4317 - fr_eof(fp) = FC_EOF_N;
4318 - }
4319 -
4320 - hton24(fh->fh_f_ctl, f_ctl | fill);
4321 - fh->fh_seq_cnt = htons(sp->cnt);
4322 + sp->f_ctl = ntoh24(fh->fh_f_ctl);
4323 + fc_exch_setup_hdr(ep, fp, sp->f_ctl);
4324
4325 /*
4326 * update sequence count if this frame is carrying
4327 @@ -946,12 +889,10 @@ int fc_seq_send(struct fc_lport *lp, struct fc_seq *sp,
4328 * We can only be called to send once for each sequence.
4329 */
4330 spin_lock_bh(&ep->ex_lock);
4331 - sp->f_ctl = f_ctl; /* save for possible abort */
4332 ep->f_ctl &= ~FC_FC_FIRST_SEQ; /* not first seq */
4333 - if (f_ctl & FC_FC_END_SEQ) {
4334 - if (f_ctl & FC_FC_SEQ_INIT)
4335 - ep->esb_stat &= ~ESB_ST_SEQ_INIT;
4336 - }
4337 + sp->f_ctl &= ~FC_FC_FIRST_SEQ; /* not first seq */
4338 + if (sp->f_ctl & (FC_FC_END_SEQ | FC_FC_SEQ_INIT))
4339 + ep->esb_stat &= ~ESB_ST_SEQ_INIT;
4340 spin_unlock_bh(&ep->ex_lock);
4341 return error;
4342 }
4343 @@ -986,10 +927,11 @@ static void fc_seq_send_last(struct fc_seq *sp, struct fc_frame *fp,
4344 enum fc_rctl rctl, enum fc_fh_type fh_type)
4345 {
4346 u32 f_ctl;
4347 + struct fc_exch *ep = fc_seq_exch(sp);
4348
4349 - fc_frame_setup(fp, rctl, fh_type);
4350 - f_ctl = FC_FC_SEQ_INIT | FC_FC_LAST_SEQ | FC_FC_END_SEQ;
4351 - fc_seq_send(fc_seq_exch(sp)->lp, sp, fp, f_ctl);
4352 + f_ctl = FC_FC_LAST_SEQ | FC_FC_END_SEQ | FC_FC_SEQ_INIT;
4353 + fc_fill_fc_hdr(fp, rctl, ep->did, ep->sid, fh_type, f_ctl, 0);
4354 + fc_seq_send(fc_seq_exch(sp)->lp, sp, fp);
4355 }
4356
4357 /*
4358 @@ -1001,7 +943,8 @@ static void fc_seq_send_ack(struct fc_seq *sp, const struct fc_frame *rx_fp)
4359 struct fc_frame *fp;
4360 struct fc_frame_header *rx_fh;
4361 struct fc_frame_header *fh;
4362 - struct fc_lport *lp = fc_seq_exch(sp)->lp;
4363 + struct fc_exch *ep = fc_seq_exch(sp);
4364 + struct fc_lport *lp = ep->lp;
4365 unsigned int f_ctl;
4366
4367 /*
4368 @@ -1013,7 +956,6 @@ static void fc_seq_send_ack(struct fc_seq *sp, const struct fc_frame *rx_fp)
4369 if (!fp)
4370 return;
4371
4372 - fc_seq_fill_hdr(sp, fp);
4373 fh = fc_frame_header_get(fp);
4374 fh->fh_r_ctl = FC_RCTL_ACK_1;
4375 fh->fh_type = FC_TYPE_BLS;
4376 @@ -1034,6 +976,7 @@ static void fc_seq_send_ack(struct fc_seq *sp, const struct fc_frame *rx_fp)
4377 f_ctl ^= FC_FC_EX_CTX | FC_FC_SEQ_CTX;
4378 hton24(fh->fh_f_ctl, f_ctl);
4379
4380 + fc_exch_setup_hdr(ep, fp, f_ctl);
4381 fh->fh_seq_id = rx_fh->fh_seq_id;
4382 fh->fh_seq_cnt = rx_fh->fh_seq_cnt;
4383 fh->fh_parm_offset = htonl(1); /* ack single frame */
4384 @@ -1514,7 +1457,7 @@ static void fc_exch_reset(struct fc_exch *ep)
4385 * a deadlock).
4386 */
4387 if (cancel_delayed_work(&ep->timeout_work))
4388 - atomic_dec(&ep->ex_refcnt); /* drop hold for timer */
4389 + atomic_dec(&ep->ex_refcnt); /* drop hold for timer */
4390 resp = ep->resp;
4391 ep->resp = NULL;
4392 if (ep->esb_stat & ESB_ST_REC_QUAL)
4393 @@ -1565,22 +1508,6 @@ restart:
4394 }
4395 EXPORT_SYMBOL(fc_exch_mgr_reset);
4396
4397 -void fc_seq_get_xids(struct fc_seq *sp, u16 *oxid, u16 *rxid)
4398 -{
4399 - struct fc_exch *ep;
4400 -
4401 - ep = fc_seq_exch(sp);
4402 - *oxid = ep->oxid;
4403 - *rxid = ep->rxid;
4404 -}
4405 -EXPORT_SYMBOL(fc_seq_get_xids);
4406 -
4407 -void fc_seq_set_rec_data(struct fc_seq *sp, u32 rec_data)
4408 -{
4409 - sp->rec_data = rec_data;
4410 -}
4411 -EXPORT_SYMBOL(fc_seq_set_rec_data);
4412 -
4413 /*
4414 * Handle incoming ELS REC - Read Exchange Concise.
4415 * Note that the requesting port may be different than the S_ID in the request.
4416 @@ -1648,8 +1575,8 @@ static void fc_exch_els_rec(struct fc_seq *sp, struct fc_frame *rfp)
4417 hton24(acc->reca_rfid, ep->sid);
4418 acc->reca_fc4value = htonl(ep->seq.rec_data);
4419 acc->reca_e_stat = htonl(ep->esb_stat & (ESB_ST_RESP |
4420 - ESB_ST_SEQ_INIT |
4421 - ESB_ST_COMPLETE));
4422 + ESB_ST_SEQ_INIT |
4423 + ESB_ST_COMPLETE));
4424 sp = fc_seq_start_next(sp);
4425 fc_seq_send_last(sp, fp, FC_RCTL_ELS_REP, FC_TYPE_ELS);
4426 out:
4427 @@ -1723,7 +1650,6 @@ static void fc_exch_rrq(struct fc_exch *ep)
4428 fp = fc_frame_alloc(lp, sizeof(*rrq));
4429 if (!fp)
4430 return;
4431 - fc_frame_setup(fp, FC_RCTL_ELS_REQ, FC_TYPE_ELS);
4432 rrq = fc_frame_payload_get(fp, sizeof(*rrq));
4433 memset(rrq, 0, sizeof(*rrq));
4434 rrq->rrq_cmd = ELS_RRQ;
4435 @@ -1734,9 +1660,13 @@ static void fc_exch_rrq(struct fc_exch *ep)
4436 did = ep->did;
4437 if (ep->esb_stat & ESB_ST_RESP)
4438 did = ep->sid;
4439 +
4440 + fc_fill_fc_hdr(fp, FC_RCTL_ELS_REQ, did,
4441 + fc_host_port_id(lp->host), FC_TYPE_ELS,
4442 + FC_FC_FIRST_SEQ | FC_FC_END_SEQ | FC_FC_SEQ_INIT, 0);
4443 +
4444 rrq_sp = fc_exch_seq_send(lp, fp, fc_exch_rrq_resp, NULL, ep,
4445 - lp->e_d_tov, fc_host_port_id(lp->host), did,
4446 - FC_FC_SEQ_INIT | FC_FC_END_SEQ);
4447 + lp->e_d_tov);
4448 if (!rrq_sp) {
4449 ep->esb_stat |= ESB_ST_REC_QUAL;
4450 fc_exch_timer_set_locked(ep, ep->r_a_tov);
4451 @@ -1791,7 +1721,7 @@ static void fc_exch_els_rrq(struct fc_seq *sp, struct fc_frame *fp)
4452 }
4453 if (ep->esb_stat & ESB_ST_COMPLETE) {
4454 if (cancel_delayed_work(&ep->timeout_work))
4455 - atomic_dec(&ep->ex_refcnt); /* drop hold for timer */
4456 + atomic_dec(&ep->ex_refcnt); /* drop timer hold */
4457 }
4458
4459 spin_unlock_bh(&ep->ex_lock);
4460 @@ -1827,6 +1757,7 @@ struct fc_exch_mgr *fc_exch_mgr_alloc(struct fc_lport *lp,
4461 /*
4462 * Memory need for EM
4463 */
4464 +#define xid_ok(i, m1, m2) (((i) >= (m1)) && ((i) <= (m2)))
4465 len = (max_xid - min_xid + 1) * (sizeof(struct fc_exch *));
4466 len += sizeof(struct fc_exch_mgr);
4467
4468 @@ -1837,10 +1768,22 @@ struct fc_exch_mgr *fc_exch_mgr_alloc(struct fc_lport *lp,
4469 mp->class = class;
4470 mp->total_exches = 0;
4471 mp->exches = (struct fc_exch **)(mp + 1);
4472 - mp->last_xid = min_xid - 1;
4473 + mp->lp = lp;
4474 + /* adjust em exch xid range for offload */
4475 mp->min_xid = min_xid;
4476 mp->max_xid = max_xid;
4477 - mp->lp = lp;
4478 + mp->last_xid = min_xid - 1;
4479 + mp->max_read = 0;
4480 + mp->last_read = 0;
4481 + if (lp->lro_enabled && xid_ok(lp->lro_xid, min_xid, max_xid)) {
4482 + mp->max_read = lp->lro_xid;
4483 + mp->last_read = min_xid - 1;
4484 + mp->last_xid = mp->max_read;
4485 + } else {
4486 + /* disable lro if no xid control over read */
4487 + lp->lro_enabled = 0;
4488 + }
4489 +
4490 INIT_LIST_HEAD(&mp->ex_list);
4491 spin_lock_init(&mp->em_lock);
4492
4493 @@ -1873,7 +1816,8 @@ struct fc_exch *fc_exch_get(struct fc_lport *lp, struct fc_frame *fp)
4494 {
4495 if (!lp || !lp->emp)
4496 return NULL;
4497 - return fc_exch_alloc(lp->emp, 0);
4498 +
4499 + return fc_exch_alloc(lp->emp, fp, 0);
4500 }
4501 EXPORT_SYMBOL(fc_exch_get);
4502
4503 @@ -1883,13 +1827,11 @@ struct fc_seq *fc_exch_seq_send(struct fc_lport *lp,
4504 struct fc_frame *fp,
4505 void *arg),
4506 void (*destructor)(struct fc_seq *, void *),
4507 - void *arg, u32 timer_msec,
4508 - u32 sid, u32 did, u32 f_ctl)
4509 + void *arg, u32 timer_msec)
4510 {
4511 struct fc_exch *ep;
4512 struct fc_seq *sp = NULL;
4513 struct fc_frame_header *fh;
4514 - u16 fill;
4515 int rc = 1;
4516
4517 ep = lp->tt.exch_get(lp, fp);
4518 @@ -1898,7 +1840,8 @@ struct fc_seq *fc_exch_seq_send(struct fc_lport *lp,
4519 return NULL;
4520 }
4521 ep->esb_stat |= ESB_ST_SEQ_INIT;
4522 - fc_exch_set_addr(ep, sid, did);
4523 + fh = fc_frame_header_get(fp);
4524 + fc_exch_set_addr(ep, ntoh24(fh->fh_s_id), ntoh24(fh->fh_d_id));
4525 ep->resp = resp;
4526 ep->destructor = destructor;
4527 ep->arg = arg;
4528 @@ -1907,43 +1850,20 @@ struct fc_seq *fc_exch_seq_send(struct fc_lport *lp,
4529 sp = &ep->seq;
4530 WARN_ON((sp->f_ctl & FC_FC_END_SEQ) != 0);
4531
4532 - fr_sof(fp) = ep->class;
4533 - if (sp->cnt)
4534 - fr_sof(fp) = fc_sof_normal(ep->class);
4535 - fr_eof(fp) = FC_EOF_T;
4536 - if (fc_sof_needs_ack(ep->class))
4537 - fr_eof(fp) = FC_EOF_N;
4538 -
4539 - fc_seq_fill_hdr(sp, fp);
4540 - /*
4541 - * Form f_ctl.
4542 - * The number of fill bytes to make the length a 4-byte multiple is
4543 - * the low order 2-bits of the f_ctl. The fill itself will have been
4544 - * cleared by the frame allocation.
4545 - * After this, the length will be even, as expected by the transport.
4546 - * Don't include the fill in the f_ctl saved in the sequence.
4547 - */
4548 - fill = fr_len(fp) & 3;
4549 - if (fill) {
4550 - fill = 4 - fill;
4551 - /* TODO, this may be a problem with fragmented skb */
4552 - skb_put(fp_skb(fp), fill);
4553 - }
4554 - f_ctl |= ep->f_ctl;
4555 - fh = fc_frame_header_get(fp);
4556 - hton24(fh->fh_f_ctl, f_ctl | fill);
4557 - fh->fh_seq_cnt = htons(sp->cnt++);
4558 ep->fh_type = fh->fh_type; /* save for possbile timeout handling */
4559 + ep->f_ctl = ntoh24(fh->fh_f_ctl);
4560 + fc_exch_setup_hdr(ep, fp, ep->f_ctl);
4561 + sp->cnt++;
4562
4563 if (unlikely(lp->tt.frame_send(lp, fp)))
4564 goto err;
4565
4566 if (timer_msec)
4567 fc_exch_timer_set_locked(ep, timer_msec);
4568 - sp->f_ctl = f_ctl; /* save for possible abort */
4569 ep->f_ctl &= ~FC_FC_FIRST_SEQ; /* not first seq */
4570 + sp->f_ctl = ep->f_ctl; /* save for possible abort */
4571
4572 - if (f_ctl & FC_FC_SEQ_INIT)
4573 + if (ep->f_ctl & FC_FC_SEQ_INIT)
4574 ep->esb_stat &= ~ESB_ST_SEQ_INIT;
4575 spin_unlock_bh(&ep->ex_lock);
4576 return sp;
4577 @@ -2032,11 +1952,6 @@ int fc_exch_init(struct fc_lport *lp)
4578 if (!lp->tt.seq_exch_abort)
4579 lp->tt.seq_exch_abort = fc_seq_exch_abort;
4580
4581 - if (!lp->tt.seq_get_xids)
4582 - lp->tt.seq_get_xids = fc_seq_get_xids;
4583 -
4584 - if (!lp->tt.seq_set_rec_data)
4585 - lp->tt.seq_set_rec_data = fc_seq_set_rec_data;
4586 return 0;
4587 }
4588 EXPORT_SYMBOL(fc_exch_init);
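
For orientation, the exchange-ID changes above split the xid space in two when a max_read boundary is configured (LRO/DDP offload): read-type frames draw xids from [min_xid, max_read], everything else from [max_read + 1, max_xid], with each sub-range advancing round-robin from the last value handed out and skipping xids still in use. A simplified standalone sketch of the round-robin part follows; the in-use check is omitted and the range values are invented for the example.

#include <stdint.h>
#include <stdio.h>

struct xid_range {
	uint16_t min, max, last;	/* last xid handed out */
};

/* Advance round-robin within [min, max], wrapping after max. */
static uint16_t next_xid(struct xid_range *r)
{
	r->last = (r->last == r->max) ? r->min : (uint16_t)(r->last + 1);
	return r->last;
}

int main(void)
{
	/* Example split: reads get the low sub-range, everything else the rest. */
	struct xid_range read_xids  = { 0x0001, 0x07ff, 0x0000 };
	struct xid_range other_xids = { 0x0800, 0x0fff, 0x07ff };

	printf("first read xid : 0x%04x\n", next_xid(&read_xids));
	printf("first other xid: 0x%04x\n", next_xid(&other_xids));
	return 0;
}
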
4589 diff --git a/drivers/scsi/libfc/fc_fcp.c b/drivers/scsi/libfc/fc_fcp.c
4590 index 01e84dc..04ced7f 100644
4591 --- a/drivers/scsi/libfc/fc_fcp.c
4592 +++ b/drivers/scsi/libfc/fc_fcp.c
4593 @@ -36,12 +36,12 @@
4594
4595 #include <scsi/fc/fc_fc2.h>
4596
4597 -#include <scsi/libfc/libfc.h>
4598 +#include <scsi/libfc.h>
4599 +#include <scsi/fc_encode.h>
4600
4601 MODULE_AUTHOR("Open-FCoE.org");
4602 MODULE_DESCRIPTION("libfc");
4603 MODULE_LICENSE("GPL");
4604 -MODULE_VERSION("1.0.3");
4605
4606 static int fc_fcp_debug;
4607
4608 @@ -388,15 +388,23 @@ crc_err:
4609 }
4610
4611 /*
4612 - * Send SCSI data to target.
4613 + * fc_fcp_send_data - Send SCSI data to target.
4614 + * @fsp: ptr to fc_fcp_pkt
4615 + * @sp: ptr to this sequence
4616 + * @seq: ptr to this sequence
4617 + * @seq_blen: the burst length for this data request
4618 + *
4619 * Called after receiving a Transfer Ready data descriptor.
4620 * if LLD is capable of seq offload then send down seq_blen
4621 * size of data in single frame, otherwise send multiple FC
4622 * frames of max FC frame payload supported by target port.
4623 + *
4624 + * Returns : 0 for success.
4625 */
4626 static int fc_fcp_send_data(struct fc_fcp_pkt *fsp, struct fc_seq *seq,
4627 size_t offset, size_t seq_blen)
4628 {
4629 + struct fc_exch *ep;
4630 struct scsi_cmnd *sc;
4631 struct scatterlist *sg;
4632 struct fc_frame *fp = NULL;
4633 @@ -405,7 +413,7 @@ static int fc_fcp_send_data(struct fc_fcp_pkt *fsp, struct fc_seq *seq,
4634 size_t t_blen;
4635 size_t tlen;
4636 size_t sg_bytes;
4637 - size_t frame_offset;
4638 + size_t frame_offset, fh_parm_offset;
4639 int error;
4640 void *data = NULL;
4641 void *page_addr;
4642 @@ -438,7 +446,7 @@ static int fc_fcp_send_data(struct fc_fcp_pkt *fsp, struct fc_seq *seq,
4643 sc = fsp->cmd;
4644
4645 remaining = seq_blen;
4646 - frame_offset = offset;
4647 + fh_parm_offset = frame_offset = offset;
4648 tlen = 0;
4649 seq = lp->tt.seq_start_next(seq);
4650 f_ctl = FC_FC_REL_OFF;
4651 @@ -501,8 +509,7 @@ static int fc_fcp_send_data(struct fc_fcp_pkt *fsp, struct fc_seq *seq,
4652 data = (void *)(fr_hdr(fp)) +
4653 sizeof(struct fc_frame_header);
4654 }
4655 - fc_frame_setup(fp, FC_RCTL_DD_SOL_DATA, FC_TYPE_FCP);
4656 - fc_frame_set_offset(fp, frame_offset);
4657 + fh_parm_offset = frame_offset;
4658 fr_max_payload(fp) = fsp->max_payload;
4659 }
4660 sg_bytes = min(tlen, sg->length - offset);
4661 @@ -539,28 +546,30 @@ static int fc_fcp_send_data(struct fc_fcp_pkt *fsp, struct fc_seq *seq,
4662 tlen -= sg_bytes;
4663 remaining -= sg_bytes;
4664
4665 - if (remaining == 0) {
4666 - /*
4667 - * Send a request sequence with
4668 - * transfer sequence initiative.
4669 - */
4670 - f_ctl |= FC_FC_SEQ_INIT | FC_FC_END_SEQ;
4671 - error = lp->tt.seq_send(lp, seq, fp, f_ctl);
4672 - } else if (tlen == 0) {
4673 - /*
4674 - * send fragment using for a sequence.
4675 - */
4676 - error = lp->tt.seq_send(lp, seq, fp, f_ctl);
4677 - } else {
4678 + if (tlen)
4679 continue;
4680 - }
4681 - fp = NULL;
4682
4683 + /*
4684 + * Send sequence with transfer sequence initiative in case
4685 + * this is the last FCP frame of the sequence.
4686 + */
4687 + if (remaining == 0)
4688 + f_ctl |= FC_FC_SEQ_INIT | FC_FC_END_SEQ;
4689 +
4690 + ep = fc_seq_exch(seq);
4691 + fc_fill_fc_hdr(fp, FC_RCTL_DD_SOL_DATA, ep->did, ep->sid,
4692 + FC_TYPE_FCP, f_ctl, fh_parm_offset);
4693 +
4694 + /*
4695 + * send this fragment on the sequence.
4696 + */
4697 + error = lp->tt.seq_send(lp, seq, fp);
4698 if (error) {
4699 WARN_ON(1); /* send error should be rare */
4700 fc_fcp_retry_cmd(fsp);
4701 return 0;
4702 }
4703 + fp = NULL;
4704 }
4705 fsp->xfer_len += seq_blen; /* premature count? */
4706 return 0;
4707 @@ -684,7 +693,7 @@ static void fc_fcp_recv(struct fc_seq *seq, struct fc_frame *fp, void *arg)
4708 (size_t) ntohl(dd->ft_data_ro),
4709 (size_t) ntohl(dd->ft_burst_len));
4710 if (!rc)
4711 - lp->tt.seq_set_rec_data(seq, fsp->xfer_len);
4712 + seq->rec_data = fsp->xfer_len;
4713 else if (rc == -ENOMEM)
4714 fsp->state |= FC_SRB_NOMEM;
4715 } else if (r_ctl == FC_RCTL_DD_SOL_DATA) {
4716 @@ -694,7 +703,7 @@ static void fc_fcp_recv(struct fc_seq *seq, struct fc_frame *fp, void *arg)
4717 */
4718 WARN_ON(fr_len(fp) < sizeof(*fh)); /* len may be 0 */
4719 fc_fcp_recv_data(fsp, fp);
4720 - lp->tt.seq_set_rec_data(seq, fsp->xfer_contig_end);
4721 + seq->rec_data = fsp->xfer_contig_end;
4722 } else if (r_ctl == FC_RCTL_DD_CMD_STATUS) {
4723 WARN_ON(fr_flags(fp) & FCPHF_CRC_UNCHECKED);
4724
4725 @@ -833,6 +842,7 @@ static void fc_fcp_complete_locked(struct fc_fcp_pkt *fsp)
4726 {
4727 struct fc_lport *lp = fsp->lp;
4728 struct fc_seq *seq;
4729 + struct fc_exch *ep;
4730 u32 f_ctl;
4731
4732 if (fsp->state & FC_SRB_ABORT_PENDING)
4733 @@ -864,11 +874,13 @@ static void fc_fcp_complete_locked(struct fc_fcp_pkt *fsp)
4734 csp = lp->tt.seq_start_next(seq);
4735 conf_frame = fc_frame_alloc(fsp->lp, 0);
4736 if (conf_frame) {
4737 - fc_frame_setup(conf_frame,
4738 - FC_RCTL_DD_SOL_CTL, FC_TYPE_FCP);
4739 f_ctl = FC_FC_SEQ_INIT;
4740 f_ctl |= FC_FC_LAST_SEQ | FC_FC_END_SEQ;
4741 - lp->tt.seq_send(lp, csp, conf_frame, f_ctl);
4742 + ep = fc_seq_exch(seq);
4743 + fc_fill_fc_hdr(conf_frame, FC_RCTL_DD_SOL_CTL,
4744 + ep->did, ep->sid,
4745 + FC_TYPE_FCP, f_ctl, 0);
4746 + lp->tt.seq_send(lp, csp, conf_frame);
4747 }
4748 }
4749 lp->tt.exch_done(seq);
4750 @@ -947,7 +959,7 @@ static void fc_fcp_abort_io(struct fc_lport *lp)
4751 * This is called by upper layer protocol.
4752 * Return : zero for success and -1 for failure
4753 * Context : called from queuecommand which can be called from process
4754 - * or scsi soft irq.
4755 + * or scsi soft irq.
4756 * Locks : called with the host lock and irqs disabled.
4757 */
4758 static int fc_fcp_pkt_send(struct fc_lport *lp, struct fc_fcp_pkt *fsp)
4759 @@ -995,18 +1007,16 @@ static int fc_fcp_cmd_send(struct fc_lport *lp, struct fc_fcp_pkt *fsp,
4760 }
4761
4762 memcpy(fc_frame_payload_get(fp, len), &fsp->cdb_cmd, len);
4763 - fc_frame_setup(fp, FC_RCTL_DD_UNSOL_CMD, FC_TYPE_FCP);
4764 - fc_frame_set_offset(fp, 0);
4765 + fr_cmd(fp) = fsp->cmd;
4766 rport = fsp->rport;
4767 fsp->max_payload = rport->maxframe_size;
4768 rp = rport->dd_data;
4769 - seq = lp->tt.exch_seq_send(lp, fp,
4770 - resp,
4771 - fc_fcp_pkt_destroy,
4772 - fsp, 0,
4773 - fc_host_port_id(rp->local_port->host),
4774 - rport->port_id,
4775 - FC_FC_SEQ_INIT | FC_FC_END_SEQ);
4776 +
4777 + fc_fill_fc_hdr(fp, FC_RCTL_DD_UNSOL_CMD, rport->port_id,
4778 + fc_host_port_id(rp->local_port->host), FC_TYPE_FCP,
4779 + FC_FC_FIRST_SEQ | FC_FC_END_SEQ | FC_FC_SEQ_INIT, 0);
4780 +
4781 + seq = lp->tt.exch_seq_send(lp, fp, resp, fc_fcp_pkt_destroy, fsp, 0);
4782 if (!seq) {
4783 fc_frame_free(fp);
4784 rc = -1;
4785 @@ -1018,8 +1028,8 @@ static int fc_fcp_cmd_send(struct fc_lport *lp, struct fc_fcp_pkt *fsp,
4786
4787 setup_timer(&fsp->timer, fc_fcp_timeout, (unsigned long)fsp);
4788 fc_fcp_timer_set(fsp,
4789 - (fsp->tgt_flags & FC_RP_FLAGS_REC_SUPPORTED) ?
4790 - FC_SCSI_REC_TOV : FC_SCSI_ER_TIMEOUT);
4791 + (fsp->tgt_flags & FC_RP_FLAGS_REC_SUPPORTED) ?
4792 + FC_SCSI_REC_TOV : FC_SCSI_ER_TIMEOUT);
4793 unlock:
4794 fc_fcp_unlock_pkt(fsp);
4795 return rc;
4796 @@ -1249,50 +1259,33 @@ unlock:
4797 static void fc_fcp_rec(struct fc_fcp_pkt *fsp)
4798 {
4799 struct fc_lport *lp;
4800 - struct fc_seq *seq;
4801 struct fc_frame *fp;
4802 - struct fc_els_rec *rec;
4803 struct fc_rport *rport;
4804 struct fc_rport_libfc_priv *rp;
4805 - u16 ox_id;
4806 - u16 rx_id;
4807
4808 lp = fsp->lp;
4809 rport = fsp->rport;
4810 rp = rport->dd_data;
4811 - seq = fsp->seq_ptr;
4812 - if (!seq || rp->rp_state != RPORT_ST_READY) {
4813 + if (!fsp->seq_ptr || rp->rp_state != RPORT_ST_READY) {
4814 fsp->status_code = FC_HRD_ERROR;
4815 fsp->io_status = SUGGEST_RETRY << 24;
4816 fc_fcp_complete_locked(fsp);
4817 return;
4818 }
4819 - lp->tt.seq_get_xids(seq, &ox_id, &rx_id);
4820 - fp = fc_frame_alloc(lp, sizeof(*rec));
4821 + fp = fc_frame_alloc(lp, sizeof(struct fc_els_rec));
4822 if (!fp)
4823 goto retry;
4824
4825 - rec = fc_frame_payload_get(fp, sizeof(*rec));
4826 - memset(rec, 0, sizeof(*rec));
4827 - rec->rec_cmd = ELS_REC;
4828 - hton24(rec->rec_s_id, fc_host_port_id(lp->host));
4829 - rec->rec_ox_id = htons(ox_id);
4830 - rec->rec_rx_id = htons(rx_id);
4831 -
4832 - fc_frame_setup(fp, FC_RCTL_ELS_REQ, FC_TYPE_ELS);
4833 - fc_frame_set_offset(fp, 0);
4834 - seq = lp->tt.exch_seq_send(lp, fp,
4835 - fc_fcp_rec_resp, NULL,
4836 - fsp, jiffies_to_msecs(FC_SCSI_REC_TOV),
4837 - fc_host_port_id(rp->local_port->host),
4838 - rport->port_id,
4839 - FC_FC_SEQ_INIT | FC_FC_END_SEQ);
4840 -
4841 - if (seq) {
4842 + fr_seq(fp) = fsp->seq_ptr;
4843 + fc_fill_fc_hdr(fp, FC_RCTL_ELS_REQ, rport->port_id,
4844 + fc_host_port_id(rp->local_port->host), FC_TYPE_ELS,
4845 + FC_FC_FIRST_SEQ | FC_FC_END_SEQ | FC_FC_SEQ_INIT, 0);
4846 + if (lp->tt.elsct_send(lp, rport, fp, ELS_REC, fc_fcp_rec_resp,
4847 + fsp, jiffies_to_msecs(FC_SCSI_REC_TOV))) {
4848 fc_fcp_pkt_hold(fsp); /* hold while REC outstanding */
4849 return;
4850 - } else
4851 - fc_frame_free(fp);
4852 + }
4853 + fc_frame_free(fp);
4854 retry:
4855 if (fsp->recov_retry++ < FC_MAX_RECOV_RETRY)
4856 fc_fcp_timer_set(fsp, FC_SCSI_REC_TOV);
4857 @@ -1510,17 +1503,15 @@ static void fc_fcp_srr(struct fc_fcp_pkt *fsp, enum fc_rctl r_ctl, u32 offset)
4858 struct fc_lport *lp = fsp->lp;
4859 struct fc_rport *rport;
4860 struct fc_rport_libfc_priv *rp;
4861 + struct fc_exch *ep = fc_seq_exch(fsp->seq_ptr);
4862 struct fc_seq *seq;
4863 struct fcp_srr *srr;
4864 struct fc_frame *fp;
4865 u8 cdb_op;
4866 - u16 ox_id;
4867 - u16 rx_id;
4868
4869 rport = fsp->rport;
4870 rp = rport->dd_data;
4871 cdb_op = fsp->cdb_cmd.fc_cdb[0];
4872 - lp->tt.seq_get_xids(fsp->seq_ptr, &ox_id, &rx_id);
4873
4874 if (!(rp->flags & FC_RP_FLAGS_RETRY) || rp->rp_state != RPORT_ST_READY)
4875 goto retry; /* shouldn't happen */
4876 @@ -1531,19 +1522,17 @@ static void fc_fcp_srr(struct fc_fcp_pkt *fsp, enum fc_rctl r_ctl, u32 offset)
4877 srr = fc_frame_payload_get(fp, sizeof(*srr));
4878 memset(srr, 0, sizeof(*srr));
4879 srr->srr_op = ELS_SRR;
4880 - srr->srr_ox_id = htons(ox_id);
4881 - srr->srr_rx_id = htons(rx_id);
4882 + srr->srr_ox_id = htons(ep->oxid);
4883 + srr->srr_rx_id = htons(ep->rxid);
4884 srr->srr_r_ctl = r_ctl;
4885 srr->srr_rel_off = htonl(offset);
4886
4887 - fc_frame_setup(fp, FC_RCTL_ELS4_REQ, FC_TYPE_FCP);
4888 - fc_frame_set_offset(fp, 0);
4889 - seq = lp->tt.exch_seq_send(lp, fp,
4890 - fc_fcp_srr_resp, NULL,
4891 - fsp, jiffies_to_msecs(FC_SCSI_REC_TOV),
4892 - fc_host_port_id(rp->local_port->host),
4893 - rport->port_id,
4894 - FC_FC_SEQ_INIT | FC_FC_END_SEQ);
4895 + fc_fill_fc_hdr(fp, FC_RCTL_ELS4_REQ, rport->port_id,
4896 + fc_host_port_id(rp->local_port->host), FC_TYPE_FCP,
4897 + FC_FC_FIRST_SEQ | FC_FC_END_SEQ | FC_FC_SEQ_INIT, 0);
4898 +
4899 + seq = lp->tt.exch_seq_send(lp, fp, fc_fcp_srr_resp, NULL,
4900 + fsp, jiffies_to_msecs(FC_SCSI_REC_TOV));
4901 if (!seq) {
4902 fc_frame_free(fp);
4903 goto retry;
4904 @@ -1565,8 +1554,6 @@ static void fc_fcp_srr_resp(struct fc_seq *seq, struct fc_frame *fp, void *arg)
4905 {
4906 struct fc_fcp_pkt *fsp = arg;
4907 struct fc_frame_header *fh;
4908 - u16 ox_id;
4909 - u16 rx_id;
4910
4911 if (IS_ERR(fp)) {
4912 fc_fcp_srr_error(fsp, fp);
4913 @@ -1590,8 +1577,6 @@ static void fc_fcp_srr_resp(struct fc_seq *seq, struct fc_frame *fp, void *arg)
4914 }
4915
4916 fsp->recov_seq = NULL;
4917 -
4918 - fsp->lp->tt.seq_get_xids(fsp->seq_ptr, &ox_id, &rx_id);
4919 switch (fc_frame_payload_op(fp)) {
4920 case ELS_LS_ACC:
4921 fsp->recov_retry = 0;
4922 @@ -2007,7 +1992,7 @@ int fc_eh_host_reset(struct scsi_cmnd *sc_cmd)
4923 return SUCCESS;
4924 } else {
4925 shost_printk(KERN_INFO, shost, "Host reset failed. "
4926 - "lport not ready.\n");
4927 + "lport not ready.\n");
4928 return FAILED;
4929 }
4930 }
4931 diff --git a/drivers/scsi/libfc/fc_frame.c b/drivers/scsi/libfc/fc_frame.c
4932 index 388dc6c..0bbeff2 100644
4933 --- a/drivers/scsi/libfc/fc_frame.c
4934 +++ b/drivers/scsi/libfc/fc_frame.c
4935 @@ -25,7 +25,7 @@
4936 #include <linux/skbuff.h>
4937 #include <linux/crc32.h>
4938
4939 -#include <scsi/libfc/fc_frame.h>
4940 +#include <scsi/fc_frame.h>
4941
4942 /*
4943 * Check the CRC in a frame.
4944 @@ -82,7 +82,8 @@ struct fc_frame *fc_frame_alloc_fill(struct fc_lport *lp, size_t payload_len)
4945 if (fp) {
4946 memset((char *) fr_hdr(fp) + payload_len, 0, fill);
4947 /* trim is OK, we just allocated it so there are no fragments */
4948 - skb_trim(fp_skb(fp), payload_len + sizeof(struct fc_frame_header));
4949 + skb_trim(fp_skb(fp),
4950 + payload_len + sizeof(struct fc_frame_header));
4951 }
4952 return fp;
4953 }
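
As background for the fill handling in fc_exch_setup_hdr() above and the padding done by fc_frame_alloc_fill(): FC frame payloads are padded out to a 4-byte multiple, and the number of pad bytes is carried in the low two bits of F_CTL so the receiver can strip them again. A small standalone illustration of that convention (the F_CTL flag value is arbitrary):

#include <stdio.h>

int main(void)
{
	unsigned int f_ctl = 0x290000;	/* arbitrary example flag bits */
	unsigned int len;

	for (len = 17; len <= 20; len++) {
		/* pad the payload up to a 4-byte multiple */
		unsigned int fill = (len & 3) ? 4 - (len & 3) : 0;

		printf("payload %2u -> %u fill byte(s), f_ctl 0x%06x\n",
		       len, fill, f_ctl | fill);
	}
	return 0;
}
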
4954 diff --git a/drivers/scsi/libfc/fc_lport.c b/drivers/scsi/libfc/fc_lport.c
4955 index 7e7c060..083d57b 100644
4956 --- a/drivers/scsi/libfc/fc_lport.c
4957 +++ b/drivers/scsi/libfc/fc_lport.c
4958 @@ -78,7 +78,8 @@
4959
4960 #include <scsi/fc/fc_gs.h>
4961
4962 -#include <scsi/libfc/libfc.h>
4963 +#include <scsi/libfc.h>
4964 +#include <scsi/fc_encode.h>
4965
4966 /* Fabric IDs to use for point-to-point mode, chosen on whims. */
4967 #define FC_LOCAL_PTP_FID_LO 0x010101
4968 @@ -124,71 +125,59 @@ static int fc_frame_drop(struct fc_lport *lport, struct fc_frame *fp)
4969 }
4970
4971 /**
4972 - * fc_lport_lookup_rport - lookup a remote port by port_id
4973 - * @lport: Fibre Channel host port instance
4974 - * @port_id: remote port port_id to match
4975 - */
4976 -struct fc_rport *fc_lport_lookup_rport(const struct fc_lport *lport,
4977 - u32 port_id)
4978 -{
4979 - struct fc_rport *rport, *found;
4980 - struct fc_rport_libfc_priv *rdata;
4981 -
4982 - found = NULL;
4983 -
4984 - list_for_each_entry(rdata, &lport->rports, peers) {
4985 - rport = PRIV_TO_RPORT(rdata);
4986 - if (rport->port_id == port_id) {
4987 - found = rport;
4988 - get_device(&found->dev);
4989 - break;
4990 - }
4991 - }
4992 - return found;
4993 -}
4994 -
4995 -
4996 -
4997 -/**
4998 * fc_lport_rport_event - Event handler for rport events
4999 * @lport: The lport which is receiving the event
5000 * @rport: The rport which the event has occured on
5001 * @event: The event that occured
5002 *
5003 * Locking Note: The rport lock should not be held when calling
5004 - * this function.
5005 + * this function.
5006 */
5007 static void fc_lport_rport_event(struct fc_lport *lport,
5008 struct fc_rport *rport,
5009 enum fc_lport_event event)
5010 {
5011 - struct fc_rport_libfc_priv *rdata = rport->dd_data;
5012 -
5013 FC_DEBUG_LPORT("Received a %d event for port (%6x)\n", event,
5014 rport->port_id);
5015
5016 - mutex_lock(&lport->lp_mutex);
5017 switch (event) {
5018 - case LPORT_EV_RPORT_CREATED:
5019 + case RPORT_EV_CREATED:
5020 if (rport->port_id == FC_FID_DIR_SERV) {
5021 - lport->dns_rp = rport;
5022 - fc_lport_enter_rpn_id(lport);
5023 - } else {
5024 - list_add_tail(&rdata->peers, &lport->rports);
5025 - }
5026 + mutex_lock(&lport->lp_mutex);
5027 + if (lport->state == LPORT_ST_DNS) {
5028 + lport->dns_rp = rport;
5029 + fc_lport_enter_rpn_id(lport);
5030 + } else {
5031 + FC_DEBUG_LPORT("Received a CREATED event on "
5032 + "port (%6x) for the directory "
5033 + "server, but the lport is not "
5034 + "in the DNS state, it's in the "
5035 + "%d state", rport->port_id,
5036 + lport->state);
5037 + lport->tt.rport_logoff(rport);
5038 + }
5039 + mutex_unlock(&lport->lp_mutex);
5040 + } else
5041 + FC_DEBUG_LPORT("Received an event for port (%6x) "
5042 + "which is not the directory server\n",
5043 + rport->port_id);
5044 break;
5045 - case LPORT_EV_RPORT_LOGO:
5046 - case LPORT_EV_RPORT_FAILED:
5047 - case LPORT_EV_RPORT_STOP:
5048 - if (rport->port_id == FC_FID_DIR_SERV)
5049 + case RPORT_EV_LOGO:
5050 + case RPORT_EV_FAILED:
5051 + case RPORT_EV_STOP:
5052 + if (rport->port_id == FC_FID_DIR_SERV) {
5053 + mutex_lock(&lport->lp_mutex);
5054 lport->dns_rp = NULL;
5055 - else
5056 - list_del(&rdata->peers);
5057 + mutex_unlock(&lport->lp_mutex);
5058 +
5059 + } else
5060 + FC_DEBUG_LPORT("Received an event for port (%6x) "
5061 + "which is not the directory server\n",
5062 + rport->port_id);
5063 break;
5064 - case LPORT_EV_RPORT_NONE:
5065 + case RPORT_EV_NONE:
5066 break;
5067 }
5068 - mutex_unlock(&lport->lp_mutex);
5069 }
5070
5071 /**
5072 @@ -225,7 +214,7 @@ static void fc_lport_ptp_setup(struct fc_lport *lport,
5073 dp.ids.roles = FC_RPORT_ROLE_UNKNOWN;
5074
5075 if (lport->ptp_rp) {
5076 - lport->tt.rport_stop(lport->ptp_rp);
5077 + lport->tt.rport_logoff(lport->ptp_rp);
5078 lport->ptp_rp = NULL;
5079 }
5080
5081 @@ -379,6 +368,7 @@ static void fc_lport_recv_echo_req(struct fc_seq *sp, struct fc_frame *in_fp,
5082 struct fc_lport *lport)
5083 {
5084 struct fc_frame *fp;
5085 + struct fc_exch *ep = fc_seq_exch(sp);
5086 unsigned int len;
5087 void *pp;
5088 void *dp;
5089 @@ -399,9 +389,10 @@ static void fc_lport_recv_echo_req(struct fc_seq *sp, struct fc_frame *in_fp,
5090 memcpy(dp, pp, len);
5091 *((u32 *)dp) = htonl(ELS_LS_ACC << 24);
5092 sp = lport->tt.seq_start_next(sp);
5093 - f_ctl = FC_FC_LAST_SEQ | FC_FC_END_SEQ;
5094 - fc_frame_setup(fp, FC_RCTL_ELS_REP, FC_TYPE_ELS);
5095 - lport->tt.seq_send(lport, sp, fp, f_ctl);
5096 + f_ctl = FC_FC_EX_CTX | FC_FC_LAST_SEQ | FC_FC_END_SEQ;
5097 + fc_fill_fc_hdr(fp, FC_RCTL_ELS_REP, ep->did, ep->sid,
5098 + FC_TYPE_ELS, f_ctl, 0);
5099 + lport->tt.seq_send(lport, sp, fp);
5100 }
5101 fc_frame_free(in_fp);
5102 }
5103 @@ -419,6 +410,7 @@ static void fc_lport_recv_rnid_req(struct fc_seq *sp, struct fc_frame *in_fp,
5104 struct fc_lport *lport)
5105 {
5106 struct fc_frame *fp;
5107 + struct fc_exch *ep = fc_seq_exch(sp);
5108 struct fc_els_rnid *req;
5109 struct {
5110 struct fc_els_rnid_resp rnid;
5111 @@ -462,9 +454,11 @@ static void fc_lport_recv_rnid_req(struct fc_seq *sp, struct fc_frame *in_fp,
5112 sizeof(rp->gen));
5113 }
5114 sp = lport->tt.seq_start_next(sp);
5115 - f_ctl = FC_FC_SEQ_INIT | FC_FC_LAST_SEQ | FC_FC_END_SEQ;
5116 - fc_frame_setup(fp, FC_RCTL_ELS_REP, FC_TYPE_ELS);
5117 - lport->tt.seq_send(lport, sp, fp, f_ctl);
5118 + f_ctl = FC_FC_EX_CTX | FC_FC_LAST_SEQ;
5119 + f_ctl |= FC_FC_END_SEQ | FC_FC_SEQ_INIT;
5120 + fc_fill_fc_hdr(fp, FC_RCTL_ELS_REP, ep->did, ep->sid,
5121 + FC_TYPE_ELS, f_ctl, 0);
5122 + lport->tt.seq_send(lport, sp, fp);
5123 }
5124 }
5125 fc_frame_free(in_fp);
5126 @@ -492,7 +486,7 @@ static void fc_lport_recv_logo_req(struct fc_seq *sp, struct fc_frame *fp,
5127 * @lport: The lport that should log into the fabric
5128 *
5129 * Locking Note: This function should not be called
5130 - * with the lport lock held.
5131 + * with the lport lock held.
5132 */
5133 int fc_fabric_login(struct fc_lport *lport)
5134 {
5135 @@ -515,6 +509,9 @@ EXPORT_SYMBOL(fc_fabric_login);
5136 */
5137 void fc_linkup(struct fc_lport *lport)
5138 {
5139 + FC_DEBUG_LPORT("Link is up for port (%6x)\n",
5140 + fc_host_port_id(lport->host));
5141 +
5142 mutex_lock(&lport->lp_mutex);
5143 if ((lport->link_status & FC_LINK_UP) != FC_LINK_UP) {
5144 lport->link_status |= FC_LINK_UP;
5145 @@ -533,13 +530,14 @@ EXPORT_SYMBOL(fc_linkup);
5146 void fc_linkdown(struct fc_lport *lport)
5147 {
5148 mutex_lock(&lport->lp_mutex);
5149 + FC_DEBUG_LPORT("Link is down for port (%6x)\n",
5150 + fc_host_port_id(lport->host));
5151
5152 if ((lport->link_status & FC_LINK_UP) == FC_LINK_UP) {
5153 lport->link_status &= ~(FC_LINK_UP);
5154 fc_lport_enter_reset(lport);
5155 lport->tt.fcp_cleanup(lport);
5156 }
5157 -
5158 mutex_unlock(&lport->lp_mutex);
5159 }
5160 EXPORT_SYMBOL(fc_linkdown);
5161 @@ -577,9 +575,9 @@ EXPORT_SYMBOL(fc_unpause);
5162 **/
5163 int fc_fabric_logoff(struct fc_lport *lport)
5164 {
5165 + lport->tt.disc_stop_final(lport);
5166 mutex_lock(&lport->lp_mutex);
5167 fc_lport_enter_logo(lport);
5168 - lport->tt.fcp_cleanup(lport);
5169 mutex_unlock(&lport->lp_mutex);
5170 return 0;
5171 }
5172 @@ -599,9 +597,8 @@ EXPORT_SYMBOL(fc_fabric_logoff);
5173 **/
5174 int fc_lport_destroy(struct fc_lport *lport)
5175 {
5176 - cancel_delayed_work_sync(&lport->disc_work);
5177 - lport->tt.fcp_abort_io(lport);
5178 lport->tt.frame_send = fc_frame_drop;
5179 + lport->tt.fcp_abort_io(lport);
5180 lport->tt.exch_mgr_reset(lport->emp, 0, 0);
5181 return 0;
5182 }
5183 @@ -626,10 +623,8 @@ int fc_set_mfs(struct fc_lport *lport, u32 mfs)
5184 rc = 0;
5185 }
5186
5187 - if (!rc && mfs < old_mfs) {
5188 - lport->disc_done = 0;
5189 + if (!rc && mfs < old_mfs)
5190 fc_lport_enter_reset(lport);
5191 - }
5192
5193 mutex_unlock(&lport->lp_mutex);
5194
5195 @@ -638,6 +633,31 @@ int fc_set_mfs(struct fc_lport *lport, u32 mfs)
5196 EXPORT_SYMBOL(fc_set_mfs);
5197
5198 /**
5199 + * fc_lport_disc_callback - Callback for discovery events
5200 + * @lport: FC local port
5201 + * @event: The discovery event
5202 + */
5203 +void fc_lport_disc_callback(struct fc_lport *lport, enum fc_disc_event event)
5204 +{
5205 + switch (event) {
5206 + case DISC_EV_SUCCESS:
5207 + FC_DEBUG_LPORT("Got a SUCCESS event for port (%6x)\n",
5208 + fc_host_port_id(lport->host));
5209 + break;
5210 + case DISC_EV_FAILED:
5211 + FC_DEBUG_LPORT("Got a FAILED event for port (%6x)\n",
5212 + fc_host_port_id(lport->host));
5213 + mutex_lock(&lport->lp_mutex);
5214 + fc_lport_enter_reset(lport);
5215 + mutex_unlock(&lport->lp_mutex);
5216 + break;
5217 + case DISC_EV_NONE:
5218 + WARN_ON(1);
5219 + break;
5220 + }
5221 +}
5222 +
5223 +/**
5224 * fc_rport_enter_ready - Enter the ready state and start discovery
5225 * @lport: Fibre Channel local port that is ready
5226 *
5227 @@ -651,7 +671,7 @@ static void fc_lport_enter_ready(struct fc_lport *lport)
5228
5229 fc_lport_state_enter(lport, LPORT_ST_READY);
5230
5231 - lport->tt.disc_start(lport);
5232 + lport->tt.disc_start(fc_lport_disc_callback, lport);
5233 }
5234
5235 /**
5236 @@ -674,6 +694,7 @@ static void fc_lport_recv_flogi_req(struct fc_seq *sp_in,
5237 struct fc_frame *fp;
5238 struct fc_frame_header *fh;
5239 struct fc_seq *sp;
5240 + struct fc_exch *ep;
5241 struct fc_els_flogi *flp;
5242 struct fc_els_flogi *new_flp;
5243 u64 remote_wwpn;
5244 @@ -724,9 +745,11 @@ static void fc_lport_recv_flogi_req(struct fc_seq *sp_in,
5245 * Send the response. If this fails, the originator should
5246 * repeat the sequence.
5247 */
5248 - f_ctl = FC_FC_LAST_SEQ | FC_FC_END_SEQ;
5249 - fc_frame_setup(fp, FC_RCTL_ELS_REP, FC_TYPE_ELS);
5250 - lport->tt.seq_send(lport, sp, fp, f_ctl);
5251 + f_ctl = FC_FC_EX_CTX | FC_FC_LAST_SEQ | FC_FC_END_SEQ;
5252 + ep = fc_seq_exch(sp);
5253 + fc_fill_fc_hdr(fp, FC_RCTL_ELS_REP, ep->did, ep->sid,
5254 + FC_TYPE_ELS, f_ctl, 0);
5255 + lport->tt.seq_send(lport, sp, fp);
5256
5257 } else {
5258 fc_lport_error(lport, fp);
5259 @@ -734,8 +757,8 @@ static void fc_lport_recv_flogi_req(struct fc_seq *sp_in,
5260 fc_lport_ptp_setup(lport, remote_fid, remote_wwpn,
5261 get_unaligned_be64(&flp->fl_wwnn));
5262
5263 - if (lport->tt.disc_start(lport))
5264 - FC_DBG("target discovery start error\n");
5265 + lport->tt.disc_start(fc_lport_disc_callback, lport);
5266 +
5267 out:
5268 sp = fr_seq(rx_fp);
5269 fc_frame_free(rx_fp);
5270 @@ -751,7 +774,7 @@ out:
5271 * if an rport should handle the request.
5272 *
5273 * Locking Note: This function should not be called with the lport
5274 - * lock held becuase it will grab the lock.
5275 + * lock held because it will grab the lock.
5276 */
5277 static void fc_lport_recv_req(struct fc_lport *lport, struct fc_seq *sp,
5278 struct fc_frame *fp)
5279 @@ -808,7 +831,7 @@ static void fc_lport_recv_req(struct fc_lport *lport, struct fc_seq *sp,
5280 s_id = ntoh24(fh->fh_s_id);
5281 d_id = ntoh24(fh->fh_d_id);
5282
5283 - rport = fc_lport_lookup_rport(lport, s_id);
5284 + rport = lport->tt.rport_lookup(lport, s_id);
5285 if (rport) {
5286 lport->tt.rport_recv_req(sp, fp, rport);
5287 put_device(&rport->dev); /* hold from lookup */
5288 @@ -840,7 +863,7 @@ static void fc_lport_recv_req(struct fc_lport *lport, struct fc_seq *sp,
5289 * @lport: The lport which should be reset
5290 *
5291 * Locking Note: This functions should not be called with the
5292 - * lport lock held.
5293 + * lport lock held.
5294 */
5295 int fc_lport_reset(struct fc_lport *lport)
5296 {
5297 @@ -852,24 +875,6 @@ int fc_lport_reset(struct fc_lport *lport)
5298 EXPORT_SYMBOL(fc_lport_reset);
5299
5300 /**
5301 - * fc_lport_stop_rports - delete all the remote ports associated with the lport
5302 - * @lport: libfc local port instance
5303 - *
5304 - * Locking Note: This function expects that the lport mutex is locked before
5305 - * calling it.
5306 - */
5307 -void fc_lport_stop_rports(struct fc_lport *lport)
5308 -{
5309 - struct fc_rport *rport;
5310 - struct fc_rport_libfc_priv *rdata;
5311 -
5312 - list_for_each_entry(rdata, &lport->rports, peers) {
5313 - rport = PRIV_TO_RPORT(rdata);
5314 - lport->tt.rport_stop(rport);
5315 - }
5316 -}
5317 -
5318 -/**
5319 * fc_rport_enter_reset - Reset the local port
5320 * @lport: Fibre Channel local port to be reset
5321 *
5322 @@ -883,17 +888,15 @@ static void fc_lport_enter_reset(struct fc_lport *lport)
5323
5324 fc_lport_state_enter(lport, LPORT_ST_RESET);
5325
5326 - if (lport->dns_rp) {
5327 - lport->tt.rport_stop(lport->dns_rp);
5328 - lport->dns_rp = NULL;
5329 - }
5330 + if (lport->dns_rp)
5331 + lport->tt.rport_logoff(lport->dns_rp);
5332
5333 if (lport->ptp_rp) {
5334 - lport->tt.rport_stop(lport->ptp_rp);
5335 + lport->tt.rport_logoff(lport->ptp_rp);
5336 lport->ptp_rp = NULL;
5337 }
5338
5339 - fc_lport_stop_rports(lport);
5340 + lport->tt.disc_stop(lport);
5341
5342 lport->tt.exch_mgr_reset(lport->emp, 0, 0);
5343 fc_host_fabric_name(lport->host) = 0;
5344 @@ -952,7 +955,7 @@ static void fc_lport_error(struct fc_lport *lport, struct fc_frame *fp)
5345
5346 /**
5347 * fc_lport_rft_id_resp - Handle response to Register Fibre
5348 - * Channel Types by ID (RPN_ID) request
5349 + * Channel Types by ID (RFT_ID) request
5350 * @sp: current sequence in RPN_ID exchange
5351 * @fp: response frame
5352 * @lp_arg: Fibre Channel host port instance
5353 @@ -1004,7 +1007,7 @@ err:
5354
5355 /**
5356 * fc_lport_rpn_id_resp - Handle response to Register Port
5357 - * Name by ID (RPN_ID) request
5358 + * Name by ID (RPN_ID) request
5359 * @sp: current sequence in RPN_ID exchange
5360 * @fp: response frame
5361 * @lp_arg: Fibre Channel host port instance
5362 @@ -1110,32 +1113,20 @@ err:
5363 static void fc_lport_enter_scr(struct fc_lport *lport)
5364 {
5365 struct fc_frame *fp;
5366 - struct fc_els_scr *scr;
5367
5368 FC_DEBUG_LPORT("Port (%6x) entered SCR state from %s state\n",
5369 fc_host_port_id(lport->host), fc_lport_state(lport));
5370
5371 fc_lport_state_enter(lport, LPORT_ST_SCR);
5372
5373 - fp = fc_frame_alloc(lport, sizeof(*scr));
5374 + fp = fc_frame_alloc(lport, sizeof(struct fc_els_scr));
5375 if (!fp) {
5376 fc_lport_error(lport, fp);
5377 return;
5378 }
5379
5380 - scr = fc_frame_payload_get(fp, sizeof(*scr));
5381 - memset(scr, 0, sizeof(*scr));
5382 - scr->scr_cmd = ELS_SCR;
5383 - scr->scr_reg_func = ELS_SCRF_FULL;
5384 - fc_frame_setup(fp, FC_RCTL_ELS_REQ, FC_TYPE_ELS);
5385 - fc_frame_set_offset(fp, 0);
5386 -
5387 - if (!lport->tt.exch_seq_send(lport, fp,
5388 - fc_lport_scr_resp, NULL,
5389 - lport, lport->e_d_tov,
5390 - fc_host_port_id(lport->host),
5391 - FC_FID_FCTRL,
5392 - FC_FC_SEQ_INIT | FC_FC_END_SEQ))
5393 + if (!lport->tt.elsct_send(lport, NULL, fp, ELS_SCR,
5394 + fc_lport_scr_resp, lport, lport->e_d_tov))
5395 fc_lport_error(lport, fp);
5396 }
5397
5398 @@ -1149,11 +1140,6 @@ static void fc_lport_enter_scr(struct fc_lport *lport)
5399 static void fc_lport_enter_rft_id(struct fc_lport *lport)
5400 {
5401 struct fc_frame *fp;
5402 - struct req {
5403 - struct fc_ct_hdr ct;
5404 - struct fc_ns_fid fid; /* port ID object */
5405 - struct fc_ns_fts fts; /* FC4-types object */
5406 - } *req;
5407 struct fc_ns_fts *lps;
5408 int i;
5409
5410 @@ -1170,31 +1156,20 @@ static void fc_lport_enter_rft_id(struct fc_lport *lport)
5411 if (i < 0) {
5412 /* nothing to register, move on to SCR */
5413 fc_lport_enter_scr(lport);
5414 - } else {
5415 - fp = fc_frame_alloc(lport, sizeof(*req));
5416 - if (!fp) {
5417 - fc_lport_error(lport, fp);
5418 - return;
5419 - }
5420 + return;
5421 + }
5422
5423 - req = fc_frame_payload_get(fp, sizeof(*req));
5424 - fc_fill_dns_hdr(lport, &req->ct,
5425 - FC_NS_RFT_ID,
5426 - sizeof(*req) -
5427 - sizeof(struct fc_ct_hdr));
5428 - hton24(req->fid.fp_fid, fc_host_port_id(lport->host));
5429 - req->fts = *lps;
5430 - fc_frame_setup(fp, FC_RCTL_DD_UNSOL_CTL, FC_TYPE_CT);
5431 -
5432 - if (!lport->tt.exch_seq_send(lport, fp,
5433 - fc_lport_rft_id_resp, NULL,
5434 - lport, lport->e_d_tov,
5435 - fc_host_port_id(lport->host),
5436 - FC_FID_DIR_SERV,
5437 - FC_FC_SEQ_INIT |
5438 - FC_FC_END_SEQ))
5439 - fc_lport_error(lport, fp);
5440 + fp = fc_frame_alloc(lport, sizeof(struct fc_ct_hdr) +
5441 + sizeof(struct fc_ns_rft));
5442 + if (!fp) {
5443 + fc_lport_error(lport, fp);
5444 + return;
5445 }
5446 +
5447 + if (!lport->tt.elsct_send(lport, NULL, fp, FC_NS_RFT_ID,
5448 + fc_lport_rft_id_resp,
5449 + lport, lport->e_d_tov))
5450 + fc_lport_error(lport, fp);
5451 }
5452
5453 /**
5454 @@ -1207,37 +1182,23 @@ static void fc_lport_enter_rft_id(struct fc_lport *lport)
5455 static void fc_lport_enter_rpn_id(struct fc_lport *lport)
5456 {
5457 struct fc_frame *fp;
5458 - struct req {
5459 - struct fc_ct_hdr ct;
5460 - struct fc_ns_rn_id rn;
5461 - } *req;
5462
5463 FC_DEBUG_LPORT("Port (%6x) entered RPN_ID state from %s state\n",
5464 fc_host_port_id(lport->host), fc_lport_state(lport));
5465
5466 fc_lport_state_enter(lport, LPORT_ST_RPN_ID);
5467
5468 - fp = fc_frame_alloc(lport, sizeof(*req));
5469 + fp = fc_frame_alloc(lport, sizeof(struct fc_ct_hdr) +
5470 + sizeof(struct fc_ns_rn_id));
5471 if (!fp) {
5472 fc_lport_error(lport, fp);
5473 return;
5474 }
5475
5476 - req = fc_frame_payload_get(fp, sizeof(*req));
5477 - memset(req, 0, sizeof(*req));
5478 - fc_fill_dns_hdr(lport, &req->ct, FC_NS_RPN_ID, sizeof(req->rn));
5479 - hton24(req->rn.fr_fid.fp_fid, fc_host_port_id(lport->host));
5480 - put_unaligned_be64(lport->wwpn, &req->rn.fr_wwn);
5481 - fc_frame_setup(fp, FC_RCTL_DD_UNSOL_CTL, FC_TYPE_CT);
5482 -
5483 - if (!lport->tt.exch_seq_send(lport, fp,
5484 - fc_lport_rpn_id_resp, NULL,
5485 - lport, lport->e_d_tov,
5486 - fc_host_port_id(lport->host),
5487 - FC_FID_DIR_SERV,
5488 - FC_FC_SEQ_INIT | FC_FC_END_SEQ))
5489 + if (!lport->tt.elsct_send(lport, NULL, fp, FC_NS_RPN_ID,
5490 + fc_lport_rpn_id_resp,
5491 + lport, lport->e_d_tov))
5492 fc_lport_error(lport, fp);
5493 -
5494 }
5495
5496 /**
5497 @@ -1264,16 +1225,10 @@ static void fc_lport_enter_dns(struct fc_lport *lport)
5498
5499 fc_lport_state_enter(lport, LPORT_ST_DNS);
5500
5501 - if (!lport->dns_rp) {
5502 - /* Set up a rogue rport to directory server */
5503 - rport = fc_rport_rogue_create(&dp);
5504 -
5505 - if (!rport)
5506 - goto err;
5507 - lport->dns_rp = rport;
5508 - }
5509 + rport = fc_rport_rogue_create(&dp);
5510 + if (!rport)
5511 + goto err;
5512
5513 - rport = lport->dns_rp;
5514 rdata = rport->dd_data;
5515 rdata->event_callback = fc_lport_rport_event;
5516 lport->tt.rport_login(rport);
5517 @@ -1388,10 +1343,8 @@ static void fc_lport_enter_logo(struct fc_lport *lport)
5518 fc_lport_state_enter(lport, LPORT_ST_LOGO);
5519
5520 /* DNS session should be closed so we can release it here */
5521 - if (lport->dns_rp) {
5522 - lport->tt.rport_logout(lport->dns_rp);
5523 - lport->dns_rp = NULL;
5524 - }
5525 + if (lport->dns_rp)
5526 + lport->tt.rport_logoff(lport->dns_rp);
5527
5528 fp = fc_frame_alloc(lport, sizeof(*logo));
5529 if (!fp) {
5530 @@ -1399,19 +1352,8 @@ static void fc_lport_enter_logo(struct fc_lport *lport)
5531 return;
5532 }
5533
5534 - logo = fc_frame_payload_get(fp, sizeof(*logo));
5535 - memset(logo, 0, sizeof(*logo));
5536 - logo->fl_cmd = ELS_LOGO;
5537 - hton24(logo->fl_n_port_id, fc_host_port_id(lport->host));
5538 - logo->fl_n_port_wwn = htonll(lport->wwpn);
5539 - fc_frame_setup(fp, FC_RCTL_ELS_REQ, FC_TYPE_ELS);
5540 - fc_frame_set_offset(fp, 0);
5541 -
5542 - if (!lport->tt.exch_seq_send(lport, fp,
5543 - fc_lport_logo_resp, NULL,
5544 - lport, lport->e_d_tov,
5545 - fc_host_port_id(lport->host), FC_FID_FLOGI,
5546 - FC_FC_SEQ_INIT | FC_FC_END_SEQ))
5547 + if (!lport->tt.elsct_send(lport, NULL, fp, ELS_LOGO, fc_lport_logo_resp,
5548 + lport, lport->e_d_tov))
5549 fc_lport_error(lport, fp);
5550 }
5551
5552 @@ -1496,8 +1438,8 @@ static void fc_lport_flogi_resp(struct fc_seq *sp, struct fc_frame *fp,
5553 if (flp) {
5554 csp_flags = ntohs(flp->fl_csp.sp_features);
5555 if ((csp_flags & FC_SP_FT_FPORT) == 0) {
5556 - if (lport->tt.disc_start(lport))
5557 - FC_DBG("Target disc start error\n");
5558 + lport->tt.disc_start(fc_lport_disc_callback,
5559 + lport);
5560 }
5561 }
5562 } else {
5563 @@ -1520,29 +1462,18 @@ err:
5564 void fc_lport_enter_flogi(struct fc_lport *lport)
5565 {
5566 struct fc_frame *fp;
5567 - struct fc_els_flogi *flp;
5568
5569 FC_DEBUG_LPORT("Processing FLOGI state\n");
5570
5571 fc_lport_state_enter(lport, LPORT_ST_FLOGI);
5572
5573 - fp = fc_frame_alloc(lport, sizeof(*flp));
5574 + fp = fc_frame_alloc(lport, sizeof(struct fc_els_flogi));
5575 if (!fp)
5576 return fc_lport_error(lport, fp);
5577
5578 - flp = fc_frame_payload_get(fp, sizeof(*flp));
5579 - fc_lport_flogi_fill(lport, flp, ELS_FLOGI);
5580 -
5581 - fc_frame_setup(fp, FC_RCTL_ELS_REQ, FC_TYPE_ELS);
5582 - fc_frame_set_offset(fp, 0);
5583 -
5584 - if (!lport->tt.exch_seq_send(lport, fp,
5585 - fc_lport_flogi_resp, NULL,
5586 - lport, lport->e_d_tov,
5587 - 0, FC_FID_FLOGI,
5588 - FC_FC_SEQ_INIT | FC_FC_END_SEQ))
5589 + if (!lport->tt.elsct_send(lport, NULL, fp, ELS_FLOGI,
5590 + fc_lport_flogi_resp, lport, lport->e_d_tov))
5591 fc_lport_error(lport, fp);
5592 -
5593 }
5594
5595 /* Configure a fc_lport */
5596 @@ -1550,12 +1481,9 @@ int fc_lport_config(struct fc_lport *lport)
5597 {
5598 INIT_DELAYED_WORK(&lport->retry_work, fc_lport_timeout);
5599 mutex_init(&lport->lp_mutex);
5600 - INIT_LIST_HEAD(&lport->rports);
5601
5602 fc_lport_state_enter(lport, LPORT_ST_NONE);
5603
5604 - lport->disc_delay = DNS_DELAY;
5605 -
5606 fc_lport_add_fc4_type(lport, FC_TYPE_FCP);
5607 fc_lport_add_fc4_type(lport, FC_TYPE_CT);
5608
5609 @@ -1571,12 +1499,6 @@ int fc_lport_init(struct fc_lport *lport)
5610 if (!lport->tt.lport_reset)
5611 lport->tt.lport_reset = fc_lport_reset;
5612
5613 - if (!lport->tt.rport_lookup)
5614 - lport->tt.rport_lookup = fc_lport_lookup_rport;
5615 -
5616 - if (!lport->tt.event_callback)
5617 - lport->tt.event_callback = fc_lport_rport_event;
5618 -
5619 fc_host_port_type(lport->host) = FC_PORTTYPE_NPORT;
5620 fc_host_node_name(lport->host) = lport->wwnn;
5621 fc_host_port_name(lport->host) = lport->wwpn;
5622 diff --git a/drivers/scsi/libfc/fc_rport.c b/drivers/scsi/libfc/fc_rport.c
5623 index 2d0bd85..d081af5 100644
5624 --- a/drivers/scsi/libfc/fc_rport.c
5625 +++ b/drivers/scsi/libfc/fc_rport.c
5626 @@ -36,9 +36,9 @@
5627 * The locking strategy is similar to the lport's strategy. The lock protects
5628 * the rport's states and is held and released by the entry points to the rport
5629 * block. All _enter_* functions correspond to rport states and expect the rport
5630 - * mutex to be locked before calling them. This means that rports only handle one
5631 - * request or response at a time, since they're not critical for the I/O path
5632 - * this potential over-use of the mutex is acceptable.
5633 + * mutex to be locked before calling them. This means that rports only handle
5634 + * one request or response at a time, since they're not critical for the I/O
5635 + * path this potential over-use of the mutex is acceptable.
5636 */
5637
5638 #include <linux/kernel.h>
5639 @@ -49,7 +49,8 @@
5640 #include <linux/workqueue.h>
5641 #include <asm/unaligned.h>
5642
5643 -#include <scsi/libfc/libfc.h>
5644 +#include <scsi/libfc.h>
5645 +#include <scsi/fc_encode.h>
5646
5647 static int fc_rport_debug;
5648
5649 @@ -59,7 +60,7 @@ static int fc_rport_debug;
5650 FC_DBG(fmt); \
5651 } while (0)
5652
5653 -static struct workqueue_struct *rport_event_queue;
5654 +struct workqueue_struct *rport_event_queue;
5655
5656 static void fc_rport_enter_plogi(struct fc_rport *);
5657 static void fc_rport_enter_prli(struct fc_rport *);
5658 @@ -122,7 +123,7 @@ struct fc_rport *fc_rport_rogue_create(struct fc_disc_port *dp)
5659 rdata->local_port = dp->lp;
5660 rdata->trans_state = FC_PORTSTATE_ROGUE;
5661 rdata->rp_state = RPORT_ST_INIT;
5662 - rdata->event = LPORT_EV_RPORT_NONE;
5663 + rdata->event = RPORT_EV_NONE;
5664 rdata->flags = FC_RP_FLAGS_REC_SUPPORTED;
5665 rdata->event_callback = NULL;
5666 rdata->e_d_tov = dp->lp->e_d_tov;
5667 @@ -196,43 +197,6 @@ fc_plogi_get_maxframe(struct fc_els_flogi *flp, unsigned int maxval)
5668 }
5669
5670 /**
5671 - * fc_lport_plogi_fill - Fill in PLOGI command for request
5672 - * @lport: Fibre Channel host port instance
5673 - * @plogi: PLOGI command structure to fill (same structure as FLOGI)
5674 - * @op: either ELS_PLOGI for a localy generated request, or ELS_LS_ACC
5675 - */
5676 -static void
5677 -fc_lport_plogi_fill(struct fc_lport *lport,
5678 - struct fc_els_flogi *plogi, unsigned int op)
5679 -{
5680 - struct fc_els_csp *sp;
5681 - struct fc_els_cssp *cp;
5682 -
5683 - memset(plogi, 0, sizeof(*plogi));
5684 - plogi->fl_cmd = (u8) op;
5685 - put_unaligned_be64(lport->wwpn, &plogi->fl_wwpn);
5686 - put_unaligned_be64(lport->wwnn, &plogi->fl_wwnn);
5687 -
5688 - sp = &plogi->fl_csp;
5689 - sp->sp_hi_ver = 0x20;
5690 - sp->sp_lo_ver = 0x20;
5691 - sp->sp_bb_cred = htons(10); /* this gets set by gateway */
5692 - sp->sp_bb_data = htons((u16) lport->mfs);
5693 - cp = &plogi->fl_cssp[3 - 1]; /* class 3 parameters */
5694 - cp->cp_class = htons(FC_CPC_VALID | FC_CPC_SEQ);
5695 - if (op != ELS_FLOGI) {
5696 - sp->sp_features = htons(FC_SP_FT_CIRO);
5697 - sp->sp_tot_seq = htons(255); /* seq. we accept */
5698 - sp->sp_rel_off = htons(0x1f);
5699 - sp->sp_e_d_tov = htonl(lport->e_d_tov);
5700 -
5701 - cp->cp_rdfs = htons((u16) lport->mfs);
5702 - cp->cp_con_seq = htons(255);
5703 - cp->cp_open_seq = 1;
5704 - }
5705 -}
5706 -
5707 -/**
5708 * fc_rport_state_enter - Change the rport's state
5709 * @rport: The rport whose state should change
5710 * @new: The new state of the rport
5711 @@ -263,7 +227,7 @@ static void fc_rport_work(struct work_struct *work)
5712 event = rdata->event;
5713 event_callback = rdata->event_callback;
5714
5715 - if (event == LPORT_EV_RPORT_CREATED) {
5716 + if (event == RPORT_EV_CREATED) {
5717 struct fc_rport *new_rport;
5718 struct fc_rport_libfc_priv *new_rdata;
5719 struct fc_rport_identifiers ids;
5720 @@ -300,19 +264,20 @@ static void fc_rport_work(struct work_struct *work)
5721 } else {
5722 FC_DBG("Failed to create the rport for port "
5723 "(%6x).\n", ids.port_id);
5724 - event = LPORT_EV_RPORT_FAILED;
5725 + event = RPORT_EV_FAILED;
5726 }
5727 fc_rport_rogue_destroy(rport);
5728 rport = new_rport;
5729 rdata = new_rport->dd_data;
5730 - event_callback(lport, rport, event);
5731 - } else if ((event == LPORT_EV_RPORT_FAILED) ||
5732 - (event == LPORT_EV_RPORT_LOGO) ||
5733 - (event == LPORT_EV_RPORT_STOP)) {
5734 -
5735 + if (event_callback)
5736 + event_callback(lport, rport, event);
5737 + } else if ((event == RPORT_EV_FAILED) ||
5738 + (event == RPORT_EV_LOGO) ||
5739 + (event == RPORT_EV_STOP)) {
5740 trans_state = rdata->trans_state;
5741 mutex_unlock(&rdata->rp_mutex);
5742 - event_callback(lport, rport, event);
5743 + if (event_callback)
5744 + event_callback(lport, rport, event);
5745 if (trans_state == FC_PORTSTATE_ROGUE)
5746 fc_rport_rogue_destroy(rport);
5747 else
5748 @@ -345,45 +310,32 @@ int fc_rport_login(struct fc_rport *rport)
5749 }
5750
5751 /**
5752 - * fc_rport_logout - Logout of the remote port and delete it
5753 - * @rport: Fibre Channel remote port
5754 + * fc_rport_logoff - Logoff and remove an rport
5755 + * @rport: Fibre Channel remote port to be removed
5756 *
5757 * Locking Note: Called without the rport lock held. This
5758 * function will hold the rport lock, call an _enter_*
5759 * function and then unlock the rport.
5760 */
5761 -int fc_rport_logout(struct fc_rport *rport)
5762 +int fc_rport_logoff(struct fc_rport *rport)
5763 {
5764 struct fc_rport_libfc_priv *rdata = rport->dd_data;
5765
5766 mutex_lock(&rdata->rp_mutex);
5767
5768 - FC_DEBUG_RPORT("Logout of port (%6x)\n", rport->port_id);
5769 + FC_DEBUG_RPORT("Remove port (%6x)\n", rport->port_id);
5770
5771 fc_rport_enter_logo(rport);
5772
5773 - mutex_unlock(&rdata->rp_mutex);
5774 -
5775 - return 0;
5776 -}
5777 -
5778 -/**
5779 - * fc_rport_remove - Remove an rport
5780 - * @rport: Fibre Channel remote port to be removed
5781 - *
5782 - * Locking Note: Called without the rport lock held. This
5783 - * function will hold the rport lock, call an _enter_*
5784 - * function and then unlock the rport.
5785 - */
5786 -int fc_rport_stop(struct fc_rport *rport)
5787 -{
5788 - struct fc_rport_libfc_priv *rdata = rport->dd_data;
5789 -
5790 - mutex_lock(&rdata->rp_mutex);
5791 + /*
5792 + * Change the state to NONE so that we discard
5793 + * the response.
5794 + */
5795 + fc_rport_state_enter(rport, RPORT_ST_NONE);
5796
5797 - FC_DEBUG_RPORT("Remove port (%6x)\n", rport->port_id);
5798 + cancel_delayed_work_sync(&rdata->retry_work);
5799
5800 - rdata->event = LPORT_EV_RPORT_STOP;
5801 + rdata->event = RPORT_EV_STOP;
5802 queue_work(rport_event_queue, &rdata->event_work);
5803
5804 mutex_unlock(&rdata->rp_mutex);
5805 @@ -406,7 +358,7 @@ static void fc_rport_enter_ready(struct fc_rport *rport)
5806
5807 FC_DEBUG_RPORT("Port (%6x) is Ready\n", rport->port_id);
5808
5809 - rdata->event = LPORT_EV_RPORT_CREATED;
5810 + rdata->event = RPORT_EV_CREATED;
5811 queue_work(rport_event_queue, &rdata->event_work);
5812 }
5813
5814 @@ -441,9 +393,7 @@ static void fc_rport_timeout(struct work_struct *work)
5815 break;
5816 case RPORT_ST_READY:
5817 case RPORT_ST_INIT:
5818 - break;
5819 case RPORT_ST_NONE:
5820 - BUG();
5821 break;
5822 }
5823 put_device(&rport->dev);
5824 @@ -487,8 +437,9 @@ static void fc_rport_error(struct fc_rport *rport, struct fc_frame *fp)
5825 case RPORT_ST_PLOGI:
5826 case RPORT_ST_PRLI:
5827 case RPORT_ST_LOGO:
5828 - rdata->event = LPORT_EV_RPORT_FAILED;
5829 - queue_work(rport_event_queue, &rdata->event_work);
5830 + rdata->event = RPORT_EV_FAILED;
5831 + queue_work(rport_event_queue,
5832 + &rdata->event_work);
5833 break;
5834 case RPORT_ST_RTV:
5835 fc_rport_enter_ready(rport);
5836 @@ -496,7 +447,6 @@ static void fc_rport_error(struct fc_rport *rport, struct fc_frame *fp)
5837 case RPORT_ST_NONE:
5838 case RPORT_ST_READY:
5839 case RPORT_ST_INIT:
5840 - BUG();
5841 break;
5842 }
5843 }
5844 @@ -527,7 +477,8 @@ static void fc_rport_plogi_resp(struct fc_seq *sp, struct fc_frame *fp,
5845
5846 mutex_lock(&rdata->rp_mutex);
5847
5848 - FC_DEBUG_RPORT("Received a PLOGI response\n");
5849 + FC_DEBUG_RPORT("Received a PLOGI response from port (%6x)\n",
5850 + rport->port_id);
5851
5852 if (rdata->rp_state != RPORT_ST_PLOGI) {
5853 FC_DBG("Received a PLOGI response, but in state %s\n",
5854 @@ -588,7 +539,6 @@ static void fc_rport_enter_plogi(struct fc_rport *rport)
5855 struct fc_rport_libfc_priv *rdata = rport->dd_data;
5856 struct fc_lport *lport = rdata->local_port;
5857 struct fc_frame *fp;
5858 - struct fc_els_flogi *plogi;
5859
5860 FC_DEBUG_RPORT("Port (%6x) entered PLOGI state from %s state\n",
5861 rport->port_id, fc_rport_state(rport));
5862 @@ -596,23 +546,15 @@ static void fc_rport_enter_plogi(struct fc_rport *rport)
5863 fc_rport_state_enter(rport, RPORT_ST_PLOGI);
5864
5865 rport->maxframe_size = FC_MIN_MAX_PAYLOAD;
5866 - fp = fc_frame_alloc(lport, sizeof(*plogi));
5867 + fp = fc_frame_alloc(lport, sizeof(struct fc_els_flogi));
5868 if (!fp) {
5869 fc_rport_error(rport, fp);
5870 return;
5871 }
5872 -
5873 - plogi = fc_frame_payload_get(fp, sizeof(*plogi));
5874 - fc_lport_plogi_fill(rdata->local_port, plogi, ELS_PLOGI);
5875 rdata->e_d_tov = lport->e_d_tov;
5876 - fc_frame_setup(fp, FC_RCTL_ELS_REQ, FC_TYPE_ELS);
5877 -
5878 - if (!lport->tt.exch_seq_send(lport, fp,
5879 - fc_rport_plogi_resp, NULL,
5880 - rport, lport->e_d_tov,
5881 - fc_host_port_id(rdata->local_port->host),
5882 - rport->port_id,
5883 - FC_FC_SEQ_INIT | FC_FC_END_SEQ))
5884 +
5885 + if (!lport->tt.elsct_send(lport, rport, fp, ELS_PLOGI,
5886 + fc_rport_plogi_resp, rport, lport->e_d_tov))
5887 fc_rport_error(rport, fp);
5888 }
5889
5890 @@ -641,7 +583,8 @@ static void fc_rport_prli_resp(struct fc_seq *sp, struct fc_frame *fp,
5891
5892 mutex_lock(&rdata->rp_mutex);
5893
5894 - FC_DEBUG_RPORT("Received a PRLI response\n");
5895 + FC_DEBUG_RPORT("Received a PRLI response from port (%6x)\n",
5896 + rport->port_id);
5897
5898 if (rdata->rp_state != RPORT_ST_PRLI) {
5899 FC_DBG("Received a PRLI response, but in state %s\n",
5900 @@ -674,7 +617,7 @@ static void fc_rport_prli_resp(struct fc_seq *sp, struct fc_frame *fp,
5901
5902 } else {
5903 FC_DBG("Bad ELS response\n");
5904 - rdata->event = LPORT_EV_RPORT_FAILED;
5905 + rdata->event = RPORT_EV_FAILED;
5906 queue_work(rport_event_queue, &rdata->event_work);
5907 }
5908
5909 @@ -703,25 +646,26 @@ static void fc_rport_logo_resp(struct fc_seq *sp, struct fc_frame *fp,
5910
5911 mutex_lock(&rdata->rp_mutex);
5912
5913 - FC_DEBUG_RPORT("Received a LOGO response\n");
5914 -
5915 - if (rdata->rp_state != RPORT_ST_LOGO) {
5916 - FC_DBG("Received a LOGO response, but in state %s\n",
5917 - fc_rport_state(rport));
5918 - goto out;
5919 - }
5920 + FC_DEBUG_RPORT("Received a LOGO response from port (%6x)\n",
5921 + rport->port_id);
5922
5923 if (IS_ERR(fp)) {
5924 fc_rport_error(rport, fp);
5925 goto err;
5926 }
5927
5928 + if (rdata->rp_state != RPORT_ST_LOGO) {
5929 + FC_DEBUG_RPORT("Received a LOGO response, but in state %s\n",
5930 + fc_rport_state(rport));
5931 + goto out;
5932 + }
5933 +
5934 op = fc_frame_payload_op(fp);
5935 if (op == ELS_LS_ACC) {
5936 fc_rport_enter_rtv(rport);
5937 } else {
5938 FC_DBG("Bad ELS response\n");
5939 - rdata->event = LPORT_EV_RPORT_LOGO;
5940 + rdata->event = RPORT_EV_LOGO;
5941 queue_work(rport_event_queue, &rdata->event_work);
5942 }
5943
5944 @@ -759,22 +703,8 @@ static void fc_rport_enter_prli(struct fc_rport *rport)
5945 return;
5946 }
5947
5948 - pp = fc_frame_payload_get(fp, sizeof(*pp));
5949 - memset(pp, 0, sizeof(*pp));
5950 - pp->prli.prli_cmd = ELS_PRLI;
5951 - pp->prli.prli_spp_len = sizeof(struct fc_els_spp);
5952 - pp->prli.prli_len = htons(sizeof(*pp));
5953 - pp->spp.spp_type = FC_TYPE_FCP;
5954 - pp->spp.spp_flags = FC_SPP_EST_IMG_PAIR;
5955 - pp->spp.spp_params = htonl(lport->service_params);
5956 - fc_frame_setup(fp, FC_RCTL_ELS_REQ, FC_TYPE_ELS);
5957 -
5958 - if (!lport->tt.exch_seq_send(lport, fp,
5959 - fc_rport_prli_resp, NULL,
5960 - rport, lport->e_d_tov,
5961 - fc_host_port_id(lport->host),
5962 - rport->port_id,
5963 - FC_FC_SEQ_INIT | FC_FC_END_SEQ))
5964 + if (!lport->tt.elsct_send(lport, rport, fp, ELS_PRLI,
5965 + fc_rport_prli_resp, rport, lport->e_d_tov))
5966 fc_rport_error(rport, fp);
5967 }
5968
5969 @@ -799,7 +729,8 @@ static void fc_rport_rtv_resp(struct fc_seq *sp, struct fc_frame *fp,
5970
5971 mutex_lock(&rdata->rp_mutex);
5972
5973 - FC_DEBUG_RPORT("Received a RTV response\n");
5974 + FC_DEBUG_RPORT("Received a RTV response from port (%6x)\n",
5975 + rport->port_id);
5976
5977 if (rdata->rp_state != RPORT_ST_RTV) {
5978 FC_DBG("Received a RTV response, but in state %s\n",
5979 @@ -851,7 +782,6 @@ err:
5980 */
5981 static void fc_rport_enter_rtv(struct fc_rport *rport)
5982 {
5983 - struct fc_els_rtv *rtv;
5984 struct fc_frame *fp;
5985 struct fc_rport_libfc_priv *rdata = rport->dd_data;
5986 struct fc_lport *lport = rdata->local_port;
5987 @@ -861,23 +791,14 @@ static void fc_rport_enter_rtv(struct fc_rport *rport)
5988
5989 fc_rport_state_enter(rport, RPORT_ST_RTV);
5990
5991 - fp = fc_frame_alloc(lport, sizeof(*rtv));
5992 + fp = fc_frame_alloc(lport, sizeof(struct fc_els_rtv));
5993 if (!fp) {
5994 fc_rport_error(rport, fp);
5995 return;
5996 }
5997
5998 - rtv = fc_frame_payload_get(fp, sizeof(*rtv));
5999 - memset(rtv, 0, sizeof(*rtv));
6000 - rtv->rtv_cmd = ELS_RTV;
6001 - fc_frame_setup(fp, FC_RCTL_ELS_REQ, FC_TYPE_ELS);
6002 -
6003 - if (!lport->tt.exch_seq_send(lport, fp,
6004 - fc_rport_rtv_resp, NULL,
6005 - rport, lport->e_d_tov,
6006 - fc_host_port_id(lport->host),
6007 - rport->port_id,
6008 - FC_FC_SEQ_INIT | FC_FC_END_SEQ))
6009 + if (!lport->tt.elsct_send(lport, rport, fp, ELS_RTV,
6010 + fc_rport_rtv_resp, rport, lport->e_d_tov))
6011 fc_rport_error(rport, fp);
6012 }
6013
6014 @@ -893,32 +814,20 @@ static void fc_rport_enter_logo(struct fc_rport *rport)
6015 struct fc_rport_libfc_priv *rdata = rport->dd_data;
6016 struct fc_lport *lport = rdata->local_port;
6017 struct fc_frame *fp;
6018 - struct fc_els_logo *logo;
6019
6020 FC_DEBUG_RPORT("Port (%6x) entered LOGO state from %s state\n",
6021 rport->port_id, fc_rport_state(rport));
6022
6023 fc_rport_state_enter(rport, RPORT_ST_LOGO);
6024
6025 - fp = fc_frame_alloc(lport, sizeof(*logo));
6026 + fp = fc_frame_alloc(lport, sizeof(struct fc_els_logo));
6027 if (!fp) {
6028 fc_rport_error(rport, fp);
6029 return;
6030 }
6031
6032 - logo = fc_frame_payload_get(fp, sizeof(*logo));
6033 - memset(logo, 0, sizeof(*logo));
6034 - logo->fl_cmd = ELS_LOGO;
6035 - hton24(logo->fl_n_port_id, fc_host_port_id(lport->host));
6036 - logo->fl_n_port_wwn = htonll(lport->wwpn);
6037 - fc_frame_setup(fp, FC_RCTL_ELS_REQ, FC_TYPE_ELS);
6038 -
6039 - if (!lport->tt.exch_seq_send(lport, fp,
6040 - fc_rport_logo_resp, NULL,
6041 - rport, lport->e_d_tov,
6042 - fc_host_port_id(lport->host),
6043 - rport->port_id,
6044 - FC_FC_SEQ_INIT | FC_FC_END_SEQ))
6045 + if (!lport->tt.elsct_send(lport, rport, fp, ELS_LOGO,
6046 + fc_rport_logo_resp, rport, lport->e_d_tov))
6047 fc_rport_error(rport, fp);
6048 }
6049
6050 @@ -982,7 +891,6 @@ void fc_rport_recv_req(struct fc_seq *sp, struct fc_frame *fp,
6051 }
6052
6053 mutex_unlock(&rdata->rp_mutex);
6054 - fc_frame_free(fp);
6055 }
6056
6057 /**
6058 @@ -1000,7 +908,7 @@ static void fc_rport_recv_plogi_req(struct fc_rport *rport,
6059 struct fc_rport_libfc_priv *rdata = rport->dd_data;
6060 struct fc_lport *lport = rdata->local_port;
6061 struct fc_frame *fp = rx_fp;
6062 -
6063 + struct fc_exch *ep;
6064 struct fc_frame_header *fh;
6065 struct fc_els_flogi *pl;
6066 struct fc_seq_els_data rjt_data;
6067 @@ -1089,17 +997,18 @@ static void fc_rport_recv_plogi_req(struct fc_rport *rport,
6068 rport->maxframe_size =
6069 fc_plogi_get_maxframe(pl, lport->mfs);
6070 fc_frame_free(rx_fp);
6071 - pl = fc_frame_payload_get(fp, sizeof(*pl));
6072 - WARN_ON(!pl);
6073 - fc_lport_plogi_fill(lport, pl, ELS_LS_ACC);
6074 + fc_plogi_fill(lport, fp, ELS_LS_ACC);
6075
6076 /*
6077 * Send LS_ACC. If this fails,
6078 * the originator should retry.
6079 */
6080 - f_ctl = FC_FC_SEQ_INIT | FC_FC_LAST_SEQ | FC_FC_END_SEQ;
6081 - fc_frame_setup(fp, FC_RCTL_ELS_REP, FC_TYPE_ELS);
6082 - lport->tt.seq_send(lport, sp, fp, f_ctl);
6083 + f_ctl = FC_FC_EX_CTX | FC_FC_LAST_SEQ;
6084 + f_ctl |= FC_FC_END_SEQ | FC_FC_SEQ_INIT;
6085 + ep = fc_seq_exch(sp);
6086 + fc_fill_fc_hdr(fp, FC_RCTL_ELS_REP, ep->did, ep->sid,
6087 + FC_TYPE_ELS, f_ctl, 0);
6088 + lport->tt.seq_send(lport, sp, fp);
6089 if (rdata->rp_state == RPORT_ST_PLOGI)
6090 fc_rport_enter_prli(rport);
6091 }
6092 @@ -1120,7 +1029,7 @@ static void fc_rport_recv_prli_req(struct fc_rport *rport,
6093 {
6094 struct fc_rport_libfc_priv *rdata = rport->dd_data;
6095 struct fc_lport *lport = rdata->local_port;
6096 -
6097 + struct fc_exch *ep;
6098 struct fc_frame *fp;
6099 struct fc_frame_header *fh;
6100 struct {
6101 @@ -1234,9 +1143,12 @@ static void fc_rport_recv_prli_req(struct fc_rport *rport,
6102 /*
6103 * Send LS_ACC. If this fails, the originator should retry.
6104 */
6105 - f_ctl = FC_FC_SEQ_INIT | FC_FC_LAST_SEQ | FC_FC_END_SEQ;
6106 - fc_frame_setup(fp, FC_RCTL_ELS_REP, FC_TYPE_ELS);
6107 - lport->tt.seq_send(lport, sp, fp, f_ctl);
6108 + f_ctl = FC_FC_EX_CTX | FC_FC_LAST_SEQ;
6109 + f_ctl |= FC_FC_END_SEQ | FC_FC_SEQ_INIT;
6110 + ep = fc_seq_exch(sp);
6111 + fc_fill_fc_hdr(fp, FC_RCTL_ELS_REP, ep->did, ep->sid,
6112 + FC_TYPE_ELS, f_ctl, 0);
6113 + lport->tt.seq_send(lport, sp, fp);
6114
6115 /*
6116 * Get lock and re-check state.
6117 @@ -1307,27 +1219,33 @@ static void fc_rport_recv_logo_req(struct fc_rport *rport, struct fc_seq *sp,
6118 "while in state %s\n", ntoh24(fh->fh_s_id),
6119 fc_rport_state(rport));
6120
6121 - rdata->event = LPORT_EV_RPORT_LOGO;
6122 + rdata->event = RPORT_EV_LOGO;
6123 queue_work(rport_event_queue, &rdata->event_work);
6124
6125 lport->tt.seq_els_rsp_send(sp, ELS_LS_ACC, NULL);
6126 fc_frame_free(fp);
6127 }
6128
6129 +static void fc_rport_flush_queue(void)
6130 +{
6131 + flush_workqueue(rport_event_queue);
6132 +}
6133 +
6134 +
6135 int fc_rport_init(struct fc_lport *lport)
6136 {
6137 if (!lport->tt.rport_login)
6138 lport->tt.rport_login = fc_rport_login;
6139
6140 - if (!lport->tt.rport_logout)
6141 - lport->tt.rport_logout = fc_rport_logout;
6142 -
6143 - if (!lport->tt.rport_stop)
6144 - lport->tt.rport_stop = fc_rport_stop;
6145 + if (!lport->tt.rport_logoff)
6146 + lport->tt.rport_logoff = fc_rport_logoff;
6147
6148 if (!lport->tt.rport_recv_req)
6149 lport->tt.rport_recv_req = fc_rport_recv_req;
6150
6151 + if (!lport->tt.rport_flush_queue)
6152 + lport->tt.rport_flush_queue = fc_rport_flush_queue;
6153 +
6154 return 0;
6155 }
6156 EXPORT_SYMBOL(fc_rport_init);
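
[Editor's note] Like the other libfc init routines, fc_rport_init() only fills template hooks the driver left NULL, so an LLD picks up the new rport_logoff and rport_flush_queue defaults (replacing the removed rport_logout/rport_stop) simply by not setting them. A hedged sketch; example_libfc_setup is a placeholder name:

/* Illustrative sketch only -- not part of the patch. */
static int example_libfc_setup(struct fc_lport *lport)
{
	/*
	 * Leave rport_login, rport_logoff, rport_recv_req and
	 * rport_flush_queue NULL so fc_rport_init() installs the
	 * libfc defaults added above.
	 */
	return fc_rport_init(lport);
}
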
6157 diff --git a/include/scsi/fc/fc_fcoe.h b/include/scsi/fc/fc_fcoe.h
6158 index 59c9d0c..a6118a2 100644
6159 --- a/include/scsi/fc/fc_fcoe.h
6160 +++ b/include/scsi/fc/fc_fcoe.h
6161 @@ -31,6 +31,10 @@
6162 #define ETH_P_FCOE 0x8906 /* FCOE ether type */
6163 #endif
6164
6165 +#ifndef ETH_P_8021Q
6166 +#define ETH_P_8021Q 0x8100
6167 +#endif
6168 +
6169 /*
6170 * FC_FCOE_OUI hasn't been standardized yet. XXX TBD.
6171 */
6172 @@ -81,7 +85,9 @@ struct fcoe_crc_eof {
6173 } __attribute__((packed));
6174
6175 /*
6176 - * Store OUI + DID into MAC address field.
6177 + * fc_fcoe_set_mac - Store OUI + DID into MAC address field.
6178 + * @mac: mac address to be set
6179 + * @did: fc dest id to use
6180 */
6181 static inline void fc_fcoe_set_mac(u8 *mac, u8 *did)
6182 {
6183 @@ -93,8 +99,4 @@ static inline void fc_fcoe_set_mac(u8 *mac, u8 *did)
6184 mac[5] = did[2];
6185 }
6186
6187 -#ifndef ETH_P_8021Q
6188 -#define ETH_P_8021Q 0x8100
6189 -#endif
6190 -
6191 #endif /* _FC_FCOE_H_ */
6192 diff --git a/include/scsi/fc/fc_fs.h b/include/scsi/fc/fc_fs.h
6193 index 3897c6c..3e4801d 100644
6194 --- a/include/scsi/fc/fc_fs.h
6195 +++ b/include/scsi/fc/fc_fs.h
6196 @@ -82,6 +82,12 @@ enum fc_rctl {
6197 FC_RCTL_ELS4_REQ = 0x32, /* FC-4 ELS request */
6198 FC_RCTL_ELS4_REP = 0x33, /* FC-4 ELS reply */
6199 /*
6200 + * Optional Extended Headers
6201 + */
6202 + FC_RCTL_VFTH = 0x50, /* virtual fabric tagging header */
6203 + FC_RCTL_IFRH = 0x51, /* inter-fabric routing header */
6204 + FC_RCTL_ENCH = 0x52, /* encapsulation header */
6205 + /*
6206 * Basic Link Services fh_r_ctl values.
6207 */
6208 FC_RCTL_BA_NOP = 0x80, /* basic link service NOP */
6209 @@ -200,6 +206,8 @@ enum fc_fh_type {
6210 * Exchange IDs.
6211 */
6212 #define FC_XID_UNKNOWN 0xffff /* unknown exchange ID */
6213 +#define FC_XID_MIN 0x0 /* supported min exchange ID */
6214 +#define FC_XID_MAX 0xfffe /* supported max exchange ID */
6215
6216 /*
6217 * fh_f_ctl - Frame control flags.
6218 diff --git a/include/scsi/fc_encode.h b/include/scsi/fc_encode.h
6219 new file mode 100644
6220 index 0000000..6300f55
6221 --- /dev/null
6222 +++ b/include/scsi/fc_encode.h
6223 @@ -0,0 +1,309 @@
6224 +/*
6225 + * Copyright(c) 2008 Intel Corporation. All rights reserved.
6226 + *
6227 + * This program is free software; you can redistribute it and/or modify it
6228 + * under the terms and conditions of the GNU General Public License,
6229 + * version 2, as published by the Free Software Foundation.
6230 + *
6231 + * This program is distributed in the hope it will be useful, but WITHOUT
6232 + * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
6233 + * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
6234 + * more details.
6235 + *
6236 + * You should have received a copy of the GNU General Public License along with
6237 + * this program; if not, write to the Free Software Foundation, Inc.,
6238 + * 51 Franklin St - Fifth Floor, Boston, MA 02110-1301 USA.
6239 + *
6240 + * Maintained at www.Open-FCoE.org
6241 + */
6242 +
6243 +#ifndef _FC_ENCODE_H_
6244 +#define _FC_ENCODE_H_
6245 +#include <asm/unaligned.h>
6246 +
6247 +struct fc_ns_rft {
6248 + struct fc_ns_fid fid; /* port ID object */
6249 + struct fc_ns_fts fts; /* FC4-types object */
6250 +};
6251 +
6252 +struct fc_ct_req {
6253 + struct fc_ct_hdr hdr;
6254 + union {
6255 + struct fc_ns_gid_ft gid;
6256 + struct fc_ns_rn_id rn;
6257 + struct fc_ns_rft rft;
6258 + } payload;
6259 +};
6260 +
6261 +/**
6262 + * fill FC header fields in specified fc_frame
6263 + */
6264 +static inline void fc_fill_fc_hdr(struct fc_frame *fp, enum fc_rctl r_ctl,
6265 + u32 did, u32 sid, enum fc_fh_type type,
6266 + u32 f_ctl, u32 parm_offset)
6267 +{
6268 + struct fc_frame_header *fh;
6269 +
6270 + fh = fc_frame_header_get(fp);
6271 + WARN_ON(r_ctl == 0);
6272 + fh->fh_r_ctl = r_ctl;
6273 + hton24(fh->fh_d_id, did);
6274 + hton24(fh->fh_s_id, sid);
6275 + fh->fh_type = type;
6276 + hton24(fh->fh_f_ctl, f_ctl);
6277 + fh->fh_cs_ctl = 0;
6278 + fh->fh_df_ctl = 0;
6279 + fh->fh_parm_offset = htonl(parm_offset);
6280 +}
6281 +
6282 +/**
6283 + * fc_ct_hdr_fill - fills the CT header and resets the CT payload;
6284 + * returns a pointer to the CT request.
6285 + */
6286 +static inline struct fc_ct_req *fc_ct_hdr_fill(const struct fc_frame *fp,
6287 + unsigned int op, size_t req_size)
6288 +{
6289 + struct fc_ct_req *ct;
6290 + size_t ct_plen;
6291 +
6292 + ct_plen = sizeof(struct fc_ct_hdr) + req_size;
6293 + ct = fc_frame_payload_get(fp, ct_plen);
6294 + memset(ct, 0, ct_plen);
6295 + ct->hdr.ct_rev = FC_CT_REV;
6296 + ct->hdr.ct_fs_type = FC_FST_DIR;
6297 + ct->hdr.ct_fs_subtype = FC_NS_SUBTYPE;
6298 + ct->hdr.ct_cmd = htons((u16) op);
6299 + return ct;
6300 +}
6301 +
6302 +/**
6303 + * fc_ct_fill - Fill in a name service request frame
6304 + */
6305 +static inline int fc_ct_fill(struct fc_lport *lport, struct fc_frame *fp,
6306 + unsigned int op, enum fc_rctl *r_ctl, u32 *did,
6307 + enum fc_fh_type *fh_type)
6308 +{
6309 + struct fc_ct_req *ct;
6310 +
6311 + switch (op) {
6312 + case FC_NS_GPN_FT:
6313 + ct = fc_ct_hdr_fill(fp, op, sizeof(struct fc_ns_gid_ft));
6314 + ct->payload.gid.fn_fc4_type = FC_TYPE_FCP;
6315 + break;
6316 +
6317 + case FC_NS_RFT_ID:
6318 + ct = fc_ct_hdr_fill(fp, op, sizeof(struct fc_ns_rft));
6319 + hton24(ct->payload.rft.fid.fp_fid,
6320 + fc_host_port_id(lport->host));
6321 + ct->payload.rft.fts = lport->fcts;
6322 + break;
6323 +
6324 + case FC_NS_RPN_ID:
6325 + ct = fc_ct_hdr_fill(fp, op, sizeof(struct fc_ns_rn_id));
6326 + hton24(ct->payload.rn.fr_fid.fp_fid,
6327 + fc_host_port_id(lport->host));
6328 + ct->payload.rft.fts = lport->fcts;
6329 + put_unaligned_be64(lport->wwpn, &ct->payload.rn.fr_wwn);
6330 + break;
6331 +
6332 + default:
6333 + FC_DBG("Invalid op code %x \n", op);
6334 + return -EINVAL;
6335 + }
6336 + *r_ctl = FC_RCTL_DD_UNSOL_CTL;
6337 + *did = FC_FID_DIR_SERV;
6338 + *fh_type = FC_TYPE_CT;
6339 + return 0;
6340 +}
6341 +
6342 +/**
6343 + * fc_plogi_fill - Fill in plogi request frame
6344 + */
6345 +static inline void fc_plogi_fill(struct fc_lport *lport, struct fc_frame *fp,
6346 + unsigned int op)
6347 +{
6348 + struct fc_els_flogi *plogi;
6349 + struct fc_els_csp *csp;
6350 + struct fc_els_cssp *cp;
6351 +
6352 + plogi = fc_frame_payload_get(fp, sizeof(*plogi));
6353 + memset(plogi, 0, sizeof(*plogi));
6354 + plogi->fl_cmd = (u8) op;
6355 + put_unaligned_be64(lport->wwpn, &plogi->fl_wwpn);
6356 + put_unaligned_be64(lport->wwnn, &plogi->fl_wwnn);
6357 +
6358 + csp = &plogi->fl_csp;
6359 + csp->sp_hi_ver = 0x20;
6360 + csp->sp_lo_ver = 0x20;
6361 + csp->sp_bb_cred = htons(10); /* this gets set by gateway */
6362 + csp->sp_bb_data = htons((u16) lport->mfs);
6363 + cp = &plogi->fl_cssp[3 - 1]; /* class 3 parameters */
6364 + cp->cp_class = htons(FC_CPC_VALID | FC_CPC_SEQ);
6365 + csp->sp_features = htons(FC_SP_FT_CIRO);
6366 + csp->sp_tot_seq = htons(255); /* seq. we accept */
6367 + csp->sp_rel_off = htons(0x1f);
6368 + csp->sp_e_d_tov = htonl(lport->e_d_tov);
6369 +
6370 + cp->cp_rdfs = htons((u16) lport->mfs);
6371 + cp->cp_con_seq = htons(255);
6372 + cp->cp_open_seq = 1;
6373 +}
6374 +
6375 +/**
6376 + * fc_flogi_fill - Fill in a flogi request frame.
6377 + */
6378 +static inline void fc_flogi_fill(struct fc_lport *lport, struct fc_frame *fp)
6379 +{
6380 + struct fc_els_csp *sp;
6381 + struct fc_els_cssp *cp;
6382 + struct fc_els_flogi *flogi;
6383 +
6384 + flogi = fc_frame_payload_get(fp, sizeof(*flogi));
6385 + memset(flogi, 0, sizeof(*flogi));
6386 + flogi->fl_cmd = (u8) ELS_FLOGI;
6387 + put_unaligned_be64(lport->wwpn, &flogi->fl_wwpn);
6388 + put_unaligned_be64(lport->wwnn, &flogi->fl_wwnn);
6389 + sp = &flogi->fl_csp;
6390 + sp->sp_hi_ver = 0x20;
6391 + sp->sp_lo_ver = 0x20;
6392 + sp->sp_bb_cred = htons(10); /* this gets set by gateway */
6393 + sp->sp_bb_data = htons((u16) lport->mfs);
6394 + cp = &flogi->fl_cssp[3 - 1]; /* class 3 parameters */
6395 + cp->cp_class = htons(FC_CPC_VALID | FC_CPC_SEQ);
6396 +}
6397 +
6398 +/**
6399 + * fc_logo_fill - Fill in a logo request frame.
6400 + */
6401 +static inline void fc_logo_fill(struct fc_lport *lport, struct fc_frame *fp)
6402 +{
6403 + struct fc_els_logo *logo;
6404 +
6405 + logo = fc_frame_payload_get(fp, sizeof(*logo));
6406 + memset(logo, 0, sizeof(*logo));
6407 + logo->fl_cmd = ELS_LOGO;
6408 + hton24(logo->fl_n_port_id, fc_host_port_id(lport->host));
6409 + logo->fl_n_port_wwn = htonll(lport->wwpn);
6410 +}
6411 +
6412 +/**
6413 + * fc_rtv_fill - Fill in RTV (read timeout value) request frame.
6414 + */
6415 +static inline void fc_rtv_fill(struct fc_lport *lport, struct fc_frame *fp)
6416 +{
6417 + struct fc_els_rtv *rtv;
6418 +
6419 + rtv = fc_frame_payload_get(fp, sizeof(*rtv));
6420 + memset(rtv, 0, sizeof(*rtv));
6421 + rtv->rtv_cmd = ELS_RTV;
6422 +}
6423 +
6424 +/**
6425 + * fc_rec_fill - Fill in rec request frame
6426 + */
6427 +static inline void fc_rec_fill(struct fc_lport *lport, struct fc_frame *fp)
6428 +{
6429 + struct fc_els_rec *rec;
6430 + struct fc_exch *ep = fc_seq_exch(fr_seq(fp));
6431 +
6432 + rec = fc_frame_payload_get(fp, sizeof(*rec));
6433 + memset(rec, 0, sizeof(*rec));
6434 + rec->rec_cmd = ELS_REC;
6435 + hton24(rec->rec_s_id, fc_host_port_id(lport->host));
6436 + rec->rec_ox_id = htons(ep->oxid);
6437 + rec->rec_rx_id = htons(ep->rxid);
6438 +}
6439 +
6440 +/**
6441 + * fc_prli_fill - Fill in prli request frame
6442 + */
6443 +static inline void fc_prli_fill(struct fc_lport *lport, struct fc_frame *fp)
6444 +{
6445 + struct {
6446 + struct fc_els_prli prli;
6447 + struct fc_els_spp spp;
6448 + } *pp;
6449 +
6450 + pp = fc_frame_payload_get(fp, sizeof(*pp));
6451 + memset(pp, 0, sizeof(*pp));
6452 + pp->prli.prli_cmd = ELS_PRLI;
6453 + pp->prli.prli_spp_len = sizeof(struct fc_els_spp);
6454 + pp->prli.prli_len = htons(sizeof(*pp));
6455 + pp->spp.spp_type = FC_TYPE_FCP;
6456 + pp->spp.spp_flags = FC_SPP_EST_IMG_PAIR;
6457 + pp->spp.spp_params = htonl(lport->service_params);
6458 +}
6459 +
6460 +/**
6461 + * fc_scr_fill - Fill in a scr request frame.
6462 + */
6463 +static inline void fc_scr_fill(struct fc_lport *lport, struct fc_frame *fp)
6464 +{
6465 + struct fc_els_scr *scr;
6466 +
6467 + scr = fc_frame_payload_get(fp, sizeof(*scr));
6468 + memset(scr, 0, sizeof(*scr));
6469 + scr->scr_cmd = ELS_SCR;
6470 + scr->scr_reg_func = ELS_SCRF_FULL;
6471 +}
6472 +
6473 +/**
6474 + * fc_els_fill - Fill in an ELS request frame
6475 + */
6476 +static inline int fc_els_fill(struct fc_lport *lport, struct fc_rport *rport,
6477 + struct fc_frame *fp, unsigned int op,
6478 + enum fc_rctl *r_ctl, u32 *did, enum fc_fh_type *fh_type)
6479 +{
6480 + switch (op) {
6481 + case ELS_PLOGI:
6482 + fc_plogi_fill(lport, fp, ELS_PLOGI);
6483 + *did = rport->port_id;
6484 + break;
6485 +
6486 + case ELS_FLOGI:
6487 + fc_flogi_fill(lport, fp);
6488 + *did = FC_FID_FLOGI;
6489 + break;
6490 +
6491 + case ELS_LOGO:
6492 + fc_logo_fill(lport, fp);
6493 + *did = FC_FID_FLOGI;
6494 + /*
6495 + * if rport is valid then this
6496 + * is a port LOGO, therefore
6497 + * set did to the rport id.
6498 + */
6499 + if (rport)
6500 + *did = rport->port_id;
6501 + break;
6502 +
6503 + case ELS_RTV:
6504 + fc_rtv_fill(lport, fp);
6505 + *did = rport->port_id;
6506 + break;
6507 +
6508 + case ELS_REC:
6509 + fc_rec_fill(lport, fp);
6510 + *did = rport->port_id;
6511 + break;
6512 +
6513 + case ELS_PRLI:
6514 + fc_prli_fill(lport, fp);
6515 + *did = rport->port_id;
6516 + break;
6517 +
6518 + case ELS_SCR:
6519 + fc_scr_fill(lport, fp);
6520 + *did = FC_FID_FCTRL;
6521 + break;
6522 +
6523 + default:
6524 + FC_DBG("Invalid op code %x \n", op);
6525 + return -EINVAL;
6526 + }
6527 +
6528 + *r_ctl = FC_RCTL_ELS_REQ;
6529 + *fh_type = FC_TYPE_ELS;
6530 + return 0;
6531 +}
6532 +#endif /* _FC_ENCODE_H_ */
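
[Editor's note] To show how these inline helpers compose, here is a hedged sketch of a caller in the style of an elsct_send() implementation: fc_els_fill() chooses r_ctl, did and fh_type for the requested ELS, and fc_fill_fc_hdr() then stamps the header. The function name and the choice of f_ctl flags are assumptions for illustration only:

/* Illustrative sketch only -- not part of the patch. */
static int example_prepare_els(struct fc_lport *lport, struct fc_rport *rport,
			       struct fc_frame *fp, unsigned int op)
{
	enum fc_rctl r_ctl;
	enum fc_fh_type fh_type;
	u32 did;

	/* fill the ELS payload and learn r_ctl, did and fh_type for it */
	if (fc_els_fill(lport, rport, fp, op, &r_ctl, &did, &fh_type))
		return -EINVAL;

	/* stamp the FC header with the values chosen by fc_els_fill() */
	fc_fill_fc_hdr(fp, r_ctl, did, fc_host_port_id(lport->host), fh_type,
		       FC_FC_SEQ_INIT | FC_FC_END_SEQ, 0);
	return 0;
}
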
6533 diff --git a/include/scsi/fc_frame.h b/include/scsi/fc_frame.h
6534 new file mode 100644
6535 index 0000000..dc5f734
6536 --- /dev/null
6537 +++ b/include/scsi/fc_frame.h
6538 @@ -0,0 +1,239 @@
6539 +/*
6540 + * Copyright(c) 2007 Intel Corporation. All rights reserved.
6541 + *
6542 + * This program is free software; you can redistribute it and/or modify it
6543 + * under the terms and conditions of the GNU General Public License,
6544 + * version 2, as published by the Free Software Foundation.
6545 + *
6546 + * This program is distributed in the hope it will be useful, but WITHOUT
6547 + * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
6548 + * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
6549 + * more details.
6550 + *
6551 + * You should have received a copy of the GNU General Public License along with
6552 + * this program; if not, write to the Free Software Foundation, Inc.,
6553 + * 51 Franklin St - Fifth Floor, Boston, MA 02110-1301 USA.
6554 + *
6555 + * Maintained at www.Open-FCoE.org
6556 + */
6557 +
6558 +#ifndef _FC_FRAME_H_
6559 +#define _FC_FRAME_H_
6560 +
6561 +#include <linux/scatterlist.h>
6562 +#include <linux/skbuff.h>
6563 +#include <scsi/scsi_cmnd.h>
6564 +
6565 +#include <scsi/fc/fc_fs.h>
6566 +#include <scsi/fc/fc_fcp.h>
6567 +#include <scsi/fc/fc_encaps.h>
6568 +
6569 +/*
6570 + * The fc_frame interface is used to pass frame data between functions.
6571 + * The frame includes the data buffer, length, and SOF / EOF delimiter types.
6572 + * A pointer to the port structure of the receiving port is also included.
6573 + */
6574 +
6575 +#define FC_FRAME_HEADROOM 32 /* headroom for VLAN + FCoE headers */
6576 +#define FC_FRAME_TAILROOM 8 /* trailer space for FCoE */
6577 +
6578 +/*
6579 + * Information about an individual fibre channel frame received or to be sent.
6580 + * The buffer may be in up to 4 additional non-contiguous sections,
6581 + * but the linear section must hold the frame header.
6582 + */
6583 +#define FC_FRAME_SG_LEN 4 /* scatter/gather list maximum length */
6584 +
6585 +#define fp_skb(fp) (&((fp)->skb))
6586 +#define fr_hdr(fp) ((fp)->skb.data)
6587 +#define fr_len(fp) ((fp)->skb.len)
6588 +#define fr_cb(fp) ((struct fcoe_rcv_info *)&((fp)->skb.cb[0]))
6589 +#define fr_dev(fp) (fr_cb(fp)->fr_dev)
6590 +#define fr_seq(fp) (fr_cb(fp)->fr_seq)
6591 +#define fr_sof(fp) (fr_cb(fp)->fr_sof)
6592 +#define fr_eof(fp) (fr_cb(fp)->fr_eof)
6593 +#define fr_flags(fp) (fr_cb(fp)->fr_flags)
6594 +#define fr_max_payload(fp) (fr_cb(fp)->fr_max_payload)
6595 +#define fr_cmd(fp) (fr_cb(fp)->fr_cmd)
6596 +#define fr_dir(fp) (fr_cmd(fp)->sc_data_direction)
6597 +
6598 +struct fc_frame {
6599 + struct sk_buff skb;
6600 +};
6601 +
6602 +struct fcoe_rcv_info {
6603 + struct packet_type *ptype;
6604 + struct fc_lport *fr_dev; /* transport layer private pointer */
6605 + struct fc_seq *fr_seq; /* for use with exchange manager */
6606 + struct scsi_cmnd *fr_cmd; /* for use of scsi command */
6607 + enum fc_sof fr_sof; /* start of frame delimiter */
6608 + enum fc_eof fr_eof; /* end of frame delimiter */
6609 + u8 fr_flags; /* flags - see below */
6610 + u16 fr_max_payload; /* max FC payload */
6611 +};
6612 +
6613 +/*
6614 + * Get fc_frame pointer for an skb that's already been imported.
6615 + */
6616 +static inline struct fcoe_rcv_info *fcoe_dev_from_skb(const struct sk_buff *skb)
6617 +{
6618 + BUILD_BUG_ON(sizeof(struct fcoe_rcv_info) > sizeof(skb->cb));
6619 + return (struct fcoe_rcv_info *) skb->cb;
6620 +}
6621 +
6622 +/*
6623 + * fr_flags.
6624 + */
6625 +#define FCPHF_CRC_UNCHECKED 0x01 /* CRC not computed, still appended */
6626 +
6627 +/*
6628 + * Initialize a frame.
6629 + * We don't do a complete memset here for performance reasons.
6630 + * The caller must set fr_free, fr_hdr, fr_len, fr_sof, and fr_eof eventually.
6631 + */
6632 +static inline void fc_frame_init(struct fc_frame *fp)
6633 +{
6634 + fr_dev(fp) = NULL;
6635 + fr_seq(fp) = NULL;
6636 + fr_flags(fp) = 0;
6637 +}
6638 +
6639 +struct fc_frame *fc_frame_alloc_fill(struct fc_lport *, size_t payload_len);
6640 +
6641 +struct fc_frame *__fc_frame_alloc(size_t payload_len);
6642 +
6643 +/*
6644 + * Get frame for sending via port.
6645 + */
6646 +static inline struct fc_frame *_fc_frame_alloc(struct fc_lport *dev,
6647 + size_t payload_len)
6648 +{
6649 + return __fc_frame_alloc(payload_len);
6650 +}
6651 +
6652 +/*
6653 + * Allocate fc_frame structure and buffer. Set the initial length to
6654 + * payload_size + sizeof (struct fc_frame_header).
6655 + */
6656 +static inline struct fc_frame *fc_frame_alloc(struct fc_lport *dev, size_t len)
6657 +{
6658 + struct fc_frame *fp;
6659 +
6660 + /*
6661 + * Note: Since len will often be a constant multiple of 4,
6662 + * this check will usually be evaluated and eliminated at compile time.
6663 + */
6664 + if ((len % 4) != 0)
6665 + fp = fc_frame_alloc_fill(dev, len);
6666 + else
6667 + fp = _fc_frame_alloc(dev, len);
6668 + return fp;
6669 +}
6670 +
6671 +/*
6672 + * Free the fc_frame structure and buffer.
6673 + */
6674 +static inline void fc_frame_free(struct fc_frame *fp)
6675 +{
6676 + kfree_skb(fp_skb(fp));
6677 +}
6678 +
6679 +static inline int fc_frame_is_linear(struct fc_frame *fp)
6680 +{
6681 + return !skb_is_nonlinear(fp_skb(fp));
6682 +}
6683 +
6684 +/*
6685 + * Get frame header from message in fc_frame structure.
6686 + * This hides a cast and provides a place to add some checking.
6687 + */
6688 +static inline
6689 +struct fc_frame_header *fc_frame_header_get(const struct fc_frame *fp)
6690 +{
6691 + WARN_ON(fr_len(fp) < sizeof(struct fc_frame_header));
6692 + return (struct fc_frame_header *) fr_hdr(fp);
6693 +}
6694 +
6695 +/*
6696 + * Get frame payload from message in fc_frame structure.
6697 + * This hides a cast and provides a place to add some checking.
6698 + * The len parameter is the minimum length for the payload portion.
6699 + * Returns NULL if the frame is too short.
6700 + *
6701 + * This assumes the interesting part of the payload is in the first part
6702 + * of the buffer for received data. This may not be appropriate to use for
6703 + * buffers being transmitted.
6704 + */
6705 +static inline void *fc_frame_payload_get(const struct fc_frame *fp,
6706 + size_t len)
6707 +{
6708 + void *pp = NULL;
6709 +
6710 + if (fr_len(fp) >= sizeof(struct fc_frame_header) + len)
6711 + pp = fc_frame_header_get(fp) + 1;
6712 + return pp;
6713 +}
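
[Editor's note] A small usage sketch of the allocation and payload helpers above, mirroring how the ELS paths in this patch use them; the 64-byte payload length and the function name are arbitrary placeholders:

/* Illustrative sketch only -- not part of the patch. */
static inline void example_frame_usage(struct fc_lport *lport)
{
	struct fc_frame *fp;
	void *pp;

	fp = fc_frame_alloc(lport, 64);		/* header + 64-byte payload */
	if (!fp)
		return;
	pp = fc_frame_payload_get(fp, 64);	/* points just past the header */
	memset(pp, 0, 64);
	fc_frame_free(fp);			/* releases the underlying skb */
}
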
6714 +
6715 +/*
6716 + * Get frame payload opcode (first byte) from message in fc_frame structure.
6717 + * This hides a cast and provides a place to add some checking. Return 0
6718 + * if the frame has no payload.
6719 + */
6720 +static inline u8 fc_frame_payload_op(const struct fc_frame *fp)
6721 +{
6722 + u8 *cp;
6723 +
6724 + cp = fc_frame_payload_get(fp, sizeof(u8));
6725 + if (!cp)
6726 + return 0;
6727 + return *cp;
6728 +
6729 +}
6730 +
6731 +/*
6732 + * Get FC class from frame.
6733 + */
6734 +static inline enum fc_class fc_frame_class(const struct fc_frame *fp)
6735 +{
6736 + return fc_sof_class(fr_sof(fp));
6737 +}
6738 +
6739 +/*
6740 + * Check the CRC in a frame.
6741 + * The CRC immediately follows the last data item *AFTER* the length.
6742 + * The return value is zero if the CRC matches.
6743 + */
6744 +u32 fc_frame_crc_check(struct fc_frame *);
6745 +
6746 +static inline u8 fc_frame_rctl(const struct fc_frame *fp)
6747 +{
6748 + return fc_frame_header_get(fp)->fh_r_ctl;
6749 +}
6750 +
6751 +static inline bool fc_frame_is_cmd(const struct fc_frame *fp)
6752 +{
6753 + return fc_frame_rctl(fp) == FC_RCTL_DD_UNSOL_CMD;
6754 +}
6755 +
6756 +static inline bool fc_frame_is_read(const struct fc_frame *fp)
6757 +{
6758 + if (fc_frame_is_cmd(fp) && fr_cmd(fp))
6759 + return fr_dir(fp) == DMA_FROM_DEVICE;
6760 + return false;
6761 +}
6762 +
6763 +static inline bool fc_frame_is_write(const struct fc_frame *fp)
6764 +{
6765 + if (fc_frame_is_cmd(fp) && fr_cmd(fp))
6766 + return fr_dir(fp) == DMA_TO_DEVICE;
6767 + return false;
6768 +}
6769 +
6770 +/*
6771 + * Check for leaks.
6772 + * Print the frame header of any currently allocated frame, assuming there
6773 + * should be none at this point.
6774 + */
6775 +void fc_frame_leak_check(void);
6776 +
6777 +#endif /* _FC_FRAME_H_ */
6778 diff --git a/include/scsi/fc_transport_fcoe.h b/include/scsi/fc_transport_fcoe.h
6779 new file mode 100644
6780 index 0000000..8dca2af
6781 --- /dev/null
6782 +++ b/include/scsi/fc_transport_fcoe.h
6783 @@ -0,0 +1,54 @@
6784 +#ifndef FC_TRANSPORT_FCOE_H
6785 +#define FC_TRANSPORT_FCOE_H
6786 +
6787 +#include <linux/device.h>
6788 +#include <linux/netdevice.h>
6789 +#include <scsi/scsi_host.h>
6790 +#include <scsi/libfc.h>
6791 +
6792 +/**
6793 + * struct fcoe_transport - FCoE transport struct for generic transport
6794 + * for Ethernet devices as well as pure HBAs
6795 + *
6796 + * @name: name for this transport
6797 + * @bus: physical bus type (pci_bus_type)
6798 + * @driver: physical bus driver for network device
6799 + * @create: entry create function
6800 + * @destroy: exit destroy function
6801 + * @list: list of transports
6802 + */
6803 +struct fcoe_transport {
6804 + char *name;
6805 + unsigned short vendor;
6806 + unsigned short device;
6807 + struct bus_type *bus;
6808 + struct device_driver *driver;
6809 + int (*create)(struct net_device *device);
6810 + int (*destroy)(struct net_device *device);
6811 + bool (*match)(struct net_device *device);
6812 + struct list_head list;
6813 + struct list_head devlist;
6814 + struct mutex devlock;
6815 +};
6816 +
6817 +/**
6818 + * MODULE_ALIAS_FCOE_PCI
6819 + *
6820 + * some care must be taken with this: vendor and device MUST be hex values
6821 + * preceded by 0x, with letters in lower case (0x12ab, not 0x12AB or 12AB)
6822 + */
6823 +#define MODULE_ALIAS_FCOE_PCI(vendor, device) \
6824 + MODULE_ALIAS("fcoe-pci-" __stringify(vendor) "-" __stringify(device))
6825 +
6826 +/* exported funcs */
6827 +int fcoe_transport_attach(struct net_device *netdev);
6828 +int fcoe_transport_release(struct net_device *netdev);
6829 +int fcoe_transport_register(struct fcoe_transport *t);
6830 +int fcoe_transport_unregister(struct fcoe_transport *t);
6831 +int fcoe_load_transport_driver(struct net_device *netdev);
6832 +int __init fcoe_transport_init(void);
6833 +int __exit fcoe_transport_exit(void);
6834 +
6835 +/* fcoe_sw is the default transport */
6836 +extern struct fcoe_transport fcoe_sw_transport;
6837 +#endif /* FC_TRANSPORT_FCOE_H */
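
[Editor's note] For context, a hedged sketch of how a transport provider would use this header: fill in a struct fcoe_transport and register it with the exported functions declared above. The example_* names and the trivial create/destroy handlers are placeholders, not part of the patch:

/* Illustrative sketch only -- not part of the patch. */
static int example_create(struct net_device *netdev)
{
	return 0;	/* a real transport would set up FCoE on netdev here */
}

static int example_destroy(struct net_device *netdev)
{
	return 0;	/* ... and tear it down here */
}

static struct fcoe_transport example_transport = {
	.name = "example",
	.create = example_create,
	.destroy = example_destroy,
};

static int __init example_module_init(void)
{
	return fcoe_transport_register(&example_transport);
}

static void __exit example_module_exit(void)
{
	fcoe_transport_unregister(&example_transport);
}
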
6838 diff --git a/include/scsi/libfc.h b/include/scsi/libfc.h
6839 new file mode 100644
6840 index 0000000..dac03e2
6841 --- /dev/null
6842 +++ b/include/scsi/libfc.h
6843 @@ -0,0 +1,917 @@
6844 +/*
6845 + * Copyright(c) 2007 Intel Corporation. All rights reserved.
6846 + *
6847 + * This program is free software; you can redistribute it and/or modify it
6848 + * under the terms and conditions of the GNU General Public License,
6849 + * version 2, as published by the Free Software Foundation.
6850 + *
6851 + * This program is distributed in the hope it will be useful, but WITHOUT
6852 + * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
6853 + * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
6854 + * more details.
6855 + *
6856 + * You should have received a copy of the GNU General Public License along with
6857 + * this program; if not, write to the Free Software Foundation, Inc.,
6858 + * 51 Franklin St - Fifth Floor, Boston, MA 02110-1301 USA.
6859 + *
6860 + * Maintained at www.Open-FCoE.org
6861 + */
6862 +
6863 +#ifndef _LIBFC_H_
6864 +#define _LIBFC_H_
6865 +
6866 +#include <linux/timer.h>
6867 +#include <linux/if.h>
6868 +
6869 +#include <scsi/scsi_transport.h>
6870 +#include <scsi/scsi_transport_fc.h>
6871 +
6872 +#include <scsi/fc/fc_fcp.h>
6873 +#include <scsi/fc/fc_ns.h>
6874 +#include <scsi/fc/fc_els.h>
6875 +#include <scsi/fc/fc_gs.h>
6876 +
6877 +#include <scsi/fc_frame.h>
6878 +
6879 +#define LIBFC_DEBUG
6880 +
6881 +#ifdef LIBFC_DEBUG
6882 +/* Log messages */
6883 +#define FC_DBG(fmt, args...) \
6884 + do { \
6885 + printk(KERN_INFO "%s " fmt, __func__, ##args); \
6886 + } while (0)
6887 +#else
6888 +#define FC_DBG(fmt, args...)
6889 +#endif
6890 +
6891 +/*
6892 + * libfc error codes
6893 + */
6894 +#define FC_NO_ERR 0 /* no error */
6895 +#define FC_EX_TIMEOUT 1 /* Exchange timeout */
6896 +#define FC_EX_CLOSED 2 /* Exchange closed */
6897 +
6898 +/* some helpful macros */
6899 +
6900 +#define ntohll(x) be64_to_cpu(x)
6901 +#define htonll(x) cpu_to_be64(x)
6902 +
6903 +#define ntoh24(p) (((p)[0] << 16) | ((p)[1] << 8) | ((p)[2]))
6904 +
6905 +#define hton24(p, v) do { \
6906 + p[0] = (((v) >> 16) & 0xFF); \
6907 + p[1] = (((v) >> 8) & 0xFF); \
6908 + p[2] = ((v) & 0xFF); \
6909 + } while (0)
6910 +
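
[Editor's note] A quick example of the 24-bit helpers above, round-tripping an FC destination ID; illustrative only, the function name is a placeholder:

/* Illustrative sketch only -- not part of the patch. */
static inline void example_fcid_roundtrip(void)
{
	u8 fcid[3];
	u32 did = 0x010203;	/* a 24-bit FC destination ID */

	hton24(fcid, did);	/* fcid[] = { 0x01, 0x02, 0x03 } */
	WARN_ON(ntoh24(fcid) != did);
}
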
6911 +/*
6912 + * FC HBA status
6913 + */
6914 +#define FC_PAUSE (1 << 1)
6915 +#define FC_LINK_UP (1 << 0)
6916 +
6917 +enum fc_lport_state {
6918 + LPORT_ST_NONE = 0,
6919 + LPORT_ST_FLOGI,
6920 + LPORT_ST_DNS,
6921 + LPORT_ST_RPN_ID,
6922 + LPORT_ST_RFT_ID,
6923 + LPORT_ST_SCR,
6924 + LPORT_ST_READY,
6925 + LPORT_ST_LOGO,
6926 + LPORT_ST_RESET
6927 +};
6928 +
6929 +enum fc_disc_event {
6930 + DISC_EV_NONE = 0,
6931 + DISC_EV_SUCCESS,
6932 + DISC_EV_FAILED
6933 +};
6934 +
6935 +enum fc_lport_event {
6936 + RPORT_EV_NONE = 0,
6937 + RPORT_EV_CREATED,
6938 + RPORT_EV_FAILED,
6939 + RPORT_EV_STOP,
6940 + RPORT_EV_LOGO
6941 +};
6942 +
6943 +enum fc_rport_state {
6944 + RPORT_ST_NONE = 0,
6945 + RPORT_ST_INIT, /* initialized */
6946 + RPORT_ST_PLOGI, /* waiting for PLOGI completion */
6947 + RPORT_ST_PRLI, /* waiting for PRLI completion */
6948 + RPORT_ST_RTV, /* waiting for RTV completion */
6949 + RPORT_ST_READY, /* ready for use */
6950 + RPORT_ST_LOGO, /* port logout sent */
6951 +};
6952 +
6953 +enum fc_rport_trans_state {
6954 + FC_PORTSTATE_ROGUE,
6955 + FC_PORTSTATE_REAL,
6956 +};
6957 +
6958 +/**
6959 + * struct fc_disc_port - temporary discovery port to hold rport identifiers
6960 + * @lp: Fibre Channel host port instance
6961 + * @peers: node for list management during discovery and RSCN processing
6962 + * @ids: identifiers structure to pass to fc_remote_port_add()
6963 + * @rport_work: work struct for starting the rport state machine
6964 + */
6965 +struct fc_disc_port {
6966 + struct fc_lport *lp;
6967 + struct list_head peers;
6968 + struct fc_rport_identifiers ids;
6969 + struct work_struct rport_work;
6970 +};
6971 +
6972 +/**
6973 + * struct fc_rport_libfc_priv - libfc internal information about a remote port
6974 + * @local_port: Fibre Channel host port instance
6975 + * @rp_state: state tracks progress of PLOGI, PRLI, and RTV exchanges
6976 + * @flags: REC and RETRY supported flags
6977 + * @max_seq: maximum number of concurrent sequences
6978 + * @retries: retry count in current state
6979 + * @e_d_tov: error detect timeout value (in msec)
6980 + * @r_a_tov: resource allocation timeout value (in msec)
6981 + * @rp_mutex: mutex protects rport
6982 + * @retry_work: delayed work used to retry the current state
6983 + * @event_callback: Callback for rport READY, FAILED or LOGO
6984 + */
6985 +struct fc_rport_libfc_priv {
6986 + struct fc_lport *local_port;
6987 + enum fc_rport_state rp_state;
6988 + u16 flags;
6989 + #define FC_RP_FLAGS_REC_SUPPORTED (1 << 0)
6990 + #define FC_RP_FLAGS_RETRY (1 << 1)
6991 + u16 max_seq;
6992 + unsigned int retries;
6993 + unsigned int e_d_tov;
6994 + unsigned int r_a_tov;
6995 + enum fc_rport_trans_state trans_state;
6996 + struct mutex rp_mutex;
6997 + struct delayed_work retry_work;
6998 + enum fc_lport_event event;
6999 + void (*event_callback)(struct fc_lport *,
7000 + struct fc_rport *,
7001 + enum fc_lport_event);
7002 + struct list_head peers;
7003 + struct work_struct event_work;
7004 +};
7005 +
7006 +#define PRIV_TO_RPORT(x) \
7007 + (struct fc_rport *)((void *)x - sizeof(struct fc_rport));
7008 +#define RPORT_TO_PRIV(x) \
7009 + (struct fc_rport_libfc_priv *)((void *)x + sizeof(struct fc_rport));
7010 +
7011 +struct fc_rport *fc_rport_rogue_create(struct fc_disc_port *);
7012 +void fc_rport_rogue_destroy(struct fc_rport *);
7013 +
7014 +static inline void fc_rport_set_name(struct fc_rport *rport, u64 wwpn, u64 wwnn)
7015 +{
7016 + rport->node_name = wwnn;
7017 + rport->port_name = wwpn;
7018 +}
7019 +
7020 +/*
7021 + * fcoe stats structure
7022 + */
7023 +struct fcoe_dev_stats {
7024 + u64 SecondsSinceLastReset;
7025 + u64 TxFrames;
7026 + u64 TxWords;
7027 + u64 RxFrames;
7028 + u64 RxWords;
7029 + u64 ErrorFrames;
7030 + u64 DumpedFrames;
7031 + u64 LinkFailureCount;
7032 + u64 LossOfSignalCount;
7033 + u64 InvalidTxWordCount;
7034 + u64 InvalidCRCCount;
7035 + u64 InputRequests;
7036 + u64 OutputRequests;
7037 + u64 ControlRequests;
7038 + u64 InputMegabytes;
7039 + u64 OutputMegabytes;
7040 +};
7041 +
7042 +/*
7043 + * ELS data used for passing ELS response specific
7044 + * data when sending an ELS response, mainly using information
7045 + * from the exchange and sequence in the EM layer.
7046 + */
7047 +struct fc_seq_els_data {
7048 + struct fc_frame *fp;
7049 + enum fc_els_rjt_reason reason;
7050 + enum fc_els_rjt_explan explan;
7051 +};
7052 +
7053 +/*
7054 + * FCP request structure, one for each scsi cmd request
7055 + */
7056 +struct fc_fcp_pkt {
7057 + /*
7058 + * housekeeping stuff
7059 + */
7060 + struct fc_lport *lp; /* handle to hba struct */
7061 + u16 state; /* scsi_pkt state state */
7062 + u16 tgt_flags; /* target flags */
7063 + atomic_t ref_cnt; /* fcp pkt ref count */
7064 + spinlock_t scsi_pkt_lock; /* Must be taken before the host lock
7065 + * if both are held at the same time */
7066 + /*
7067 + * SCSI I/O related stuff
7068 + */
7069 + struct scsi_cmnd *cmd; /* scsi command pointer. set/clear
7070 + * under host lock */
7071 + struct list_head list; /* tracks queued commands. access under
7072 + * host lock */
7073 + /*
7074 + * timeout related stuff
7075 + */
7076 + struct timer_list timer; /* command timer */
7077 + struct completion tm_done;
7078 + int wait_for_comp;
7079 + unsigned long start_time; /* start jiffie */
7080 + unsigned long end_time; /* end jiffie */
7081 + unsigned long last_pkt_time; /* jiffies of last frame received */
7082 +
7083 + /*
7084 + * scsi cmd and data transfer information
7085 + */
7086 + u32 data_len;
7087 + /*
7088 + * transport related variables
7089 + */
7090 + struct fcp_cmnd cdb_cmd;
7091 + size_t xfer_len;
7092 + u32 xfer_contig_end; /* offset of end of contiguous xfer */
7093 + u16 max_payload; /* max payload size in bytes */
7094 +
7095 + /*
7096 + * scsi/fcp return status
7097 + */
7098 + u32 io_status; /* SCSI result upper 24 bits */
7099 + u8 cdb_status;
7100 + u8 status_code; /* FCP I/O status */
7101 + /* bit 3: underrun, bit 2: overrun */
7102 + u8 scsi_comp_flags;
7103 + u32 req_flags; /* bit 0: read, bit 1: write */
7104 + u32 scsi_resid; /* residual length */
7105 +
7106 + struct fc_rport *rport; /* remote port pointer */
7107 + struct fc_seq *seq_ptr; /* current sequence pointer */
7108 + /*
7109 + * Error Processing
7110 + */
7111 + u8 recov_retry; /* count of recovery retries */
7112 + struct fc_seq *recov_seq; /* sequence for REC or SRR */
7113 +};
7114 +
7115 +/*
7116 + * Structure and function definitions for managing Fibre Channel Exchanges
7117 + * and Sequences
7118 + *
7119 + * fc_exch holds state for one exchange and links to its active sequence.
7120 + *
7121 + * fc_seq holds the state for an individual sequence.
7122 + */
7123 +
7124 +struct fc_exch_mgr;
7125 +
7126 +/*
7127 + * Sequence.
7128 + */
7129 +struct fc_seq {
7130 + u8 id; /* seq ID */
7131 + u16 ssb_stat; /* status flags for sequence status block */
7132 + u16 cnt; /* frames sent so far on sequence */
7133 + u32 f_ctl; /* F_CTL flags for frames */
7134 + u32 rec_data; /* FC-4 value for REC */
7135 +};
7136 +
7137 +#define FC_EX_DONE (1 << 0) /* ep is completed */
7138 +#define FC_EX_RST_CLEANUP (1 << 1) /* reset is forcing completion */
7139 +
7140 +/*
7141 + * Exchange.
7142 + *
7143 + * Locking notes: The ex_lock protects changes to the following fields:
7144 + * esb_stat, f_ctl, seq.ssb_stat, seq.f_ctl,
7145 + * seq_id, and
7146 + * sequence allocation.
7147 + *
7148 + */
7149 +struct fc_exch {
7150 + struct fc_exch_mgr *em; /* exchange manager */
7151 + u32 state; /* internal driver state */
7152 + u16 xid; /* our exchange ID */
7153 + struct list_head ex_list; /* free or busy list linkage */
7154 + spinlock_t ex_lock; /* lock covering exchange state */
7155 + atomic_t ex_refcnt; /* reference counter */
7156 + struct delayed_work timeout_work; /* timer for upper level protocols */
7157 + struct fc_lport *lp; /* fc device instance */
7158 + u16 oxid; /* originator's exchange ID */
7159 + u16 rxid; /* responder's exchange ID */
7160 + u32 oid; /* originator's FCID */
7161 + u32 sid; /* source FCID */
7162 + u32 did; /* destination FCID */
7163 + u32 esb_stat; /* exchange status for ESB */
7164 + u32 r_a_tov; /* r_a_tov from rport (msec) */
7165 + u8 seq_id; /* next sequence ID to use */
7166 + u32 f_ctl; /* F_CTL flags for sequences */
7167 + u8 fh_type; /* frame type */
7168 + enum fc_class class; /* class of service */
7169 + struct fc_seq seq; /* single sequence */
7170 + /*
7171 + * Handler for responses to this current exchange.
7172 + */
7173 + void (*resp)(struct fc_seq *, struct fc_frame *, void *);
7174 + void (*destructor)(struct fc_seq *, void *);
7175 + /*
7176 + * arg is passed as void pointer to exchange
7177 + * resp and destructor handlers
7178 + */
7179 + void *arg;
7180 +};
7181 +#define fc_seq_exch(sp) container_of(sp, struct fc_exch, seq)
7182 +
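Editor's note: fc_seq_exch() above is the standard container_of() pattern; because the fc_seq is embedded in its fc_exch, the enclosing exchange can be recovered by subtracting the member offset. A small, self-contained illustration of the same pattern (stand-in types, not the real libfc structures):

#include <stddef.h>
#include <stdio.h>

struct seq  { int id; };
struct exch { int xid; struct seq seq; };

/* same idea as container_of(sp, struct fc_exch, seq) */
#define seq_to_exch(sp) \
        ((struct exch *)((char *)(sp) - offsetof(struct exch, seq)))

int main(void)
{
        struct exch ex = { .xid = 0x10, .seq = { .id = 1 } };
        struct seq *sp = &ex.seq;

        printf("xid=%#x\n", seq_to_exch(sp)->xid);  /* prints xid=0x10 */
        return 0;
}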
7183 +struct libfc_function_template {
7184 +
7185 + /**
7186 + * Mandatory Fields
7187 + *
7188 + * These handlers must be implemented by the LLD.
7189 + */
7190 +
7191 + /*
7192 + * Interface to send a FC frame
7193 + */
7194 + int (*frame_send)(struct fc_lport *lp, struct fc_frame *fp);
7195 +
7196 + /**
7197 + * Optional Fields
7198 + *
7199 + * The LLD may choose to implement any of the following handlers.
7200 + * If the LLD does not provide a handler and leaves its pointer NULL,
7201 + * then the default libfc function will be used for that handler.
7202 + */
7203 +
7204 + /**
7205 + * ELS/CT interfaces
7206 + */
7207 +
7208 + /*
7209 + * elsct_send - sends ELS/CT frame
7210 + */
7211 + struct fc_seq *(*elsct_send)(struct fc_lport *lport,
7212 + struct fc_rport *rport,
7213 + struct fc_frame *fp,
7214 + unsigned int op,
7215 + void (*resp)(struct fc_seq *,
7216 + struct fc_frame *fp,
7217 + void *arg),
7218 + void *arg, u32 timer_msec);
7219 + /**
7220 + * Exchange Manager interfaces
7221 + */
7222 +
7223 + /*
7224 + * Send the FC frame payload using a new exchange and sequence.
7225 + *
7226 + * Some of the frame header's fields must be filled in
7227 + * before calling exch_seq_send(); those fields are:
7228 + *
7229 + * - routing control
7230 + * - FC port did
7231 + * - FC port sid
7232 + * - FC header type
7233 + * - frame control
7234 + * - parameter or relative offset
7235 + *
7236 + * The exchange response handler is set in this routine to the resp()
7237 + * function pointer. It can be called in two scenarios: when a timeout
7238 + * occurs or when a response frame is received for the exchange. The
7239 + * fc_frame pointer passed to the response handler also indicates a
7240 + * timeout as an error, using the IS_ERR() family of macros.
7241 + *
7242 + * The exchange destructor handler is also set in this routine.
7243 + * The destructor handler is invoked by the EM layer when the exchange
7244 + * is about to be freed; the caller can use it to release its own
7245 + * resources along with the exchange.
7246 + *
7247 + * The arg is passed back to resp and destructor handler.
7248 + *
7249 + * The timeout value (in msec) for an exchange is set if a non-zero
7250 + * timer_msec argument is specified. The timer is canceled when
7251 + * it fires or when the exchange is done. The exchange timeout handler
7252 + * is registered by the EM layer.
7253 + */
7254 + struct fc_seq *(*exch_seq_send)(struct fc_lport *lp,
7255 + struct fc_frame *fp,
7256 + void (*resp)(struct fc_seq *sp,
7257 + struct fc_frame *fp,
7258 + void *arg),
7259 + void (*destructor)(struct fc_seq *sp,
7260 + void *arg),
7261 + void *arg, unsigned int timer_msec);
7262 +
7263 + /*
7264 + * Send a frame using an existing sequence and exchange.
7265 + */
7266 + int (*seq_send)(struct fc_lport *lp, struct fc_seq *sp,
7267 + struct fc_frame *fp);
7268 +
7269 + /*
7270 + * Send an ELS response, mainly using information
7271 + * held in the exchange and sequence in the EM layer.
7272 + */
7273 + void (*seq_els_rsp_send)(struct fc_seq *sp, enum fc_els_cmd els_cmd,
7274 + struct fc_seq_els_data *els_data);
7275 +
7276 + /*
7277 + * Abort an exchange and sequence. Generally called because of an
7278 + * exchange timeout or an abort from the upper layer.
7279 + *
7280 + * A timer_msec value can be specified as the abort timeout; if it is
7281 + * non-zero, the exchange resp handler will be called with a timeout
7282 + * error when no response to the abort is received.
7283 + */
7284 + int (*seq_exch_abort)(const struct fc_seq *req_sp,
7285 + unsigned int timer_msec);
7286 +
7287 + /*
7288 + * Indicate that an exchange/sequence tuple is complete and the memory
7289 + * allocated for the related objects may be freed.
7290 + */
7291 + void (*exch_done)(struct fc_seq *sp);
7292 +
7293 + /*
7294 + * Assigns an EM and a free XID for a new exchange and then
7295 + * allocates a new exchange and sequence pair.
7296 + * The fp can be used to determine the free XID.
7297 + */
7298 + struct fc_exch *(*exch_get)(struct fc_lport *lp, struct fc_frame *fp);
7299 +
7300 + /*
7301 + * Release an XID previously assigned by the exch_get API.
7302 + * The LLD may implement this if the XID is assigned by the LLD
7303 + * in exch_get().
7304 + */
7305 + void (*exch_put)(struct fc_lport *lp, struct fc_exch_mgr *mp,
7306 + u16 ex_id);
7307 +
7308 + /*
7309 + * Start a new sequence on the same exchange/sequence tuple.
7310 + */
7311 + struct fc_seq *(*seq_start_next)(struct fc_seq *sp);
7312 +
7313 + /*
7314 + * Reset an exchange manager, completing all sequences and exchanges.
7315 + * If s_id is non-zero, reset only exchanges originating from that FID.
7316 + * If d_id is non-zero, reset only exchanges sending to that FID.
7317 + */
7318 + void (*exch_mgr_reset)(struct fc_exch_mgr *,
7319 + u32 s_id, u32 d_id);
7320 +
7321 + void (*rport_flush_queue)(void);
7322 + /**
7323 + * Local Port interfaces
7324 + */
7325 +
7326 + /*
7327 + * Receive a frame to a local port.
7328 + */
7329 + void (*lport_recv)(struct fc_lport *lp, struct fc_seq *sp,
7330 + struct fc_frame *fp);
7331 +
7332 + int (*lport_reset)(struct fc_lport *);
7333 +
7334 + /**
7335 + * Remote Port interfaces
7336 + */
7337 +
7338 + /*
7339 + * Initiates the RP state machine. It is called from the LP module.
7340 + * This function will issue the following commands to the N_Port
7341 + * identified by the FC ID provided.
7342 + *
7343 + * - PLOGI
7344 + * - PRLI
7345 + * - RTV
7346 + */
7347 + int (*rport_login)(struct fc_rport *rport);
7348 +
7349 + /*
7350 + * Log off and remove the rport from the transport if
7351 + * it had been added. This will send a LOGO to the target.
7352 + */
7353 + int (*rport_logoff)(struct fc_rport *rport);
7354 +
7355 + /*
7356 + * Receive a request from a remote port.
7357 + */
7358 + void (*rport_recv_req)(struct fc_seq *, struct fc_frame *,
7359 + struct fc_rport *);
7360 +
7361 + struct fc_rport *(*rport_lookup)(const struct fc_lport *, u32);
7362 +
7363 + /**
7364 + * FCP interfaces
7365 + */
7366 +
7367 + /*
7368 + * Send an FCP command from the fsp packet.
7369 + * Called with the SCSI host lock unlocked and irqs disabled.
7370 + *
7371 + * The resp handler is called when an FCP_RSP is received.
7372 + *
7373 + */
7374 + int (*fcp_cmd_send)(struct fc_lport *lp, struct fc_fcp_pkt *fsp,
7375 + void (*resp)(struct fc_seq *, struct fc_frame *fp,
7376 + void *arg));
7377 +
7378 + /*
7379 + * Used at least during linkdown and reset.
7380 + */
7381 + void (*fcp_cleanup)(struct fc_lport *lp);
7382 +
7383 + /*
7384 + * Abort all I/O on a local port
7385 + */
7386 + void (*fcp_abort_io)(struct fc_lport *lp);
7387 +
7388 + /**
7389 + * Discovery interfaces
7390 + */
7391 +
7392 + void (*disc_recv_req)(struct fc_seq *,
7393 + struct fc_frame *, struct fc_lport *);
7394 +
7395 + /*
7396 + * Start discovery for a local port.
7397 + */
7398 + void (*disc_start)(void (*disc_callback)(struct fc_lport *,
7399 + enum fc_disc_event),
7400 + struct fc_lport *);
7401 +
7402 + /*
7403 + * Stop discovery for a given lport. This will remove
7404 + * all discovered rports.
7405 + */
7406 + void (*disc_stop) (struct fc_lport *);
7407 +
7408 + /*
7409 + * Stop discovery for a given lport. This will block
7410 + * until all discovered rports are deleted from the
7411 + * FC transport class.
7412 + */
7413 + void (*disc_stop_final) (struct fc_lport *);
7414 +};
7415 +
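Editor's note: as a rough sketch of how this template is meant to be used (illustrative only; the my_lld_* names are invented, and the exact set and order of init calls may differ in a real driver), the LLD supplies the mandatory frame_send handler, leaves the optional pointers NULL, and then lets the fc_*_init() helpers declared later in this header install the libfc defaults:

/* Sketch only; not taken from a real driver. */
static int my_lld_frame_send(struct fc_lport *lp, struct fc_frame *fp)
{
        /* hand the frame to the driver's transmit path here */
        return 0;
}

static int my_lld_libfc_config(struct fc_lport *lp)
{
        /* mandatory handler supplied by the LLD */
        lp->tt.frame_send = my_lld_frame_send;

        /* optional handlers left NULL are assumed to be filled in with
         * the libfc defaults by the init helpers declared below */
        fc_exch_init(lp);
        fc_elsct_init(lp);
        fc_lport_init(lp);
        fc_rport_init(lp);
        fc_disc_init(lp);
        fc_fcp_init(lp);
        return 0;
}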
7416 +struct fc_lport {
7417 + struct list_head list;
7418 +
7419 + /* Associations */
7420 + struct Scsi_Host *host;
7421 + struct fc_exch_mgr *emp;
7422 + struct fc_rport *dns_rp;
7423 + struct fc_rport *ptp_rp;
7424 + void *scsi_priv;
7425 +
7426 + /* Operational Information */
7427 + struct libfc_function_template tt;
7428 + u16 link_status;
7429 + enum fc_lport_state state;
7430 + unsigned long boot_time;
7431 +
7432 + struct fc_host_statistics host_stats;
7433 + struct fcoe_dev_stats *dev_stats[NR_CPUS];
7434 + u64 wwpn;
7435 + u64 wwnn;
7436 + u8 retry_count;
7437 +
7438 + /* Capabilities */
7439 + u32 sg_supp:1; /* scatter gather supported */
7440 + u32 seq_offload:1; /* seq offload supported */
7441 + u32 crc_offload:1; /* crc offload supported */
7442 + u32 lro_enabled:1; /* large receive offload */
7443 + u32 mfs; /* max FC payload size */
7444 + unsigned int service_params;
7445 + unsigned int e_d_tov;
7446 + unsigned int r_a_tov;
7447 + u8 max_retry_count;
7448 + u16 link_speed;
7449 + u16 link_supported_speeds;
7450 + u16 lro_xid; /* max xid for fcoe lro */
7451 + struct fc_ns_fts fcts; /* FC-4 type masks */
7452 + struct fc_els_rnid_gen rnid_gen; /* RNID information */
7453 +
7454 + /* Semaphores */
7455 + struct mutex lp_mutex;
7456 +
7457 + /* Miscellaneous */
7458 + struct delayed_work retry_work;
7459 + struct delayed_work disc_work;
7460 +};
7461 +
7462 +/**
7463 + * FC_LPORT HELPER FUNCTIONS
7464 + *****************************/
7465 +static inline void *lport_priv(const struct fc_lport *lp)
7466 +{
7467 + return (void *)(lp + 1);
7468 +}
7469 +
7470 +static inline int fc_lport_test_ready(struct fc_lport *lp)
7471 +{
7472 + return lp->state == LPORT_ST_READY;
7473 +}
7474 +
7475 +static inline void fc_set_wwnn(struct fc_lport *lp, u64 wwnn)
7476 +{
7477 + lp->wwnn = wwnn;
7478 +}
7479 +
7480 +static inline void fc_set_wwpn(struct fc_lport *lp, u64 wwpn)
7481 +{
7482 + lp->wwpn = wwpn;
7483 +}
7484 +
7485 +static inline void fc_lport_state_enter(struct fc_lport *lp,
7486 + enum fc_lport_state state)
7487 +{
7488 + if (state != lp->state)
7489 + lp->retry_count = 0;
7490 + lp->state = state;
7491 +}
7492 +
7493 +
7494 +/**
7495 + * LOCAL PORT LAYER
7496 + *****************************/
7497 +int fc_lport_init(struct fc_lport *lp);
7498 +
7499 +/*
7500 + * Destroy the specified local port by finding and freeing all
7501 + * fc_rports associated with it and then by freeing the fc_lport
7502 + * itself.
7503 + */
7504 +int fc_lport_destroy(struct fc_lport *lp);
7505 +
7506 +/*
7507 + * Log the specified local port out of the fabric.
7508 + */
7509 +int fc_fabric_logoff(struct fc_lport *lp);
7510 +
7511 +/*
7512 + * Initiate the LP state machine. This handler will use fc_host_attr
7513 + * to store the FLOGI service parameters, so fc_host_attr must be
7514 + * initialized before calling this handler.
7515 + */
7516 +int fc_fabric_login(struct fc_lport *lp);
7517 +
7518 +/*
7519 + * The link is up for the given local port.
7520 + */
7521 +void fc_linkup(struct fc_lport *);
7522 +
7523 +/*
7524 + * Link is down for the given local port.
7525 + */
7526 +void fc_linkdown(struct fc_lport *);
7527 +
7528 +/*
7529 + * Pause and unpause traffic.
7530 + */
7531 +void fc_pause(struct fc_lport *);
7532 +void fc_unpause(struct fc_lport *);
7533 +
7534 +/*
7535 + * Configure the local port.
7536 + */
7537 +int fc_lport_config(struct fc_lport *);
7538 +
7539 +/*
7540 + * Reset the local port.
7541 + */
7542 +int fc_lport_reset(struct fc_lport *);
7543 +
7544 +/*
7545 + * Set the mfs or reset
7546 + */
7547 +int fc_set_mfs(struct fc_lport *lp, u32 mfs);
7548 +
7549 +
7550 +/**
7551 + * REMOTE PORT LAYER
7552 + *****************************/
7553 +int fc_rport_init(struct fc_lport *lp);
7554 +void fc_rport_terminate_io(struct fc_rport *rp);
7555 +
7556 +/**
7557 + * DISCOVERY LAYER
7558 + *****************************/
7559 +int fc_disc_init(struct fc_lport *lp);
7560 +
7561 +
7562 +/**
7563 + * SCSI LAYER
7564 + *****************************/
7565 +/*
7566 + * Initialize the SCSI block of libfc
7567 + */
7568 +int fc_fcp_init(struct fc_lport *);
7569 +
7570 +/*
7571 + * This section provides an API which allows direct interaction
7572 + * with the SCSI-ml. Each of these functions satisfies a function
7573 + * pointer defined in Scsi_Host and therefore is always called
7574 + * directly from the SCSI-ml.
7575 + */
7576 +int fc_queuecommand(struct scsi_cmnd *sc_cmd,
7577 + void (*done)(struct scsi_cmnd *));
7578 +
7579 +/*
7580 + * Complete processing of an FCP packet.
7581 + *
7582 + * This function may sleep if a fsp timer is pending.
7583 + * The host lock must not be held by caller.
7584 + */
7585 +void fc_fcp_complete(struct fc_fcp_pkt *fsp);
7586 +
7587 +/*
7588 + * Send an ABTS frame to the target device. The sc_cmd argument
7589 + * is a pointer to the SCSI command to be aborted.
7590 + */
7591 +int fc_eh_abort(struct scsi_cmnd *sc_cmd);
7592 +
7593 +/*
7594 + * Reset a LUN by sending a task management command to the target.
7595 + */
7596 +int fc_eh_device_reset(struct scsi_cmnd *sc_cmd);
7597 +
7598 +/*
7599 + * Reset the host adapter.
7600 + */
7601 +int fc_eh_host_reset(struct scsi_cmnd *sc_cmd);
7602 +
7603 +/*
7604 + * Check rport status.
7605 + */
7606 +int fc_slave_alloc(struct scsi_device *sdev);
7607 +
7608 +/*
7609 + * Adjust the queue depth.
7610 + */
7611 +int fc_change_queue_depth(struct scsi_device *sdev, int qdepth);
7612 +
7613 +/*
7614 + * Change the tag type.
7615 + */
7616 +int fc_change_queue_type(struct scsi_device *sdev, int tag_type);
7617 +
7618 +/*
7619 + * Free memory pools used by the FCP layer.
7620 + */
7621 +void fc_fcp_destroy(struct fc_lport *);
7622 +
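Editor's note: the SCSI-layer entry points above are designed to be wired directly into a driver's struct scsi_host_template. A hedged sketch of what that wiring might look like follows; everything other than the fc_* handlers (name, cmd_per_lun, sg_tablesize, and so on) is an arbitrary example value, not something required by libfc:

static struct scsi_host_template example_fc_sht = {
        .module                  = THIS_MODULE,
        .name                    = "example libfc host",
        .queuecommand            = fc_queuecommand,
        .eh_abort_handler        = fc_eh_abort,
        .eh_device_reset_handler = fc_eh_device_reset,
        .eh_host_reset_handler   = fc_eh_host_reset,
        .slave_alloc             = fc_slave_alloc,
        .change_queue_depth      = fc_change_queue_depth,
        .change_queue_type       = fc_change_queue_type,
        .this_id                 = -1,
        .cmd_per_lun             = 3,
        .use_clustering          = ENABLE_CLUSTERING,
        .sg_tablesize            = SG_ALL,
};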
7623 +/**
7624 + * ELS/CT interface
7625 + *****************************/
7626 +/*
7627 + * Initializes ELS/CT interface
7628 + */
7629 +int fc_elsct_init(struct fc_lport *lp);
7630 +
7631 +
7632 +/**
7633 + * EXCHANGE MANAGER LAYER
7634 + *****************************/
7635 +/*
7636 + * Initializes Exchange Manager related
7637 + * function pointers in struct libfc_function_template.
7638 + */
7639 +int fc_exch_init(struct fc_lport *lp);
7640 +
7641 +/*
7642 + * Allocates an Exchange Manager (EM).
7643 + *
7644 + * The EM manages exchange allocation and freeing, and also
7645 + * allows exchange lookup for a received
7646 + * frame.
7647 + *
7648 + * The class argument sets the FC class of service used for
7649 + * exchanges allocated from this EM.
7650 + *
7651 + * The min_xid and max_xid arguments limit the
7652 + * exchange IDs (XIDs) assigned to new exchanges
7653 + * to this range.
7654 + * The LLD may choose to have multiple EMs,
7655 + * e.g. one EM instance per CPU receive thread in the LLD.
7656 + * The LLD can use exch_get() of struct libfc_function_template
7657 + * to specify the XID for a new exchange within
7658 + * a specified EM instance.
7659 + *
7660 + * An em_idx can be used to uniquely identify an EM instance.
7661 + */
7662 +struct fc_exch_mgr *fc_exch_mgr_alloc(struct fc_lport *lp,
7663 + enum fc_class class,
7664 + u16 min_xid,
7665 + u16 max_xid);
7666 +
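Editor's note: for illustration, a typical allocation might look like the sketch below. FC_CLASS_3 comes from scsi/fc/fc_fs.h; the XID range and the function name are example choices, not values mandated by libfc:

static int example_alloc_em(struct fc_lport *lp)
{
        /* one EM for the lport, class 3, with an illustrative XID range */
        lp->emp = fc_exch_mgr_alloc(lp, FC_CLASS_3, 0x0001, 0x0fff);
        if (!lp->emp)
                return -ENOMEM;
        return 0;
}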
7667 +/*
7668 + * Free an exchange manager.
7669 + */
7670 +void fc_exch_mgr_free(struct fc_exch_mgr *mp);
7671 +
7672 +/*
7673 + * Receive a frame on specified local port and exchange manager.
7674 + */
7675 +void fc_exch_recv(struct fc_lport *lp, struct fc_exch_mgr *mp,
7676 + struct fc_frame *fp);
7677 +
7678 +/*
7679 + * This function is for exch_seq_send function pointer in
7680 + * struct libfc_function_template, see comment block on
7681 + * exch_seq_send for description of this function.
7682 + */
7683 +struct fc_seq *fc_exch_seq_send(struct fc_lport *lp,
7684 + struct fc_frame *fp,
7685 + void (*resp)(struct fc_seq *sp,
7686 + struct fc_frame *fp,
7687 + void *arg),
7688 + void (*destructor)(struct fc_seq *sp,
7689 + void *arg),
7690 + void *arg, u32 timer_msec);
7691 +
7692 +/*
7693 + * Send a frame using an existing sequence and exchange.
7694 + */
7695 +int fc_seq_send(struct fc_lport *lp, struct fc_seq *sp, struct fc_frame *fp);
7696 +
7697 +/*
7698 + * Send an ELS response, mainly using information
7699 + * held in the exchange and sequence in the EM layer.
7700 + */
7701 +void fc_seq_els_rsp_send(struct fc_seq *sp, enum fc_els_cmd els_cmd,
7702 + struct fc_seq_els_data *els_data);
7703 +
7704 +/*
7705 + * This function is for seq_exch_abort function pointer in
7706 + * struct libfc_function_template, see comment block on
7707 + * seq_exch_abort for description of this function.
7708 + */
7709 +int fc_seq_exch_abort(const struct fc_seq *req_sp, unsigned int timer_msec);
7710 +
7711 +/*
7712 + * Indicate that an exchange/sequence tuple is complete and the memory
7713 + * allocated for the related objects may be freed.
7714 + */
7715 +void fc_exch_done(struct fc_seq *sp);
7716 +
7717 +/*
7718 + * Assigns an EM and an XID for a frame and then allocates
7719 + * a new exchange and sequence pair.
7720 + * The fp can be used to determine the free XID.
7721 + */
7722 +struct fc_exch *fc_exch_get(struct fc_lport *lp, struct fc_frame *fp);
7723 +
7724 +/*
7725 + * Allocate a new exchange and sequence pair.
7726 + * If ex_id is zero, the next free exchange ID
7727 + * from the specified exchange manager mp will be assigned.
7728 + */
7729 +struct fc_exch *fc_exch_alloc(struct fc_exch_mgr *mp,
7730 + struct fc_frame *fp, u16 ex_id);
7731 +/*
7732 + * Start a new sequence on the same exchange as the supplied sequence.
7733 + */
7734 +struct fc_seq *fc_seq_start_next(struct fc_seq *sp);
7735 +
7736 +/*
7737 + * Reset an exchange manager, completing all sequences and exchanges.
7738 + * If s_id is non-zero, reset only exchanges originating from that FID.
7739 + * If d_id is non-zero, reset only exchanges sending to that FID.
7740 + */
7741 +void fc_exch_mgr_reset(struct fc_exch_mgr *, u32 s_id, u32 d_id);
7742 +
7743 +/*
7744 + * Functions for fc_functions_template
7745 + */
7746 +void fc_get_host_speed(struct Scsi_Host *shost);
7747 +void fc_get_host_port_type(struct Scsi_Host *shost);
7748 +void fc_get_host_port_state(struct Scsi_Host *shost);
7749 +void fc_set_rport_loss_tmo(struct fc_rport *rport, u32 timeout);
7750 +struct fc_host_statistics *fc_get_host_stats(struct Scsi_Host *);
7751 +
7752 +/*
7753 + * module setup functions.
7754 + */
7755 +int fc_setup_exch_mgr(void);
7756 +void fc_destroy_exch_mgr(void);
7757 +int fc_setup_rport(void);
7758 +void fc_destroy_rport(void);
7759 +
7760 +#endif /* _LIBFC_H_ */
7761 diff --git a/include/scsi/libfc/fc_frame.h b/include/scsi/libfc/fc_frame.h
7762 deleted file mode 100644
7763 index 9508e55..0000000
7764 --- a/include/scsi/libfc/fc_frame.h
7765 +++ /dev/null
7766 @@ -1,238 +0,0 @@
7767 -/*
7768 - * Copyright(c) 2007 Intel Corporation. All rights reserved.
7769 - *
7770 - * This program is free software; you can redistribute it and/or modify it
7771 - * under the terms and conditions of the GNU General Public License,
7772 - * version 2, as published by the Free Software Foundation.
7773 - *
7774 - * This program is distributed in the hope it will be useful, but WITHOUT
7775 - * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
7776 - * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
7777 - * more details.
7778 - *
7779 - * You should have received a copy of the GNU General Public License along with
7780 - * this program; if not, write to the Free Software Foundation, Inc.,
7781 - * 51 Franklin St - Fifth Floor, Boston, MA 02110-1301 USA.
7782 - *
7783 - * Maintained at www.Open-FCoE.org
7784 - */
7785 -
7786 -#ifndef _FC_FRAME_H_
7787 -#define _FC_FRAME_H_
7788 -
7789 -#include <linux/scatterlist.h>
7790 -#include <linux/skbuff.h>
7791 -
7792 -#include <scsi/fc/fc_fs.h>
7793 -#include <scsi/fc/fc_encaps.h>
7794 -
7795 -/*
7796 - * The fc_frame interface is used to pass frame data between functions.
7797 - * The frame includes the data buffer, length, and SOF / EOF delimiter types.
7798 - * A pointer to the port structure of the receiving port is also includeded.
7799 - */
7800 -
7801 -#define FC_FRAME_HEADROOM 32 /* headroom for VLAN + FCoE headers */
7802 -#define FC_FRAME_TAILROOM 8 /* trailer space for FCoE */
7803 -
7804 -/*
7805 - * Information about an individual fibre channel frame received or to be sent.
7806 - * The buffer may be in up to 4 additional non-contiguous sections,
7807 - * but the linear section must hold the frame header.
7808 - */
7809 -#define FC_FRAME_SG_LEN 4 /* scatter/gather list maximum length */
7810 -
7811 -#define fp_skb(fp) (&((fp)->skb))
7812 -#define fr_hdr(fp) ((fp)->skb.data)
7813 -#define fr_len(fp) ((fp)->skb.len)
7814 -#define fr_cb(fp) ((struct fcoe_rcv_info *)&((fp)->skb.cb[0]))
7815 -#define fr_dev(fp) (fr_cb(fp)->fr_dev)
7816 -#define fr_seq(fp) (fr_cb(fp)->fr_seq)
7817 -#define fr_sof(fp) (fr_cb(fp)->fr_sof)
7818 -#define fr_eof(fp) (fr_cb(fp)->fr_eof)
7819 -#define fr_flags(fp) (fr_cb(fp)->fr_flags)
7820 -#define fr_max_payload(fp) (fr_cb(fp)->fr_max_payload)
7821 -
7822 -struct fc_frame {
7823 - struct sk_buff skb;
7824 -};
7825 -
7826 -struct fcoe_rcv_info {
7827 - struct packet_type *ptype;
7828 - struct fc_lport *fr_dev; /* transport layer private pointer */
7829 - struct fc_seq *fr_seq; /* for use with exchange manager */
7830 - enum fc_sof fr_sof; /* start of frame delimiter */
7831 - enum fc_eof fr_eof; /* end of frame delimiter */
7832 - u8 fr_flags; /* flags - see below */
7833 - u16 fr_max_payload; /* max FC payload */
7834 -};
7835 -
7836 -/*
7837 - * Get fc_frame pointer for an skb that's already been imported.
7838 - */
7839 -static inline struct fcoe_rcv_info *fcoe_dev_from_skb(const struct sk_buff *skb)
7840 -{
7841 - BUILD_BUG_ON(sizeof(struct fcoe_rcv_info) > sizeof(skb->cb));
7842 - return (struct fcoe_rcv_info *) skb->cb;
7843 -}
7844 -
7845 -/*
7846 - * fr_flags.
7847 - */
7848 -#define FCPHF_CRC_UNCHECKED 0x01 /* CRC not computed, still appended */
7849 -
7850 -/*
7851 - * Initialize a frame.
7852 - * We don't do a complete memset here for performance reasons.
7853 - * The caller must set fr_free, fr_hdr, fr_len, fr_sof, and fr_eof eventually.
7854 - */
7855 -static inline void fc_frame_init(struct fc_frame *fp)
7856 -{
7857 - fr_dev(fp) = NULL;
7858 - fr_seq(fp) = NULL;
7859 - fr_flags(fp) = 0;
7860 -}
7861 -
7862 -struct fc_frame *fc_frame_alloc_fill(struct fc_lport *, size_t payload_len);
7863 -
7864 -struct fc_frame *__fc_frame_alloc(size_t payload_len);
7865 -
7866 -/*
7867 - * Get frame for sending via port.
7868 - */
7869 -static inline struct fc_frame *_fc_frame_alloc(struct fc_lport *dev,
7870 - size_t payload_len)
7871 -{
7872 - return __fc_frame_alloc(payload_len);
7873 -}
7874 -
7875 -/*
7876 - * Allocate fc_frame structure and buffer. Set the initial length to
7877 - * payload_size + sizeof (struct fc_frame_header).
7878 - */
7879 -static inline struct fc_frame *fc_frame_alloc(struct fc_lport *dev, size_t len)
7880 -{
7881 - struct fc_frame *fp;
7882 -
7883 - /*
7884 - * Note: Since len will often be a constant multiple of 4,
7885 - * this check will usually be evaluated and eliminated at compile time.
7886 - */
7887 - if ((len % 4) != 0)
7888 - fp = fc_frame_alloc_fill(dev, len);
7889 - else
7890 - fp = _fc_frame_alloc(dev, len);
7891 - return fp;
7892 -}
7893 -
7894 -/*
7895 - * Free the fc_frame structure and buffer.
7896 - */
7897 -static inline void fc_frame_free(struct fc_frame *fp)
7898 -{
7899 - kfree_skb(fp_skb(fp));
7900 -}
7901 -
7902 -static inline int fc_frame_is_linear(struct fc_frame *fp)
7903 -{
7904 - return !skb_is_nonlinear(fp_skb(fp));
7905 -}
7906 -
7907 -/*
7908 - * Get frame header from message in fc_frame structure.
7909 - * This hides a cast and provides a place to add some checking.
7910 - */
7911 -static inline
7912 -struct fc_frame_header *fc_frame_header_get(const struct fc_frame *fp)
7913 -{
7914 - WARN_ON(fr_len(fp) < sizeof(struct fc_frame_header));
7915 - return (struct fc_frame_header *) fr_hdr(fp);
7916 -}
7917 -
7918 -/*
7919 - * Get frame payload from message in fc_frame structure.
7920 - * This hides a cast and provides a place to add some checking.
7921 - * The len parameter is the minimum length for the payload portion.
7922 - * Returns NULL if the frame is too short.
7923 - *
7924 - * This assumes the interesting part of the payload is in the first part
7925 - * of the buffer for received data. This may not be appropriate to use for
7926 - * buffers being transmitted.
7927 - */
7928 -static inline void *fc_frame_payload_get(const struct fc_frame *fp,
7929 - size_t len)
7930 -{
7931 - void *pp = NULL;
7932 -
7933 - if (fr_len(fp) >= sizeof(struct fc_frame_header) + len)
7934 - pp = fc_frame_header_get(fp) + 1;
7935 - return pp;
7936 -}
7937 -
7938 -/*
7939 - * Get frame payload opcode (first byte) from message in fc_frame structure.
7940 - * This hides a cast and provides a place to add some checking. Return 0
7941 - * if the frame has no payload.
7942 - */
7943 -static inline u8 fc_frame_payload_op(const struct fc_frame *fp)
7944 -{
7945 - u8 *cp;
7946 -
7947 - cp = fc_frame_payload_get(fp, sizeof(u8));
7948 - if (!cp)
7949 - return 0;
7950 - return *cp;
7951 -
7952 -}
7953 -
7954 -/*
7955 - * Get FC class from frame.
7956 - */
7957 -static inline enum fc_class fc_frame_class(const struct fc_frame *fp)
7958 -{
7959 - return fc_sof_class(fr_sof(fp));
7960 -}
7961 -
7962 -/*
7963 - * Set r_ctl and type in preparation for sending frame.
7964 - * This also clears fh_parm_offset.
7965 - */
7966 -static inline void fc_frame_setup(struct fc_frame *fp, enum fc_rctl r_ctl,
7967 - enum fc_fh_type type)
7968 -{
7969 - struct fc_frame_header *fh;
7970 -
7971 - fh = fc_frame_header_get(fp);
7972 - WARN_ON(r_ctl == 0);
7973 - fh->fh_r_ctl = r_ctl;
7974 - fh->fh_type = type;
7975 - fh->fh_parm_offset = htonl(0);
7976 -}
7977 -
7978 -/*
7979 - * Set offset in preparation for sending frame.
7980 - */
7981 -static inline void
7982 -fc_frame_set_offset(struct fc_frame *fp, u32 offset)
7983 -{
7984 - struct fc_frame_header *fh;
7985 -
7986 - fh = fc_frame_header_get(fp);
7987 - fh->fh_parm_offset = htonl(offset);
7988 -}
7989 -
7990 -/*
7991 - * Check the CRC in a frame.
7992 - * The CRC immediately follows the last data item *AFTER* the length.
7993 - * The return value is zero if the CRC matches.
7994 - */
7995 -u32 fc_frame_crc_check(struct fc_frame *);
7996 -
7997 -/*
7998 - * Check for leaks.
7999 - * Print the frame header of any currently allocated frame, assuming there
8000 - * should be none at this point.
8001 - */
8002 -void fc_frame_leak_check(void);
8003 -
8004 -#endif /* _FC_FRAME_H_ */
8005 diff --git a/include/scsi/libfc/libfc.h b/include/scsi/libfc/libfc.h
8006 deleted file mode 100644
8007 index 237abd3..0000000
8008 --- a/include/scsi/libfc/libfc.h
8009 +++ /dev/null
8010 @@ -1,860 +0,0 @@
8011 -/*
8012 - * Copyright(c) 2007 Intel Corporation. All rights reserved.
8013 - *
8014 - * This program is free software; you can redistribute it and/or modify it
8015 - * under the terms and conditions of the GNU General Public License,
8016 - * version 2, as published by the Free Software Foundation.
8017 - *
8018 - * This program is distributed in the hope it will be useful, but WITHOUT
8019 - * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
8020 - * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
8021 - * more details.
8022 - *
8023 - * You should have received a copy of the GNU General Public License along with
8024 - * this program; if not, write to the Free Software Foundation, Inc.,
8025 - * 51 Franklin St - Fifth Floor, Boston, MA 02110-1301 USA.
8026 - *
8027 - * Maintained at www.Open-FCoE.org
8028 - */
8029 -
8030 -#ifndef _LIBFC_H_
8031 -#define _LIBFC_H_
8032 -
8033 -#include <linux/timer.h>
8034 -#include <linux/if.h>
8035 -
8036 -#include <scsi/scsi_transport.h>
8037 -#include <scsi/scsi_transport_fc.h>
8038 -
8039 -#include <scsi/fc/fc_fcp.h>
8040 -#include <scsi/fc/fc_ns.h>
8041 -#include <scsi/fc/fc_els.h>
8042 -#include <scsi/fc/fc_gs.h>
8043 -
8044 -#include <scsi/libfc/fc_frame.h>
8045 -
8046 -#define LIBFC_DEBUG
8047 -
8048 -#ifdef LIBFC_DEBUG
8049 -/* Log messages */
8050 -#define FC_DBG(fmt, args...) \
8051 - do { \
8052 - printk(KERN_INFO "%s " fmt, __func__, ##args); \
8053 - } while (0)
8054 -#else
8055 -#define FC_DBG(fmt, args...)
8056 -#endif
8057 -
8058 -/*
8059 - * libfc error codes
8060 - */
8061 -#define FC_NO_ERR 0 /* no error */
8062 -#define FC_EX_TIMEOUT 1 /* Exchange timeout */
8063 -#define FC_EX_CLOSED 2 /* Exchange closed */
8064 -
8065 -/* some helpful macros */
8066 -
8067 -#define ntohll(x) be64_to_cpu(x)
8068 -#define htonll(x) cpu_to_be64(x)
8069 -
8070 -#define ntoh24(p) (((p)[0] << 16) | ((p)[1] << 8) | ((p)[2]))
8071 -
8072 -#define hton24(p, v) do { \
8073 - p[0] = (((v) >> 16) & 0xFF); \
8074 - p[1] = (((v) >> 8) & 0xFF); \
8075 - p[2] = ((v) & 0xFF); \
8076 - } while (0)
8077 -
8078 -struct fc_exch_mgr;
8079 -
8080 -/*
8081 - * FC HBA status
8082 - */
8083 -#define FC_PAUSE (1 << 1)
8084 -#define FC_LINK_UP (1 << 0)
8085 -
8086 -enum fc_lport_state {
8087 - LPORT_ST_NONE = 0,
8088 - LPORT_ST_FLOGI,
8089 - LPORT_ST_DNS,
8090 - LPORT_ST_RPN_ID,
8091 - LPORT_ST_RFT_ID,
8092 - LPORT_ST_SCR,
8093 - LPORT_ST_READY,
8094 - LPORT_ST_LOGO,
8095 - LPORT_ST_RESET
8096 -};
8097 -
8098 -enum fc_lport_event {
8099 - LPORT_EV_RPORT_NONE = 0,
8100 - LPORT_EV_RPORT_CREATED,
8101 - LPORT_EV_RPORT_FAILED,
8102 - LPORT_EV_RPORT_STOP,
8103 - LPORT_EV_RPORT_LOGO
8104 -};
8105 -
8106 -enum fc_rport_state {
8107 - RPORT_ST_NONE = 0,
8108 - RPORT_ST_INIT, /* initialized */
8109 - RPORT_ST_PLOGI, /* waiting for PLOGI completion */
8110 - RPORT_ST_PRLI, /* waiting for PRLI completion */
8111 - RPORT_ST_RTV, /* waiting for RTV completion */
8112 - RPORT_ST_READY, /* ready for use */
8113 - RPORT_ST_LOGO, /* port logout sent */
8114 -};
8115 -
8116 -enum fc_rport_trans_state {
8117 - FC_PORTSTATE_ROGUE,
8118 - FC_PORTSTATE_REAL,
8119 -};
8120 -
8121 -/**
8122 - * struct fc_disc_port - temporary discovery port to hold rport identifiers
8123 - * @lp: Fibre Channel host port instance
8124 - * @peers: node for list management during discovery and RSCN processing
8125 - * @ids: identifiers structure to pass to fc_remote_port_add()
8126 - * @rport_work: work struct for starting the rport state machine
8127 - */
8128 -struct fc_disc_port {
8129 - struct fc_lport *lp;
8130 - struct list_head peers;
8131 - struct fc_rport_identifiers ids;
8132 - struct work_struct rport_work;
8133 -};
8134 -
8135 -/**
8136 - * struct fc_rport_libfc_priv - libfc internal information about a remote port
8137 - * @local_port: Fibre Channel host port instance
8138 - * @rp_state: state tracks progress of PLOGI, PRLI, and RTV exchanges
8139 - * @flags: REC and RETRY supported flags
8140 - * @max_seq: maximum number of concurrent sequences
8141 - * @retries: retry count in current state
8142 - * @e_d_tov: error detect timeout value (in msec)
8143 - * @r_a_tov: resource allocation timeout value (in msec)
8144 - * @rp_mutex: mutex protects rport
8145 - * @retry_work:
8146 - * @event_callback: Callback for rport READY, FAILED or LOGO
8147 - */
8148 -struct fc_rport_libfc_priv {
8149 - struct fc_lport *local_port;
8150 - enum fc_rport_state rp_state;
8151 - u16 flags;
8152 - #define FC_RP_FLAGS_REC_SUPPORTED (1 << 0)
8153 - #define FC_RP_FLAGS_RETRY (1 << 1)
8154 - u16 max_seq;
8155 - unsigned int retries;
8156 - unsigned int e_d_tov;
8157 - unsigned int r_a_tov;
8158 - enum fc_rport_trans_state trans_state;
8159 - struct mutex rp_mutex;
8160 - struct delayed_work retry_work;
8161 - enum fc_lport_event event;
8162 - void (*event_callback)(struct fc_lport *,
8163 - struct fc_rport *,
8164 - enum fc_lport_event);
8165 - struct list_head peers;
8166 - struct work_struct event_work;
8167 -};
8168 -
8169 -#define PRIV_TO_RPORT(x) \
8170 - (struct fc_rport *)((void *)x - sizeof(struct fc_rport));
8171 -#define RPORT_TO_PRIV(x) \
8172 - (struct fc_rport_libfc_priv *)((void *)x + sizeof(struct fc_rport));
8173 -
8174 -struct fc_rport *fc_rport_rogue_create(struct fc_disc_port *);
8175 -void fc_rport_rogue_destroy(struct fc_rport *);
8176 -
8177 -static inline void fc_rport_set_name(struct fc_rport *rport, u64 wwpn, u64 wwnn)
8178 -{
8179 - rport->node_name = wwnn;
8180 - rport->port_name = wwpn;
8181 -}
8182 -
8183 -/*
8184 - * fcoe stats structure
8185 - */
8186 -struct fcoe_dev_stats {
8187 - u64 SecondsSinceLastReset;
8188 - u64 TxFrames;
8189 - u64 TxWords;
8190 - u64 RxFrames;
8191 - u64 RxWords;
8192 - u64 ErrorFrames;
8193 - u64 DumpedFrames;
8194 - u64 LinkFailureCount;
8195 - u64 LossOfSignalCount;
8196 - u64 InvalidTxWordCount;
8197 - u64 InvalidCRCCount;
8198 - u64 InputRequests;
8199 - u64 OutputRequests;
8200 - u64 ControlRequests;
8201 - u64 InputMegabytes;
8202 - u64 OutputMegabytes;
8203 -};
8204 -
8205 -/*
8206 - * els data is used for passing ELS respone specific
8207 - * data to send ELS response mainly using infomation
8208 - * in exchange and sequence in EM layer.
8209 - */
8210 -struct fc_seq_els_data {
8211 - struct fc_frame *fp;
8212 - enum fc_els_rjt_reason reason;
8213 - enum fc_els_rjt_explan explan;
8214 -};
8215 -
8216 -/*
8217 - * FCP request structure, one for each scsi cmd request
8218 - */
8219 -struct fc_fcp_pkt {
8220 - /*
8221 - * housekeeping stuff
8222 - */
8223 - struct fc_lport *lp; /* handle to hba struct */
8224 - u16 state; /* scsi_pkt state state */
8225 - u16 tgt_flags; /* target flags */
8226 - atomic_t ref_cnt; /* fcp pkt ref count */
8227 - spinlock_t scsi_pkt_lock; /* Must be taken before the host lock
8228 - * if both are held at the same time */
8229 - /*
8230 - * SCSI I/O related stuff
8231 - */
8232 - struct scsi_cmnd *cmd; /* scsi command pointer. set/clear
8233 - * under host lock */
8234 - struct list_head list; /* tracks queued commands. access under
8235 - * host lock */
8236 - /*
8237 - * timeout related stuff
8238 - */
8239 - struct timer_list timer; /* command timer */
8240 - struct completion tm_done;
8241 - int wait_for_comp;
8242 - unsigned long start_time; /* start jiffie */
8243 - unsigned long end_time; /* end jiffie */
8244 - unsigned long last_pkt_time; /* jiffies of last frame received */
8245 -
8246 - /*
8247 - * scsi cmd and data transfer information
8248 - */
8249 - u32 data_len;
8250 - /*
8251 - * transport related veriables
8252 - */
8253 - struct fcp_cmnd cdb_cmd;
8254 - size_t xfer_len;
8255 - u32 xfer_contig_end; /* offset of end of contiguous xfer */
8256 - u16 max_payload; /* max payload size in bytes */
8257 -
8258 - /*
8259 - * scsi/fcp return status
8260 - */
8261 - u32 io_status; /* SCSI result upper 24 bits */
8262 - u8 cdb_status;
8263 - u8 status_code; /* FCP I/O status */
8264 - /* bit 3 Underrun bit 2: overrun */
8265 - u8 scsi_comp_flags;
8266 - u32 req_flags; /* bit 0: read bit:1 write */
8267 - u32 scsi_resid; /* residule length */
8268 -
8269 - struct fc_rport *rport; /* remote port pointer */
8270 - struct fc_seq *seq_ptr; /* current sequence pointer */
8271 - /*
8272 - * Error Processing
8273 - */
8274 - u8 recov_retry; /* count of recovery retries */
8275 - struct fc_seq *recov_seq; /* sequence for REC or SRR */
8276 -};
8277 -
8278 -struct libfc_function_template {
8279 -
8280 - /**
8281 - * Mandatory Fields
8282 - *
8283 - * These handlers must be implemented by the LLD.
8284 - */
8285 -
8286 - /*
8287 - * Interface to send a FC frame
8288 - */
8289 - int (*frame_send)(struct fc_lport *lp, struct fc_frame *fp);
8290 -
8291 - /**
8292 - * Optional Fields
8293 - *
8294 - * The LLD may choose to implement any of the following handlers.
8295 - * If LLD doesn't specify hander and leaves its pointer NULL then
8296 - * the default libfc function will be used for that handler.
8297 - */
8298 -
8299 - /**
8300 - * Exhance Manager interfaces
8301 - */
8302 -
8303 - /*
8304 - * Send the FC frame payload using a new exchange and sequence.
8305 - *
8306 - * The frame pointer with some of the header's fields must be
8307 - * filled before calling exch_seq_send(), those fields are,
8308 - *
8309 - * - routing control
8310 - * - FC header type
8311 - * - parameter or relative offset
8312 - *
8313 - * The exchange response handler is set in this routine to resp()
8314 - * function pointer. It can be called in two scenarios: if a timeout
8315 - * occurs or if a response frame is received for the exchange. The
8316 - * fc_frame pointer in response handler will also indicate timeout
8317 - * as error using IS_ERR related macros.
8318 - *
8319 - * The exchange destructor handler is also set in this routine.
8320 - * The destructor handler is invoked by EM layer when exchange
8321 - * is about to free, this can be used by caller to free its
8322 - * resources along with exchange free.
8323 - *
8324 - * The arg is passed back to resp and destructor handler.
8325 - *
8326 - * The timeout value (in msec) for an exchange is set if non zero
8327 - * timer_msec argument is specified. The timer is canceled when
8328 - * it fires or when the exchange is done. The exchange timeout handler
8329 - * is registered by EM layer.
8330 - *
8331 - * The caller also need to specify FC sid, did and frame control field.
8332 - */
8333 - struct fc_seq *(*exch_seq_send)(struct fc_lport *lp,
8334 - struct fc_frame *fp,
8335 - void (*resp)(struct fc_seq *sp,
8336 - struct fc_frame *fp,
8337 - void *arg),
8338 - void (*destructor)(struct fc_seq *sp,
8339 - void *arg),
8340 - void *arg, unsigned int timer_msec,
8341 - u32 sid, u32 did, u32 f_ctl);
8342 -
8343 - /*
8344 - * send a frame using existing sequence and exchange.
8345 - */
8346 - int (*seq_send)(struct fc_lport *lp, struct fc_seq *sp,
8347 - struct fc_frame *fp, u32 f_ctl);
8348 -
8349 - /*
8350 - * Send ELS response using mainly infomation
8351 - * in exchange and sequence in EM layer.
8352 - */
8353 - void (*seq_els_rsp_send)(struct fc_seq *sp, enum fc_els_cmd els_cmd,
8354 - struct fc_seq_els_data *els_data);
8355 -
8356 - /*
8357 - * Abort an exchange and sequence. Generally called because of a
8358 - * exchange timeout or an abort from the upper layer.
8359 - *
8360 - * A timer_msec can be specified for abort timeout, if non-zero
8361 - * timer_msec value is specified then exchange resp handler
8362 - * will be called with timeout error if no response to abort.
8363 - */
8364 - int (*seq_exch_abort)(const struct fc_seq *req_sp,
8365 - unsigned int timer_msec);
8366 -
8367 - /*
8368 - * Indicate that an exchange/sequence tuple is complete and the memory
8369 - * allocated for the related objects may be freed.
8370 - */
8371 - void (*exch_done)(struct fc_seq *sp);
8372 -
8373 - /*
8374 - * Assigns a EM and a free XID for an new exchange and then
8375 - * allocates a new exchange and sequence pair.
8376 - * The fp can be used to determine free XID.
8377 - */
8378 - struct fc_exch *(*exch_get)(struct fc_lport *lp, struct fc_frame *fp);
8379 -
8380 - /*
8381 - * Release previously assigned XID by exch_get API.
8382 - * The LLD may implement this if XID is assigned by LLD
8383 - * in exch_get().
8384 - */
8385 - void (*exch_put)(struct fc_lport *lp, struct fc_exch_mgr *mp,
8386 - u16 ex_id);
8387 -
8388 - /*
8389 - * Start a new sequence on the same exchange/sequence tuple.
8390 - */
8391 - struct fc_seq *(*seq_start_next)(struct fc_seq *sp);
8392 -
8393 - /*
8394 - * Reset an exchange manager, completing all sequences and exchanges.
8395 - * If s_id is non-zero, reset only exchanges originating from that FID.
8396 - * If d_id is non-zero, reset only exchanges sending to that FID.
8397 - */
8398 - void (*exch_mgr_reset)(struct fc_exch_mgr *,
8399 - u32 s_id, u32 d_id);
8400 -
8401 - /*
8402 - * Get exchange Ids of a sequence
8403 - */
8404 - void (*seq_get_xids)(struct fc_seq *sp, u16 *oxid, u16 *rxid);
8405 -
8406 - /*
8407 - * Set REC data to a sequence
8408 - */
8409 - void (*seq_set_rec_data)(struct fc_seq *sp, u32 rec_data);
8410 -
8411 - /**
8412 - * Local Port interfaces
8413 - */
8414 -
8415 - /*
8416 - * Receive a frame to a local port.
8417 - */
8418 - void (*lport_recv)(struct fc_lport *lp, struct fc_seq *sp,
8419 - struct fc_frame *fp);
8420 -
8421 - int (*lport_reset)(struct fc_lport *);
8422 -
8423 - void (*event_callback)(struct fc_lport *, struct fc_rport *,
8424 - enum fc_lport_event);
8425 -
8426 - /**
8427 - * Remote Port interfaces
8428 - */
8429 -
8430 - /*
8431 - * Initiates the RP state machine. It is called from the LP module.
8432 - * This function will issue the following commands to the N_Port
8433 - * identified by the FC ID provided.
8434 - *
8435 - * - PLOGI
8436 - * - PRLI
8437 - * - RTV
8438 - */
8439 - int (*rport_login)(struct fc_rport *rport);
8440 -
8441 - /*
8442 - * Logs the specified local port out of a N_Port identified
8443 - * by the ID provided.
8444 - */
8445 - int (*rport_logout)(struct fc_rport *rport);
8446 -
8447 - /*
8448 - * Delete the rport and remove it from the transport if
8449 - * it had been added. This will not send a LOGO, use
8450 - * rport_logout for a gracefull logout.
8451 - */
8452 - int (*rport_stop)(struct fc_rport *rport);
8453 -
8454 - /*
8455 - * Recieve a request from a remote port.
8456 - */
8457 - void (*rport_recv_req)(struct fc_seq *, struct fc_frame *,
8458 - struct fc_rport *);
8459 -
8460 - struct fc_rport *(*rport_lookup)(const struct fc_lport *, u32);
8461 -
8462 - /**
8463 - * FCP interfaces
8464 - */
8465 -
8466 - /*
8467 - * Send a fcp cmd from fsp pkt.
8468 - * Called with the SCSI host lock unlocked and irqs disabled.
8469 - *
8470 - * The resp handler is called when FCP_RSP received.
8471 - *
8472 - */
8473 - int (*fcp_cmd_send)(struct fc_lport *lp, struct fc_fcp_pkt *fsp,
8474 - void (*resp)(struct fc_seq *, struct fc_frame *fp,
8475 - void *arg));
8476 -
8477 - /*
8478 - * Used at least durring linkdown and reset
8479 - */
8480 - void (*fcp_cleanup)(struct fc_lport *lp);
8481 -
8482 - /*
8483 - * Abort all I/O on a local port
8484 - */
8485 - void (*fcp_abort_io)(struct fc_lport *lp);
8486 -
8487 - /**
8488 - * Discovery interfaces
8489 - */
8490 -
8491 - void (*disc_recv_req)(struct fc_seq *,
8492 - struct fc_frame *, struct fc_lport *);
8493 -
8494 - /*
8495 - * Start discovery for a local port.
8496 - */
8497 - int (*disc_start)(struct fc_lport *);
8498 -};
8499 -
8500 -struct fc_lport {
8501 - struct list_head list;
8502 -
8503 - /* Associations */
8504 - struct Scsi_Host *host;
8505 - struct fc_exch_mgr *emp;
8506 - struct fc_rport *dns_rp;
8507 - struct fc_rport *ptp_rp;
8508 - void *scsi_priv;
8509 - struct list_head rports;
8510 -
8511 - /* Operational Information */
8512 - struct libfc_function_template tt;
8513 - u16 link_status;
8514 - u8 disc_done;
8515 - enum fc_lport_state state;
8516 - unsigned long boot_time;
8517 -
8518 - struct fc_host_statistics host_stats;
8519 - struct fcoe_dev_stats *dev_stats[NR_CPUS];
8520 -
8521 - u64 wwpn;
8522 - u64 wwnn;
8523 - u8 retry_count;
8524 - unsigned char disc_retry_count;
8525 - unsigned char disc_delay;
8526 - unsigned char disc_pending;
8527 - unsigned char disc_requested;
8528 - unsigned short disc_seq_count;
8529 - unsigned char disc_buf_len;
8530 -
8531 - /* Capabilities */
8532 - u32 sg_supp:1; /* scatter gather supported */
8533 - u32 seq_offload:1; /* seq offload supported */
8534 - u32 mfs; /* max FC payload size */
8535 - unsigned int service_params;
8536 - unsigned int e_d_tov;
8537 - unsigned int r_a_tov;
8538 - u8 max_retry_count;
8539 - u16 link_speed;
8540 - u16 link_supported_speeds;
8541 - struct fc_ns_fts fcts; /* FC-4 type masks */
8542 - struct fc_els_rnid_gen rnid_gen; /* RNID information */
8543 -
8544 - /* Semaphores */
8545 - struct mutex lp_mutex;
8546 -
8547 - /* Miscellaneous */
8548 - struct fc_gpn_ft_resp disc_buf; /* partial name buffer */
8549 - struct delayed_work retry_work;
8550 - struct delayed_work disc_work;
8551 -
8552 - void *drv_priv;
8553 -};
8554 -
8555 -/**
8556 - * FC_LPORT HELPER FUNCTIONS
8557 - *****************************/
8558 -
8559 -static inline int fc_lport_test_ready(struct fc_lport *lp)
8560 -{
8561 - return lp->state == LPORT_ST_READY;
8562 -}
8563 -
8564 -static inline void fc_set_wwnn(struct fc_lport *lp, u64 wwnn)
8565 -{
8566 - lp->wwnn = wwnn;
8567 -}
8568 -
8569 -static inline void fc_set_wwpn(struct fc_lport *lp, u64 wwnn)
8570 -{
8571 - lp->wwpn = wwnn;
8572 -}
8573 -
8574 -/**
8575 - * fc_fill_dns_hdr - Fill in a name service request header
8576 - * @lp: Fibre Channel host port instance
8577 - * @ct: Common Transport (CT) header structure
8578 - * @op: Name Service request code
8579 - * @req_size: Full size of Name Service request
8580 - */
8581 -static inline void fc_fill_dns_hdr(struct fc_lport *lp, struct fc_ct_hdr *ct,
8582 - unsigned int op, unsigned int req_size)
8583 -{
8584 - memset(ct, 0, sizeof(*ct) + req_size);
8585 - ct->ct_rev = FC_CT_REV;
8586 - ct->ct_fs_type = FC_FST_DIR;
8587 - ct->ct_fs_subtype = FC_NS_SUBTYPE;
8588 - ct->ct_cmd = htons((u16) op);
8589 -}
8590 -
8591 -static inline void fc_lport_state_enter(struct fc_lport *lp,
8592 - enum fc_lport_state state)
8593 -{
8594 - if (state != lp->state)
8595 - lp->retry_count = 0;
8596 - lp->state = state;
8597 -}
8598 -
8599 -
8600 -/**
8601 - * LOCAL PORT LAYER
8602 - *****************************/
8603 -int fc_lport_init(struct fc_lport *lp);
8604 -
8605 -/*
8606 - * Destroy the specified local port by finding and freeing all
8607 - * fc_rports associated with it and then by freeing the fc_lport
8608 - * itself.
8609 - */
8610 -int fc_lport_destroy(struct fc_lport *lp);
8611 -
8612 -/*
8613 - * Logout the specified local port from the fabric
8614 - */
8615 -int fc_fabric_logoff(struct fc_lport *lp);
8616 -
8617 -/*
8618 - * Initiate the LP state machine. This handler will use fc_host_attr
8619 - * to store the FLOGI service parameters, so fc_host_attr must be
8620 - * initialized before calling this handler.
8621 - */
8622 -int fc_fabric_login(struct fc_lport *lp);
8623 -
8624 -/*
8625 - * The link is up for the given local port.
8626 - */
8627 -void fc_linkup(struct fc_lport *);
8628 -
8629 -/*
8630 - * Link is down for the given local port.
8631 - */
8632 -void fc_linkdown(struct fc_lport *);
8633 -
8634 -/*
8635 - * Pause and unpause traffic.
8636 - */
8637 -void fc_pause(struct fc_lport *);
8638 -void fc_unpause(struct fc_lport *);
8639 -
8640 -/*
8641 - * Configure the local port.
8642 - */
8643 -int fc_lport_config(struct fc_lport *);
8644 -
8645 -/*
8646 - * Reset the local port.
8647 - */
8648 -int fc_lport_reset(struct fc_lport *);
8649 -
8650 -/*
8651 - * Set the mfs or reset
8652 - */
8653 -int fc_set_mfs(struct fc_lport *lp, u32 mfs);
8654 -
8655 -
8656 -/**
8657 - * REMOTE PORT LAYER
8658 - *****************************/
8659 -int fc_rport_init(struct fc_lport *lp);
8660 -void fc_rport_terminate_io(struct fc_rport *rp);
8661 -
8662 -/**
8663 - * DISCOVERY LAYER
8664 - *****************************/
8665 -int fc_disc_init(struct fc_lport *lp);
8666 -
8667 -
8668 -/**
8669 - * SCSI LAYER
8670 - *****************************/
8671 -/*
8672 - * Initialize the SCSI block of libfc
8673 - */
8674 -int fc_fcp_init(struct fc_lport *);
8675 -
8676 -/*
8677 - * This section provides an API which allows direct interaction
8678 - * with the SCSI-ml. Each of these functions satisfies a function
8679 - * pointer defined in Scsi_Host and therefore is always called
8680 - * directly from the SCSI-ml.
8681 - */
8682 -int fc_queuecommand(struct scsi_cmnd *sc_cmd,
8683 - void (*done)(struct scsi_cmnd *));
8684 -
8685 -/*
8686 - * complete processing of a fcp packet
8687 - *
8688 - * This function may sleep if a fsp timer is pending.
8689 - * The host lock must not be held by caller.
8690 - */
8691 -void fc_fcp_complete(struct fc_fcp_pkt *fsp);
8692 -
8693 -/*
8694 - * Send an ABTS frame to the target device. The sc_cmd argument
8695 - * is a pointer to the SCSI command to be aborted.
8696 - */
8697 -int fc_eh_abort(struct scsi_cmnd *sc_cmd);
8698 -
8699 -/*
8700 - * Reset a LUN by sending send the tm cmd to the target.
8701 - */
8702 -int fc_eh_device_reset(struct scsi_cmnd *sc_cmd);
8703 -
8704 -/*
8705 - * Reset the host adapter.
8706 - */
8707 -int fc_eh_host_reset(struct scsi_cmnd *sc_cmd);
8708 -
8709 -/*
8710 - * Check rport status.
8711 - */
8712 -int fc_slave_alloc(struct scsi_device *sdev);
8713 -
8714 -/*
8715 - * Adjust the queue depth.
8716 - */
8717 -int fc_change_queue_depth(struct scsi_device *sdev, int qdepth);
8718 -
8719 -/*
8720 - * Change the tag type.
8721 - */
8722 -int fc_change_queue_type(struct scsi_device *sdev, int tag_type);
8723 -
8724 -/*
8725 - * Free memory pools used by the FCP layer.
8726 - */
8727 -void fc_fcp_destroy(struct fc_lport *);
8728 -
8729 -
8730 -/**
8731 - * EXCHANGE MANAGER LAYER
8732 - *****************************/
8733 -/*
8734 - * Initializes Exchange Manager related
8735 - * function pointers in struct libfc_function_template.
8736 - */
8737 -int fc_exch_init(struct fc_lport *lp);
8738 -
8739 -/*
8740 - * Allocates an Exchange Manager (EM).
8741 - *
8742 - * The EM manages exchanges for their allocation and
8743 - * free, also allows exchange lookup for received
8744 - * frame.
8745 - *
8746 - * The class is used for initializing FC class of
8747 - * allocated exchange from EM.
8748 - *
8749 - * The min_xid and max_xid will limit new
8750 - * exchange ID (XID) within this range for
8751 - * a new exchange.
8752 - * The LLD may choose to have multiple EMs,
8753 - * e.g. one EM instance per CPU receive thread in LLD.
8754 - * The LLD can use exch_get() of struct libfc_function_template
8755 - * to specify XID for a new exchange within
8756 - * a specified EM instance.
8757 - *
8758 - * The em_idx to uniquely identify an EM instance.
8759 - */
8760 -struct fc_exch_mgr *fc_exch_mgr_alloc(struct fc_lport *lp,
8761 - enum fc_class class,
8762 - u16 min_xid,
8763 - u16 max_xid);
8764 -
8765 -/*
8766 - * Free an exchange manager.
8767 - */
8768 -void fc_exch_mgr_free(struct fc_exch_mgr *mp);
8769 -
8770 -/*
8771 - * Receive a frame on specified local port and exchange manager.
8772 - */
8773 -void fc_exch_recv(struct fc_lport *lp, struct fc_exch_mgr *mp,
8774 - struct fc_frame *fp);
8775 -
8776 -/*
8777 - * This function is for exch_seq_send function pointer in
8778 - * struct libfc_function_template, see comment block on
8779 - * exch_seq_send for description of this function.
8780 - */
8781 -struct fc_seq *fc_exch_seq_send(struct fc_lport *lp,
8782 - struct fc_frame *fp,
8783 - void (*resp)(struct fc_seq *sp,
8784 - struct fc_frame *fp,
8785 - void *arg),
8786 - void (*destructor)(struct fc_seq *sp,
8787 - void *arg),
8788 - void *arg, u32 timer_msec,
8789 - u32 sid, u32 did, u32 f_ctl);
8790 -
8791 -/*
8792 - * send a frame using existing sequence and exchange.
8793 - */
8794 -int fc_seq_send(struct fc_lport *lp, struct fc_seq *sp,
8795 - struct fc_frame *fp, u32 f_ctl);
8796 -
8797 -/*
8798 - * Send ELS response using mainly infomation
8799 - * in exchange and sequence in EM layer.
8800 - */
8801 -void fc_seq_els_rsp_send(struct fc_seq *sp, enum fc_els_cmd els_cmd,
8802 - struct fc_seq_els_data *els_data);
8803 -
8804 -/*
8805 - * This function is for seq_exch_abort function pointer in
8806 - * struct libfc_function_template, see comment block on
8807 - * seq_exch_abort for description of this function.
8808 - */
8809 -int fc_seq_exch_abort(const struct fc_seq *req_sp, unsigned int timer_msec);
8810 -
8811 -/*
8812 - * Indicate that an exchange/sequence tuple is complete and the memory
8813 - * allocated for the related objects may be freed.
8814 - */
8815 -void fc_exch_done(struct fc_seq *sp);
8816 -
8817 -/*
8818 - * Assigns a EM and XID for a frame and then allocates
8819 - * a new exchange and sequence pair.
8820 - * The fp can be used to determine free XID.
8821 - */
8822 -struct fc_exch *fc_exch_get(struct fc_lport *lp, struct fc_frame *fp);
8823 -
8824 -/*
8825 - * Allocate a new exchange and sequence pair.
8826 - * if ex_id is zero then next free exchange id
8827 - * from specified exchange manger mp will be assigned.
8828 - */
8829 -struct fc_exch *fc_exch_alloc(struct fc_exch_mgr *mp, u16 ex_id);
8830 -
8831 -/*
8832 - * Start a new sequence on the same exchange as the supplied sequence.
8833 - */
8834 -struct fc_seq *fc_seq_start_next(struct fc_seq *sp);
8835 -
8836 -/*
8837 - * Reset an exchange manager, completing all sequences and exchanges.
8838 - * If s_id is non-zero, reset only exchanges originating from that FID.
8839 - * If d_id is non-zero, reset only exchanges sending to that FID.
8840 - */
8841 -void fc_exch_mgr_reset(struct fc_exch_mgr *, u32 s_id, u32 d_id);
8842 -
8843 -/*
8844 - * Get exchange Ids of a sequence
8845 - */
8846 -void fc_seq_get_xids(struct fc_seq *sp, u16 *oxid, u16 *rxid);
8847 -
8848 -/*
8849 - * Set REC data to a sequence
8850 - */
8851 -void fc_seq_set_rec_data(struct fc_seq *sp, u32 rec_data);
8852 -
8853 -/*
8854 - * Functions for fc_functions_template
8855 - */
8856 -void fc_get_host_speed(struct Scsi_Host *shost);
8857 -void fc_get_host_port_type(struct Scsi_Host *shost);
8858 -void fc_get_host_port_state(struct Scsi_Host *shost);
8859 -void fc_set_rport_loss_tmo(struct fc_rport *rport, u32 timeout);
8860 -struct fc_host_statistics *fc_get_host_stats(struct Scsi_Host *);
8861 -
8862 -/*
8863 - * module setup functions.
8864 - */
8865 -int fc_setup_exch_mgr(void);
8866 -void fc_destroy_exch_mgr(void);
8867 -int fc_setup_rport(void);
8868 -void fc_destroy_rport(void);
8869 -
8870 -#endif /* _LIBFC_H_ */
8871 diff --git a/include/scsi/libfcoe.h b/include/scsi/libfcoe.h
8872 new file mode 100644
8873 index 0000000..4ca5902
8874 --- /dev/null
8875 +++ b/include/scsi/libfcoe.h
8876 @@ -0,0 +1,177 @@
8877 +/*
8878 + * Copyright(c) 2007 - 2008 Intel Corporation. All rights reserved.
8879 + *
8880 + * This program is free software; you can redistribute it and/or modify it
8881 + * under the terms and conditions of the GNU General Public License,
8882 + * version 2, as published by the Free Software Foundation.
8883 + *
8884 + * This program is distributed in the hope it will be useful, but WITHOUT
8885 + * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
8886 + * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
8887 + * more details.
8888 + *
8889 + * You should have received a copy of the GNU General Public License along with
8890 + * this program; if not, write to the Free Software Foundation, Inc.,
8891 + * 51 Franklin St - Fifth Floor, Boston, MA 02110-1301 USA.
8892 + *
8893 + * Maintained at www.Open-FCoE.org
8894 + */
8895 +
8896 +#ifndef _LIBFCOE_H
8897 +#define _LIBFCOE_H
8898 +
8899 +#include <linux/netdevice.h>
8900 +#include <linux/skbuff.h>
8901 +#include <scsi/fc/fc_fcoe.h>
8902 +#include <scsi/libfc.h>
8903 +
8904 +/*
8905 + * per-CPU receive thread context for fcoe
8906 + */
8907 +struct fcoe_percpu_s {
8908 + int cpu;
8909 + struct task_struct *thread;
8910 + struct sk_buff_head fcoe_rx_list;
8911 + struct page *crc_eof_page;
8912 + int crc_eof_offset;
8913 +};
8914 +
8915 +/*
8916 + * the fcoe sw transport private data
8917 + */
8918 +struct fcoe_softc {
8919 + struct list_head list;
8920 + struct fc_lport *lp;
8921 + struct net_device *real_dev;
8922 + struct net_device *phys_dev; /* device with ethtool_ops */
8923 + struct packet_type fcoe_packet_type;
8924 + struct sk_buff_head fcoe_pending_queue;
8925 + u16 user_mfs; /* configured max frame size */
8926 +
8927 + u8 dest_addr[ETH_ALEN];
8928 + u8 ctl_src_addr[ETH_ALEN];
8929 + u8 data_src_addr[ETH_ALEN];
8930 + /*
8931 + * fcoe protocol address learning related stuff
8932 + */
8933 + u16 flogi_oxid;
8934 + u8 flogi_progress;
8935 + u8 address_mode;
8936 +};
8937 +
8938 +static inline struct fcoe_softc *fcoe_softc(
8939 + const struct fc_lport *lp)
8940 +{
8941 + return (struct fcoe_softc *)lport_priv(lp);
8942 +}
8943 +
8944 +static inline struct net_device *fcoe_netdev(
8945 + const struct fc_lport *lp)
8946 +{
8947 + return fcoe_softc(lp)->real_dev;
8948 +}
8949 +
8950 +static inline struct fcoe_hdr *skb_fcoe_header(const struct sk_buff *skb)
8951 +{
8952 + return (struct fcoe_hdr *)skb_network_header(skb);
8953 +}
8954 +
8955 +static inline int skb_fcoe_offset(const struct sk_buff *skb)
8956 +{
8957 + return skb_network_offset(skb);
8958 +}
8959 +
8960 +static inline struct fc_frame_header *skb_fc_header(const struct sk_buff *skb)
8961 +{
8962 + return (struct fc_frame_header *)skb_transport_header(skb);
8963 +}
8964 +
8965 +static inline int skb_fc_offset(const struct sk_buff *skb)
8966 +{
8967 + return skb_transport_offset(skb);
8968 +}
8969 +
8970 +static inline void skb_reset_fc_header(struct sk_buff *skb)
8971 +{
8972 + skb_reset_network_header(skb);
8973 + skb_set_transport_header(skb, skb_network_offset(skb) +
8974 + sizeof(struct fcoe_hdr));
8975 +}
8976 +
8977 +static inline bool skb_fc_is_data(const struct sk_buff *skb)
8978 +{
8979 + return skb_fc_header(skb)->fh_r_ctl == FC_RCTL_DD_SOL_DATA;
8980 +}
8981 +
8982 +static inline bool skb_fc_is_cmd(const struct sk_buff *skb)
8983 +{
8984 + return skb_fc_header(skb)->fh_r_ctl == FC_RCTL_DD_UNSOL_CMD;
8985 +}
8986 +
8987 +static inline bool skb_fc_has_exthdr(const struct sk_buff *skb)
8988 +{
8989 + return (skb_fc_header(skb)->fh_r_ctl == FC_RCTL_VFTH) ||
8990 + (skb_fc_header(skb)->fh_r_ctl == FC_RCTL_IFRH) ||
8991 + (skb_fc_header(skb)->fh_r_ctl == FC_RCTL_ENCH);
8992 +}
8993 +
8994 +static inline bool skb_fc_is_roff(const struct sk_buff *skb)
8995 +{
8996 + return skb_fc_header(skb)->fh_f_ctl[2] & FC_FC_REL_OFF;
8997 +}
8998 +
8999 +static inline u16 skb_fc_oxid(const struct sk_buff *skb)
9000 +{
9001 + return be16_to_cpu(skb_fc_header(skb)->fh_ox_id);
9002 +}
9003 +
9004 +static inline u16 skb_fc_rxid(const struct sk_buff *skb)
9005 +{
9006 + return be16_to_cpu(skb_fc_header(skb)->fh_rx_id);
9007 +}
9008 +
9009 +/* FIXME - DMA_BIDIRECTIONAL ? */
9010 +#define skb_cb(skb) ((struct fcoe_rcv_info *)&((skb)->cb[0]))
9011 +#define skb_cmd(skb) (skb_cb(skb)->fr_cmd)
9012 +#define skb_dir(skb) (skb_cmd(skb)->sc_data_direction)
9013 +static inline bool skb_fc_is_read(const struct sk_buff *skb)
9014 +{
9015 + if (skb_fc_is_cmd(skb) && skb_cmd(skb))
9016 + return skb_dir(skb) == DMA_FROM_DEVICE;
9017 + return false;
9018 +}
9019 +
9020 +static inline bool skb_fc_is_write(const struct sk_buff *skb)
9021 +{
9022 + if (skb_fc_is_cmd(skb) && skb_cmd(skb))
9023 + return skb_dir(skb) == DMA_TO_DEVICE;
9024 + return false;
9025 +}
9026 +
9027 +/* libfcoe funcs */
9028 +int fcoe_reset(struct Scsi_Host *shost);
9029 +u64 fcoe_wwn_from_mac(unsigned char mac[MAX_ADDR_LEN],
9030 + unsigned int scheme, unsigned int port);
9031 +
9032 +u32 fcoe_fc_crc(struct fc_frame *fp);
9033 +int fcoe_xmit(struct fc_lport *, struct fc_frame *);
9034 +int fcoe_rcv(struct sk_buff *, struct net_device *,
9035 + struct packet_type *, struct net_device *);
9036 +
9037 +int fcoe_percpu_receive_thread(void *arg);
9038 +void fcoe_clean_pending_queue(struct fc_lport *lp);
9039 +void fcoe_percpu_clean(struct fc_lport *lp);
9040 +void fcoe_watchdog(ulong vp);
9041 +int fcoe_link_ok(struct fc_lport *lp);
9042 +
9043 +struct fc_lport *fcoe_hostlist_lookup(const struct net_device *);
9044 +int fcoe_hostlist_add(const struct fc_lport *);
9045 +int fcoe_hostlist_remove(const struct fc_lport *);
9046 +
9047 +struct Scsi_Host *fcoe_host_alloc(struct scsi_host_template *, int);
9048 +int fcoe_libfc_config(struct fc_lport *, struct libfc_function_template *);
9049 +
9050 +/* fcoe sw hba */
9051 +int __init fcoe_sw_init(void);
9052 +int __exit fcoe_sw_exit(void);
9053 +#endif /* _LIBFCOE_H */
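
Editor's note: to make the intent of the new skb helpers concrete, the fragment below is a minimal sketch of how they might be used once skb->data points at the FCoE header, e.g. on the receive path; the function example_inspect_fcoe_skb and its printk output are illustrative assumptions, not declarations from this header.

#include <linux/kernel.h>
#include <linux/skbuff.h>
#include <scsi/libfcoe.h>

/*
 * Sketch using the inline helpers from libfcoe.h above; assumes
 * skb->data currently points at the FCoE header.
 */
static void example_inspect_fcoe_skb(struct sk_buff *skb)
{
	/* network header -> FCoE header, transport header -> FC header */
	skb_reset_fc_header(skb);

	if (skb_fc_is_cmd(skb))
		printk(KERN_DEBUG "unsolicited FCP command, OX_ID 0x%x\n",
		       skb_fc_oxid(skb));
	else if (skb_fc_is_data(skb))
		printk(KERN_DEBUG "solicited data frame, RX_ID 0x%x\n",
		       skb_fc_rxid(skb));
}

Note that the predicates only inspect fh_r_ctl, so a frame that is neither an unsolicited command nor solicited data falls through both branches.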