From: Greg Kroah-Hartman Date: Thu, 1 Aug 2013 23:22:29 +0000 (+0800) Subject: 3.4-stable patches X-Git-Tag: v3.0.89~7 X-Git-Url: http://git.ipfire.org/?a=commitdiff_plain;h=53a4175e5099ee74740399e6323144e31948567c;p=thirdparty%2Fkernel%2Fstable-queue.git 3.4-stable patches added patches: firewire-fix-libdc1394-flycap2-iso-event-regression.patch md-raid10-remove-use-after-free-bug.patch md-raid5-fix-interaction-of-replace-and-recovery.patch s390-move-dummy-io_remap_pfn_range-to-asm-pgtable.h.patch xen-evtchn-avoid-a-deadlock-when-unbinding-an-event-channel.patch zfcp-status-read-buffers-on-first-adapter-open-with-link-down.patch --- diff --git a/queue-3.4/firewire-fix-libdc1394-flycap2-iso-event-regression.patch b/queue-3.4/firewire-fix-libdc1394-flycap2-iso-event-regression.patch new file mode 100644 index 00000000000..42d2846962b --- /dev/null +++ b/queue-3.4/firewire-fix-libdc1394-flycap2-iso-event-regression.patch @@ -0,0 +1,102 @@ +From 0699a73af3811b66b1ab5650575acee5eea841ab Mon Sep 17 00:00:00 2001 +From: Clemens Ladisch +Date: Mon, 22 Jul 2013 21:32:09 +0200 +Subject: firewire: fix libdc1394/FlyCap2 iso event regression + +From: Clemens Ladisch + +commit 0699a73af3811b66b1ab5650575acee5eea841ab upstream. + +Commit 18d627113b83 (firewire: prevent dropping of completed iso packet +header data) was intended to be an obvious bug fix, but libdc1394 and +FlyCap2 depend on the old behaviour by ignoring all returned information +and thus not noticing that not all packets have been received yet. The +result was that the video frame buffers would be saved before they +contained the correct data. + +Reintroduce the old behaviour for old clients. + +Tested-by: Stepan Salenikovich +Tested-by: Josep Bosch +Cc: # 3.4+ +Signed-off-by: Clemens Ladisch +Signed-off-by: Stefan Richter +Signed-off-by: Greg Kroah-Hartman + +--- + drivers/firewire/core-cdev.c | 3 +++ + drivers/firewire/ohci.c | 10 ++++++++-- + include/linux/firewire-cdev.h | 4 ++-- + include/linux/firewire.h | 1 + + 4 files changed, 14 insertions(+), 4 deletions(-) + +--- a/drivers/firewire/core-cdev.c ++++ b/drivers/firewire/core-cdev.c +@@ -53,6 +53,7 @@ + #define FW_CDEV_KERNEL_VERSION 5 + #define FW_CDEV_VERSION_EVENT_REQUEST2 4 + #define FW_CDEV_VERSION_ALLOCATE_REGION_END 4 ++#define FW_CDEV_VERSION_AUTO_FLUSH_ISO_OVERFLOW 5 + + struct client { + u32 version; +@@ -998,6 +999,8 @@ static int ioctl_create_iso_context(stru + a->channel, a->speed, a->header_size, cb, client); + if (IS_ERR(context)) + return PTR_ERR(context); ++ if (client->version < FW_CDEV_VERSION_AUTO_FLUSH_ISO_OVERFLOW) ++ context->drop_overflow_headers = true; + + /* We only support one context at this time. 
*/ + spin_lock_irq(&client->lock); +--- a/drivers/firewire/ohci.c ++++ b/drivers/firewire/ohci.c +@@ -2694,8 +2694,11 @@ static void copy_iso_headers(struct iso_ + { + u32 *ctx_hdr; + +- if (ctx->header_length + ctx->base.header_size > PAGE_SIZE) ++ if (ctx->header_length + ctx->base.header_size > PAGE_SIZE) { ++ if (ctx->base.drop_overflow_headers) ++ return; + flush_iso_completions(ctx); ++ } + + ctx_hdr = ctx->header + ctx->header_length; + ctx->last_timestamp = (u16)le32_to_cpu((__force __le32)dma_hdr[0]); +@@ -2855,8 +2858,11 @@ static int handle_it_packet(struct conte + + sync_it_packet_for_cpu(context, d); + +- if (ctx->header_length + 4 > PAGE_SIZE) ++ if (ctx->header_length + 4 > PAGE_SIZE) { ++ if (ctx->base.drop_overflow_headers) ++ return 1; + flush_iso_completions(ctx); ++ } + + ctx_hdr = ctx->header + ctx->header_length; + ctx->last_timestamp = le16_to_cpu(last->res_count); +--- a/include/linux/firewire-cdev.h ++++ b/include/linux/firewire-cdev.h +@@ -215,8 +215,8 @@ struct fw_cdev_event_request2 { + * with the %FW_CDEV_ISO_INTERRUPT bit set, when explicitly requested with + * %FW_CDEV_IOC_FLUSH_ISO, or when there have been so many completed packets + * without the interrupt bit set that the kernel's internal buffer for @header +- * is about to overflow. (In the last case, kernels with ABI version < 5 drop +- * header data up to the next interrupt packet.) ++ * is about to overflow. (In the last case, ABI versions < 5 drop header data ++ * up to the next interrupt packet.) + * + * Isochronous transmit events (context type %FW_CDEV_ISO_CONTEXT_TRANSMIT): + * +--- a/include/linux/firewire.h ++++ b/include/linux/firewire.h +@@ -409,6 +409,7 @@ struct fw_iso_context { + int type; + int channel; + int speed; ++ bool drop_overflow_headers; + size_t header_size; + union { + fw_iso_callback_t sc; diff --git a/queue-3.4/md-raid10-remove-use-after-free-bug.patch b/queue-3.4/md-raid10-remove-use-after-free-bug.patch new file mode 100644 index 00000000000..e1de49376e5 --- /dev/null +++ b/queue-3.4/md-raid10-remove-use-after-free-bug.patch @@ -0,0 +1,49 @@ +From 0eb25bb027a100f5a9df8991f2f628e7d851bc1e Mon Sep 17 00:00:00 2001 +From: NeilBrown +Date: Wed, 24 Jul 2013 15:37:42 +1000 +Subject: md/raid10: remove use-after-free bug. + +From: NeilBrown + +commit 0eb25bb027a100f5a9df8991f2f628e7d851bc1e upstream. + +We always need to be careful when calling generic_make_request, as it +can start a chain of events which might free something that we are +using. + +Here is one place I wasn't careful enough. If the wbio2 is not in +use, then it might get freed at the first generic_make_request call. +So perform all necessary tests first. + +This bug was introduced in 3.3-rc3 (24afd80d99) and can cause an +oops, so fix is suitable for any -stable since then. + +Signed-off-by: NeilBrown +Signed-off-by: Greg Kroah-Hartman + +--- + drivers/md/raid10.c | 8 +++++++- + 1 file changed, 7 insertions(+), 1 deletion(-) + +--- a/drivers/md/raid10.c ++++ b/drivers/md/raid10.c +@@ -2029,12 +2029,18 @@ static void recovery_request_write(struc + d = r10_bio->devs[1].devnum; + wbio = r10_bio->devs[1].bio; + wbio2 = r10_bio->devs[1].repl_bio; ++ /* Need to test wbio2->bi_end_io before we call ++ * generic_make_request as if the former is NULL, ++ * the latter is free to free wbio2. 
++ */
++ if (wbio2 && !wbio2->bi_end_io)
++ wbio2 = NULL;
+ if (wbio->bi_end_io) {
+ atomic_inc(&conf->mirrors[d].rdev->nr_pending);
+ md_sync_acct(conf->mirrors[d].rdev->bdev, wbio->bi_size >> 9);
+ generic_make_request(wbio);
+ }
+- if (wbio2 && wbio2->bi_end_io) {
++ if (wbio2) {
+ atomic_inc(&conf->mirrors[d].replacement->nr_pending);
+ md_sync_acct(conf->mirrors[d].replacement->bdev,
+ wbio2->bi_size >> 9);
diff --git a/queue-3.4/md-raid5-fix-interaction-of-replace-and-recovery.patch b/queue-3.4/md-raid5-fix-interaction-of-replace-and-recovery.patch
new file mode 100644
index 00000000000..027c78d6a21
--- /dev/null
+++ b/queue-3.4/md-raid5-fix-interaction-of-replace-and-recovery.patch
@@ -0,0 +1,106 @@
+From f94c0b6658c7edea8bc19d13be321e3860a3fa54 Mon Sep 17 00:00:00 2001
+From: NeilBrown
+Date: Mon, 22 Jul 2013 12:57:21 +1000
+Subject: md/raid5: fix interaction of 'replace' and 'recovery'.
+
+From: NeilBrown
+
+commit f94c0b6658c7edea8bc19d13be321e3860a3fa54 upstream.
+
+If a device in a RAID4/5/6 is being replaced while another is being
+recovered, then the writes to the replacement device currently don't
+happen, resulting in corruption when the replacement completes and the
+new drive takes over.
+
+This is because the replacement writes are only triggered when
+'s.replacing' is set and not when the similar 's.sync' is set (which
+is the case during resync and recovery - it means all devices need to
+be read).
+
+So schedule those writes when s.replacing is set as well.
+
+In this case we cannot use "STRIPE_INSYNC" to record that the
+replacement has happened as that is needed for recording that any
+parity calculation is complete. So introduce STRIPE_REPLACED to
+record if the replacement has happened.
+
+For safety we should also check that STRIPE_COMPUTE_RUN is not set.
+This has a similar effect to the "s.locked == 0" test. The latter
+ensures that no IO has been flagged but not started. The former
+checks if any parity calculation has been flagged but not started.
+We must wait for both of these to complete before triggering the
+'replace'.
+
+Add a similar test to the subsequent check for "are we finished yet".
+This possibly isn't needed (is subsumed in the STRIPE_INSYNC test),
+but it makes it more obvious that the REPLACE will happen before we
+think we are finished.
+
+Finally if a NeedReplace device is not UPTODATE then that is an
+error. We really must trigger a warning.
+
+This bug was introduced in commit 9a3e1101b827a59ac9036a672f5fa8d5279d0fe2
+(md/raid5: detect and handle replacements during recovery.)
+which introduced replacement for raid5.
+That was in 3.3-rc3, so any stable kernel since then would benefit
+from this fix.
+
+Reported-by: qindehua <13691222965@163.com>
+Tested-by: qindehua
+Signed-off-by: NeilBrown
+Signed-off-by: Greg Kroah-Hartman
+
+---
+ drivers/md/raid5.c | 15 ++++++++++-----
+ drivers/md/raid5.h | 1 +
+ 2 files changed, 11 insertions(+), 5 deletions(-)
+
+--- a/drivers/md/raid5.c
++++ b/drivers/md/raid5.c
+@@ -3326,6 +3326,7 @@ static void handle_stripe(struct stripe_
+ if (test_and_clear_bit(STRIPE_SYNC_REQUESTED, &sh->state)) {
+ set_bit(STRIPE_SYNCING, &sh->state);
+ clear_bit(STRIPE_INSYNC, &sh->state);
++ clear_bit(STRIPE_REPLACED, &sh->state);
+ }
+ clear_bit(STRIPE_DELAYED, &sh->state);
+
+@@ -3465,19 +3466,23 @@ static void handle_stripe(struct stripe_
+ handle_parity_checks5(conf, sh, &s, disks);
+ }
+
+- if (s.replacing && s.locked == 0
+- && !test_bit(STRIPE_INSYNC, &sh->state)) {
++ if ((s.replacing || s.syncing) && s.locked == 0
++ && !test_bit(STRIPE_COMPUTE_RUN, &sh->state)
++ && !test_bit(STRIPE_REPLACED, &sh->state)) {
+ /* Write out to replacement devices where possible */
+ for (i = 0; i < conf->raid_disks; i++)
+- if (test_bit(R5_UPTODATE, &sh->dev[i].flags) &&
+- test_bit(R5_NeedReplace, &sh->dev[i].flags)) {
++ if (test_bit(R5_NeedReplace, &sh->dev[i].flags)) {
++ WARN_ON(!test_bit(R5_UPTODATE, &sh->dev[i].flags));
+ set_bit(R5_WantReplace, &sh->dev[i].flags);
+ set_bit(R5_LOCKED, &sh->dev[i].flags);
+ s.locked++;
+ }
+- set_bit(STRIPE_INSYNC, &sh->state);
++ if (s.replacing)
++ set_bit(STRIPE_INSYNC, &sh->state);
++ set_bit(STRIPE_REPLACED, &sh->state);
+ }
+ if ((s.syncing || s.replacing) && s.locked == 0 &&
++ !test_bit(STRIPE_COMPUTE_RUN, &sh->state) &&
+ test_bit(STRIPE_INSYNC, &sh->state)) {
+ md_done_sync(conf->mddev, STRIPE_SECTORS, 1);
+ clear_bit(STRIPE_SYNCING, &sh->state);
+--- a/drivers/md/raid5.h
++++ b/drivers/md/raid5.h
+@@ -306,6 +306,7 @@ enum {
+ STRIPE_SYNC_REQUESTED,
+ STRIPE_SYNCING,
+ STRIPE_INSYNC,
++ STRIPE_REPLACED,
+ STRIPE_PREREAD_ACTIVE,
+ STRIPE_DELAYED,
+ STRIPE_DEGRADED,
diff --git a/queue-3.4/s390-move-dummy-io_remap_pfn_range-to-asm-pgtable.h.patch b/queue-3.4/s390-move-dummy-io_remap_pfn_range-to-asm-pgtable.h.patch
new file mode 100644
index 00000000000..9639ae0c635
--- /dev/null
+++ b/queue-3.4/s390-move-dummy-io_remap_pfn_range-to-asm-pgtable.h.patch
@@ -0,0 +1,47 @@
+From 4f2e29031e6c67802e7370292dd050fd62f337ee Mon Sep 17 00:00:00 2001
+From: Linus Torvalds
+Date: Wed, 17 Apr 2013 08:46:19 -0700
+Subject: s390: move dummy io_remap_pfn_range() to asm/pgtable.h
+
+From: Linus Torvalds
+
+commit 4f2e29031e6c67802e7370292dd050fd62f337ee upstream.
+
+Commit b4cbb197c7e7 ("vm: add vm_iomap_memory() helper function") added
+a helper function wrapper around io_remap_pfn_range(), and every other
+architecture defined it in <asm/pgtable.h>.
+
+The s390 choice of <asm/io.h> may make sense, but is not very convenient
+for this case, and gratuitous differences like that cause unexpected errors like this:
+
+ mm/memory.c: In function 'vm_iomap_memory':
+ mm/memory.c:2439:2: error: implicit declaration of function 'io_remap_pfn_range' [-Werror=implicit-function-declaration]
+
+Glory be the kbuild test robot who noticed this, bisected it, and
+reported it to the guilty parties (ie me).
+ +Cc: Martin Schwidefsky +Cc: Heiko Carstens +Signed-off-by: Linus Torvalds +[bwh: Backported to 3.2: the macro was not defined, so this is an addition + and not a move] +Signed-off-by: Ben Hutchings +Signed-off-by: Greg Kroah-Hartman + +--- + arch/s390/include/asm/pgtable.h | 4 ++++ + 1 file changed, 4 insertions(+) + +--- a/arch/s390/include/asm/pgtable.h ++++ b/arch/s390/include/asm/pgtable.h +@@ -67,6 +67,10 @@ static inline int is_zero_pfn(unsigned l + + #define my_zero_pfn(addr) page_to_pfn(ZERO_PAGE(addr)) + ++/* TODO: s390 cannot support io_remap_pfn_range... */ ++#define io_remap_pfn_range(vma, vaddr, pfn, size, prot) \ ++ remap_pfn_range(vma, vaddr, pfn, size, prot) ++ + #endif /* !__ASSEMBLY__ */ + + /* diff --git a/queue-3.4/series b/queue-3.4/series index ff2a0befb30..13d1e4ff9e8 100644 --- a/queue-3.4/series +++ b/queue-3.4/series @@ -28,3 +28,9 @@ drm-radeon-improve-dac-adjust-heuristics-for-legacy-pdac.patch drm-radeon-atom-initialize-more-atom-interpretor-elements-to-0.patch usb-serial-ftdi_sio-add-more-rt-systems-ftdi-devices.patch livelock-avoidance-in-sget.patch +md-raid5-fix-interaction-of-replace-and-recovery.patch +md-raid10-remove-use-after-free-bug.patch +xen-evtchn-avoid-a-deadlock-when-unbinding-an-event-channel.patch +firewire-fix-libdc1394-flycap2-iso-event-regression.patch +zfcp-status-read-buffers-on-first-adapter-open-with-link-down.patch +s390-move-dummy-io_remap_pfn_range-to-asm-pgtable.h.patch diff --git a/queue-3.4/xen-evtchn-avoid-a-deadlock-when-unbinding-an-event-channel.patch b/queue-3.4/xen-evtchn-avoid-a-deadlock-when-unbinding-an-event-channel.patch new file mode 100644 index 00000000000..effc3cd1c20 --- /dev/null +++ b/queue-3.4/xen-evtchn-avoid-a-deadlock-when-unbinding-an-event-channel.patch @@ -0,0 +1,103 @@ +From 179fbd5a45f0d4034cc6fd37b8d367a3b79663c4 Mon Sep 17 00:00:00 2001 +From: David Vrabel +Date: Fri, 19 Jul 2013 15:51:58 +0100 +Subject: xen/evtchn: avoid a deadlock when unbinding an event channel + +From: David Vrabel + +commit 179fbd5a45f0d4034cc6fd37b8d367a3b79663c4 upstream. + +Unbinding an event channel (either with the ioctl or when the evtchn +device is closed) may deadlock because disable_irq() is called with +port_user_lock held which is also locked by the interrupt handler. + +Think of the IOCTL_EVTCHN_UNBIND is being serviced, the routine has +just taken the lock, and an interrupt happens. The evtchn_interrupt +is invoked, tries to take the lock and spins forever. + +A quick glance at the code shows that the spinlock is a local IRQ +variant. Unfortunately that does not help as "disable_irq() waits for +the interrupt handler on all CPUs to stop running. If the irq occurs +on another VCPU, it tries to take port_user_lock and can't because +the unbind ioctl is holding it." (from David). Hence we cannot +depend on the said spinlock to protect us. We could make it a system +wide IRQ disable spinlock but there is a better way. + +We can piggyback on the fact that the existence of the spinlock is +to make get_port_user() checks be up-to-date. And we can alter those +checks to not depend on the spin lock (as it's protected by u->bind_mutex +in the ioctl) and can remove the unnecessary locking (this is +IOCTL_EVTCHN_UNBIND) path. + +In the interrupt handler we cannot use the mutex, but we do not +need it. + +"The unbind disables the irq before making the port user stale, so when +you clear it you are guaranteed that the interrupt handler that might +use that port cannot be running." (from David). 
+ +Hence this patch removes the spinlock usage on the teardown path +and piggybacks on disable_irq happening before we muck with the +get_port_user() data. This ensures that the interrupt handler will +never run on stale data. + +Signed-off-by: David Vrabel +Signed-off-by: Konrad Rzeszutek Wilk +[v1: Expanded the commit description a bit] +Signed-off-by: Jonghwan Choi +Signed-off-by: Greg Kroah-Hartman + +--- + drivers/xen/evtchn.c | 21 ++------------------- + 1 file changed, 2 insertions(+), 19 deletions(-) + +--- a/drivers/xen/evtchn.c ++++ b/drivers/xen/evtchn.c +@@ -377,18 +377,12 @@ static long evtchn_ioctl(struct file *fi + if (unbind.port >= NR_EVENT_CHANNELS) + break; + +- spin_lock_irq(&port_user_lock); +- + rc = -ENOTCONN; +- if (get_port_user(unbind.port) != u) { +- spin_unlock_irq(&port_user_lock); ++ if (get_port_user(unbind.port) != u) + break; +- } + + disable_irq(irq_from_evtchn(unbind.port)); + +- spin_unlock_irq(&port_user_lock); +- + evtchn_unbind_from_user(u, unbind.port); + + rc = 0; +@@ -488,26 +482,15 @@ static int evtchn_release(struct inode * + int i; + struct per_user_data *u = filp->private_data; + +- spin_lock_irq(&port_user_lock); +- +- free_page((unsigned long)u->ring); +- + for (i = 0; i < NR_EVENT_CHANNELS; i++) { + if (get_port_user(i) != u) + continue; + + disable_irq(irq_from_evtchn(i)); +- } +- +- spin_unlock_irq(&port_user_lock); +- +- for (i = 0; i < NR_EVENT_CHANNELS; i++) { +- if (get_port_user(i) != u) +- continue; +- + evtchn_unbind_from_user(get_port_user(i), i); + } + ++ free_page((unsigned long)u->ring); + kfree(u->name); + kfree(u); + diff --git a/queue-3.4/zfcp-status-read-buffers-on-first-adapter-open-with-link-down.patch b/queue-3.4/zfcp-status-read-buffers-on-first-adapter-open-with-link-down.patch new file mode 100644 index 00000000000..e940d77668d --- /dev/null +++ b/queue-3.4/zfcp-status-read-buffers-on-first-adapter-open-with-link-down.patch @@ -0,0 +1,141 @@ +From 9edf7d75ee5f21663a0183d21f702682d0ef132f Mon Sep 17 00:00:00 2001 +From: Steffen Maier +Date: Fri, 26 Apr 2013 17:34:54 +0200 +Subject: [SCSI] zfcp: status read buffers on first adapter open with link down + +From: Steffen Maier + +commit 9edf7d75ee5f21663a0183d21f702682d0ef132f upstream. + +Commit 64deb6efdc5504ce97b5c1c6f281fffbc150bd93 +"[SCSI] zfcp: Use status_read_buf_num provided by FCP channel" +started using a value returned by the channel but only evaluated the value +if the fabric link is up. +Commit 8d88cf3f3b9af4713642caeb221b6d6a42019001 +"[SCSI] zfcp: Update status read mempool" +introduced mempool resizings based on the above value. +On setting an FCP device online for the very first time since boot, a new +zeroed adapter object is allocated. If the link is down, the number of +status read requests remains zero. Since just the config data exchange is +incomplete, we proceed with adapter open recovery. However, we +unconditionally call mempool_resize with adapter->stat_read_buf_num == 0 in +this case. + +This causes a kernel message "kernel BUG at mm/mempool.c:131!" in process +"zfcperp" with last function mempool_resize in Krnl PSW +and zfcp_erp_thread in the Call Trace. + +Don't evaluate channel values which are invalid on link down. The number of +status read requests is always valid, evaluated, and set to a positive +minimum greater than zero. The adapter open recovery can proceed and the +channel has status read buffers to inform us on a future link up event. 
+While we are not aware of any other code path that could result in mempool +resize attempts of size zero, we still also initialize the number of status +read buffers to be posted to a static minimum number on adapter object +allocation. + +Backported for 3.4-stable. commit a53c8fa since v3.6-rc1 unified +copyright messages, e.g: revise such messages 'Copyright IBM Corporation' +as 'Copyright IBM Corp', so updated the messages as a53c8fa did. + +Signed-off-by: Steffen Maier +Cc: #2.6.35+ +Signed-off-by: James Bottomley +Signed-off-by: Zhouping Liu +Signed-off-by: Greg Kroah-Hartman + +--- + drivers/s390/scsi/zfcp_aux.c | 5 ++++- + drivers/s390/scsi/zfcp_fsf.c | 23 ++++++++++++++++------- + 2 files changed, 20 insertions(+), 8 deletions(-) + +--- a/drivers/s390/scsi/zfcp_aux.c ++++ b/drivers/s390/scsi/zfcp_aux.c +@@ -3,7 +3,7 @@ + * + * Module interface and handling of zfcp data structures. + * +- * Copyright IBM Corporation 2002, 2010 ++ * Copyright IBM Corp. 2002, 2013 + */ + + /* +@@ -23,6 +23,7 @@ + * Christof Schmitt + * Martin Petermann + * Sven Schuetz ++ * Steffen Maier + */ + + #define KMSG_COMPONENT "zfcp" +@@ -415,6 +416,8 @@ struct zfcp_adapter *zfcp_adapter_enqueu + adapter->dma_parms.max_segment_size = ZFCP_QDIO_SBALE_LEN; + adapter->ccw_device->dev.dma_parms = &adapter->dma_parms; + ++ adapter->stat_read_buf_num = FSF_STATUS_READS_RECOM; ++ + if (!zfcp_scsi_adapter_register(adapter)) + return adapter; + +--- a/drivers/s390/scsi/zfcp_fsf.c ++++ b/drivers/s390/scsi/zfcp_fsf.c +@@ -3,7 +3,7 @@ + * + * Implementation of FSF commands. + * +- * Copyright IBM Corporation 2002, 2010 ++ * Copyright IBM Corp. 2002, 2013 + */ + + #define KMSG_COMPONENT "zfcp" +@@ -483,12 +483,8 @@ static int zfcp_fsf_exchange_config_eval + + fc_host_port_name(shost) = nsp->fl_wwpn; + fc_host_node_name(shost) = nsp->fl_wwnn; +- fc_host_port_id(shost) = ntoh24(bottom->s_id); +- fc_host_speed(shost) = +- zfcp_fsf_convert_portspeed(bottom->fc_link_speed); + fc_host_supported_classes(shost) = FC_COS_CLASS2 | FC_COS_CLASS3; + +- adapter->hydra_version = bottom->adapter_type; + adapter->timer_ticks = bottom->timer_interval & ZFCP_FSF_TIMER_INT_MASK; + adapter->stat_read_buf_num = max(bottom->status_read_buf_num, + (u16)FSF_STATUS_READS_RECOM); +@@ -496,6 +492,19 @@ static int zfcp_fsf_exchange_config_eval + if (fc_host_permanent_port_name(shost) == -1) + fc_host_permanent_port_name(shost) = fc_host_port_name(shost); + ++ zfcp_scsi_set_prot(adapter); ++ ++ /* no error return above here, otherwise must fix call chains */ ++ /* do not evaluate invalid fields */ ++ if (req->qtcb->header.fsf_status == FSF_EXCHANGE_CONFIG_DATA_INCOMPLETE) ++ return 0; ++ ++ fc_host_port_id(shost) = ntoh24(bottom->s_id); ++ fc_host_speed(shost) = ++ zfcp_fsf_convert_portspeed(bottom->fc_link_speed); ++ ++ adapter->hydra_version = bottom->adapter_type; ++ + switch (bottom->fc_topology) { + case FSF_TOPO_P2P: + adapter->peer_d_id = ntoh24(bottom->peer_d_id); +@@ -517,8 +526,6 @@ static int zfcp_fsf_exchange_config_eval + return -EIO; + } + +- zfcp_scsi_set_prot(adapter); +- + return 0; + } + +@@ -569,6 +576,8 @@ static void zfcp_fsf_exchange_config_dat + &adapter->status); + zfcp_fsf_link_down_info_eval(req, + &qtcb->header.fsf_status_qual.link_down_info); ++ if (zfcp_fsf_exchange_config_evaluate(req)) ++ return; + break; + default: + zfcp_erp_adapter_shutdown(adapter, 0, "fsecdh3");