--- /dev/null
+From: Jamie Wellnitz <jamie.wellnitz@emulex.com>
+Subject: Update lpfc to 8.2.8.3
+References: bnc#420767
+
+This patch updates the SLES 11 inbox lpfc driver to 8.2.8.3 which has several
+changes, mainly bugfixes:
+
+ * Changed version number to 8.2.8.3
+ * Resolved uninitialized node access (CR 83287)
+ * Fixed failing ioctl commands (CR 83850)
+ * Cosmetic coding style clean up
+ * Fix echotest failure when NPIV is enabled (CR 75009)
+ * Fixed Port busy events
+ * Back out slow vports fix (CR 83103)
+ * Added a vendor unique RSCN event to send entire payload to mgmt application
+ * Fixed internal loopback on Hornet hardware (CR 83323)
+ * Fixed sysfs write handler for mailbox interface (CR 83674)
+ * Implement driver support for Power Management Suspend/Resume operations (CR
+ 74378)
+ * Changed version number to 8.2.8.2
+ * Added data structures required for new events.
+ * Streamlined interrupt enable/disable logic into helper routines
+ * Fixed incorrect decrement of cmd_pending count. (CR 83286)
+ * Fixed internal and external loopback on Hornet. (CR 83323)
+ * Removed unnecessary sleeps during HBA initialization. (CR 82846)
+ * Fixed RSCN address format not handled properly. (CR 82252)
+ * Fixed unload driver with vports locks up driver (CR 83334)
+ * Avoid polling HBA Error Attention when HBA's PCI channel is offline
+
+Signed-off-by: Jamie Wellnitz <jamie.wellnitz@emulex.com>
+Signed-off-by: Hannes Reinecke <hare@suse.de>
+
+diff --git a/drivers/scsi/lpfc/lpfc_attr.c b/drivers/scsi/lpfc/lpfc_attr.c
+index 273aa4f..8e94902 100644
+--- a/drivers/scsi/lpfc/lpfc_attr.c
++++ b/drivers/scsi/lpfc/lpfc_attr.c
+@@ -3531,9 +3531,6 @@ sysfs_mbox_write(struct kobject *kobj, struct bin_attribute *bin_attr,
+ uint8_t *ext;
+ uint32_t size;
+
+- if ((count + off) > MAILBOX_CMD_SIZE)
+- return -ERANGE;
+-
+ if (off % 4 || count % 4 || (unsigned long)buf % 4)
+ return -EINVAL;
+
+diff --git a/drivers/scsi/lpfc/lpfc_crtn.h b/drivers/scsi/lpfc/lpfc_crtn.h
+index 0c90479..a93c555 100644
+--- a/drivers/scsi/lpfc/lpfc_crtn.h
++++ b/drivers/scsi/lpfc/lpfc_crtn.h
+@@ -323,11 +323,10 @@ void lpfc_fabric_abort_nport(struct lpfc_nodelist *);
+ void lpfc_fabric_abort_hba(struct lpfc_hba *);
+ void lpfc_fabric_block_timeout(unsigned long);
+ void lpfc_unblock_fabric_iocbs(struct lpfc_hba *);
+-void lpfc_adjust_queue_depth(struct lpfc_hba *);
++void lpfc_rampdown_queue_depth(struct lpfc_hba *);
+ void lpfc_ramp_down_queue_handler(struct lpfc_hba *);
+ void lpfc_ramp_up_queue_handler(struct lpfc_hba *);
+ void lpfc_scsi_dev_block(struct lpfc_hba *);
+-void lpfc_scsi_dev_rescan(struct lpfc_hba *);
+
+ void
+ lpfc_send_els_failure_event(struct lpfc_hba *, struct lpfc_iocbq *,
+diff --git a/drivers/scsi/lpfc/lpfc_els.c b/drivers/scsi/lpfc/lpfc_els.c
+index bce59ec..a95815e 100644
+--- a/drivers/scsi/lpfc/lpfc_els.c
++++ b/drivers/scsi/lpfc/lpfc_els.c
+@@ -224,7 +224,11 @@ lpfc_prep_els_iocb(struct lpfc_vport *vport, uint8_t expectRsp,
+ /* For ELS_REQUEST64_CR, use the VPI by default */
+ icmd->ulpContext = vport->vpi;
+ icmd->ulpCt_h = 0;
+- icmd->ulpCt_l = 1;
++ /* The CT field must be 0=INVALID_RPI for the ECHO cmd */
++ if (elscmd == ELS_CMD_ECHO)
++ icmd->ulpCt_l = 0; /* context = invalid RPI */
++ else
++ icmd->ulpCt_l = 1; /* context = VPI */
+ }
+
+ bpl = (struct ulp_bde64 *) pbuflist->virt;
+@@ -2504,6 +2508,15 @@ lpfc_els_retry(struct lpfc_hba *phba, struct lpfc_iocbq *cmdiocb,
+ case IOSTAT_LOCAL_REJECT:
+ switch ((irsp->un.ulpWord[4] & 0xff)) {
+ case IOERR_LOOP_OPEN_FAILURE:
++ if (cmd == ELS_CMD_FLOGI) {
++ if (PCI_DEVICE_ID_HORNET ==
++ phba->pcidev->device) {
++ phba->fc_topology = TOPOLOGY_LOOP;
++ phba->pport->fc_myDID = 0;
++ phba->alpa_map[0] = 0;
++ phba->alpa_map[1] = 0;
++ }
++ }
+ if (cmd == ELS_CMD_PLOGI && cmdiocb->retry == 0)
+ delay = 1000;
+ retry = 1;
+@@ -3870,27 +3883,21 @@ lpfc_rscn_payload_check(struct lpfc_vport *vport, uint32_t did)
+ while (payload_len) {
+ rscn_did.un.word = be32_to_cpu(*lp++);
+ payload_len -= sizeof(uint32_t);
+- switch (rscn_did.un.b.resv) {
+- case 0: /* Single N_Port ID effected */
++ switch (rscn_did.un.b.resv & RSCN_ADDRESS_FORMAT_MASK) {
++ case RSCN_ADDRESS_FORMAT_PORT:
+ if (ns_did.un.word == rscn_did.un.word)
+ goto return_did_out;
+ break;
+- case 1: /* Whole N_Port Area effected */
++ case RSCN_ADDRESS_FORMAT_AREA:
+ if ((ns_did.un.b.domain == rscn_did.un.b.domain)
+ && (ns_did.un.b.area == rscn_did.un.b.area))
+ goto return_did_out;
+ break;
+- case 2: /* Whole N_Port Domain effected */
++ case RSCN_ADDRESS_FORMAT_DOMAIN:
+ if (ns_did.un.b.domain == rscn_did.un.b.domain)
+ goto return_did_out;
+ break;
+- default:
+- /* Unknown Identifier in RSCN node */
+- lpfc_printf_vlog(vport, KERN_ERR, LOG_DISCOVERY,
+- "0217 Unknown Identifier in "
+- "RSCN payload Data: x%x\n",
+- rscn_did.un.word);
+- case 3: /* Whole Fabric effected */
++ case RSCN_ADDRESS_FORMAT_FABRIC:
+ goto return_did_out;
+ }
+ }
+@@ -3934,6 +3941,49 @@ lpfc_rscn_recovery_check(struct lpfc_vport *vport)
+ }
+
+ /**
++ * lpfc_send_rscn_event: Send an RSCN event to management application.
++ * @vport: pointer to a host virtual N_Port data structure.
++ * @cmdiocb: pointer to lpfc command iocb data structure.
++ *
++ * lpfc_send_rscn_event sends an RSCN netlink event to management
++ * applications.
++ */
++static void
++lpfc_send_rscn_event(struct lpfc_vport *vport,
++ struct lpfc_iocbq *cmdiocb)
++{
++ struct lpfc_dmabuf *pcmd;
++ struct Scsi_Host *shost = lpfc_shost_from_vport(vport);
++ uint32_t *payload_ptr;
++ uint32_t payload_len;
++ struct lpfc_rscn_event_header *rscn_event_data;
++
++ pcmd = (struct lpfc_dmabuf *) cmdiocb->context2;
++ payload_ptr = (uint32_t *) pcmd->virt;
++ payload_len = be32_to_cpu(*payload_ptr & ~ELS_CMD_MASK);
++
++ rscn_event_data = kmalloc(sizeof(struct lpfc_rscn_event_header) +
++ payload_len, GFP_KERNEL);
++ if (!rscn_event_data) {
++ lpfc_printf_vlog(vport, KERN_ERR, LOG_ELS,
++ "0147 Failed to allocate memory for RSCN event\n");
++ return;
++ }
++ rscn_event_data->event_type = FC_REG_RSCN_EVENT;
++ rscn_event_data->payload_length = payload_len;
++ memcpy(rscn_event_data->rscn_payload, payload_ptr,
++ payload_len);
++
++ fc_host_post_vendor_event(shost,
++ fc_get_event_number(),
++ sizeof(struct lpfc_els_event_header) + payload_len,
++ (char *)rscn_event_data,
++ LPFC_NL_VENDOR_ID);
++
++ kfree(rscn_event_data);
++}
++
++/**
+ * lpfc_els_rcv_rscn: Process an unsolicited rscn iocb.
+ * @vport: pointer to a host virtual N_Port data structure.
+ * @cmdiocb: pointer to lpfc command iocb data structure.
+@@ -3980,6 +4030,10 @@ lpfc_els_rcv_rscn(struct lpfc_vport *vport, struct lpfc_iocbq *cmdiocb,
+ "0214 RSCN received Data: x%x x%x x%x x%x\n",
+ vport->fc_flag, payload_len, *lp,
+ vport->fc_rscn_id_cnt);
++
++ /* Send an RSCN event to the management application */
++ lpfc_send_rscn_event(vport, cmdiocb);
++
+ for (i = 0; i < payload_len/sizeof(uint32_t); i++)
+ fc_host_post_event(shost, fc_get_event_number(),
+ FCH_EVT_RSCN, lp[i]);
+@@ -5532,7 +5586,7 @@ lpfc_send_els_failure_event(struct lpfc_hba *phba,
+ fc_get_event_number(),
+ sizeof(lsrjt_event),
+ (char *)&lsrjt_event,
+- SCSI_NL_VID_TYPE_PCI | PCI_VENDOR_ID_EMULEX);
++ LPFC_NL_VENDOR_ID);
+ return;
+ }
+ if ((rspiocbp->iocb.ulpStatus == IOSTAT_NPORT_BSY) ||
+@@ -5550,7 +5604,7 @@ lpfc_send_els_failure_event(struct lpfc_hba *phba,
+ fc_get_event_number(),
+ sizeof(fabric_event),
+ (char *)&fabric_event,
+- SCSI_NL_VID_TYPE_PCI | PCI_VENDOR_ID_EMULEX);
++ LPFC_NL_VENDOR_ID);
+ return;
+ }
+
+@@ -5568,32 +5622,68 @@ lpfc_send_els_failure_event(struct lpfc_hba *phba,
+ static void
+ lpfc_send_els_event(struct lpfc_vport *vport,
+ struct lpfc_nodelist *ndlp,
+- uint32_t cmd)
++ uint32_t *payload)
+ {
+- struct lpfc_els_event_header els_data;
++ struct lpfc_els_event_header *els_data = NULL;
++ struct lpfc_logo_event *logo_data = NULL;
+ struct Scsi_Host *shost = lpfc_shost_from_vport(vport);
+
+- els_data.event_type = FC_REG_ELS_EVENT;
+- switch (cmd) {
++ if (*payload == ELS_CMD_LOGO) {
++ logo_data = kmalloc(sizeof(struct lpfc_logo_event), GFP_KERNEL);
++ if (!logo_data) {
++ lpfc_printf_vlog(vport, KERN_ERR, LOG_ELS,
++ "0148 Failed to allocate memory "
++ "for LOGO event\n");
++ return;
++ }
++ els_data = &logo_data->header;
++ } else {
++ els_data = kmalloc(sizeof(struct lpfc_els_event_header),
++ GFP_KERNEL);
++ if (!els_data) {
++ lpfc_printf_vlog(vport, KERN_ERR, LOG_ELS,
++ "0149 Failed to allocate memory "
++ "for ELS event\n");
++ return;
++ }
++ }
++ els_data->event_type = FC_REG_ELS_EVENT;
++ switch (*payload) {
+ case ELS_CMD_PLOGI:
+- els_data.subcategory = LPFC_EVENT_PLOGI_RCV;
++ els_data->subcategory = LPFC_EVENT_PLOGI_RCV;
+ break;
+ case ELS_CMD_PRLO:
+- els_data.subcategory = LPFC_EVENT_PRLO_RCV;
++ els_data->subcategory = LPFC_EVENT_PRLO_RCV;
+ break;
+ case ELS_CMD_ADISC:
+- els_data.subcategory = LPFC_EVENT_ADISC_RCV;
++ els_data->subcategory = LPFC_EVENT_ADISC_RCV;
++ break;
++ case ELS_CMD_LOGO:
++ els_data->subcategory = LPFC_EVENT_LOGO_RCV;
++ /* Copy the WWPN in the LOGO payload */
++ memcpy(logo_data->logo_wwpn, &payload[2],
++ sizeof(struct lpfc_name));
+ break;
+ default:
+ return;
+ }
+- memcpy(els_data.wwpn, &ndlp->nlp_portname, sizeof(struct lpfc_name));
+- memcpy(els_data.wwnn, &ndlp->nlp_nodename, sizeof(struct lpfc_name));
+- fc_host_post_vendor_event(shost,
+- fc_get_event_number(),
+- sizeof(els_data),
+- (char *)&els_data,
+- SCSI_NL_VID_TYPE_PCI | PCI_VENDOR_ID_EMULEX);
++ memcpy(els_data->wwpn, &ndlp->nlp_portname, sizeof(struct lpfc_name));
++ memcpy(els_data->wwnn, &ndlp->nlp_nodename, sizeof(struct lpfc_name));
++ if (*payload == ELS_CMD_LOGO) {
++ fc_host_post_vendor_event(shost,
++ fc_get_event_number(),
++ sizeof(struct lpfc_logo_event),
++ (char *)logo_data,
++ LPFC_NL_VENDOR_ID);
++ kfree(logo_data);
++ } else {
++ fc_host_post_vendor_event(shost,
++ fc_get_event_number(),
++ sizeof(struct lpfc_els_event_header),
++ (char *)els_data,
++ LPFC_NL_VENDOR_ID);
++ kfree(els_data);
++ }
+
+ return;
+ }
+@@ -5700,7 +5790,7 @@ lpfc_els_unsol_buffer(struct lpfc_hba *phba, struct lpfc_sli_ring *pring,
+ phba->fc_stat.elsRcvPLOGI++;
+ ndlp = lpfc_plogi_confirm_nport(phba, payload, ndlp);
+
+- lpfc_send_els_event(vport, ndlp, cmd);
++ lpfc_send_els_event(vport, ndlp, payload);
+ if (vport->port_state < LPFC_DISC_AUTH) {
+ if (!(phba->pport->fc_flag & FC_PT2PT) ||
+ (phba->pport->fc_flag & FC_PT2PT_PLOGI)) {
+@@ -5738,6 +5828,7 @@ lpfc_els_unsol_buffer(struct lpfc_hba *phba, struct lpfc_sli_ring *pring,
+ did, vport->port_state, ndlp->nlp_flag);
+
+ phba->fc_stat.elsRcvLOGO++;
++ lpfc_send_els_event(vport, ndlp, payload);
+ if (vport->port_state < LPFC_DISC_AUTH) {
+ rjt_err = LSRJT_UNABLE_TPC;
+ break;
+@@ -5750,7 +5841,7 @@ lpfc_els_unsol_buffer(struct lpfc_hba *phba, struct lpfc_sli_ring *pring,
+ did, vport->port_state, ndlp->nlp_flag);
+
+ phba->fc_stat.elsRcvPRLO++;
+- lpfc_send_els_event(vport, ndlp, cmd);
++ lpfc_send_els_event(vport, ndlp, payload);
+ if (vport->port_state < LPFC_DISC_AUTH) {
+ rjt_err = LSRJT_UNABLE_TPC;
+ break;
+@@ -5768,7 +5859,7 @@ lpfc_els_unsol_buffer(struct lpfc_hba *phba, struct lpfc_sli_ring *pring,
+ "RCV ADISC: did:x%x/ste:x%x flg:x%x",
+ did, vport->port_state, ndlp->nlp_flag);
+
+- lpfc_send_els_event(vport, ndlp, cmd);
++ lpfc_send_els_event(vport, ndlp, payload);
+ phba->fc_stat.elsRcvADISC++;
+ if (vport->port_state < LPFC_DISC_AUTH) {
+ rjt_err = LSRJT_UNABLE_TPC;
+diff --git a/drivers/scsi/lpfc/lpfc_hbadisc.c b/drivers/scsi/lpfc/lpfc_hbadisc.c
+index 3d825ff..502a9a5 100644
+--- a/drivers/scsi/lpfc/lpfc_hbadisc.c
++++ b/drivers/scsi/lpfc/lpfc_hbadisc.c
+@@ -391,7 +391,7 @@ lpfc_send_fastpath_evt(struct lpfc_hba *phba,
+ evt_data_size = sizeof(fast_evt_data->un.
+ read_check_error);
+ } else if ((evt_sub_category == LPFC_EVENT_FABRIC_BUSY) ||
+- (evt_sub_category == IOSTAT_NPORT_BSY)) {
++ (evt_sub_category == LPFC_EVENT_PORT_BUSY)) {
+ evt_data = (char *) &fast_evt_data->un.fabric_evt;
+ evt_data_size = sizeof(fast_evt_data->un.fabric_evt);
+ } else {
+@@ -428,7 +428,7 @@ lpfc_send_fastpath_evt(struct lpfc_hba *phba,
+ fc_get_event_number(),
+ evt_data_size,
+ evt_data,
+- SCSI_NL_VID_TYPE_PCI | PCI_VENDOR_ID_EMULEX);
++ LPFC_NL_VENDOR_ID);
+
+ lpfc_free_fast_evt(phba, fast_evt_data);
+ return;
+@@ -635,20 +635,25 @@ lpfc_do_work(void *p)
+ set_user_nice(current, -20);
+ phba->data_flags = 0;
+
+- while (1) {
++ while (!kthread_should_stop()) {
+ /* wait and check worker queue activities */
+ rc = wait_event_interruptible(phba->work_waitq,
+ (test_and_clear_bit(LPFC_DATA_READY,
+ &phba->data_flags)
+ || kthread_should_stop()));
+- BUG_ON(rc);
+-
+- if (kthread_should_stop())
++ /* Signal wakeup shall terminate the worker thread */
++ if (rc) {
++ lpfc_printf_log(phba, KERN_ERR, LOG_ELS,
++ "0433 Wakeup on signal: rc=x%x\n", rc);
+ break;
++ }
+
+ /* Attend pending lpfc data processing */
+ lpfc_work_done(phba);
+ }
++ phba->worker_thread = NULL;
++ lpfc_printf_log(phba, KERN_INFO, LOG_ELS,
++ "0432 Worker thread stopped.\n");
+ return 0;
+ }
+
+@@ -1895,6 +1900,36 @@ lpfc_disable_node(struct lpfc_vport *vport, struct lpfc_nodelist *ndlp)
+ lpfc_nlp_state_cleanup(vport, ndlp, ndlp->nlp_state,
+ NLP_STE_UNUSED_NODE);
+ }
++/**
++ * lpfc_initialize_node: Initialize all fields of node object.
++ * @vport: Pointer to Virtual Port object.
++ * @ndlp: Pointer to FC node object.
++ * @did: FC_ID of the node.
++ * This function is always called when a node object needs to
++ * be initialized. It initializes all the fields of the node
++ * object.
++ **/
++static inline void
++lpfc_initialize_node(struct lpfc_vport *vport, struct lpfc_nodelist *ndlp,
++ uint32_t did)
++{
++ INIT_LIST_HEAD(&ndlp->els_retry_evt.evt_listp);
++ INIT_LIST_HEAD(&ndlp->dev_loss_evt.evt_listp);
++ INIT_LIST_HEAD(&ndlp->els_reauth_evt.evt_listp);
++ init_timer(&ndlp->nlp_delayfunc);
++ ndlp->nlp_delayfunc.function = lpfc_els_retry_delay;
++ ndlp->nlp_delayfunc.data = (unsigned long)ndlp;
++ init_timer(&ndlp->nlp_reauth_tmr);
++ ndlp->nlp_reauth_tmr.function = lpfc_reauth_node;
++ ndlp->nlp_reauth_tmr.data = (unsigned long)ndlp;
++ ndlp->nlp_DID = did;
++ ndlp->vport = vport;
++ ndlp->nlp_sid = NLP_NO_SID;
++ kref_init(&ndlp->kref);
++ NLP_INT_NODE_ACT(ndlp);
++ atomic_set(&ndlp->cmd_pending, 0);
++ ndlp->cmd_qdepth = LPFC_MAX_TGT_QDEPTH;
++}
+
+ struct lpfc_nodelist *
+ lpfc_enable_node(struct lpfc_vport *vport, struct lpfc_nodelist *ndlp,
+@@ -1935,21 +1970,7 @@ lpfc_enable_node(struct lpfc_vport *vport, struct lpfc_nodelist *ndlp,
+ /* re-initialize ndlp except of ndlp linked list pointer */
+ memset((((char *)ndlp) + sizeof (struct list_head)), 0,
+ sizeof (struct lpfc_nodelist) - sizeof (struct list_head));
+- INIT_LIST_HEAD(&ndlp->els_retry_evt.evt_listp);
+- INIT_LIST_HEAD(&ndlp->dev_loss_evt.evt_listp);
+- INIT_LIST_HEAD(&ndlp->els_reauth_evt.evt_listp);
+- init_timer(&ndlp->nlp_delayfunc);
+- ndlp->nlp_delayfunc.function = lpfc_els_retry_delay;
+- ndlp->nlp_delayfunc.data = (unsigned long)ndlp;
+- init_timer(&ndlp->nlp_reauth_tmr);
+- ndlp->nlp_reauth_tmr.function = lpfc_reauth_node;
+- ndlp->nlp_reauth_tmr.data = (unsigned long)ndlp;
+- ndlp->nlp_DID = did;
+- ndlp->vport = vport;
+- ndlp->nlp_sid = NLP_NO_SID;
+- /* ndlp management re-initialize */
+- kref_init(&ndlp->kref);
+- NLP_INT_NODE_ACT(ndlp);
++ lpfc_initialize_node(vport, ndlp, did);
+
+ spin_unlock_irqrestore(&phba->ndlp_lock, flags);
+
+@@ -2561,7 +2582,8 @@ lpfc_disc_list_loopmap(struct lpfc_vport *vport)
+ alpa = lpfcAlpaArray[index];
+ if ((vport->fc_myDID & 0xff) == alpa)
+ continue;
+- lpfc_setup_disc_node(vport, alpa);
++ if (!(phba->link_flag & LS_LOOPBACK_MODE))
++ lpfc_setup_disc_node(vport, alpa);
+ }
+ }
+ return;
+@@ -3204,23 +3226,8 @@ lpfc_nlp_init(struct lpfc_vport *vport, struct lpfc_nodelist *ndlp,
+ uint32_t did)
+ {
+ memset(ndlp, 0, sizeof (struct lpfc_nodelist));
+- INIT_LIST_HEAD(&ndlp->els_retry_evt.evt_listp);
+- INIT_LIST_HEAD(&ndlp->dev_loss_evt.evt_listp);
+- INIT_LIST_HEAD(&ndlp->els_reauth_evt.evt_listp);
+- init_timer(&ndlp->nlp_delayfunc);
+- ndlp->nlp_delayfunc.function = lpfc_els_retry_delay;
+- ndlp->nlp_delayfunc.data = (unsigned long)ndlp;
+- init_timer(&ndlp->nlp_reauth_tmr);
+- ndlp->nlp_reauth_tmr.function = lpfc_reauth_node;
+- ndlp->nlp_reauth_tmr.data = (unsigned long)ndlp;
+- ndlp->nlp_DID = did;
+- ndlp->vport = vport;
+- ndlp->nlp_sid = NLP_NO_SID;
++ lpfc_initialize_node(vport, ndlp, did);
+ INIT_LIST_HEAD(&ndlp->nlp_listp);
+- kref_init(&ndlp->kref);
+- NLP_INT_NODE_ACT(ndlp);
+- atomic_set(&ndlp->cmd_pending, 0);
+- ndlp->cmd_qdepth = LPFC_MAX_TGT_QDEPTH;
+
+ lpfc_debugfs_disc_trc(vport, LPFC_DISC_TRC_NODE,
+ "node init: did:x%x",
+diff --git a/drivers/scsi/lpfc/lpfc_hw.h b/drivers/scsi/lpfc/lpfc_hw.h
+index 9fc50ef..90d0c5a 100644
+--- a/drivers/scsi/lpfc/lpfc_hw.h
++++ b/drivers/scsi/lpfc/lpfc_hw.h
+@@ -66,6 +66,9 @@
+
+ #define BUF_SZ_4K 4096
+
++/* vendor ID used in SCSI netlink calls */
++#define LPFC_NL_VENDOR_ID (SCSI_NL_VID_TYPE_PCI | PCI_VENDOR_ID_EMULEX)
++
+ /* Common Transport structures and definitions */
+
+ union CtRevisionId {
+@@ -891,6 +894,12 @@ typedef struct _D_ID { /* Structure is in Big Endian format */
+ } un;
+ } D_ID;
+
++#define RSCN_ADDRESS_FORMAT_PORT 0x0
++#define RSCN_ADDRESS_FORMAT_AREA 0x1
++#define RSCN_ADDRESS_FORMAT_DOMAIN 0x2
++#define RSCN_ADDRESS_FORMAT_FABRIC 0x3
++#define RSCN_ADDRESS_FORMAT_MASK 0x3
++
+ /*
+ * Structure to define all ELS Payload types
+ */
+diff --git a/drivers/scsi/lpfc/lpfc_init.c b/drivers/scsi/lpfc/lpfc_init.c
+index c19c631..c0ea4fc 100644
+--- a/drivers/scsi/lpfc/lpfc_init.c
++++ b/drivers/scsi/lpfc/lpfc_init.c
+@@ -879,8 +879,7 @@ lpfc_handle_eratt(struct lpfc_hba *phba)
+ fc_host_post_vendor_event(shost, fc_get_event_number(),
+ sizeof(board_event),
+ (char *) &board_event,
+- SCSI_NL_VID_TYPE_PCI
+- | PCI_VENDOR_ID_EMULEX);
++ LPFC_NL_VENDOR_ID);
+
+ if (phba->work_hs & HS_FFER6) {
+ /* Re-establishing Link */
+@@ -2383,6 +2382,98 @@ lpfc_disable_msix(struct lpfc_hba *phba)
+ }
+
+ /**
++ * lpfc_enable_intr: Enable device interrupt.
++ * @phba: pointer to lpfc hba data structure.
++ *
++ * This routine is invoked to enable device interrupt and associate driver's
++ * interrupt handler(s) to interrupt vector(s). Depending on the interrupt
++ * mode configured to the driver, the driver will try to fallback from the
++ * configured interrupt mode to an interrupt mode which is supported by the
++ * platform, kernel, and device in the order of: MSI-X -> MSI -> IRQ.
++ *
++ * Return codes
++ * 0 - successful
++ * other values - error
++ **/
++static int
++lpfc_enable_intr(struct lpfc_hba *phba)
++{
++ int retval = 0;
++
++ /* Starting point of configuring interrupt method */
++ phba->intr_type = NONE;
++
++ if (phba->cfg_use_msi == 2) {
++ /* Need to issue conf_port mbox cmd before conf_msi mbox cmd */
++ retval = lpfc_sli_config_port(phba, 3);
++ if (retval)
++ lpfc_printf_log(phba, KERN_INFO, LOG_INIT,
++ "0478 Firmware not capable of SLI 3 mode.\n");
++ else {
++ lpfc_printf_log(phba, KERN_INFO, LOG_INIT,
++ "0479 Firmware capable of SLI 3 mode.\n");
++ /* Now, try to enable MSI-X interrupt mode */
++ retval = lpfc_enable_msix(phba);
++ if (!retval) {
++ phba->intr_type = MSIX;
++ lpfc_printf_log(phba, KERN_INFO, LOG_INIT,
++ "0480 enable MSI-X mode.\n");
++ }
++ }
++ }
++
++ /* Fallback to MSI if MSI-X initialization failed */
++ if (phba->cfg_use_msi >= 1 && phba->intr_type == NONE) {
++ retval = pci_enable_msi(phba->pcidev);
++ if (!retval) {
++ phba->intr_type = MSI;
++ lpfc_printf_log(phba, KERN_INFO, LOG_INIT,
++ "0481 enable MSI mode.\n");
++ } else
++ lpfc_printf_log(phba, KERN_INFO, LOG_INIT,
++ "0470 enable IRQ mode.\n");
++ }
++
++ /* MSI-X is the only case that doesn't need to call request_irq */
++ if (phba->intr_type != MSIX) {
++ retval = request_irq(phba->pcidev->irq, lpfc_intr_handler,
++ IRQF_SHARED, LPFC_DRIVER_NAME, phba);
++ if (retval) {
++ if (phba->intr_type == MSI)
++ pci_disable_msi(phba->pcidev);
++ lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
++ "0471 Enable interrupt handler "
++ "failed\n");
++ } else if (phba->intr_type != MSI)
++ phba->intr_type = INTx;
++ }
++
++ return retval;
++}
++
++/**
++ * lpfc_disable_intr: Disable device interrupt.
++ * @phba: pointer to lpfc hba data structure.
++ *
++ * This routine is invoked to disable device interrupt and disassociate the
++ * driver's interrupt handler(s) from interrupt vector(s). Depending on the
++ * interrupt mode, the driver will release the interrupt vector(s) for the
++ * message signaled interrupt.
++ **/
++static void
++lpfc_disable_intr(struct lpfc_hba *phba)
++{
++ if (phba->intr_type == MSIX)
++ lpfc_disable_msix(phba);
++ else {
++ free_irq(phba->pcidev->irq, phba);
++ if (phba->intr_type == MSI)
++ pci_disable_msi(phba->pcidev);
++ }
++ return;
++}
++
++/**
+ * lpfc_pci_probe_one: lpfc PCI probe func to register device to PCI subsystem.
+ * @pdev: pointer to PCI device
+ * @pid: pointer to PCI device identifier
+@@ -2634,7 +2725,6 @@ lpfc_pci_probe_one(struct pci_dev *pdev, const struct pci_device_id *pid)
+ lpfc_debugfs_initialize(vport);
+
+ pci_set_drvdata(pdev, shost);
+- phba->intr_type = NONE;
+
+ phba->MBslimaddr = phba->slim_memmap_p;
+ phba->HAregaddr = phba->ctrl_regs_memmap_p + HA_REG_OFFSET;
+@@ -2643,48 +2733,11 @@ lpfc_pci_probe_one(struct pci_dev *pdev, const struct pci_device_id *pid)
+ phba->HCregaddr = phba->ctrl_regs_memmap_p + HC_REG_OFFSET;
+
+ /* Configure and enable interrupt */
+- if (phba->cfg_use_msi == 2) {
+- /* Need to issue conf_port mbox cmd before conf_msi mbox cmd */
+- error = lpfc_sli_config_port(phba, 3);
+- if (error)
+- lpfc_printf_log(phba, KERN_INFO, LOG_INIT,
+- "0427 Firmware not capable of SLI 3 mode.\n");
+- else {
+- lpfc_printf_log(phba, KERN_INFO, LOG_INIT,
+- "0426 Firmware capable of SLI 3 mode.\n");
+- /* Now, try to enable MSI-X interrupt mode */
+- error = lpfc_enable_msix(phba);
+- if (!error) {
+- phba->intr_type = MSIX;
+- lpfc_printf_log(phba, KERN_INFO, LOG_INIT,
+- "0430 enable MSI-X mode.\n");
+- }
+- }
+- }
+-
+- /* Fallback to MSI if MSI-X initialization failed */
+- if (phba->cfg_use_msi >= 1 && phba->intr_type == NONE) {
+- retval = pci_enable_msi(phba->pcidev);
+- if (!retval) {
+- phba->intr_type = MSI;
+- lpfc_printf_log(phba, KERN_INFO, LOG_INIT,
+- "0473 enable MSI mode.\n");
+- } else
+- lpfc_printf_log(phba, KERN_INFO, LOG_INIT,
+- "0452 enable IRQ mode.\n");
+- }
+-
+- /* MSI-X is the only case the doesn't need to call request_irq */
+- if (phba->intr_type != MSIX) {
+- retval = request_irq(phba->pcidev->irq, lpfc_intr_handler,
+- IRQF_SHARED, LPFC_DRIVER_NAME, phba);
+- if (retval) {
+- lpfc_printf_log(phba, KERN_ERR, LOG_INIT, "0451 Enable "
+- "interrupt handler failed\n");
+- error = retval;
+- goto out_disable_msi;
+- } else if (phba->intr_type != MSI)
+- phba->intr_type = INTx;
++ error = lpfc_enable_intr(phba);
++ if (error) {
++ lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
++ "0426 Failed to enable interrupt.\n");
++ goto out_destroy_port;
+ }
+
+ phba->dfc_host = lpfcdfc_host_add(pdev, shost, phba);
+@@ -2731,7 +2784,7 @@ lpfc_pci_probe_one(struct pci_dev *pdev, const struct pci_device_id *pid)
+ fc_host_post_vendor_event(shost, fc_get_event_number(),
+ sizeof(adapter_event),
+ (char *) &adapter_event,
+- SCSI_NL_VID_TYPE_PCI | PCI_VENDOR_ID_EMULEX);
++ LPFC_NL_VENDOR_ID);
+
+ scsi_scan_host(shost);
+
+@@ -2747,15 +2800,8 @@ out_free_irq:
+ lpfcdfc_host_del(phba->dfc_host);
+ lpfc_stop_phba_timers(phba);
+ phba->pport->work_port_events = 0;
+-
+- if (phba->intr_type == MSIX)
+- lpfc_disable_msix(phba);
+- else
+- free_irq(phba->pcidev->irq, phba);
+-
+-out_disable_msi:
+- if (phba->intr_type == MSI)
+- pci_disable_msi(phba->pcidev);
++ lpfc_disable_intr(phba);
++out_destroy_port:
+ destroy_port(vport);
+ out_kthread_stop:
+ kthread_stop(phba->worker_thread);
+@@ -2796,7 +2842,7 @@ out:
+ * @pdev: pointer to PCI device
+ *
+ * This routine is to be registered to the kernel's PCI subsystem. When an
+- * Emulex HBA is removed from PCI bus. It perform all the necessary cleanup
++ * Emulex HBA is removed from PCI bus, it performs all the necessary cleanup
+ * for the HBA device to be removed from the PCI subsystem properly.
+ **/
+ static void __devexit
+@@ -2804,12 +2850,11 @@ lpfc_pci_remove_one(struct pci_dev *pdev)
+ {
+ struct Scsi_Host *shost = pci_get_drvdata(pdev);
+ struct lpfc_vport *vport = (struct lpfc_vport *) shost->hostdata;
++ struct lpfc_vport **vports;
+ struct lpfc_hba *phba = vport->phba;
++ int i;
+ int bars = pci_select_bars(pdev, IORESOURCE_MEM);
+
+- /* In case PCI channel permanently disabled, rescan SCSI devices */
+- if (pdev->error_state == pci_channel_io_perm_failure)
+- lpfc_scsi_dev_rescan(phba);
+ lpfcdfc_host_del(phba->dfc_host);
+ phba->dfc_host = NULL;
+
+@@ -2822,6 +2867,14 @@ lpfc_pci_remove_one(struct pci_dev *pdev)
+
+ kthread_stop(phba->worker_thread);
+
++ /* Release all the vports against this physical port */
++ vports = lpfc_create_vport_work_array(phba);
++ if (vports != NULL)
++ for (i = 1; i <= phba->max_vpi && vports[i] != NULL; i++)
++ fc_vport_terminate(vports[i]->fc_vport);
++ lpfc_destroy_vport_work_array(phba, vports);
++
++ /* Remove FC host and then SCSI host with the physical port */
+ fc_remove_host(shost);
+ scsi_remove_host(shost);
+ lpfc_cleanup(vport);
+@@ -2841,13 +2894,8 @@ lpfc_pci_remove_one(struct pci_dev *pdev)
+
+ lpfc_debugfs_terminate(vport);
+
+- if (phba->intr_type == MSIX)
+- lpfc_disable_msix(phba);
+- else {
+- free_irq(phba->pcidev->irq, phba);
+- if (phba->intr_type == MSI)
+- pci_disable_msi(phba->pcidev);
+- }
++ /* Disable interrupt */
++ lpfc_disable_intr(phba);
+
+ pci_set_drvdata(pdev, NULL);
+ scsi_host_put(shost);
+@@ -2879,6 +2927,111 @@ lpfc_pci_remove_one(struct pci_dev *pdev)
+ }
+
+ /**
++ * lpfc_pci_suspend_one: lpfc PCI func to suspend device for power management.
++ * @pdev: pointer to PCI device
++ * @msg: power management message
++ *
++ * This routine is to be registered to the kernel's PCI subsystem to support
++ * system Power Management (PM). When PM invokes this method, it quiesces the
++ * device by stopping the driver's worker thread for the device, turning off
++ * device's interrupt and DMA, and bring the device offline. Note that as the
++ * driver implements the minimum PM requirements to a power-aware driver's PM
++ * support for suspend/resume -- all the possible PM messages (SUSPEND,
++ * HIBERNATE, FREEZE) to the suspend() method call will be treated as SUSPEND
++ * and the driver will fully reinitialize its device during resume() method
++ * call, the driver will set device to PCI_D3hot state in PCI config space
++ * instead of setting it according to the @msg provided by the PM.
++ *
++ * Return code
++ * 0 - driver suspended the device
++ * Error otherwise
++ **/
++static int
++lpfc_pci_suspend_one(struct pci_dev *pdev, pm_message_t msg)
++{
++ struct Scsi_Host *shost = pci_get_drvdata(pdev);
++ struct lpfc_hba *phba = ((struct lpfc_vport *)shost->hostdata)->phba;
++
++ lpfc_printf_log(phba, KERN_INFO, LOG_INIT,
++ "0473 PCI device Power Management suspend.\n");
++
++ /* Bring down the device */
++ lpfc_offline_prep(phba);
++ lpfc_offline(phba);
++ kthread_stop(phba->worker_thread);
++
++ /* Disable interrupt from device */
++ lpfc_disable_intr(phba);
++
++ /* Save device state to PCI config space */
++ pci_save_state(pdev);
++ pci_set_power_state(pdev, PCI_D3hot);
++
++ return 0;
++}
++
++/**
++ * lpfc_pci_resume_one: lpfc PCI func to resume device for power management.
++ * @pdev: pointer to PCI device
++ *
++ * This routine is to be registered to the kernel's PCI subsystem to support
++ * system Power Management (PM). When PM invokes this method, it restores
++ * the device's PCI config space state and fully reinitializes the device
++ * and brings it online. Note that as the driver implements the minimum PM
++ * requirements to a power-aware driver's PM for suspend/resume -- all
++ * the possible PM messages (SUSPEND, HIBERNATE, FREEZE) to the suspend()
++ * method call will be treated as SUSPEND and the driver will fully
++ * reinitialize its device during resume() method call, the device will be
++ * set to PCI_D0 directly in PCI config space before restoring the state.
++ *
++ * Return code
++ * 0 - driver resumed the device
++ * Error otherwise
++ **/
++static int
++lpfc_pci_resume_one(struct pci_dev *pdev)
++{
++ struct Scsi_Host *shost = pci_get_drvdata(pdev);
++ struct lpfc_hba *phba = ((struct lpfc_vport *)shost->hostdata)->phba;
++ int error;
++
++ lpfc_printf_log(phba, KERN_INFO, LOG_INIT,
++ "0452 PCI device Power Management resume.\n");
++
++ /* Restore device state from PCI config space */
++ pci_set_power_state(pdev, PCI_D0);
++ pci_restore_state(pdev);
++ if (pdev->is_busmaster)
++ pci_set_master(pdev);
++
++ /* Startup the kernel thread for this host adapter. */
++ phba->worker_thread = kthread_run(lpfc_do_work, phba,
++ "lpfc_worker_%d", phba->brd_no);
++ if (IS_ERR(phba->worker_thread)) {
++ error = PTR_ERR(phba->worker_thread);
++ lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
++ "0434 PM resume failed to start worker "
++ "thread: error=x%x.\n", error);
++ return error;
++ }
++
++ /* Enable interrupt from device */
++ error = lpfc_enable_intr(phba);
++ if (error) {
++ lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
++ "0430 PM resume Failed to enable interrupt: "
++ "error=x%x.\n", error);
++ return error;
++ }
++
++ /* Restart HBA and bring it online */
++ lpfc_sli_brdrestart(phba);
++ lpfc_online(phba);
++
++ return 0;
++}
++
++/**
+ * lpfc_io_error_detected: Driver method for handling PCI I/O error detected.
+ * @pdev: pointer to PCI device.
+ * @state: the current PCI connection state.
+@@ -2921,13 +3074,8 @@ static pci_ers_result_t lpfc_io_error_detected(struct pci_dev *pdev,
+ pring = &psli->ring[psli->fcp_ring];
+ lpfc_sli_abort_iocb_ring(phba, pring);
+
+- if (phba->intr_type == MSIX)
+- lpfc_disable_msix(phba);
+- else {
+- free_irq(phba->pcidev->irq, phba);
+- if (phba->intr_type == MSI)
+- pci_disable_msi(phba->pcidev);
+- }
++ /* Disable interrupt */
++ lpfc_disable_intr(phba);
+
+ /* Request a slot reset. */
+ return PCI_ERS_RESULT_NEED_RESET;
+@@ -2955,7 +3103,7 @@ static pci_ers_result_t lpfc_io_slot_reset(struct pci_dev *pdev)
+ struct Scsi_Host *shost = pci_get_drvdata(pdev);
+ struct lpfc_hba *phba = ((struct lpfc_vport *)shost->hostdata)->phba;
+ struct lpfc_sli *psli = &phba->sli;
+- int error, retval;
++ int error;
+
+ dev_printk(KERN_INFO, &pdev->dev, "recovering from a slot reset.\n");
+ if (pci_enable_device_mem(pdev)) {
+@@ -2971,48 +3119,12 @@ static pci_ers_result_t lpfc_io_slot_reset(struct pci_dev *pdev)
+ spin_unlock_irq(&phba->hbalock);
+
+ /* Enable configured interrupt method */
+- phba->intr_type = NONE;
+- if (phba->cfg_use_msi == 2) {
+- /* Need to issue conf_port mbox cmd before conf_msi mbox cmd */
+- error = lpfc_sli_config_port(phba, 3);
+- if (error)
+- lpfc_printf_log(phba, KERN_INFO, LOG_INIT,
+- "0478 Firmware not capable of SLI 3 mode.\n");
+- else {
+- lpfc_printf_log(phba, KERN_INFO, LOG_INIT,
+- "0479 Firmware capable of SLI 3 mode.\n");
+- /* Now, try to enable MSI-X interrupt mode */
+- error = lpfc_enable_msix(phba);
+- if (!error) {
+- phba->intr_type = MSIX;
+- lpfc_printf_log(phba, KERN_INFO, LOG_INIT,
+- "0480 enable MSI-X mode.\n");
+- }
+- }
+- }
+-
+- /* Fallback to MSI if MSI-X initialization failed */
+- if (phba->cfg_use_msi >= 1 && phba->intr_type == NONE) {
+- retval = pci_enable_msi(phba->pcidev);
+- if (!retval) {
+- phba->intr_type = MSI;
+- lpfc_printf_log(phba, KERN_INFO, LOG_INIT,
+- "0481 enable MSI mode.\n");
+- } else
+- lpfc_printf_log(phba, KERN_INFO, LOG_INIT,
+- "0470 enable IRQ mode.\n");
+- }
+-
+- /* MSI-X is the only case the doesn't need to call request_irq */
+- if (phba->intr_type != MSIX) {
+- retval = request_irq(phba->pcidev->irq, lpfc_intr_handler,
+- IRQF_SHARED, LPFC_DRIVER_NAME, phba);
+- if (retval) {
+- lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
+- "0471 Enable interrupt handler "
+- "failed\n");
+- } else if (phba->intr_type != MSI)
+- phba->intr_type = INTx;
++ error = lpfc_enable_intr(phba);
++ if (error) {
++ lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
++ "0427 Cannot re-enable interrupt after "
++ "slot reset.\n");
++ return PCI_ERS_RESULT_DISCONNECT;
+ }
+
+ /* Take device offline; this will perform cleanup */
+@@ -3130,6 +3242,8 @@ static struct pci_driver lpfc_driver = {
+ .id_table = lpfc_id_table,
+ .probe = lpfc_pci_probe_one,
+ .remove = __devexit_p(lpfc_pci_remove_one),
++ .suspend = lpfc_pci_suspend_one,
++ .resume = lpfc_pci_resume_one,
+ .err_handler = &lpfc_err_handler,
+ };
+
+diff --git a/drivers/scsi/lpfc/lpfc_ioctl.c b/drivers/scsi/lpfc/lpfc_ioctl.c
+index 242bed3..e80d157 100644
+--- a/drivers/scsi/lpfc/lpfc_ioctl.c
++++ b/drivers/scsi/lpfc/lpfc_ioctl.c
+@@ -828,10 +828,10 @@ lpfc_ioctl_send_mgmt_cmd(struct lpfc_hba * phba,
+ rc = EIO;
+
+ send_mgmt_cmd_free_outdmp:
+- spin_lock_irq(shost->host_lock);
+ dfc_cmd_data_free(phba, outdmp);
+ send_mgmt_cmd_free_indmp:
+ dfc_cmd_data_free(phba, indmp);
++ spin_lock_irq(shost->host_lock);
+ send_mgmt_cmd_free_bmpvirt:
+ lpfc_mbuf_free(phba, bmp->virt, bmp->phys);
+ send_mgmt_cmd_free_bmp:
+@@ -2069,14 +2069,14 @@ __dfc_cmd_data_alloc(struct lpfc_hba * phba,
+ cnt)) {
+ goto out;
+ }
+-
++ bpl->tus.f.bdeFlags = BUFF_TYPE_BDE_64;
+ pci_dma_sync_single_for_device(phba->pcidev,
+ dmp->dma.phys, LPFC_BPL_SIZE, PCI_DMA_TODEVICE);
+
+- } else
++ } else {
+ memset((uint8_t *)dmp->dma.virt, 0, cnt);
+- bpl->tus.f.bdeFlags = BUFF_TYPE_BDE_64;
+-
++ bpl->tus.f.bdeFlags = BUFF_TYPE_BDE_64I;
++ }
+ /* build buffer ptr list for IOCB */
+ bpl->addrLow = le32_to_cpu(putPaddrLow(dmp->dma.phys));
+ bpl->addrHigh = le32_to_cpu(putPaddrHigh(dmp->dma.phys));
+diff --git a/drivers/scsi/lpfc/lpfc_menlo.c b/drivers/scsi/lpfc/lpfc_menlo.c
+index 60d3df8..aa36c16 100644
+--- a/drivers/scsi/lpfc/lpfc_menlo.c
++++ b/drivers/scsi/lpfc/lpfc_menlo.c
+@@ -42,6 +42,7 @@
+ #include "lpfc_vport.h"
+
+ #define MENLO_CMD_FW_DOWNLOAD 0x00000002
++#define MENLO_CMD_LOOPBACK 0x00000014
+
+ static void lpfc_menlo_iocb_timeout_cmpl(struct lpfc_hba *,
+ struct lpfc_iocbq *, struct lpfc_iocbq *);
+@@ -686,6 +687,16 @@ lpfc_menlo_write(struct lpfc_hba *phba,
+ } else
+ memcpy((uint8_t *) mlast->dma.virt, buf, count);
+
++ if (sysfs_menlo->cmdhdr.cmd == MENLO_CMD_LOOPBACK) {
++ if (mlast) {
++ tmpptr = (uint32_t *)mlast->dma.virt;
++ if (*(tmpptr+2))
++ phba->link_flag |= LS_LOOPBACK_MODE;
++ else
++ phba->link_flag &= ~LS_LOOPBACK_MODE;
++ }
++ }
++
+ if (sysfs_menlo->cmdhdr.cmd == MENLO_CMD_FW_DOWNLOAD
+ && genreq->offset < hdr_offset) {
+ if (sysfs_menlo->cr.indmp
+diff --git a/drivers/scsi/lpfc/lpfc_nl.h b/drivers/scsi/lpfc/lpfc_nl.h
+index 1accb5a..991ad53 100644
+--- a/drivers/scsi/lpfc/lpfc_nl.h
++++ b/drivers/scsi/lpfc/lpfc_nl.h
+@@ -52,6 +52,13 @@
+ * The payload sent via the fc transport is one-way driver->application.
+ */
+
++/* RSCN event header */
++struct lpfc_rscn_event_header {
++ uint32_t event_type;
++ uint32_t payload_length; /* RSCN data length in bytes */
++ uint32_t rscn_payload[];
++};
++
+ /* els event header */
+ struct lpfc_els_event_header {
+ uint32_t event_type;
+@@ -65,6 +72,7 @@ struct lpfc_els_event_header {
+ #define LPFC_EVENT_PRLO_RCV 0x02
+ #define LPFC_EVENT_ADISC_RCV 0x04
+ #define LPFC_EVENT_LSRJT_RCV 0x08
++#define LPFC_EVENT_LOGO_RCV 0x10
+
+ /* special els lsrjt event */
+ struct lpfc_lsrjt_event {
+@@ -74,6 +82,11 @@ struct lpfc_lsrjt_event {
+ uint32_t explanation;
+ };
+
++/* special els logo event */
++struct lpfc_logo_event {
++ struct lpfc_els_event_header header;
++ uint8_t logo_wwpn[8];
++};
+
+ /* fabric event header */
+ struct lpfc_fabric_event_header {
+@@ -125,6 +138,7 @@ struct lpfc_scsi_varqueuedepth_event {
+ /* special case scsi check condition event */
+ struct lpfc_scsi_check_condition_event {
+ struct lpfc_scsi_event_header scsi_event;
++ uint8_t opcode;
+ uint8_t sense_key;
+ uint8_t asc;
+ uint8_t ascq;
+diff --git a/drivers/scsi/lpfc/lpfc_scsi.c b/drivers/scsi/lpfc/lpfc_scsi.c
+index a116875..a7ea952 100644
+--- a/drivers/scsi/lpfc/lpfc_scsi.c
++++ b/drivers/scsi/lpfc/lpfc_scsi.c
+@@ -147,12 +147,19 @@ lpfc_send_sdev_queuedepth_change_event(struct lpfc_hba *phba,
+ return;
+ }
+
+-/*
+- * This function is called with no lock held when there is a resource
+- * error in driver or in firmware.
+- */
++/**
++ * lpfc_rampdown_queue_depth: Post RAMP_DOWN_QUEUE event to worker thread.
++ * @phba: The Hba for which this call is being executed.
++ *
++ * This routine is called when there is a resource error in driver or firmware.
++ * This routine posts WORKER_RAMP_DOWN_QUEUE event for @phba. This routine
++ * posts at most 1 event each second. This routine wakes up worker thread of
++ * @phba to process WORKER_RAMP_DOWN_QUEUE event.
++ *
++ * This routine should be called with no lock held.
++ **/
+ void
+-lpfc_adjust_queue_depth(struct lpfc_hba *phba)
++lpfc_rampdown_queue_depth(struct lpfc_hba *phba)
+ {
+ unsigned long flags;
+ uint32_t evt_posted;
+@@ -335,22 +342,6 @@ lpfc_scsi_dev_block(struct lpfc_hba *phba)
+ lpfc_destroy_vport_work_array(phba, vports);
+ }
+
+-void
+-lpfc_scsi_dev_rescan(struct lpfc_hba *phba)
+-{
+- struct lpfc_vport **vports;
+- struct Scsi_Host *shost;
+- int i;
+-
+- vports = lpfc_create_vport_work_array(phba);
+- if (vports != NULL)
+- for (i = 0; i <= phba->max_vpi && vports[i] != NULL; i++) {
+- shost = lpfc_shost_from_vport(vports[i]);
+- scsi_scan_host(shost);
+- }
+- lpfc_destroy_vport_work_array(phba, vports);
+-}
+-
+ /*
+ * This routine allocates a scsi buffer, which contains all the necessary
+ * information needed to initiate a SCSI I/O. The non-DMAable buffer region
+@@ -861,7 +852,8 @@ lpfc_scsi_cmd_iocb_cmpl(struct lpfc_hba *phba, struct lpfc_iocbq *pIocbIn,
+
+ lpfc_cmd->result = pIocbOut->iocb.un.ulpWord[4];
+ lpfc_cmd->status = pIocbOut->iocb.ulpStatus;
+- atomic_dec(&pnode->cmd_pending);
++ if (pnode && NLP_CHK_NODE_ACT(pnode))
++ atomic_dec(&pnode->cmd_pending);
+
+ if (lpfc_cmd->status) {
+ if (lpfc_cmd->status == IOSTAT_LOCAL_REJECT &&
+@@ -951,23 +943,31 @@ lpfc_scsi_cmd_iocb_cmpl(struct lpfc_hba *phba, struct lpfc_iocbq *pIocbIn,
+ time_after(jiffies, lpfc_cmd->start_time +
+ msecs_to_jiffies(vport->cfg_max_scsicmpl_time))) {
+ spin_lock_irqsave(sdev->host->host_lock, flags);
+- if ((pnode->cmd_qdepth > atomic_read(&pnode->cmd_pending) &&
+- (atomic_read(&pnode->cmd_pending) > LPFC_MIN_TGT_QDEPTH) &&
+- ((cmd->cmnd[0] == READ_10) || (cmd->cmnd[0] == WRITE_10))))
+- pnode->cmd_qdepth = atomic_read(&pnode->cmd_pending);
+-
+- pnode->last_change_time = jiffies;
++ if (pnode && NLP_CHK_NODE_ACT(pnode)) {
++ if (pnode->cmd_qdepth >
++ atomic_read(&pnode->cmd_pending) &&
++ (atomic_read(&pnode->cmd_pending) >
++ LPFC_MIN_TGT_QDEPTH) &&
++ ((cmd->cmnd[0] == READ_10) ||
++ (cmd->cmnd[0] == WRITE_10)))
++ pnode->cmd_qdepth =
++ atomic_read(&pnode->cmd_pending);
++
++ pnode->last_change_time = jiffies;
++ }
+ spin_unlock_irqrestore(sdev->host->host_lock, flags);
+- } else if ((pnode->cmd_qdepth < LPFC_MAX_TGT_QDEPTH) &&
++ } else if (pnode && NLP_CHK_NODE_ACT(pnode)) {
++ if ((pnode->cmd_qdepth < LPFC_MAX_TGT_QDEPTH) &&
+ time_after(jiffies, pnode->last_change_time +
+- msecs_to_jiffies(LPFC_TGTQ_INTERVAL))) {
+- spin_lock_irqsave(sdev->host->host_lock, flags);
+- pnode->cmd_qdepth += pnode->cmd_qdepth *
+- LPFC_TGTQ_RAMPUP_PCENT / 100;
+- if (pnode->cmd_qdepth > LPFC_MAX_TGT_QDEPTH)
+- pnode->cmd_qdepth = LPFC_MAX_TGT_QDEPTH;
+- pnode->last_change_time = jiffies;
+- spin_unlock_irqrestore(sdev->host->host_lock, flags);
++ msecs_to_jiffies(LPFC_TGTQ_INTERVAL))) {
++ spin_lock_irqsave(sdev->host->host_lock, flags);
++ pnode->cmd_qdepth += pnode->cmd_qdepth *
++ LPFC_TGTQ_RAMPUP_PCENT / 100;
++ if (pnode->cmd_qdepth > LPFC_MAX_TGT_QDEPTH)
++ pnode->cmd_qdepth = LPFC_MAX_TGT_QDEPTH;
++ pnode->last_change_time = jiffies;
++ spin_unlock_irqrestore(sdev->host->host_lock, flags);
++ }
+ }
+
+ lpfc_scsi_unprep_dma_buf(phba, lpfc_cmd);
+@@ -1363,13 +1363,13 @@ lpfc_queuecommand(struct scsi_cmnd *cmnd, void (*done) (struct scsi_cmnd *))
+ cmnd->result = ScsiResult(DID_TRANSPORT_DISRUPTED, 0);
+ goto out_fail_command;
+ }
+-
+- if (atomic_read(&ndlp->cmd_pending) >= ndlp->cmd_qdepth)
++ if (vport->cfg_max_scsicmpl_time &&
++ (atomic_read(&ndlp->cmd_pending) >= ndlp->cmd_qdepth))
+ goto out_host_busy;
+
+ lpfc_cmd = lpfc_get_scsi_buf(phba);
+ if (lpfc_cmd == NULL) {
+- lpfc_adjust_queue_depth(phba);
++ lpfc_rampdown_queue_depth(phba);
+
+ lpfc_printf_vlog(vport, KERN_INFO, LOG_FCP,
+ "0707 driver's buffer pool is empty, "
+@@ -1397,9 +1397,10 @@ lpfc_queuecommand(struct scsi_cmnd *cmnd, void (*done) (struct scsi_cmnd *))
+ atomic_inc(&ndlp->cmd_pending);
+ err = lpfc_sli_issue_iocb(phba, &phba->sli.ring[psli->fcp_ring],
+ &lpfc_cmd->cur_iocbq, SLI_IOCB_RET_IOCB);
+- if (err)
++ if (err) {
++ atomic_dec(&ndlp->cmd_pending);
+ goto out_host_busy_free_buf;
+-
++ }
+ if (phba->cfg_poll & ENABLE_FCP_RING_POLLING) {
+ lpfc_sli_poll_fcp_ring(phba);
+ if (phba->cfg_poll & DISABLE_FCP_RING_INT)
+@@ -1409,7 +1410,6 @@ lpfc_queuecommand(struct scsi_cmnd *cmnd, void (*done) (struct scsi_cmnd *))
+ return 0;
+
+ out_host_busy_free_buf:
+- atomic_dec(&ndlp->cmd_pending);
+ lpfc_scsi_unprep_dma_buf(phba, lpfc_cmd);
+ lpfc_release_scsi_buf(phba, lpfc_cmd);
+ out_host_busy:
+@@ -1575,7 +1575,7 @@ lpfc_device_reset_handler(struct scsi_cmnd *cmnd)
+ fc_get_event_number(),
+ sizeof(scsi_event),
+ (char *)&scsi_event,
+- SCSI_NL_VID_TYPE_PCI | PCI_VENDOR_ID_EMULEX);
++ LPFC_NL_VENDOR_ID);
+
+ if (!rdata || pnode->nlp_state != NLP_STE_MAPPED_NODE) {
+ lpfc_printf_vlog(vport, KERN_ERR, LOG_FCP,
+@@ -1672,7 +1672,7 @@ lpfc_bus_reset_handler(struct scsi_cmnd *cmnd)
+ fc_get_event_number(),
+ sizeof(scsi_event),
+ (char *)&scsi_event,
+- SCSI_NL_VID_TYPE_PCI | PCI_VENDOR_ID_EMULEX);
++ LPFC_NL_VENDOR_ID);
+
+ lpfc_block_error_handler(cmnd);
+ /*
+diff --git a/drivers/scsi/lpfc/lpfc_sli.c b/drivers/scsi/lpfc/lpfc_sli.c
+index d4341df..ac78493 100644
+--- a/drivers/scsi/lpfc/lpfc_sli.c
++++ b/drivers/scsi/lpfc/lpfc_sli.c
+@@ -1985,7 +1985,7 @@ lpfc_sli_handle_fast_ring_event(struct lpfc_hba *phba,
+ if ((irsp->ulpStatus == IOSTAT_LOCAL_REJECT) &&
+ (irsp->un.ulpWord[4] == IOERR_NO_RESOURCES)) {
+ spin_unlock_irqrestore(&phba->hbalock, iflag);
+- lpfc_adjust_queue_depth(phba);
++ lpfc_rampdown_queue_depth(phba);
+ spin_lock_irqsave(&phba->hbalock, iflag);
+ }
+
+@@ -2228,7 +2228,7 @@ lpfc_sli_handle_slow_ring_event(struct lpfc_hba *phba,
+ if ((irsp->ulpStatus == IOSTAT_LOCAL_REJECT) &&
+ (irsp->un.ulpWord[4] == IOERR_NO_RESOURCES)) {
+ spin_unlock_irqrestore(&phba->hbalock, iflag);
+- lpfc_adjust_queue_depth(phba);
++ lpfc_rampdown_queue_depth(phba);
+ spin_lock_irqsave(&phba->hbalock, iflag);
+ }
+
+@@ -2793,7 +2793,6 @@ lpfc_sli_brdrestart(struct lpfc_hba *phba)
+ {
+ MAILBOX_t *mb;
+ struct lpfc_sli *psli;
+- uint16_t skip_post;
+ volatile uint32_t word0;
+ void __iomem *to_slim;
+
+@@ -2818,13 +2817,10 @@ lpfc_sli_brdrestart(struct lpfc_hba *phba)
+ readl(to_slim); /* flush */
+
+ /* Only skip post after fc_ffinit is completed */
+- if (phba->pport->port_state) {
+- skip_post = 1;
++ if (phba->pport->port_state)
+ word0 = 1; /* This is really setting up word1 */
+- } else {
+- skip_post = 0;
++ else
+ word0 = 0; /* This is really setting up word1 */
+- }
+ to_slim = phba->MBslimaddr + sizeof (uint32_t);
+ writel(*(uint32_t *) mb, to_slim);
+ readl(to_slim); /* flush */
+@@ -2838,10 +2834,8 @@ lpfc_sli_brdrestart(struct lpfc_hba *phba)
+ memset(&psli->lnk_stat_offsets, 0, sizeof(psli->lnk_stat_offsets));
+ psli->stats_start = get_seconds();
+
+- if (skip_post)
+- mdelay(100);
+- else
+- mdelay(2000);
++ /* Give the INITFF and Post time to settle. */
++ mdelay(100);
+
+ lpfc_hba_down_post(phba);
+
+@@ -3087,7 +3081,6 @@ lpfc_sli_config_port(struct lpfc_hba *phba, int sli_mode)
+ spin_unlock_irq(&phba->hbalock);
+ phba->pport->port_state = LPFC_VPORT_UNKNOWN;
+ lpfc_sli_brdrestart(phba);
+- msleep(2500);
+ rc = lpfc_sli_chipset_init(phba);
+ if (rc)
+ break;
+@@ -4041,7 +4034,7 @@ lpfc_sli_async_event_handler(struct lpfc_hba * phba,
+ shost = lpfc_shost_from_vport(phba->pport);
+ fc_host_post_vendor_event(shost, fc_get_event_number(),
+ sizeof(temp_event_data), (char *) &temp_event_data,
+- SCSI_NL_VID_TYPE_PCI | PCI_VENDOR_ID_EMULEX);
++ LPFC_NL_VENDOR_ID);
+
+ }
+
+@@ -5220,6 +5213,10 @@ lpfc_sli_check_eratt(struct lpfc_hba *phba)
+ {
+ uint32_t ha_copy;
+
++ /* If PCI channel is offline, don't process it */
++ if (unlikely(pci_channel_offline(phba->pcidev)))
++ return 0;
++
+ /* If somebody is waiting to handle an eratt, don't process it
+ * here. The brdkill function will do this.
+ */
+diff --git a/drivers/scsi/lpfc/lpfc_version.h b/drivers/scsi/lpfc/lpfc_version.h
+index 899a337..a42cef2 100644
+--- a/drivers/scsi/lpfc/lpfc_version.h
++++ b/drivers/scsi/lpfc/lpfc_version.h
+@@ -18,7 +18,7 @@
+ * included with this package. *
+ *******************************************************************/
+
+-#define LPFC_DRIVER_VERSION "8.2.8.1"
++#define LPFC_DRIVER_VERSION "8.2.8.3"
+
+ #define LPFC_DRIVER_NAME "lpfc"
+ #define LPFC_SP_DRIVER_HANDLER_NAME "lpfc:sp"
+diff --git a/drivers/scsi/lpfc/lpfc_vport.c b/drivers/scsi/lpfc/lpfc_vport.c
+index 8761840..c3a3f6e 100644
+--- a/drivers/scsi/lpfc/lpfc_vport.c
++++ b/drivers/scsi/lpfc/lpfc_vport.c
+@@ -605,6 +605,8 @@ lpfc_vport_delete(struct fc_vport *fc_vport)
+ spin_unlock_irq(&phba->hbalock);
+ kfree(vport->vname);
+ lpfc_debugfs_terminate(vport);
++
++ /* Remove FC host and then SCSI host with the vport */
+ fc_remove_host(lpfc_shost_from_vport(vport));
+ scsi_remove_host(lpfc_shost_from_vport(vport));
+
+@@ -689,8 +691,6 @@ lpfc_vport_delete(struct fc_vport *fc_vport)
+ }
+ vport->unreg_vpi_cmpl = VPORT_INVAL;
+ timeout = msecs_to_jiffies(phba->fc_ratov * 2000);
+- if (ndlp->nlp_state == NLP_STE_UNUSED_NODE)
+- goto skip_logo;
+ if (!lpfc_issue_els_npiv_logo(vport, ndlp))
+ while (vport->unreg_vpi_cmpl == VPORT_INVAL && timeout)
+ timeout = schedule_timeout(timeout);