--- /dev/null
+From: Jamie Wellnitz <jamie.wellnitz@emulex.com>
+Subject: Update lpfc to 8.2.8.1
+References: bnc#420767
+
+This patch adds a few features (including FC authentication and a few
+management ioctls) to lpfc 8.2.8 and updates the version to 8.2.8.1.
+
+Signed-off-by: Hannes Reinecke <hare@suse.de>
+
+---
+ drivers/scsi/lpfc/Makefile | 5
+ drivers/scsi/lpfc/lpfc.h | 129 +
+ drivers/scsi/lpfc/lpfc_attr.c | 976 ++++++++++++-
+ drivers/scsi/lpfc/lpfc_auth.c | 838 +++++++++++
+ drivers/scsi/lpfc/lpfc_auth.h | 92 +
+ drivers/scsi/lpfc/lpfc_auth_access.c | 598 ++++++++
+ drivers/scsi/lpfc/lpfc_auth_access.h | 245 +++
+ drivers/scsi/lpfc/lpfc_crtn.h | 37
+ drivers/scsi/lpfc/lpfc_disc.h | 3
+ drivers/scsi/lpfc/lpfc_els.c | 663 +++++++++
+ drivers/scsi/lpfc/lpfc_hbadisc.c | 154 +-
+ drivers/scsi/lpfc/lpfc_hw.h | 52
+ drivers/scsi/lpfc/lpfc_init.c | 154 +-
+ drivers/scsi/lpfc/lpfc_ioctl.c | 2519 +++++++++++++++++++++++++++++++++++
+ drivers/scsi/lpfc/lpfc_ioctl.h | 184 ++
+ drivers/scsi/lpfc/lpfc_logmsg.h | 1
+ drivers/scsi/lpfc/lpfc_mbox.c | 2
+ drivers/scsi/lpfc/lpfc_menlo.c | 1174 ++++++++++++++++
+ drivers/scsi/lpfc/lpfc_scsi.c | 36
+ drivers/scsi/lpfc/lpfc_security.c | 339 ++++
+ drivers/scsi/lpfc/lpfc_security.h | 24
+ drivers/scsi/lpfc/lpfc_sli.c | 59
+ drivers/scsi/lpfc/lpfc_sli.h | 3
+ drivers/scsi/lpfc/lpfc_version.h | 2
+ drivers/scsi/lpfc/lpfc_vport.c | 16
+ 25 files changed, 8149 insertions(+), 156 deletions(-)
+
+--- a/drivers/scsi/lpfc/lpfc_attr.c
++++ b/drivers/scsi/lpfc/lpfc_attr.c
+@@ -41,6 +41,7 @@
+ #include "lpfc_compat.h"
+ #include "lpfc_crtn.h"
+ #include "lpfc_vport.h"
++#include "lpfc_auth_access.h"
+
+ #define LPFC_DEF_DEVLOSS_TMO 30
+ #define LPFC_MIN_DEVLOSS_TMO 1
+@@ -50,6 +51,15 @@
+ #define LPFC_LINK_SPEED_BITMAP 0x00000117
+ #define LPFC_LINK_SPEED_STRING "0, 1, 2, 4, 8"
+
++extern struct bin_attribute sysfs_menlo_attr;
++
++/*
++ * The write key size must be a multiple of 4. If the write key is
++ * changed, make sure that the library write key is also changed.
++ */
++#define LPFC_REG_WRITE_KEY_SIZE 4
++#define LPFC_REG_WRITE_KEY "EMLX"
++
+ /**
+ * lpfc_jedec_to_ascii: Hex to ascii convertor according to JEDEC rules.
+ * @incr: integer to convert.
+@@ -551,7 +561,7 @@ lpfc_do_offline(struct lpfc_hba *phba, u
+ * -EIO reset not configured or error posting the event
+ * zero for success
+ **/
+-static int
++int
+ lpfc_selective_reset(struct lpfc_hba *phba)
+ {
+ struct completion online_compl;
+@@ -1080,6 +1090,141 @@ lpfc_poll_store(struct device *dev, stru
+ return strlen(buf);
+ }
+
++static ssize_t
++lpfc_auth_state_show(struct device *dev, struct device_attribute *attr,
++ char *buf)
++{
++ struct Scsi_Host *shost = class_to_shost(dev);
++ struct lpfc_vport *vport = (struct lpfc_vport *) shost->hostdata;
++ switch (vport->auth.auth_state) {
++ case LPFC_AUTH_UNKNOWN:
++ if (vport->auth.auth_msg_state == LPFC_AUTH_NEGOTIATE ||
++ vport->auth.auth_msg_state == LPFC_DHCHAP_CHALLENGE ||
++ vport->auth.auth_msg_state == LPFC_DHCHAP_REPLY ||
++ vport->auth.auth_msg_state == LPFC_DHCHAP_SUCCESS_REPLY)
++ return snprintf(buf, PAGE_SIZE, "Authenticating\n");
++ else
++ return snprintf(buf, PAGE_SIZE, "Not Authenticated\n");
++ case LPFC_AUTH_FAIL:
++ return snprintf(buf, PAGE_SIZE, "Failed\n");
++ case LPFC_AUTH_SUCCESS:
++ if (vport->auth.auth_msg_state == LPFC_AUTH_NEGOTIATE ||
++ vport->auth.auth_msg_state == LPFC_DHCHAP_CHALLENGE ||
++ vport->auth.auth_msg_state == LPFC_DHCHAP_REPLY ||
++ vport->auth.auth_msg_state == LPFC_DHCHAP_SUCCESS_REPLY)
++ return snprintf(buf, PAGE_SIZE, "Authenticating\n");
++ else if (vport->auth.auth_msg_state == LPFC_DHCHAP_SUCCESS)
++ return snprintf(buf, PAGE_SIZE, "Authenticated\n");
++ }
++ return snprintf(buf, PAGE_SIZE, "Unknown\n");
++}
++
++static ssize_t
++lpfc_auth_dir_show(struct device *dev, struct device_attribute *attr,
++ char *buf)
++{
++ struct Scsi_Host *shost = class_to_shost(dev);
++ struct lpfc_vport *vport = (struct lpfc_vport *) shost->hostdata;
++ if (!vport->cfg_enable_auth ||
++ vport->auth.auth_state != LPFC_AUTH_SUCCESS)
++ return snprintf(buf, PAGE_SIZE, "Unknown\n");
++ if (vport->auth.direction == AUTH_DIRECTION_LOCAL)
++ return snprintf(buf, PAGE_SIZE, "Local Authenticated\n");
++ else if (vport->auth.direction == AUTH_DIRECTION_REMOTE)
++ return snprintf(buf, PAGE_SIZE, "Remote Authenticated\n");
++ else if (vport->auth.direction == AUTH_DIRECTION_BIDI)
++ return snprintf(buf, PAGE_SIZE, "Bidi Authentication\n");
++ return snprintf(buf, PAGE_SIZE, "Unknown\n");
++}
++
++static ssize_t
++lpfc_auth_protocol_show(struct device *dev, struct device_attribute *attr,
++ char *buf)
++{
++ struct Scsi_Host *shost = class_to_shost(dev);
++ struct lpfc_vport *vport = (struct lpfc_vport *) shost->hostdata;
++ if (vport->cfg_enable_auth &&
++ vport->auth.auth_state == LPFC_AUTH_SUCCESS)
++ return snprintf(buf, PAGE_SIZE, "1 (DH-CHAP)\n");
++ else
++ return snprintf(buf, PAGE_SIZE, "Unknown\n");
++}
++
++static ssize_t
++lpfc_auth_dhgroup_show(struct device *dev, struct device_attribute *attr,
++ char *buf)
++{
++ struct Scsi_Host *shost = class_to_shost(dev);
++ struct lpfc_vport *vport = (struct lpfc_vport *) shost->hostdata;
++ if (!vport->cfg_enable_auth ||
++ vport->auth.auth_state != LPFC_AUTH_SUCCESS)
++ return snprintf(buf, PAGE_SIZE, "Unknown\n");
++ switch (vport->auth.group_id) {
++ case DH_GROUP_NULL:
++ return snprintf(buf, PAGE_SIZE, "0 (NULL)\n");
++ case DH_GROUP_1024:
++ return snprintf(buf, PAGE_SIZE, "1 (1024)\n");
++ case DH_GROUP_1280:
++ return snprintf(buf, PAGE_SIZE, "2 (1280)\n");
++ case DH_GROUP_1536:
++ return snprintf(buf, PAGE_SIZE, "3 (1536)\n");
++ case DH_GROUP_2048:
++ return snprintf(buf, PAGE_SIZE, "4 (2048)\n");
++ }
++ return snprintf(buf, PAGE_SIZE, "%d (Unrecognized)\n",
++ vport->auth.group_id);
++}
++
++static ssize_t
++lpfc_auth_hash_show(struct device *dev, struct device_attribute *attr,
++ char *buf)
++{
++ struct Scsi_Host *shost = class_to_shost(dev);
++ struct lpfc_vport *vport = (struct lpfc_vport *) shost->hostdata;
++ if (!vport->cfg_enable_auth ||
++ vport->auth.auth_state != LPFC_AUTH_SUCCESS)
++ return snprintf(buf, PAGE_SIZE, "Unknown\n");
++ switch (vport->auth.hash_id) {
++ case FC_SP_HASH_MD5:
++ return snprintf(buf, PAGE_SIZE, "5 (MD5)\n");
++ case FC_SP_HASH_SHA1:
++ return snprintf(buf, PAGE_SIZE, "6 (SHA1)\n");
++ }
++ return snprintf(buf, PAGE_SIZE, "%d (Unrecognized)\n",
++ vport->auth.hash_id);
++}
++static ssize_t
++lpfc_auth_last_show(struct device *dev, struct device_attribute *attr,
++ char *buf)
++{
++ struct Scsi_Host *shost = class_to_shost(dev);
++ struct lpfc_vport *vport = (struct lpfc_vport *) shost->hostdata;
++ struct timeval last_time;
++ if (!vport->cfg_enable_auth || vport->auth.last_auth == 0)
++ return snprintf(buf, PAGE_SIZE, "%d\n", -1);
++ jiffies_to_timeval((jiffies - vport->auth.last_auth), &last_time);
++ return snprintf(buf, PAGE_SIZE, "%ld\n", last_time.tv_sec);
++}
++
++static ssize_t
++lpfc_auth_next_show(struct device *dev, struct device_attribute *attr,
++ char *buf)
++{
++ struct Scsi_Host *shost = class_to_shost(dev);
++ struct lpfc_vport *vport = (struct lpfc_vport *) shost->hostdata;
++ unsigned long next_jiff;
++ struct timeval next_time;
++ if (!vport->cfg_enable_auth ||
++ vport->auth.last_auth == 0 ||
++ vport->auth.reauth_interval == 0)
++ return snprintf(buf, PAGE_SIZE, "%d\n", -1);
++ /* calculate the amount of time left until next auth */
++ next_jiff = (msecs_to_jiffies(vport->auth.reauth_interval * 60000) +
++ vport->auth.last_auth) - jiffies;
++ jiffies_to_timeval(next_jiff, &next_time);
++ return snprintf(buf, PAGE_SIZE, "%ld\n", next_time.tv_sec);
++}
++
+ /**
+ * lpfc_param_show: Return a cfg attribute value in decimal.
+ *
+@@ -1512,7 +1657,38 @@ static DEVICE_ATTR(max_xri, S_IRUGO, lpf
+ static DEVICE_ATTR(used_xri, S_IRUGO, lpfc_used_xri_show, NULL);
+ static DEVICE_ATTR(npiv_info, S_IRUGO, lpfc_npiv_info_show, NULL);
+ static DEVICE_ATTR(lpfc_temp_sensor, S_IRUGO, lpfc_temp_sensor_show, NULL);
++static DEVICE_ATTR(auth_state, S_IRUGO, lpfc_auth_state_show, NULL);
++static DEVICE_ATTR(auth_dir, S_IRUGO, lpfc_auth_dir_show, NULL);
++static DEVICE_ATTR(auth_protocol, S_IRUGO, lpfc_auth_protocol_show, NULL);
++static DEVICE_ATTR(auth_dhgroup, S_IRUGO, lpfc_auth_dhgroup_show, NULL);
++static DEVICE_ATTR(auth_hash, S_IRUGO, lpfc_auth_hash_show, NULL);
++static DEVICE_ATTR(auth_last, S_IRUGO, lpfc_auth_last_show, NULL);
++static DEVICE_ATTR(auth_next, S_IRUGO, lpfc_auth_next_show, NULL);
++
++static int
++lpfc_parse_wwn(const char *ns, uint8_t *nm)
++{
++ unsigned int i, j;
++ memset(nm, 0, 8);
++
++ /* Validate and store the new name */
++ for (i = 0, j = 0; i < 16; i++) {
++ if ((*ns >= 'a') && (*ns <= 'f'))
++ j = ((j << 4) | ((*ns++ - 'a') + 10));
++ else if ((*ns >= 'A') && (*ns <= 'F'))
++ j = ((j << 4) | ((*ns++ - 'A') + 10));
++ else if ((*ns >= '0') && (*ns <= '9'))
++ j = ((j << 4) | (*ns++ - '0'));
++ else
++ return -EINVAL;
++ if (i % 2) {
++ nm[i/2] = j & 0xff;
++ j = 0;
++ }
++ }
+
++ return 0;
++}
+
+ static char *lpfc_soft_wwn_key = "C99G71SL8032A";
+
+@@ -1908,6 +2084,87 @@ lpfc_vport_param_store(nodev_tmo)
+
+ static DEVICE_ATTR(lpfc_nodev_tmo, S_IRUGO | S_IWUSR,
+ lpfc_nodev_tmo_show, lpfc_nodev_tmo_store);
++static ssize_t
++lpfc_authenticate(struct device *dev, struct device_attribute *attr,
++ const char *buf, size_t count)
++{
++ struct Scsi_Host *shost = class_to_shost(dev);
++ struct lpfc_vport *vport = (struct lpfc_vport *)shost->hostdata;
++ struct lpfc_hba *phba = vport->phba;
++ struct lpfc_nodelist *ndlp;
++ int status;
++ struct lpfc_name wwpn;
++
++ if (lpfc_parse_wwn(buf, wwpn.u.wwn))
++ return -EINVAL;
++
++ if (vport->port_state == LPFC_VPORT_FAILED) {
++ lpfc_issue_lip(shost);
++ return strlen(buf);
++ }
++ if ((vport->fc_flag & FC_OFFLINE_MODE) ||
++ (phba->sli.sli_flag & LPFC_BLOCK_MGMT_IO) ||
++ (!vport->cfg_enable_auth))
++ return -EPERM;
++
++ /* If vport already in the middle of authentication do not restart */
++ if ((vport->auth.auth_msg_state == LPFC_AUTH_NEGOTIATE) ||
++ (vport->auth.auth_msg_state == LPFC_DHCHAP_CHALLENGE) ||
++ (vport->auth.auth_msg_state == LPFC_DHCHAP_REPLY))
++ return -EAGAIN;
++
++ if (wwn_to_u64(wwpn.u.wwn) == AUTH_FABRIC_WWN)
++ ndlp = lpfc_findnode_did(vport, Fabric_DID);
++ else
++ ndlp = lpfc_findnode_wwnn(vport, &wwpn);
++ if (!ndlp || !NLP_CHK_NODE_ACT(ndlp))
++ return -EPERM;
++ status = lpfc_start_node_authentication(ndlp);
++ if (status)
++ return status;
++ return strlen(buf);
++}
++static DEVICE_ATTR(lpfc_authenticate, S_IRUGO | S_IWUSR, NULL,
++ lpfc_authenticate);
++
++static ssize_t
++lpfc_update_auth_config(struct device *dev, struct device_attribute *attr,
++ const char *buf, size_t count)
++{
++ struct Scsi_Host *shost = class_to_shost(dev);
++ struct lpfc_vport *vport = (struct lpfc_vport *)shost->hostdata;
++ struct lpfc_hba *phba = vport->phba;
++ struct lpfc_nodelist *ndlp;
++ struct lpfc_name wwpn;
++ int status;
++
++ if (lpfc_parse_wwn(buf, wwpn.u.wwn))
++ return -EINVAL;
++
++ if ((vport->fc_flag & FC_OFFLINE_MODE) ||
++ (phba->sli.sli_flag & LPFC_BLOCK_MGMT_IO) ||
++ (!vport->cfg_enable_auth))
++ return -EPERM;
++
++ /* If vport already in the middle of authentication do not restart */
++ if ((vport->auth.auth_msg_state == LPFC_AUTH_NEGOTIATE) ||
++ (vport->auth.auth_msg_state == LPFC_DHCHAP_CHALLENGE) ||
++ (vport->auth.auth_msg_state == LPFC_DHCHAP_REPLY))
++ return -EAGAIN;
++
++ if (wwn_to_u64(wwpn.u.wwn) == AUTH_FABRIC_WWN)
++ ndlp = lpfc_findnode_did(vport, Fabric_DID);
++ else
++ ndlp = lpfc_findnode_wwnn(vport, &wwpn);
++ if (!ndlp || !NLP_CHK_NODE_ACT(ndlp))
++ return -EPERM;
++ status = lpfc_get_auth_config(ndlp, &wwpn);
++ if (status)
++ return -EPERM;
++ return strlen(buf);
++}
++static DEVICE_ATTR(lpfc_update_auth_config, S_IRUGO | S_IWUSR,
++ NULL, lpfc_update_auth_config);
+
+ /*
+ # lpfc_devloss_tmo: If set, it will hold all I/O errors on devices that
+@@ -2753,6 +3010,48 @@ LPFC_ATTR_R(use_msi, 2, 0, 2, "Use Messa
+ "MSI-X (2), if possible");
+
+ /*
++# lpfc_enable_auth: controls FC Authentication.
++# 0 = Authentication OFF
++# 1 = Authentication ON
++# Value range [0,1]. Default value is 0.
++*/
++static int lpfc_enable_auth;
++module_param(lpfc_enable_auth, int, 0);
++MODULE_PARM_DESC(lpfc_enable_auth, "Enable FC Authentication");
++lpfc_vport_param_show(enable_auth);
++lpfc_vport_param_init(enable_auth, 0, 0, 1);
++static int
++lpfc_enable_auth_set(struct lpfc_vport *vport, int val)
++{
++ if (val == vport->cfg_enable_auth)
++ return 0;
++ if (val == 0) {
++ spin_lock_irq(&fc_security_user_lock);
++ list_del(&vport->sc_users);
++ spin_unlock_irq(&fc_security_user_lock);
++ vport->cfg_enable_auth = val;
++ lpfc_fc_queue_security_work(vport,
++ &vport->sc_offline_work);
++ return 0;
++ } else if (val == 1) {
++ spin_lock_irq(&fc_security_user_lock);
++ list_add_tail(&vport->sc_users, &fc_security_user_list);
++ spin_unlock_irq(&fc_security_user_lock);
++ vport->cfg_enable_auth = val;
++ lpfc_fc_queue_security_work(vport,
++ &vport->sc_online_work);
++ return 0;
++ }
++ lpfc_printf_vlog(vport, KERN_ERR, LOG_INIT,
++ "0431 lpfc_enable_auth attribute cannot be set to %d, "
++ "allowed range is [0, 1]\n", val);
++ return -EINVAL;
++}
++lpfc_vport_param_store(enable_auth);
++static DEVICE_ATTR(lpfc_enable_auth, S_IRUGO | S_IWUSR,
++ lpfc_enable_auth_show, lpfc_enable_auth_store);
++
++/*
+ # lpfc_enable_hba_reset: Allow or prevent HBA resets to the hardware.
+ # 0 = HBA resets disabled
+ # 1 = HBA resets enabled (default)
+@@ -2825,6 +3124,16 @@ struct device_attribute *lpfc_hba_attrs[
+ &dev_attr_lpfc_poll,
+ &dev_attr_lpfc_poll_tmo,
+ &dev_attr_lpfc_use_msi,
++ &dev_attr_lpfc_enable_auth,
++ &dev_attr_lpfc_authenticate,
++ &dev_attr_lpfc_update_auth_config,
++ &dev_attr_auth_state,
++ &dev_attr_auth_dir,
++ &dev_attr_auth_protocol,
++ &dev_attr_auth_dhgroup,
++ &dev_attr_auth_hash,
++ &dev_attr_auth_last,
++ &dev_attr_auth_next,
+ &dev_attr_lpfc_soft_wwnn,
+ &dev_attr_lpfc_soft_wwpn,
+ &dev_attr_lpfc_soft_wwn_enable,
+@@ -2855,6 +3164,14 @@ struct device_attribute *lpfc_vport_attr
+ &dev_attr_nport_evt_cnt,
+ &dev_attr_npiv_info,
+ &dev_attr_lpfc_enable_da_id,
++ &dev_attr_auth_state,
++ &dev_attr_auth_dir,
++ &dev_attr_auth_protocol,
++ &dev_attr_auth_dhgroup,
++ &dev_attr_auth_hash,
++ &dev_attr_auth_last,
++ &dev_attr_auth_next,
++
+ &dev_attr_lpfc_max_scsicmpl_time,
+ &dev_attr_lpfc_stat_data_ctrl,
+ NULL,
+@@ -2888,21 +3205,23 @@ sysfs_ctlreg_write(struct kobject *kobj,
+ struct lpfc_vport *vport = (struct lpfc_vport *) shost->hostdata;
+ struct lpfc_hba *phba = vport->phba;
+
+- if ((off + count) > FF_REG_AREA_SIZE)
++ if ((off + count) > FF_REG_AREA_SIZE + LPFC_REG_WRITE_KEY_SIZE)
+ return -ERANGE;
+
+- if (count == 0) return 0;
++ if (count <= LPFC_REG_WRITE_KEY_SIZE)
++ return 0;
+
+ if (off % 4 || count % 4 || (unsigned long)buf % 4)
+ return -EINVAL;
+
+- if (!(vport->fc_flag & FC_OFFLINE_MODE)) {
+- return -EPERM;
+- }
++ /* This is to protect HBA registers from accidental writes. */
++ if (memcmp(buf, LPFC_REG_WRITE_KEY, LPFC_REG_WRITE_KEY_SIZE))
++ return -EINVAL;
+
+ spin_lock_irq(&phba->hbalock);
+- for (buf_off = 0; buf_off < count; buf_off += sizeof(uint32_t))
+- writel(*((uint32_t *)(buf + buf_off)),
++ for (buf_off = 0; buf_off < count - LPFC_REG_WRITE_KEY_SIZE;
++ buf_off += sizeof(uint32_t))
++ writel(*((uint32_t *)(buf + buf_off + LPFC_REG_WRITE_KEY_SIZE)),
+ phba->ctrl_regs_memmap_p + off + buf_off);
+
+ spin_unlock_irq(&phba->hbalock);
+@@ -2971,21 +3290,211 @@ static struct bin_attribute sysfs_ctlreg
+ .write = sysfs_ctlreg_write,
+ };
+
++static struct lpfc_sysfs_mbox *
++lpfc_get_sysfs_mbox(struct lpfc_hba *phba, uint8_t create)
++{
++ struct lpfc_sysfs_mbox *sysfs_mbox;
++ pid_t pid;
++
++ pid = current->pid;
++
++ spin_lock_irq(&phba->hbalock);
++ list_for_each_entry(sysfs_mbox, &phba->sysfs_mbox_list, list) {
++ if (sysfs_mbox->pid == pid) {
++ spin_unlock_irq(&phba->hbalock);
++ return sysfs_mbox;
++ }
++ }
++ if (!create) {
++ spin_unlock_irq(&phba->hbalock);
++ return NULL;
++ }
++ spin_unlock_irq(&phba->hbalock);
++ sysfs_mbox = kzalloc(sizeof(struct lpfc_sysfs_mbox),
++ GFP_KERNEL);
++ if (!sysfs_mbox)
++ return NULL;
++ sysfs_mbox->state = SMBOX_IDLE;
++ sysfs_mbox->pid = pid;
++ spin_lock_irq(&phba->hbalock);
++ list_add_tail(&sysfs_mbox->list, &phba->sysfs_mbox_list);
++
++ spin_unlock_irq(&phba->hbalock);
++ return sysfs_mbox;
++
++}
+ /**
+ * sysfs_mbox_idle: frees the sysfs mailbox.
+ * @phba: lpfc_hba pointer
+ **/
+ static void
+-sysfs_mbox_idle(struct lpfc_hba *phba)
++sysfs_mbox_idle(struct lpfc_hba *phba,
++ struct lpfc_sysfs_mbox *sysfs_mbox)
+ {
+- phba->sysfs_mbox.state = SMBOX_IDLE;
+- phba->sysfs_mbox.offset = 0;
+-
+- if (phba->sysfs_mbox.mbox) {
+- mempool_free(phba->sysfs_mbox.mbox,
++ list_del_init(&sysfs_mbox->list);
++ if (sysfs_mbox->mbox) {
++ mempool_free(sysfs_mbox->mbox,
+ phba->mbox_mem_pool);
+- phba->sysfs_mbox.mbox = NULL;
+ }
++
++ if (sysfs_mbox->mbext)
++ kfree(sysfs_mbox->mbext);
++
++ /* If txmit buffer allocated free txmit buffer */
++ if (sysfs_mbox->txmit_buff) {
++ if (sysfs_mbox->txmit_buff->virt)
++ __lpfc_mbuf_free(phba,
++ sysfs_mbox->txmit_buff->virt,
++ sysfs_mbox->txmit_buff->phys);
++ kfree(sysfs_mbox->txmit_buff);
++ }
++
++	/* If rcv buffer allocated free rcv buffer */
++ if (sysfs_mbox->rcv_buff) {
++ if (sysfs_mbox->rcv_buff->virt)
++ __lpfc_mbuf_free(phba,
++ sysfs_mbox->rcv_buff->virt,
++ sysfs_mbox->rcv_buff->phys);
++ kfree(sysfs_mbox->rcv_buff);
++ }
++
++ kfree(sysfs_mbox);
++}
++
++static size_t
++lpfc_syfs_mbox_copy_rcv_buff(struct lpfc_hba *phba,
++ struct lpfc_sysfs_mbox *sysfs_mbox,
++ char *buf, loff_t off, size_t count)
++{
++ uint32_t size;
++ spin_lock_irq(&phba->hbalock);
++ if (!sysfs_mbox->mbox) {
++ sysfs_mbox_idle(phba, sysfs_mbox);
++ spin_unlock_irq(&phba->hbalock);
++ return -EAGAIN;
++ }
++
++ if (sysfs_mbox->mbox->mb.mbxCommand == MBX_READ_EVENT_LOG)
++ size = sysfs_mbox->mbox->mb.un.
++ varRdEventLog.rcv_bde64.tus.f.bdeSize;
++ else
++ size = sysfs_mbox->mbox->mb.un.
++ varBIUdiag.un.s2.rcv_bde64.tus.f.bdeSize;
++
++
++ if ((count + off) > size) {
++ sysfs_mbox_idle(phba, sysfs_mbox);
++ spin_unlock_irq(&phba->hbalock);
++ return -ERANGE;
++ }
++ if (count > LPFC_BPL_SIZE) {
++ sysfs_mbox_idle(phba, sysfs_mbox);
++ spin_unlock_irq(&phba->hbalock);
++ return -ERANGE;
++ }
++ if (sysfs_mbox->extoff != off) {
++ sysfs_mbox_idle(phba, sysfs_mbox);
++ spin_unlock_irq(&phba->hbalock);
++ return -EAGAIN;
++ }
++
++ memcpy(buf, (uint8_t *) sysfs_mbox->rcv_buff->virt + off, count);
++ sysfs_mbox->extoff = off + count;
++
++ if (sysfs_mbox->extoff >= size)
++ sysfs_mbox_idle(phba, sysfs_mbox);
++
++ spin_unlock_irq(&phba->hbalock);
++
++ return count;
++}
++
++static size_t
++lpfc_syfs_mbox_copy_extdata(struct lpfc_hba *phba,
++ struct lpfc_sysfs_mbox * sysfs_mbox,
++ char *buf, loff_t off, size_t count)
++{
++ uint32_t size;
++
++ spin_lock_irq(&phba->hbalock);
++ if (!sysfs_mbox->mbox) {
++ sysfs_mbox_idle(phba, sysfs_mbox);
++ spin_unlock_irq(&phba->hbalock);
++ return -EAGAIN;
++ }
++
++ size = sysfs_mbox->mbox_data.out_ext_wlen * sizeof(uint32_t);
++
++ if ((count + off) > size) {
++ sysfs_mbox_idle(phba, sysfs_mbox);
++ spin_unlock_irq(&phba->hbalock);
++ return -ERANGE;
++ }
++
++ if (size > MAILBOX_EXT_SIZE) {
++ sysfs_mbox_idle(phba, sysfs_mbox);
++ spin_unlock_irq(&phba->hbalock);
++ return -ERANGE;
++ }
++
++ if (sysfs_mbox->extoff != off) {
++ sysfs_mbox_idle(phba, sysfs_mbox);
++ spin_unlock_irq(&phba->hbalock);
++ return -EAGAIN;
++ }
++
++ memcpy(buf, (uint8_t *) sysfs_mbox->mbext + off, count);
++ sysfs_mbox->extoff = off + count;
++
++ if (sysfs_mbox->extoff >= size)
++ sysfs_mbox_idle(phba, sysfs_mbox);
++
++ spin_unlock_irq(&phba->hbalock);
++
++ return count;
++}
++
++static size_t
++lpfc_syfs_mbox_copy_txmit_buff(struct lpfc_hba *phba,
++ struct lpfc_sysfs_mbox *sysfs_mbox,
++ char *buf, loff_t off, size_t count)
++{
++ uint32_t size;
++ spin_lock_irq(&phba->hbalock);
++ if (!sysfs_mbox->mbox ||
++ (sysfs_mbox->offset != sizeof(struct lpfc_sysfs_mbox_data))) {
++ sysfs_mbox_idle(phba, sysfs_mbox);
++ spin_unlock_irq(&phba->hbalock);
++ return -EAGAIN;
++ }
++
++ size = sysfs_mbox->mbox->mb.un.varBIUdiag.un.s2.xmit_bde64.
++ tus.f.bdeSize;
++
++ if ((count + off) > size) {
++ sysfs_mbox_idle(phba, sysfs_mbox);
++ spin_unlock_irq(&phba->hbalock);
++ return -ERANGE;
++ }
++
++ if (size > LPFC_BPL_SIZE) {
++ sysfs_mbox_idle(phba, sysfs_mbox);
++ spin_unlock_irq(&phba->hbalock);
++ return -ERANGE;
++ }
++
++ if (sysfs_mbox->extoff != off) {
++ sysfs_mbox_idle(phba, sysfs_mbox);
++ spin_unlock_irq(&phba->hbalock);
++ return -EAGAIN;
++ }
++
++ memcpy((uint8_t *) sysfs_mbox->txmit_buff->virt + off, buf, count);
++ sysfs_mbox->extoff = off + count;
++
++ spin_unlock_irq(&phba->hbalock);
++
++ return count;
+ }
+
+ /**
+@@ -3018,6 +3527,9 @@ sysfs_mbox_write(struct kobject *kobj, s
+ struct lpfc_vport *vport = (struct lpfc_vport *) shost->hostdata;
+ struct lpfc_hba *phba = vport->phba;
+ struct lpfcMboxq *mbox = NULL;
++ struct lpfc_sysfs_mbox *sysfs_mbox;
++ uint8_t *ext;
++ uint32_t size;
+
+ if ((count + off) > MAILBOX_CMD_SIZE)
+ return -ERANGE;
+@@ -3029,34 +3541,232 @@ sysfs_mbox_write(struct kobject *kobj, s
+ return 0;
+
+ if (off == 0) {
++ sysfs_mbox = lpfc_get_sysfs_mbox(phba, 1);
++ if (sysfs_mbox == NULL)
++ return -ENOMEM;
++	/*
++	 * If sysfs expects the buffer to be read and the
++	 * app does not know how to do it, use a different
++	 * context.
++	 */
++ if (sysfs_mbox->state == SMBOX_READING_BUFF ||
++ sysfs_mbox->state == SMBOX_READING_MBEXT) {
++ spin_lock_irq(&phba->hbalock);
++ sysfs_mbox_idle(phba, sysfs_mbox);
++ spin_unlock_irq(&phba->hbalock);
++ sysfs_mbox = lpfc_get_sysfs_mbox(phba, 1);
++ if (sysfs_mbox == NULL)
++ return -ENOMEM;
++ }
++ } else {
++ sysfs_mbox = lpfc_get_sysfs_mbox(phba, 0);
++ if (sysfs_mbox == NULL)
++ return -EAGAIN;
++ }
++ spin_lock_irq(&phba->hbalock);
++ if (sysfs_mbox->state == SMBOX_WRITING_MBEXT) {
++ if (!sysfs_mbox->mbox ||
++ (sysfs_mbox->offset !=
++ sizeof(struct lpfc_sysfs_mbox_data))) {
++ sysfs_mbox_idle(phba, sysfs_mbox);
++ spin_unlock_irq(&phba->hbalock);
++ return -EAGAIN;
++ }
++
++ size = sysfs_mbox->mbox_data.in_ext_wlen * sizeof(uint32_t);
++
++ if ((count + sysfs_mbox->extoff) > size) {
++ sysfs_mbox_idle(phba, sysfs_mbox);
++ spin_unlock_irq(&phba->hbalock);
++ return -ERANGE;
++ }
++
++ if (size > MAILBOX_EXT_SIZE) {
++ sysfs_mbox_idle(phba, sysfs_mbox);
++ spin_unlock_irq(&phba->hbalock);
++ return -ERANGE;
++ }
++
++ if (!sysfs_mbox->mbext) {
++ spin_unlock_irq(&phba->hbalock);
++
++ ext = kzalloc(MAILBOX_EXT_SIZE, GFP_KERNEL);
++ if (!ext) {
++ spin_lock_irq(&phba->hbalock);
++ sysfs_mbox_idle(phba, sysfs_mbox);
++ spin_unlock_irq(&phba->hbalock);
++ return -ENOMEM;
++ }
++
++ spin_lock_irq(&phba->hbalock);
++ sysfs_mbox->mbext = ext;
++ }
++
++ if (sysfs_mbox->extoff != off) {
++ sysfs_mbox_idle(phba, sysfs_mbox);
++ spin_unlock_irq(&phba->hbalock);
++ return -EAGAIN;
++ }
++
++ memcpy((uint8_t *) sysfs_mbox->mbext + off, buf, count);
++ sysfs_mbox->extoff = off + count;
++
++ spin_unlock_irq(&phba->hbalock);
++
++ return count;
++ }
++
++ spin_unlock_irq(&phba->hbalock);
++
++ if (sysfs_mbox->state == SMBOX_WRITING_BUFF)
++ return lpfc_syfs_mbox_copy_txmit_buff(phba,
++ sysfs_mbox, buf, off, count);
++
++ if ((count + off) > sizeof(struct lpfc_sysfs_mbox_data)) {
++ spin_lock_irq(&phba->hbalock);
++ sysfs_mbox_idle(phba, sysfs_mbox);
++ spin_unlock_irq(&phba->hbalock);
++ return -ERANGE;
++ }
++
++ if (off == 0) {
+ mbox = mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL);
+- if (!mbox)
++ if (!mbox) {
++ spin_lock_irq(&phba->hbalock);
++ sysfs_mbox_idle(phba, sysfs_mbox);
++ spin_unlock_irq(&phba->hbalock);
+ return -ENOMEM;
++ }
+ memset(mbox, 0, sizeof (LPFC_MBOXQ_t));
+ }
+
+ spin_lock_irq(&phba->hbalock);
+
+ if (off == 0) {
+- if (phba->sysfs_mbox.mbox)
++ if (sysfs_mbox->mbox)
+ mempool_free(mbox, phba->mbox_mem_pool);
+ else
+- phba->sysfs_mbox.mbox = mbox;
+- phba->sysfs_mbox.state = SMBOX_WRITING;
++ sysfs_mbox->mbox = mbox;
++ sysfs_mbox->state = SMBOX_WRITING;
+ } else {
+- if (phba->sysfs_mbox.state != SMBOX_WRITING ||
+- phba->sysfs_mbox.offset != off ||
+- phba->sysfs_mbox.mbox == NULL) {
+- sysfs_mbox_idle(phba);
++ if (sysfs_mbox->state != SMBOX_WRITING ||
++ sysfs_mbox->offset != off ||
++ sysfs_mbox->mbox == NULL) {
++ sysfs_mbox_idle(phba, sysfs_mbox);
+ spin_unlock_irq(&phba->hbalock);
+ return -EAGAIN;
+ }
+ }
+
+- memcpy((uint8_t *) & phba->sysfs_mbox.mbox->mb + off,
++ memcpy((uint8_t *) & sysfs_mbox->mbox_data + off,
+ buf, count);
+
+- phba->sysfs_mbox.offset = off + count;
++ sysfs_mbox->offset = off + count;
++
++ if (sysfs_mbox->offset == sizeof(struct lpfc_sysfs_mbox_data)) {
++ memcpy((uint8_t *) & sysfs_mbox->mbox->mb,
++ (uint8_t *) &sysfs_mbox->mbox_data.mbox,
++ sizeof(MAILBOX_t));
++ }
++
++ if ((sysfs_mbox->offset == sizeof(struct lpfc_sysfs_mbox_data)) &&
++ (sysfs_mbox->mbox_data.in_ext_wlen ||
++ sysfs_mbox->mbox_data.out_ext_wlen)) {
++
++ if (!sysfs_mbox->mbext) {
++ spin_unlock_irq(&phba->hbalock);
++
++ ext = kzalloc(MAILBOX_EXT_SIZE, GFP_KERNEL);
++ if (!ext) {
++ spin_lock_irq(&phba->hbalock);
++ sysfs_mbox_idle(phba, sysfs_mbox);
++ spin_unlock_irq(&phba->hbalock);
++ return -ENOMEM;
++ }
++
++ spin_lock_irq(&phba->hbalock);
++ sysfs_mbox->mbext = ext;
++ }
++ }
++
++ if ((sysfs_mbox->offset == sizeof(struct lpfc_sysfs_mbox_data)) &&
++ (sysfs_mbox->mbox_data.in_ext_wlen)) {
++ sysfs_mbox->state = SMBOX_WRITING_MBEXT;
++ }
++
++ if ((sysfs_mbox->offset == sizeof(struct lpfc_sysfs_mbox_data)) &&
++ (sysfs_mbox->mbox->mb.mbxCommand == MBX_RUN_BIU_DIAG64)) {
++ sysfs_mbox->state = SMBOX_WRITING_BUFF;
++ spin_unlock_irq(&phba->hbalock);
++
++ /* Allocate txmit buffer */
++ sysfs_mbox->txmit_buff =
++ kzalloc(sizeof(struct lpfc_dmabuf), GFP_KERNEL);
++ if (!sysfs_mbox->txmit_buff) {
++ spin_lock_irq(&phba->hbalock);
++ sysfs_mbox_idle(phba, sysfs_mbox);
++ spin_unlock_irq(&phba->hbalock);
++ return -ENOMEM;
++ }
++ INIT_LIST_HEAD(&sysfs_mbox->txmit_buff->list);
++ sysfs_mbox->txmit_buff->virt =
++ lpfc_mbuf_alloc(phba, 0,
++ &(sysfs_mbox->txmit_buff->phys));
++ if (!sysfs_mbox->txmit_buff->virt) {
++ spin_lock_irq(&phba->hbalock);
++ sysfs_mbox_idle(phba, sysfs_mbox);
++ spin_unlock_irq(&phba->hbalock);
++ return -ENOMEM;
++ }
++
++ /* Allocate rcv buffer */
++ sysfs_mbox->rcv_buff =
++ kzalloc(sizeof(struct lpfc_dmabuf), GFP_KERNEL);
++ if (!sysfs_mbox->rcv_buff) {
++ spin_lock_irq(&phba->hbalock);
++ sysfs_mbox_idle(phba, sysfs_mbox);
++ spin_unlock_irq(&phba->hbalock);
++ return -ENOMEM;
++ }
++ INIT_LIST_HEAD(&sysfs_mbox->rcv_buff->list);
++ sysfs_mbox->rcv_buff->virt =
++ lpfc_mbuf_alloc(phba, 0,
++ &(sysfs_mbox->rcv_buff->phys));
++ if (!sysfs_mbox->rcv_buff->virt) {
++ spin_lock_irq(&phba->hbalock);
++ sysfs_mbox_idle(phba, sysfs_mbox);
++ spin_unlock_irq(&phba->hbalock);
++ return -ENOMEM;
++ }
++ return count;
++ }
++ if ((sysfs_mbox->offset == sizeof(struct lpfc_sysfs_mbox_data)) &&
++ (sysfs_mbox->mbox->mb.mbxCommand == MBX_READ_EVENT_LOG)) {
++ sysfs_mbox->state = SMBOX_WRITING;
++ spin_unlock_irq(&phba->hbalock);
++
++
++ /* Allocate rcv buffer */
++ sysfs_mbox->rcv_buff =
++ kzalloc(sizeof(struct lpfc_dmabuf), GFP_KERNEL);
++ if (!sysfs_mbox->rcv_buff) {
++ spin_lock_irq(&phba->hbalock);
++ sysfs_mbox_idle(phba, sysfs_mbox);
++ spin_unlock_irq(&phba->hbalock);
++ return -ENOMEM;
++ }
++ INIT_LIST_HEAD(&sysfs_mbox->rcv_buff->list);
++ sysfs_mbox->rcv_buff->virt =
++ lpfc_mbuf_alloc(phba, 0,
++ &(sysfs_mbox->rcv_buff->phys));
++ if (!sysfs_mbox->rcv_buff->virt) {
++ spin_lock_irq(&phba->hbalock);
++ sysfs_mbox_idle(phba, sysfs_mbox);
++ spin_unlock_irq(&phba->hbalock);
++ return -ENOMEM;
++ }
++ return count;
++ }
+
+ spin_unlock_irq(&phba->hbalock);
+
+@@ -3095,6 +3805,42 @@ sysfs_mbox_read(struct kobject *kobj, st
+ struct lpfc_vport *vport = (struct lpfc_vport *) shost->hostdata;
+ struct lpfc_hba *phba = vport->phba;
+ int rc;
++ int wait_4_menlo_maint = 0;
++ struct lpfc_sysfs_mbox *sysfs_mbox;
++ ssize_t ret;
++ sysfs_mbox = lpfc_get_sysfs_mbox(phba, 0);
++
++ if (!sysfs_mbox)
++ return -EPERM;
++
++	/*
++	 * If sysfs expects the buffer to be written and the
++	 * app does not know how to do it, fail the mailbox
++	 * command.
++	 */
++ if ((sysfs_mbox->state == SMBOX_WRITING_BUFF) &&
++ (sysfs_mbox->extoff == 0)) {
++ spin_lock_irq(&phba->hbalock);
++ sysfs_mbox_idle(phba, sysfs_mbox);
++ spin_unlock_irq(&phba->hbalock);
++ return -EINVAL;
++ }
++ if (sysfs_mbox->state == SMBOX_READING_BUFF) {
++ ret = lpfc_syfs_mbox_copy_rcv_buff(phba, sysfs_mbox,
++ buf, off, count);
++ lpfc_printf_log(phba, KERN_INFO, LOG_INIT,
++ "1245 mbox: cmd 0x%x, 0x%x ret %x\n",
++ sysfs_mbox->mbox->mb.mbxCommand,
++ sysfs_mbox->mbox->mb.un.varWords[0],
++ (uint32_t)ret);
++ return ret;
++ }
++
++ if (sysfs_mbox->state == SMBOX_READING_MBEXT) {
++ ret = lpfc_syfs_mbox_copy_extdata(phba, sysfs_mbox,
++ buf, off, count);
++ return ret;
++ }
+
+ if (off > MAILBOX_CMD_SIZE)
+ return -ERANGE;
+@@ -3111,16 +3857,18 @@ sysfs_mbox_read(struct kobject *kobj, st
+ spin_lock_irq(&phba->hbalock);
+
+ if (phba->over_temp_state == HBA_OVER_TEMP) {
+- sysfs_mbox_idle(phba);
++ sysfs_mbox_idle(phba, sysfs_mbox);
+ spin_unlock_irq(&phba->hbalock);
+ return -EACCES;
+ }
+
+ if (off == 0 &&
+- phba->sysfs_mbox.state == SMBOX_WRITING &&
+- phba->sysfs_mbox.offset >= 2 * sizeof(uint32_t)) {
++ ((sysfs_mbox->state == SMBOX_WRITING) ||
++ (sysfs_mbox->state == SMBOX_WRITING_MBEXT) ||
++ (sysfs_mbox->state == SMBOX_WRITING_BUFF) ) &&
++ sysfs_mbox->offset >= 2 * sizeof(uint32_t)) {
+
+- switch (phba->sysfs_mbox.mbox->mb.mbxCommand) {
++ switch (sysfs_mbox->mbox->mb.mbxCommand) {
+ /* Offline only */
+ case MBX_INIT_LINK:
+ case MBX_DOWN_LINK:
+@@ -3133,12 +3881,11 @@ sysfs_mbox_read(struct kobject *kobj, st
+ case MBX_RUN_DIAGS:
+ case MBX_RESTART:
+ case MBX_SET_MASK:
+- case MBX_SET_DEBUG:
+ if (!(vport->fc_flag & FC_OFFLINE_MODE)) {
+ printk(KERN_WARNING "mbox_read:Command 0x%x "
+ "is illegal in on-line state\n",
+- phba->sysfs_mbox.mbox->mb.mbxCommand);
+- sysfs_mbox_idle(phba);
++ sysfs_mbox->mbox->mb.mbxCommand);
++ sysfs_mbox_idle(phba,sysfs_mbox);
+ spin_unlock_irq(&phba->hbalock);
+ return -EPERM;
+ }
+@@ -3160,11 +3907,63 @@ sysfs_mbox_read(struct kobject *kobj, st
+ case MBX_LOAD_EXP_ROM:
+ case MBX_BEACON:
+ case MBX_DEL_LD_ENTRY:
+- case MBX_SET_VARIABLE:
++ case MBX_SET_DEBUG:
+ case MBX_WRITE_WWN:
++ case MBX_READ_EVENT_LOG_STATUS:
++ case MBX_WRITE_EVENT_LOG:
+ case MBX_PORT_CAPABILITIES:
+ case MBX_PORT_IOV_CONTROL:
+ break;
++ case MBX_SET_VARIABLE:
++ lpfc_printf_log(phba, KERN_INFO, LOG_INIT,
++ "1226 mbox: set_variable 0x%x, 0x%x\n",
++ sysfs_mbox->mbox->mb.un.varWords[0],
++ sysfs_mbox->mbox->mb.un.varWords[1]);
++ if ((sysfs_mbox->mbox->mb.un.varWords[0] ==
++ SETVAR_MLOMNT) &&
++ (sysfs_mbox->mbox->mb.un.varWords[1] == 1)) {
++ wait_4_menlo_maint = 1;
++ phba->wait_4_mlo_maint_flg = 1;
++ }
++ break;
++ case MBX_RUN_BIU_DIAG64:
++ if (sysfs_mbox->mbox->mb.un.varBIUdiag.un.s2.
++ xmit_bde64.tus.f.bdeSize) {
++ sysfs_mbox->mbox->mb.un.varBIUdiag.un.s2.
++ xmit_bde64.addrHigh =
++ putPaddrHigh(sysfs_mbox->
++ txmit_buff->phys);
++ sysfs_mbox->mbox->mb.un.varBIUdiag.un.s2.
++ xmit_bde64.addrLow =
++ putPaddrLow(sysfs_mbox->
++ txmit_buff->phys);
++ }
++
++ if (sysfs_mbox->mbox->mb.un.varBIUdiag.un.s2.
++ rcv_bde64.tus.f.bdeSize) {
++ sysfs_mbox->mbox->mb.un.varBIUdiag.un.s2.
++ rcv_bde64.addrHigh =
++ putPaddrHigh(sysfs_mbox->
++ rcv_buff->phys);
++ sysfs_mbox->mbox->mb.un.varBIUdiag.un.s2.
++ rcv_bde64.addrLow =
++ putPaddrLow(sysfs_mbox->rcv_buff->phys);
++ }
++ break;
++ case MBX_READ_EVENT_LOG:
++
++ if (sysfs_mbox->mbox->mb.un.varRdEventLog.
++ rcv_bde64.tus.f.bdeSize) {
++ sysfs_mbox->mbox->mb.un.varRdEventLog.
++ rcv_bde64.addrHigh =
++ putPaddrHigh(sysfs_mbox->
++ rcv_buff->phys);
++ sysfs_mbox->mbox->mb.un.varRdEventLog.
++ rcv_bde64.addrLow =
++ putPaddrLow(sysfs_mbox->rcv_buff->phys);
++ }
++ break;
++
+ case MBX_READ_SPARM64:
+ case MBX_READ_LA:
+ case MBX_READ_LA64:
+@@ -3173,38 +3972,51 @@ sysfs_mbox_read(struct kobject *kobj, st
+ case MBX_CONFIG_PORT:
+ case MBX_RUN_BIU_DIAG:
+ printk(KERN_WARNING "mbox_read: Illegal Command 0x%x\n",
+- phba->sysfs_mbox.mbox->mb.mbxCommand);
+- sysfs_mbox_idle(phba);
++ sysfs_mbox->mbox->mb.mbxCommand);
++ sysfs_mbox_idle(phba,sysfs_mbox);
+ spin_unlock_irq(&phba->hbalock);
+ return -EPERM;
+ default:
+ printk(KERN_WARNING "mbox_read: Unknown Command 0x%x\n",
+- phba->sysfs_mbox.mbox->mb.mbxCommand);
+- sysfs_mbox_idle(phba);
++ sysfs_mbox->mbox->mb.mbxCommand);
++ sysfs_mbox_idle(phba,sysfs_mbox);
+ spin_unlock_irq(&phba->hbalock);
+ return -EPERM;
+ }
+
++ if (sysfs_mbox->mbox_data.in_ext_wlen ||
++ sysfs_mbox->mbox_data.out_ext_wlen) {
++ sysfs_mbox->mbox->context2 = sysfs_mbox->mbext;
++ sysfs_mbox->mbox->in_ext_byte_len =
++ sysfs_mbox->mbox_data.in_ext_wlen *
++ sizeof(uint32_t);
++ sysfs_mbox->mbox->out_ext_byte_len =
++ sysfs_mbox->mbox_data.out_ext_wlen *
++ sizeof(uint32_t);
++ sysfs_mbox->mbox->mbox_offset_word =
++ sysfs_mbox->mbox_data.mboffset;
++ }
++
+ /* If HBA encountered an error attention, allow only DUMP
+ * or RESTART mailbox commands until the HBA is restarted.
+ */
+ if (phba->pport->stopped &&
+- phba->sysfs_mbox.mbox->mb.mbxCommand != MBX_DUMP_MEMORY &&
+- phba->sysfs_mbox.mbox->mb.mbxCommand != MBX_RESTART &&
+- phba->sysfs_mbox.mbox->mb.mbxCommand != MBX_WRITE_VPARMS &&
+- phba->sysfs_mbox.mbox->mb.mbxCommand != MBX_WRITE_WWN)
++ sysfs_mbox->mbox->mb.mbxCommand != MBX_DUMP_MEMORY &&
++ sysfs_mbox->mbox->mb.mbxCommand != MBX_RESTART &&
++ sysfs_mbox->mbox->mb.mbxCommand != MBX_WRITE_VPARMS &&
++ sysfs_mbox->mbox->mb.mbxCommand != MBX_WRITE_WWN)
+ lpfc_printf_log(phba, KERN_WARNING, LOG_MBOX,
+ "1259 mbox: Issued mailbox cmd "
+ "0x%x while in stopped state.\n",
+- phba->sysfs_mbox.mbox->mb.mbxCommand);
++ sysfs_mbox->mbox->mb.mbxCommand);
+
+- phba->sysfs_mbox.mbox->vport = vport;
++ sysfs_mbox->mbox->vport = vport;
+
+ /* Don't allow mailbox commands to be sent when blocked
+ * or when in the middle of discovery
+ */
+ if (phba->sli.sli_flag & LPFC_BLOCK_MGMT_IO) {
+- sysfs_mbox_idle(phba);
++ sysfs_mbox_idle(phba,sysfs_mbox);
+ spin_unlock_irq(&phba->hbalock);
+ return -EAGAIN;
+ }
+@@ -3214,43 +4026,86 @@ sysfs_mbox_read(struct kobject *kobj, st
+
+ spin_unlock_irq(&phba->hbalock);
+ rc = lpfc_sli_issue_mbox (phba,
+- phba->sysfs_mbox.mbox,
++ sysfs_mbox->mbox,
+ MBX_POLL);
+ spin_lock_irq(&phba->hbalock);
+
+ } else {
+ spin_unlock_irq(&phba->hbalock);
+ rc = lpfc_sli_issue_mbox_wait (phba,
+- phba->sysfs_mbox.mbox,
++ sysfs_mbox->mbox,
+ lpfc_mbox_tmo_val(phba,
+- phba->sysfs_mbox.mbox->mb.mbxCommand) * HZ);
++ sysfs_mbox->mbox->mb.mbxCommand) * HZ);
+ spin_lock_irq(&phba->hbalock);
+ }
+
+ if (rc != MBX_SUCCESS) {
+ if (rc == MBX_TIMEOUT) {
+- phba->sysfs_mbox.mbox = NULL;
++ sysfs_mbox->mbox = NULL;
+ }
+- sysfs_mbox_idle(phba);
++ sysfs_mbox_idle(phba,sysfs_mbox);
+ spin_unlock_irq(&phba->hbalock);
+ return (rc == MBX_TIMEOUT) ? -ETIME : -ENODEV;
+ }
+- phba->sysfs_mbox.state = SMBOX_READING;
++ if (wait_4_menlo_maint) {
++ lpfc_printf_log(phba, KERN_WARNING, LOG_LIBDFC,
++ "1229 waiting for menlo mnt\n");
++ spin_unlock_irq(&phba->hbalock);
++ if (phba->wait_4_mlo_maint_flg)
++ wait_event_interruptible_timeout(
++ phba->wait_4_mlo_m_q,
++ phba->wait_4_mlo_maint_flg == 0,
++ 60 * HZ);
++ spin_lock_irq(&phba->hbalock);
++ if (phba->wait_4_mlo_maint_flg) {
++ sysfs_mbox_idle(phba,sysfs_mbox);
++ phba->wait_4_mlo_maint_flg = 0;
++ spin_unlock_irq(&phba->hbalock);
++ return -EINTR;
++ } else
++ spin_unlock_irq(&phba->hbalock);
++
++		spin_lock_irq(&phba->hbalock);
++
++ }
++ sysfs_mbox->state = SMBOX_READING;
+ }
+- else if (phba->sysfs_mbox.offset != off ||
+- phba->sysfs_mbox.state != SMBOX_READING) {
+- printk(KERN_WARNING "mbox_read: Bad State\n");
+- sysfs_mbox_idle(phba);
++ else if (sysfs_mbox->offset != off ||
++ sysfs_mbox->state != SMBOX_READING) {
++ sysfs_mbox_idle(phba,sysfs_mbox);
+ spin_unlock_irq(&phba->hbalock);
+ return -EAGAIN;
+ }
+
+- memcpy(buf, (uint8_t *) & phba->sysfs_mbox.mbox->mb + off, count);
++ memcpy(buf, (uint8_t *) & sysfs_mbox->mbox->mb + off, count);
++
++ sysfs_mbox->offset = off + count;
+
+- phba->sysfs_mbox.offset = off + count;
++ if ((sysfs_mbox->offset == MAILBOX_CMD_SIZE) &&
++ ((sysfs_mbox->mbox->mb.mbxCommand == MBX_RUN_BIU_DIAG64) ||
++ (sysfs_mbox->mbox->mb.mbxCommand == MBX_READ_EVENT_LOG))) {
++ sysfs_mbox->state = SMBOX_READING_BUFF;
++ sysfs_mbox->extoff = 0;
++ spin_unlock_irq(&phba->hbalock);
++ return count;
++ }
++
++ if ((sysfs_mbox->offset == MAILBOX_CMD_SIZE) &&
++ sysfs_mbox->mbox_data.out_ext_wlen) {
++ sysfs_mbox->state = SMBOX_READING_MBEXT;
++ sysfs_mbox->extoff = 0;
++ spin_unlock_irq(&phba->hbalock);
++ return count;
++ }
+
+- if (phba->sysfs_mbox.offset == MAILBOX_CMD_SIZE)
+- sysfs_mbox_idle(phba);
++ if (sysfs_mbox->offset == MAILBOX_CMD_SIZE)
++ sysfs_mbox_idle(phba,sysfs_mbox);
+
+ spin_unlock_irq(&phba->hbalock);
+
+@@ -3262,7 +4117,7 @@ static struct bin_attribute sysfs_mbox_a
+ .name = "mbox",
+ .mode = S_IRUSR | S_IWUSR,
+ },
+- .size = MAILBOX_CMD_SIZE,
++ .size = MAILBOX_MAX_XMIT_SIZE,
+ .read = sysfs_mbox_read,
+ .write = sysfs_mbox_write,
+ };
+@@ -3317,6 +4172,7 @@ lpfc_free_sysfs_attr(struct lpfc_vport *
+ &sysfs_drvr_stat_data_attr);
+ sysfs_remove_bin_file(&shost->shost_dev.kobj, &sysfs_mbox_attr);
+ sysfs_remove_bin_file(&shost->shost_dev.kobj, &sysfs_ctlreg_attr);
++ sysfs_remove_bin_file(&shost->shost_dev.kobj, &sysfs_menlo_attr);
+ }
+
+
+@@ -3935,7 +4791,6 @@ lpfc_get_cfgparam(struct lpfc_hba *phba)
+ phba->cfg_soft_wwpn = 0L;
+ lpfc_sg_seg_cnt_init(phba, lpfc_sg_seg_cnt);
+ /* Also reinitialize the host templates with new values. */
+- lpfc_vport_template.sg_tablesize = phba->cfg_sg_seg_cnt;
+ lpfc_template.sg_tablesize = phba->cfg_sg_seg_cnt;
+ /*
+ * Since the sg_tablesize is module parameter, the sg_dma_buf_size
+@@ -3970,5 +4825,6 @@ lpfc_get_vport_cfgparam(struct lpfc_vpor
+ lpfc_max_luns_init(vport, lpfc_max_luns);
+ lpfc_scan_down_init(vport, lpfc_scan_down);
+ lpfc_enable_da_id_init(vport, lpfc_enable_da_id);
++ lpfc_enable_auth_init(vport, lpfc_enable_auth);
+ return;
+ }
+--- /dev/null
++++ b/drivers/scsi/lpfc/lpfc_auth_access.c
+@@ -0,0 +1,598 @@
++/*******************************************************************
++ * This file is part of the Emulex Linux Device Driver for *
++ * Fibre Channel Host Bus Adapters. *
++ * Copyright (C) 2006-2007 Emulex. All rights reserved. *
++ * EMULEX and SLI are trademarks of Emulex. *
++ * www.emulex.com *
++ * *
++ * This program is free software; you can redistribute it and/or *
++ * modify it under the terms of version 2 of the GNU General *
++ * Public License as published by the Free Software Foundation. *
++ * This program is distributed in the hope that it will be useful. *
++ * ALL EXPRESS OR IMPLIED CONDITIONS, REPRESENTATIONS AND *
++ * WARRANTIES, INCLUDING ANY IMPLIED WARRANTY OF MERCHANTABILITY, *
++ * FITNESS FOR A PARTICULAR PURPOSE, OR NON-INFRINGEMENT, ARE *
++ * DISCLAIMED, EXCEPT TO THE EXTENT THAT SUCH DISCLAIMERS ARE HELD *
++ * TO BE LEGALLY INVALID. See the GNU General Public License for *
++ * more details, a copy of which can be found in the file COPYING *
++ * included with this package. *
++ *******************************************************************/
++#include <linux/blkdev.h>
++#include <linux/pci.h>
++#include <linux/kthread.h>
++#include <linux/interrupt.h>
++
++#include <linux/module.h>
++#include <linux/init.h>
++#include <linux/sched.h> /* workqueue stuff, HZ */
++#include <scsi/scsi_device.h>
++#include <scsi/scsi_host.h>
++#include <scsi/scsi_transport.h>
++#include <scsi/scsi_transport_fc.h>
++#include <scsi/scsi_cmnd.h>
++#include <linux/time.h>
++#include <linux/jiffies.h>
++#include <linux/security.h>
++#include <net/sock.h>
++#include <net/netlink.h>
++
++#include <scsi/scsi.h>
++
++#include "lpfc_hw.h"
++#include "lpfc_sli.h"
++#include "lpfc_nl.h"
++#include "lpfc_disc.h"
++#include "lpfc_scsi.h"
++#include "lpfc.h"
++#include "lpfc_logmsg.h"
++#include "lpfc_crtn.h"
++#include "lpfc_vport.h"
++#include "lpfc_auth_access.h"
++
++/* fc security */
++struct workqueue_struct *security_work_q = NULL;
++struct list_head fc_security_user_list;
++int fc_service_state = FC_SC_SERVICESTATE_UNKNOWN;
++static int fc_service_pid;
++DEFINE_SPINLOCK(fc_security_user_lock);
++
++static inline struct lpfc_vport *
++lpfc_fc_find_vport(unsigned long host_no)
++{
++ struct lpfc_vport *vport;
++ struct Scsi_Host *shost;
++
++ list_for_each_entry(vport, &fc_security_user_list, sc_users) {
++ shost = lpfc_shost_from_vport(vport);
++ if (shost && (shost->host_no == host_no))
++ return vport;
++ }
++
++ return NULL;
++}
++
++
++/**
++ * lpfc_fc_sc_add_timer
++ *
++ *
++ **/
++
++void
++lpfc_fc_sc_add_timer(struct fc_security_request *req, int timeout,
++ void (*complete)(struct fc_security_request *))
++{
++
++ init_timer(&req->timer);
++
++
++ req->timer.data = (unsigned long)req;
++ req->timer.expires = jiffies + timeout;
++ req->timer.function = (void (*)(unsigned long)) complete;
++
++ add_timer(&req->timer);
++}
++/**
++ * lpfc_fc_sc_req_times_out
++ *
++ *
++ **/
++
++void
++lpfc_fc_sc_req_times_out(struct fc_security_request *req)
++{
++
++	unsigned long flags;
++	int found = 0;
++	struct fc_security_request *fc_sc_req;
++	struct lpfc_vport *vport;
++	struct Scsi_Host *shost;
++
++	if (!req)
++		return;
++
++	vport = req->vport;
++	shost = lpfc_shost_from_vport(vport);
++
++ spin_lock_irqsave(shost->host_lock, flags);
++
++ /* To avoid a completion race check to see if request is on the list */
++
++ list_for_each_entry(fc_sc_req, &vport->sc_response_wait_queue, rlist)
++ if (fc_sc_req == req) {
++ found = 1;
++ break;
++ }
++
++ if (!found) {
++ spin_unlock_irqrestore(shost->host_lock, flags);
++ return;
++ }
++
++ list_del(&fc_sc_req->rlist);
++
++ spin_unlock_irqrestore(shost->host_lock, flags);
++
++ lpfc_printf_vlog(vport, KERN_WARNING, LOG_SECURITY,
++ "1019 Request tranid %d timed out\n",
++ fc_sc_req->tran_id);
++
++ switch (fc_sc_req->req_type) {
++
++ case FC_NL_SC_GET_CONFIG_REQ:
++ lpfc_security_config(shost, -ETIMEDOUT,
++ fc_sc_req->data);
++ break;
++
++ case FC_NL_SC_DHCHAP_MAKE_CHALLENGE_REQ:
++ lpfc_dhchap_make_challenge(shost, -ETIMEDOUT,
++ fc_sc_req->data, 0);
++ break;
++
++ case FC_NL_SC_DHCHAP_MAKE_RESPONSE_REQ:
++ lpfc_dhchap_make_response(shost, -ETIMEDOUT,
++ fc_sc_req->data, 0);
++ break;
++
++ case FC_NL_SC_DHCHAP_AUTHENTICATE_REQ:
++ lpfc_dhchap_authenticate(shost, -ETIMEDOUT, fc_sc_req->data, 0);
++ break;
++ }
++
++ kfree(fc_sc_req);
++
++}
++
++
++static inline struct fc_security_request *
++lpfc_fc_find_sc_request(u32 tran_id, u32 type, struct lpfc_vport *vport)
++{
++ struct fc_security_request *fc_sc_req;
++
++ list_for_each_entry(fc_sc_req, &vport->sc_response_wait_queue, rlist)
++ if (fc_sc_req->tran_id == tran_id &&
++ fc_sc_req->req_type == type)
++ return fc_sc_req;
++ return NULL;
++}
++
++
++
++/**
++ * lpfc_fc_sc_request
++ *
++ *
++ **/
++
++int
++lpfc_fc_sc_request(struct lpfc_vport *vport,
++ u32 msg_type,
++ struct fc_auth_req *auth_req,
++ u32 auth_req_len, /* includes length of struct fc_auth_req */
++ struct fc_auth_rsp *auth_rsp,
++ u32 auth_rsp_len) /* includes length of struct fc_auth_rsp */
++{
++ struct Scsi_Host *shost = lpfc_shost_from_vport(vport);
++ struct fc_security_request *fc_sc_req;
++ struct fc_nl_sc_message *fc_nl_sc_msg;
++ unsigned long flags;
++ u32 len;
++ u32 seq = ++vport->sc_tran_id;
++
++ if (fc_service_state != FC_SC_SERVICESTATE_ONLINE)
++ return -EINVAL;
++
++ if (vport->port_state == FC_PORTSTATE_DELETED)
++ return -EINVAL;
++
++ fc_sc_req = kzalloc(sizeof(struct fc_security_request), GFP_KERNEL);
++ if (!fc_sc_req)
++ return -ENOMEM;
++
++ fc_sc_req->req_type = msg_type;
++ fc_sc_req->data = auth_rsp;
++ fc_sc_req->data_len = auth_rsp_len;
++ fc_sc_req->vport = vport;
++ fc_sc_req->tran_id = seq;
++
++ len = sizeof(struct fc_nl_sc_message) + auth_req_len;
++	fc_nl_sc_msg = kzalloc(len, GFP_KERNEL);
++	if (!fc_nl_sc_msg) {
++		kfree(fc_sc_req);
++		return -ENOMEM;
++	}
++ fc_nl_sc_msg->msgtype = msg_type;
++ fc_nl_sc_msg->data_len = auth_req_len;
++ memcpy(fc_nl_sc_msg->data, auth_req, auth_req_len);
++ fc_nl_sc_msg->tran_id = seq;
++
++ spin_lock_irqsave(shost->host_lock, flags);
++ list_add_tail(&fc_sc_req->rlist, &vport->sc_response_wait_queue);
++ spin_unlock_irqrestore(shost->host_lock, flags);
++	lpfc_fc_sc_add_timer(fc_sc_req, FC_SC_REQ_TIMEOUT,
++			     lpfc_fc_sc_req_times_out);
++	scsi_nl_send_vendor_msg(fc_service_pid, shost->host_no,
++				(SCSI_NL_VID_TYPE_PCI | PCI_VENDOR_ID_EMULEX),
++				(char *) fc_nl_sc_msg, len);
++	kfree(fc_nl_sc_msg);
++	return 0;
++}
++
++/**
++ * lpfc_fc_security_get_config
++ *
++ *
++ **/
++
++int
++lpfc_fc_security_get_config(struct Scsi_Host *shost,
++ struct fc_auth_req *auth_req,
++ u32 auth_req_len,
++ struct fc_auth_rsp *auth_rsp,
++ u32 auth_rsp_len)
++{
++
++ return(lpfc_fc_sc_request((struct lpfc_vport *) shost->hostdata,
++ FC_NL_SC_GET_CONFIG_REQ, auth_req,
++ auth_req_len, auth_rsp, auth_rsp_len));
++
++}
++EXPORT_SYMBOL(lpfc_fc_security_get_config);
++
++/**
++ * lpfc_fc_security_dhchap_make_challenge
++ *
++ *
++ **/
++
++int
++lpfc_fc_security_dhchap_make_challenge(struct Scsi_Host *shost,
++ struct fc_auth_req *auth_req,
++ u32 auth_req_len,
++ struct fc_auth_rsp *auth_rsp,
++ u32 auth_rsp_len)
++{
++
++ return(lpfc_fc_sc_request((struct lpfc_vport *) shost->hostdata,
++ FC_NL_SC_DHCHAP_MAKE_CHALLENGE_REQ,
++ auth_req, auth_req_len, auth_rsp, auth_rsp_len));
++
++}
++EXPORT_SYMBOL(lpfc_fc_security_dhchap_make_challenge);
++
++/**
++ * lpfc_fc_security_dhchap_make_response
++ *
++ *
++ **/
++
++int
++lpfc_fc_security_dhchap_make_response(struct Scsi_Host *shost,
++ struct fc_auth_req *auth_req,
++ u32 auth_req_len,
++ struct fc_auth_rsp *auth_rsp,
++ u32 auth_rsp_len)
++{
++
++ return(lpfc_fc_sc_request((struct lpfc_vport *) shost->hostdata,
++ FC_NL_SC_DHCHAP_MAKE_RESPONSE_REQ,
++ auth_req, auth_req_len, auth_rsp, auth_rsp_len));
++
++}
++EXPORT_SYMBOL(lpfc_fc_security_dhchap_make_response);
++
++
++/**
++ * lpfc_fc_security_dhchap_authenticate
++ *
++ *
++ **/
++
++int
++lpfc_fc_security_dhchap_authenticate(struct Scsi_Host *shost,
++ struct fc_auth_req *auth_req,
++ u32 auth_req_len,
++ struct fc_auth_rsp *auth_rsp,
++ u32 auth_rsp_len)
++{
++
++ return(lpfc_fc_sc_request((struct lpfc_vport *) shost->hostdata,
++ FC_NL_SC_DHCHAP_AUTHENTICATE_REQ,
++ auth_req, auth_req_len, auth_rsp, auth_rsp_len));
++
++}
++EXPORT_SYMBOL(lpfc_fc_security_dhchap_authenticate);
++
++/**
++ * lpfc_fc_queue_security_work - Queue work to the fc_host security workqueue.
++ * @shost: Pointer to Scsi_Host bound to fc_host.
++ * @work: Work to queue for execution.
++ *
++ * Return value:
++ * 1 - work queued for execution
++ * 0 - work is already queued
++ * -EINVAL - work queue doesn't exist
++ **/
++int
++lpfc_fc_queue_security_work(struct lpfc_vport *vport, struct work_struct *work)
++{
++ if (unlikely(!security_work_q)) {
++ lpfc_printf_vlog(vport, KERN_ERR, LOG_SECURITY,
++ "1021 ERROR: attempted to queue security work, "
++ "when no workqueue created.\n");
++ dump_stack();
++
++ return -EINVAL;
++ }
++
++ return queue_work(security_work_q, work);
++
++}
++
++
++
++ /**
++ * lpfc_fc_sc_schedule_notify_all
++ *
++ *
++ **/
++
++void
++lpfc_fc_sc_schedule_notify_all(int message)
++{
++ struct lpfc_vport *vport;
++ unsigned long flags;
++
++ spin_lock_irqsave(&fc_security_user_lock, flags);
++
++ list_for_each_entry(vport, &fc_security_user_list, sc_users) {
++
++ switch (message) {
++
++ case FC_NL_SC_REG:
++ lpfc_fc_queue_security_work(vport,
++ &vport->sc_online_work);
++ break;
++
++ case FC_NL_SC_DEREG:
++ lpfc_fc_queue_security_work(vport,
++ &vport->sc_offline_work);
++ break;
++ }
++ }
++
++ spin_unlock_irqrestore(&fc_security_user_lock, flags);
++}
++
++
++
++/**
++ * lpfc_fc_sc_security_online
++ *
++ *
++ **/
++
++void
++lpfc_fc_sc_security_online(struct work_struct *work)
++{
++ struct lpfc_vport *vport = container_of(work, struct lpfc_vport,
++ sc_online_work);
++ lpfc_security_service_online(lpfc_shost_from_vport(vport));
++ return;
++}
++
++/**
++ * lpfc_fc_sc_security_offline
++ *
++ *
++ **/
++void
++lpfc_fc_sc_security_offline(struct work_struct *work)
++{
++ struct lpfc_vport *vport = container_of(work, struct lpfc_vport,
++ sc_offline_work);
++ lpfc_security_service_offline(lpfc_shost_from_vport(vport));
++ return;
++}
++
++
++/**
++ * lpfc_fc_sc_process_msg
++ *
++ *
++ **/
++static void
++lpfc_fc_sc_process_msg(struct work_struct *work)
++{
++ struct fc_sc_msg_work_q_wrapper *wqw =
++ container_of(work, struct fc_sc_msg_work_q_wrapper, work);
++
++ switch (wqw->msgtype) {
++
++ case FC_NL_SC_GET_CONFIG_RSP:
++ lpfc_security_config(lpfc_shost_from_vport(wqw->fc_sc_req->
++ vport), wqw->status,
++ wqw->fc_sc_req->data);
++ break;
++
++ case FC_NL_SC_DHCHAP_MAKE_CHALLENGE_RSP:
++ lpfc_dhchap_make_challenge(lpfc_shost_from_vport(wqw->
++ fc_sc_req->vport), wqw->status,
++ wqw->fc_sc_req->data, wqw->data_len);
++ break;
++
++ case FC_NL_SC_DHCHAP_MAKE_RESPONSE_RSP:
++ lpfc_dhchap_make_response(lpfc_shost_from_vport(wqw->
++ fc_sc_req->vport), wqw->status,
++ wqw->fc_sc_req->data, wqw->data_len);
++ break;
++
++ case FC_NL_SC_DHCHAP_AUTHENTICATE_RSP:
++ lpfc_dhchap_authenticate(lpfc_shost_from_vport(wqw->fc_sc_req->
++ vport),
++ wqw->status,
++ wqw->fc_sc_req->data, wqw->data_len);
++ break;
++ }
++
++ kfree(wqw->fc_sc_req);
++ kfree(wqw);
++
++ return;
++}
++
++
++/**
++ * lpfc_fc_sc_schedule_msg
++ *
++ *
++ **/
++
++int
++lpfc_fc_sc_schedule_msg(struct Scsi_Host *shost,
++ struct fc_nl_sc_message *fc_nl_sc_msg, int rcvlen)
++{
++ struct fc_security_request *fc_sc_req;
++ u32 req_type;
++ struct lpfc_vport *vport = (struct lpfc_vport *) shost->hostdata;
++ int err = 0;
++ struct fc_sc_msg_work_q_wrapper *wqw;
++ unsigned long flags;
++
++ if (vport->port_state == FC_PORTSTATE_DELETED) {
++ printk(KERN_WARNING
++ "%s: Host being deleted.\n", __func__);
++ return -EBADR;
++ }
++
++ wqw = kzalloc(sizeof(struct fc_sc_msg_work_q_wrapper), GFP_KERNEL);
++
++ if (!wqw)
++ return -ENOMEM;
++
++ switch (fc_nl_sc_msg->msgtype) {
++ case FC_NL_SC_GET_CONFIG_RSP:
++ req_type = FC_NL_SC_GET_CONFIG_REQ;
++ break;
++
++ case FC_NL_SC_DHCHAP_MAKE_CHALLENGE_RSP:
++ req_type = FC_NL_SC_DHCHAP_MAKE_CHALLENGE_REQ;
++ break;
++
++ case FC_NL_SC_DHCHAP_MAKE_RESPONSE_RSP:
++ req_type = FC_NL_SC_DHCHAP_MAKE_RESPONSE_REQ;
++ break;
++
++ case FC_NL_SC_DHCHAP_AUTHENTICATE_RSP:
++ req_type = FC_NL_SC_DHCHAP_AUTHENTICATE_REQ;
++ break;
++
++ default:
++ kfree(wqw);
++ return -EINVAL;
++ }
++
++ spin_lock_irqsave(shost->host_lock, flags);
++
++ fc_sc_req = lpfc_fc_find_sc_request(fc_nl_sc_msg->tran_id,
++ req_type, vport);
++
++ if (!fc_sc_req) {
++ spin_unlock_irqrestore(shost->host_lock, flags);
++ lpfc_printf_vlog(vport, KERN_WARNING, LOG_SECURITY,
++ "1022 Security request does not exist.\n");
++ kfree(wqw);
++ return -EBADR;
++ }
++
++ list_del(&fc_sc_req->rlist);
++
++ spin_unlock_irqrestore(shost->host_lock, flags);
++
++ del_singleshot_timer_sync(&fc_sc_req->timer);
++
++ wqw->status = 0;
++ wqw->fc_sc_req = fc_sc_req;
++ wqw->data_len = rcvlen;
++ wqw->msgtype = fc_nl_sc_msg->msgtype;
++
++ if (!fc_sc_req->data ||
++ (fc_sc_req->data_len < fc_nl_sc_msg->data_len)) {
++ wqw->status = -ENOBUFS;
++ wqw->data_len = 0;
++ lpfc_printf_vlog(vport, KERN_WARNING, LOG_SECURITY,
++ "1023 Warning - data may have been truncated. "
++ "data:%p reqdl:%x mesdl:%x\n",
++ fc_sc_req->data,
++ fc_sc_req->data_len, fc_nl_sc_msg->data_len);
++ } else {
++ memcpy(fc_sc_req->data, fc_nl_sc_msg->data,
++ fc_nl_sc_msg->data_len);
++ }
++
++ INIT_WORK(&wqw->work, lpfc_fc_sc_process_msg);
++ lpfc_fc_queue_security_work(vport, &wqw->work);
++
++ return err;
++}
++
++int
++lpfc_rcv_nl_msg(struct Scsi_Host *shost, void *payload,
++ uint32_t len, uint32_t pid)
++{
++ struct fc_nl_sc_message *msg = (struct fc_nl_sc_message *)payload;
++ int err = 0;
++
++ switch (msg->msgtype) {
++ case FC_NL_SC_REG:
++ fc_service_pid = pid;
++ fc_service_state = FC_SC_SERVICESTATE_ONLINE;
++ lpfc_fc_sc_schedule_notify_all(FC_NL_SC_REG);
++ break;
++ case FC_NL_SC_DEREG:
++ fc_service_pid = pid;
++ fc_service_state = FC_SC_SERVICESTATE_OFFLINE;
++ lpfc_fc_sc_schedule_notify_all(FC_NL_SC_DEREG);
++ break;
++ case FC_NL_SC_GET_CONFIG_RSP:
++ case FC_NL_SC_DHCHAP_MAKE_CHALLENGE_RSP:
++ case FC_NL_SC_DHCHAP_MAKE_RESPONSE_RSP:
++ case FC_NL_SC_DHCHAP_AUTHENTICATE_RSP:
++ err = lpfc_fc_sc_schedule_msg(shost, msg, len);
++ break;
++ default:
++ printk(KERN_WARNING "%s: unknown msg type 0x%x len %d\n",
++ __func__, msg->msgtype, len);
++ break;
++ }
++ return err;
++}
++
++void
++lpfc_rcv_nl_event(struct notifier_block *this,
++ unsigned long event,
++ void *ptr)
++{
++ struct netlink_notify *n = ptr;
++ if ((event == NETLINK_URELEASE) &&
++ (n->protocol == NETLINK_SCSITRANSPORT) && (n->pid)) {
++ printk(KERN_WARNING "Warning - Security Service Offline\n");
++ fc_service_state = FC_SC_SERVICESTATE_OFFLINE;
++ lpfc_fc_sc_schedule_notify_all(FC_NL_SC_DEREG);
++ }
++}
+--- /dev/null
++++ b/drivers/scsi/lpfc/lpfc_auth_access.h
+@@ -0,0 +1,245 @@
++/*******************************************************************
++ * This file is part of the Emulex Linux Device Driver for *
++ * Fibre Channel Host Bus Adapters. *
++ * Copyright (C) 2006-2007 Emulex. All rights reserved. *
++ * EMULEX and SLI are trademarks of Emulex. *
++ * www.emulex.com *
++ * *
++ * This program is free software; you can redistribute it and/or *
++ * modify it under the terms of version 2 of the GNU General *
++ * Public License as published by the Free Software Foundation. *
++ * This program is distributed in the hope that it will be useful. *
++ * ALL EXPRESS OR IMPLIED CONDITIONS, REPRESENTATIONS AND *
++ * WARRANTIES, INCLUDING ANY IMPLIED WARRANTY OF MERCHANTABILITY, *
++ * FITNESS FOR A PARTICULAR PURPOSE, OR NON-INFRINGEMENT, ARE *
++ * DISCLAIMED, EXCEPT TO THE EXTENT THAT SUCH DISCLAIMERS ARE HELD *
++ * TO BE LEGALLY INVALID. See the GNU General Public License for *
++ * more details, a copy of which can be found in the file COPYING *
++ * included with this package. *
++ *******************************************************************/
++
++#define to_fc_internal(tmpl) container_of(tmpl, struct fc_internal, t)
++
++/* scsi_nl_hdr->version value */
++#define SCSI_NL_VERSION 1
++
++/* scsi_nl_hdr->magic value */
++#define SCSI_NL_MAGIC 0xA1B2
++
++/* scsi_nl_hdr->transport value */
++#define SCSI_NL_TRANSPORT 0
++#define SCSI_NL_TRANSPORT_FC 1
++#define SCSI_NL_MAX_TRANSPORTS 2
++
++#define FC_NL_GROUP_CNT 0
++
++ /* Note: when specifying vendor_id to fc_host_post_vendor_event()
++ * be sure to read the Vendor Type and ID formatting requirements
++ * specified in scsi_netlink.h
++ */
++
++#define FC_SC_REQ_TIMEOUT (60*HZ)
++
++enum fc_sc_service_state {
++ FC_SC_SERVICESTATE_UNKNOWN,
++ FC_SC_SERVICESTATE_ONLINE,
++ FC_SC_SERVICESTATE_OFFLINE,
++ FC_SC_SERVICESTATE_ERROR,
++};
++
++struct fc_security_request {
++ struct list_head rlist;
++ int pid;
++ u32 tran_id;
++ u32 req_type;
++ struct timer_list timer;
++ struct lpfc_vport *vport;
++ u32 data_len;
++ void *data;
++};
++
++struct fc_sc_msg_work_q_wrapper {
++ struct work_struct work;
++ struct fc_security_request *fc_sc_req;
++ u32 data_len;
++ int status;
++ u32 msgtype;
++};
++struct fc_sc_notify_work_q_wrapper {
++ struct work_struct work;
++ struct Scsi_Host *shost;
++ int msg;
++};
++
++#define FC_DHCHAP 1
++#define FC_FCAP 2
++#define FC_FCPAP 3
++#define FC_KERBEROS 4
++
++#define FC_AUTHMODE_UNKNOWN 0
++#define FC_AUTHMODE_NONE 1
++#define FC_AUTHMODE_ACTIVE 2
++#define FC_AUTHMODE_PASSIVE 3
++
++#define FC_SP_HASH_MD5 0x5
++#define FC_SP_HASH_SHA1 0x6
++
++#define DH_GROUP_NULL 0x00
++#define DH_GROUP_1024 0x01
++#define DH_GROUP_1280 0x02
++#define DH_GROUP_1536 0x03
++#define DH_GROUP_2048 0x04
++
++#define MAX_AUTH_REQ_SIZE 1024
++#define MAX_AUTH_RSP_SIZE 1024
++
++#define AUTH_FABRIC_WWN 0xFFFFFFFFFFFFFFFFLL
++
++struct fc_auth_req {
++ uint64_t local_wwpn;
++ uint64_t remote_wwpn;
++ union {
++ struct dhchap_challenge_req {
++ uint32_t transaction_id;
++ uint32_t dh_group_id;
++ uint32_t hash_id;
++ } dhchap_challenge;
++ struct dhchap_reply_req {
++ uint32_t transaction_id;
++ uint32_t dh_group_id;
++ uint32_t hash_id;
++ uint32_t bidirectional;
++ uint32_t received_challenge_len;
++ uint32_t received_public_key_len;
++ uint8_t data[0];
++ } dhchap_reply;
++ struct dhchap_success_req {
++ uint32_t transaction_id;
++ uint32_t dh_group_id;
++ uint32_t hash_id;
++ uint32_t our_challenge_len;
++ uint32_t received_response_len;
++ uint32_t received_public_key_len;
++ uint32_t received_challenge_len;
++ uint8_t data[0];
++ } dhchap_success;
++ }u;
++} __attribute__ ((packed));
++
++struct fc_auth_rsp {
++ uint64_t local_wwpn;
++ uint64_t remote_wwpn;
++ union {
++ struct authinfo {
++ uint8_t auth_mode;
++ uint16_t auth_timeout;
++ uint8_t bidirectional;
++ uint8_t type_priority[4];
++ uint16_t type_len;
++ uint8_t hash_priority[4];
++ uint16_t hash_len;
++ uint8_t dh_group_priority[8];
++ uint16_t dh_group_len;
++ uint32_t reauth_interval;
++ } dhchap_security_config;
++ struct dhchap_challenge_rsp {
++ uint32_t transaction_id;
++ uint32_t our_challenge_len;
++ uint32_t our_public_key_len;
++ uint8_t data[0];
++ } dhchap_challenge;
++ struct dhchap_reply_rsp {
++ uint32_t transaction_id;
++ uint32_t our_challenge_rsp_len;
++ uint32_t our_public_key_len;
++ uint32_t our_challenge_len;
++ uint8_t data[0];
++ } dhchap_reply;
++ struct dhchap_success_rsp {
++ uint32_t transaction_id;
++ uint32_t authenticated;
++ uint32_t response_len;
++ uint8_t data[0];
++ } dhchap_success;
++ }u;
++}__attribute__ ((packed));
++
++int
++lpfc_fc_security_get_config(struct Scsi_Host *shost,
++ struct fc_auth_req *auth_req,
++ u32 req_len,
++ struct fc_auth_rsp *auth_rsp,
++ u32 rsp_len);
++int
++lpfc_fc_security_dhchap_make_challenge(struct Scsi_Host *shost,
++ struct fc_auth_req *auth_req,
++ u32 req_len,
++ struct fc_auth_rsp *auth_rsp,
++ u32 rsp_len);
++int
++lpfc_fc_security_dhchap_make_response(struct Scsi_Host *shost,
++ struct fc_auth_req *auth_req,
++ u32 req_len,
++ struct fc_auth_rsp *auth_rsp,
++ u32 rsp_len);
++int
++lpfc_fc_security_dhchap_authenticate(struct Scsi_Host *shost,
++ struct fc_auth_req *auth_req,
++ u32 req_len,
++ struct fc_auth_rsp *auth_rsp,
++ u32 rsp_len);
++
++int lpfc_fc_queue_security_work(struct lpfc_vport *,
++ struct work_struct *);
++
++/*
++ * FC Transport Message Types
++ */
++ /* user -> kernel */
++#define FC_NL_EVENTS_REG 0x0001
++#define FC_NL_EVENTS_DEREG 0x0002
++#define FC_NL_SC_REG 0x0003
++#define FC_NL_SC_DEREG 0x0004
++#define FC_NL_SC_GET_CONFIG_RSP 0x0005
++#define FC_NL_SC_SET_CONFIG_RSP 0x0006
++#define FC_NL_SC_DHCHAP_MAKE_CHALLENGE_RSP 0x0007
++#define FC_NL_SC_DHCHAP_MAKE_RESPONSE_RSP 0x0008
++#define FC_NL_SC_DHCHAP_AUTHENTICATE_RSP 0x0009
++ /* kernel -> user */
++//#define FC_NL_ASYNC_EVENT 0x0100
++#define FC_NL_SC_GET_CONFIG_REQ 0x0020
++#define FC_NL_SC_SET_CONFIG_REQ 0x0030
++#define FC_NL_SC_DHCHAP_MAKE_CHALLENGE_REQ 0x0040
++#define FC_NL_SC_DHCHAP_MAKE_RESPONSE_REQ 0x0050
++#define FC_NL_SC_DHCHAP_AUTHENTICATE_REQ 0x0060
++
++/*
++ * Message Structures :
++ */
++
++/* macro to round up message lengths to 8byte boundary */
++#define FC_NL_MSGALIGN(len) (((len) + 7) & ~7)
++
++#define FC_NETLINK_API_VERSION 1
++
++/* Single Netlink Message type to send all FC Transport messages */
++#define FC_TRANSPORT_MSG NLMSG_MIN_TYPE + 1
++
++/* SCSI_TRANSPORT_MSG event message header */
++/*
++struct scsi_nl_hdr {
++ uint8_t version;
++ uint8_t transport;
++ uint16_t magic;
++ uint16_t msgtype;
++ uint16_t msglen;
++} __attribute__((aligned(sizeof(uint64_t))));
++*/
++struct fc_nl_sc_message {
++ uint16_t msgtype;
++ uint16_t rsvd;
++ uint32_t tran_id;
++ uint32_t data_len;
++ uint8_t data[0];
++} __attribute__((aligned(sizeof(uint64_t))));
++
+--- /dev/null
++++ b/drivers/scsi/lpfc/lpfc_auth.c
+@@ -0,0 +1,838 @@
++/*******************************************************************
++ * This file is part of the Emulex Linux Device Driver for *
++ * Fibre Channel Host Bus Adapters. *
++ * Copyright (C) 2006-2008 Emulex. All rights reserved. *
++ * EMULEX and SLI are trademarks of Emulex. *
++ * www.emulex.com *
++ * *
++ * This program is free software; you can redistribute it and/or *
++ * modify it under the terms of version 2 of the GNU General *
++ * Public License as published by the Free Software Foundation. *
++ * This program is distributed in the hope that it will be useful. *
++ * ALL EXPRESS OR IMPLIED CONDITIONS, REPRESENTATIONS AND *
++ * WARRANTIES, INCLUDING ANY IMPLIED WARRANTY OF MERCHANTABILITY, *
++ * FITNESS FOR A PARTICULAR PURPOSE, OR NON-INFRINGEMENT, ARE *
++ * DISCLAIMED, EXCEPT TO THE EXTENT THAT SUCH DISCLAIMERS ARE HELD *
++ * TO BE LEGALLY INVALID. See the GNU General Public License for *
++ * more details, a copy of which can be found in the file COPYING *
++ * included with this package. *
++ *******************************************************************/
++/* See Fibre Channel protocol T11 FC-SP for details */
++#include <linux/pci.h>
++#include <linux/interrupt.h>
++
++#include <scsi/scsi.h>
++#include <scsi/scsi_tcq.h>
++#include <scsi/scsi_transport_fc.h>
++
++#include "lpfc_hw.h"
++#include "lpfc_sli.h"
++#include "lpfc_nl.h"
++#include "lpfc_disc.h"
++#include "lpfc.h"
++#include "lpfc_crtn.h"
++#include "lpfc_logmsg.h"
++#include "lpfc_auth_access.h"
++#include "lpfc_auth.h"
++
++void
++lpfc_start_authentication(struct lpfc_vport *vport,
++ struct lpfc_nodelist *ndlp)
++{
++ uint32_t nego_payload_len;
++ uint8_t *nego_payload;
++
++ nego_payload = kmalloc(MAX_AUTH_REQ_SIZE, GFP_KERNEL);
++ if (!nego_payload)
++ return;
++ vport->auth.trans_id++;
++ vport->auth.auth_msg_state = LPFC_AUTH_NEGOTIATE;
++ nego_payload_len = lpfc_build_auth_neg(vport, nego_payload);
++ lpfc_issue_els_auth(vport, ndlp, AUTH_NEGOTIATE,
++ nego_payload, nego_payload_len);
++ kfree(nego_payload);
++}
++
++void
++lpfc_dhchap_make_challenge(struct Scsi_Host *shost, int status,
++ void *rsp, uint32_t rsp_len)
++{
++ struct lpfc_vport *vport = (struct lpfc_vport *)shost->hostdata;
++ struct lpfc_nodelist *ndlp;
++ uint32_t chal_payload_len;
++ uint8_t *chal_payload;
++ struct fc_auth_rsp *auth_rsp = rsp;
++
++ ndlp = lpfc_findnode_did(vport, Fabric_DID);
++ if (!ndlp || !NLP_CHK_NODE_ACT(ndlp)) {
++ kfree(rsp);
++ return;
++ }
++
++ lpfc_printf_vlog(vport, KERN_INFO, LOG_SECURITY,
++ "1003 Send dhchap challenge local_wwpn "
++ "%llX remote_wwpn %llX \n",
++ (unsigned long long)auth_rsp->local_wwpn,
++ (unsigned long long)auth_rsp->remote_wwpn);
++
++ chal_payload = kmalloc(MAX_AUTH_REQ_SIZE, GFP_KERNEL);
++ if (!chal_payload) {
++ kfree(rsp);
++ return;
++ }
++ vport->auth.auth_msg_state = LPFC_DHCHAP_CHALLENGE;
++ chal_payload_len = lpfc_build_dhchap_challenge(vport,
++ chal_payload, rsp);
++ lpfc_issue_els_auth(vport, ndlp, DHCHAP_CHALLENGE,
++ chal_payload, chal_payload_len);
++ kfree(chal_payload);
++ kfree(rsp);
++}
++
++
++/*
++ * lpfc_dhchap_make_response - build and send a DHCHAP_Reply ELS
++ *
++ * Mirror of lpfc_dhchap_make_challenge for the responder direction:
++ * packs the reply payload from @rsp (struct fc_auth_rsp) and issues
++ * the DHCHAP_REPLY ELS to the fabric port.  Always consumes @rsp.
++ * @status and @rsp_len are unused here.
++ */
++void
++lpfc_dhchap_make_response(struct Scsi_Host *shost, int status,
++ void *rsp, uint32_t rsp_len)
++{
++ struct lpfc_vport *vport = (struct lpfc_vport *)shost->hostdata;
++ struct lpfc_nodelist *ndlp;
++ uint32_t reply_payload_len;
++ uint8_t *reply_payload;
++ struct fc_auth_rsp *auth_rsp = rsp;
++
++ /* Authentication is only performed with the fabric port */
++ ndlp = lpfc_findnode_did(vport, Fabric_DID);
++ if (!ndlp || !NLP_CHK_NODE_ACT(ndlp)) {
++ kfree(rsp);
++ return;
++ }
++
++ lpfc_printf_vlog(vport, KERN_INFO, LOG_SECURITY,
++ "1004 Send dhchap reply local_wwpn "
++ "%llX remote_wwpn %llX \n",
++ (unsigned long long)auth_rsp->local_wwpn,
++ (unsigned long long)auth_rsp->remote_wwpn);
++
++ reply_payload = kmalloc(MAX_AUTH_REQ_SIZE, GFP_KERNEL);
++ if (!reply_payload) {
++ kfree(rsp);
++ return;
++ }
++
++ vport->auth.auth_msg_state = LPFC_DHCHAP_REPLY;
++ reply_payload_len = lpfc_build_dhchap_reply(vport, reply_payload, rsp);
++ lpfc_issue_els_auth(vport, ndlp, DHCHAP_REPLY,
++ reply_payload, reply_payload_len);
++ kfree(reply_payload);
++ kfree(rsp);
++
++}
++
++
++/*
++ * lpfc_dhchap_authenticate - act on the authentication verdict
++ *
++ * Completion callback invoked with the security service's verdict in
++ * @rsp.  On success, sends DHCHAP_SUCCESS (and decides whether a
++ * SUCCESS from the initiator is still expected); on failure or
++ * nonzero @status, sends an AUTH reject and, if the port had already
++ * authenticated, tears it down via lpfc_port_auth_failed().
++ * Always consumes (kfrees) @rsp.  @rsp_len is unused here.
++ */
++void
++lpfc_dhchap_authenticate(struct Scsi_Host *shost,
++ int status, void *rsp,
++ uint32_t rsp_len)
++{
++ struct fc_auth_rsp *auth_rsp = (struct fc_auth_rsp *)rsp;
++ struct lpfc_vport *vport = (struct lpfc_vport *)shost->hostdata;
++ struct lpfc_nodelist *ndlp;
++
++ ndlp = lpfc_findnode_did(vport, Fabric_DID);
++ if (!ndlp || !NLP_CHK_NODE_ACT(ndlp)) {
++ kfree(rsp);
++ return;
++ }
++ if (status != 0) {
++ lpfc_issue_els_auth_reject(vport, ndlp,
++ AUTH_ERR, AUTHENTICATION_FAILED);
++ kfree(rsp);
++ return;
++ }
++
++ if (auth_rsp->u.dhchap_success.authenticated) {
++ uint32_t suc_payload_len;
++ uint8_t *suc_payload;
++
++ suc_payload = kmalloc(MAX_AUTH_REQ_SIZE, GFP_KERNEL);
++ if (!suc_payload) {
++ lpfc_issue_els_auth_reject(vport, ndlp,
++ AUTH_ERR, AUTHENTICATION_FAILED);
++ kfree(rsp);
++ return;
++ }
++ suc_payload_len = lpfc_build_dhchap_success(vport,
++ suc_payload, rsp);
++ /* A bare length word means no response value follows */
++ if (suc_payload_len == sizeof(uint32_t)) {
++ /* Authentication is complete after sending this SUCCESS */
++ vport->auth.auth_msg_state = LPFC_DHCHAP_SUCCESS;
++ } else {
++ /* Need to wait for SUCCESS from Auth Initiator */
++ vport->auth.auth_msg_state = LPFC_DHCHAP_SUCCESS_REPLY;
++ }
++ lpfc_issue_els_auth(vport, ndlp, DHCHAP_SUCCESS,
++ suc_payload, suc_payload_len);
++ kfree(suc_payload);
++ vport->auth.direction |= AUTH_DIRECTION_LOCAL;
++ } else {
++ lpfc_printf_vlog(vport, KERN_ERR, LOG_SECURITY,
++ "1005 AUTHENTICATION_FAILURE Nport:x%x\n",
++ ndlp->nlp_DID);
++ lpfc_issue_els_auth_reject(vport, ndlp,
++ AUTH_ERR, AUTHENTICATION_FAILED);
++ if (vport->auth.auth_state == LPFC_AUTH_SUCCESS) {
++ lpfc_port_auth_failed(ndlp);
++ }
++ }
++
++ kfree(rsp);
++}
++
++/*
++ * lpfc_unpack_auth_negotiate - parse a received AUTH_Negotiate payload
++ *
++ * Walks the payload in @message, validating each field and selecting
++ * the first hash function and DH group (in local priority order) that
++ * the remote port also offers; the choices are stored in
++ * vport->auth.hash_id and vport->auth.group_id.
++ *
++ * Returns 0 on success; 1 on any parse/negotiation failure with
++ * @reason / @explanation set for the AUTH reject to be sent.
++ *
++ * NOTE(review): prot_len is read from the payload but never used, and
++ * no field is bounds-checked against the overall payload length --
++ * verify the caller guarantees a well-sized buffer.
++ */
++int
++lpfc_unpack_auth_negotiate(struct lpfc_vport *vport, uint8_t *message,
++ uint8_t *reason, uint8_t *explanation)
++{
++ uint32_t prot_len;
++ uint32_t param_len;
++ int i, j = 0;
++
++ /* Following is the format of the message. Name Format.
++ * uint16_t nameTag;
++ * uint16_t nameLength;
++ * uint8_t name[8];
++ * AUTH_Negotiate Message
++ * uint32_t NumberOfAuthProtocals
++ * uint32_t AuthProtParameter#1Len
++ * uint32_t AuthProtID#1 (DH-CHAP = 0x1)
++ * AUTH_Negotiate DH-CHAP
++ * uint16_t DH-CHAPParameterTag (HashList = 0x1)
++ * uint16_t DH-CHAPParameterWordCount (number of uint32_t entries)
++ * uint8_t DH-CHAPParameter[]; (uint32_t entries)
++ * uint16_t DH-CHAPParameterTag (DHglDList = 0x2)
++ * uint16_t DH-CHAPParameterWordCount (number of uint32_t entries)
++ * uint8_t DH-CHAPParameter[]; (uint32_t entries)
++ * DHCHAP_Challenge Message
++ * uint32_t hashIdentifier;
++ * uint32_t dhgroupIdentifier;
++ * uint32_t challengevalueLen;
++ * uint8_t challengeValue[];
++ * uint32_t dhvalueLen;
++ * uint8_t dhvalue[];
++ */
++
++ /* Name Tag */
++ if (be16_to_cpu(*(uint16_t *)message) != NAME_TAG) {
++ *reason = AUTH_ERR;
++ *explanation = BAD_PAYLOAD;
++ lpfc_printf_vlog(vport, KERN_ERR, LOG_SECURITY,
++ "1006 Bad Name tag in auth message 0x%x\n",
++ be16_to_cpu(*(uint16_t *)message));
++ return 1;
++ }
++ message += sizeof(uint16_t);
++
++ /* Name Length */
++ if (be16_to_cpu(*(uint16_t *)message) != NAME_LEN) {
++ *reason = AUTH_ERR;
++ *explanation = BAD_PAYLOAD;
++ lpfc_printf_vlog(vport, KERN_ERR, LOG_SECURITY,
++ "1007 Bad Name length in auth message 0x%x\n",
++ be16_to_cpu(*(uint16_t *)message));
++ return 1;
++ }
++ message += sizeof(uint16_t);
++
++ /* Skip over Remote Port Name */
++ message += NAME_LEN;
++
++ /* Number of Auth Protocols must be 1 DH-CHAP */
++ if (be32_to_cpu(*(uint32_t *)message) != 1) {
++ *reason = AUTH_ERR;
++ *explanation = BAD_PAYLOAD;
++ lpfc_printf_vlog(vport, KERN_ERR, LOG_SECURITY,
++ "1008 Bad Number of Protocols 0x%x\n",
++ be32_to_cpu(*(uint32_t *)message));
++ return 1;
++ }
++ message += sizeof(uint32_t);
++
++ /* Protocol Parameter Length */
++ prot_len = be32_to_cpu(*(uint32_t *)message);
++ message += sizeof(uint32_t);
++
++ /* Protocol Parameter type */
++ if (be32_to_cpu(*(uint32_t *)message) != FC_DHCHAP) {
++ *reason = AUTH_ERR;
++ *explanation = BAD_PAYLOAD;
++ lpfc_printf_vlog(vport, KERN_ERR, LOG_SECURITY,
++ "1009 Bad param type 0x%x\n",
++ be32_to_cpu(*(uint32_t *)message));
++ return 1;
++ }
++ message += sizeof(uint32_t);
++
++ /* Parameter #1 Tag */
++ if (be16_to_cpu(*(uint16_t *)message) != HASH_LIST_TAG) {
++ *reason = AUTH_ERR;
++ *explanation = BAD_PAYLOAD;
++ lpfc_printf_vlog(vport, KERN_ERR, LOG_SECURITY,
++ "1010 Bad Tag 1 0x%x\n",
++ be16_to_cpu(*(uint16_t *)message));
++ return 1;
++ }
++ message += sizeof(uint16_t);
++
++ /* Parameter #1 Length */
++ param_len = be16_to_cpu(*(uint16_t *)message);
++ message += sizeof(uint16_t);
++
++ /* Choose a hash function: first local priority entry that the
++ * remote's offered list also contains.
++ */
++ for (i = 0; i < vport->auth.hash_len; i++) {
++ for (j = 0; j < param_len; j++) {
++ if (vport->auth.hash_priority[i] ==
++ be32_to_cpu(((uint32_t *)message)[j]))
++ break;
++ }
++ if (j != param_len)
++ break;
++ }
++ if (i == vport->auth.hash_len && j == param_len) {
++ *reason = AUTH_ERR;
++ *explanation = BAD_PAYLOAD;
++ lpfc_printf_vlog(vport, KERN_ERR, LOG_SECURITY,
++ "1011 Auth_neg no hash function chosen.\n");
++ return 1;
++ }
++ vport->auth.hash_id = vport->auth.hash_priority[i];
++ message += sizeof(uint32_t) * param_len;
++
++ /* Parameter #2 Tag */
++ if (be16_to_cpu(*(uint16_t *)message) != DHGID_LIST_TAG) {
++ *reason = AUTH_ERR;
++ *explanation = BAD_PAYLOAD;
++ lpfc_printf_vlog(vport, KERN_ERR, LOG_SECURITY,
++ "1012 Auth_negotiate Bad Tag 2 0x%x\n",
++ be16_to_cpu(*(uint16_t *)message));
++ return 1;
++ }
++ message += sizeof(uint16_t);
++
++ /* Parameter #2 Length */
++ param_len = be16_to_cpu(*(uint16_t *)message);
++ message += sizeof(uint16_t);
++
++ /* Choose a DH Group, same first-match-by-priority scheme */
++ for (i = 0; i < vport->auth.dh_group_len; i++) {
++ for (j = 0; j < param_len; j++) {
++ if (vport->auth.dh_group_priority[i] ==
++ be32_to_cpu(((uint32_t *)message)[j]))
++ break;
++ }
++ if (j != param_len)
++ break;
++ }
++ if (i == vport->auth.dh_group_len && j == param_len) {
++ *reason = AUTH_ERR;
++ *explanation = BAD_PAYLOAD;
++ lpfc_printf_vlog(vport, KERN_ERR, LOG_SECURITY,
++ "1013 Auth_negotiate no DH_group found. \n");
++ return 1;
++ }
++ vport->auth.group_id = vport->auth.dh_group_priority[i];
++ message += sizeof(uint32_t) * param_len;
++
++ return 0;
++}
++
++/*
++ * lpfc_unpack_dhchap_challenge - parse a received DHCHAP_Challenge
++ *
++ * Validates the name header, checks that the remote's chosen hash and
++ * DH group are on our priority lists, and saves the challenge (and DH
++ * public key, if present) into vport->auth for the later response.
++ *
++ * Returns 0 on success; 1 on failure with @reason / @explanation set
++ * for the AUTH reject to be sent.
++ *
++ * NOTE(review): challenge_len and dh_pub_key_len come straight off
++ * the wire and are used as kmalloc/memcpy sizes without any upper
++ * bound -- verify the caller limits the payload size.
++ */
++int
++lpfc_unpack_dhchap_challenge(struct lpfc_vport *vport, uint8_t *message,
++ uint8_t *reason, uint8_t *explanation)
++{
++ int i;
++
++ /* Following is the format of the message DHCHAP_Challenge.
++ * uint16_t nameTag;
++ * uint16_t nameLength;
++ * uint8_t name[8];
++ * uint32_t hashIdentifier;
++ * uint32_t dhgroupIdentifier;
++ * uint32_t challengevalueLen;
++ * uint8_t challengeValue[];
++ * uint32_t dhvalueLen;
++ * uint8_t dhvalue[];
++ */
++
++ /* Name Tag */
++ if (be16_to_cpu(*(uint16_t *)message) != NAME_TAG) {
++ *reason = AUTH_ERR;
++ *explanation = BAD_PAYLOAD;
++ lpfc_printf_vlog(vport, KERN_ERR, LOG_SECURITY,
++ "1014 dhchap challenge bad name tag 0x%x. \n",
++ be16_to_cpu(*(uint16_t *)message));
++ return 1;
++ }
++ message += sizeof(uint16_t);
++
++ /* Name Length */
++ if (be16_to_cpu(*(uint16_t *)message) != NAME_LEN) {
++ *reason = AUTH_ERR;
++ *explanation = BAD_PAYLOAD;
++ lpfc_printf_vlog(vport, KERN_ERR, LOG_SECURITY,
++ "1015 dhchap challenge bad name length "
++ "0x%x.\n", be16_to_cpu(*(uint16_t *)message));
++ return 1;
++ }
++ message += sizeof(uint16_t);
++
++ /* Remote Port Name */
++ message += NAME_LEN;
++
++ /* Hash ID: must be one we advertise in hash_priority[] */
++ vport->auth.hash_id = be32_to_cpu(*(uint32_t *)message); /* Hash id */
++ for (i = 0; i < vport->auth.hash_len; i++) {
++ if (vport->auth.hash_id == vport->auth.hash_priority[i])
++ break;
++ }
++ if (i == vport->auth.hash_len) {
++ *reason = LOGIC_ERR;
++ *explanation = BAD_ALGORITHM;
++ lpfc_printf_vlog(vport, KERN_ERR, LOG_SECURITY,
++ "1016 dhchap challenge Hash ID not Supported "
++ "0x%x. \n", vport->auth.hash_id);
++ return 1;
++ }
++ message += sizeof(uint32_t);
++
++ vport->auth.group_id =
++ be32_to_cpu(*(uint32_t *)message); /* DH group id */
++ for (i = 0; i < vport->auth.dh_group_len; i++) {
++ if (vport->auth.group_id == vport->auth.dh_group_priority[i])
++ break;
++ }
++ if (i == vport->auth.dh_group_len) {
++ *reason = LOGIC_ERR;
++ *explanation = BAD_DHGROUP;
++ lpfc_printf_vlog(vport, KERN_ERR, LOG_SECURITY,
++ "1017 dhchap challenge could not find DH "
++ "Group. \n");
++ return 1;
++ }
++ message += sizeof(uint32_t);
++
++ vport->auth.challenge_len =
++ be32_to_cpu(*(uint32_t *)message); /* Challenge Len */
++ message += sizeof(uint32_t);
++
++ /* copy challenge to vport */
++ /* NOTE(review): NULL check before kfree() is redundant */
++ if (vport->auth.challenge != NULL) {
++ kfree(vport->auth.challenge);
++ }
++ vport->auth.challenge = kmalloc(vport->auth.challenge_len, GFP_KERNEL);
++ if (!vport->auth.challenge) {
++ *reason = AUTH_ERR;
++ return 1;
++ }
++ memcpy (vport->auth.challenge, message, vport->auth.challenge_len);
++ message += vport->auth.challenge_len;
++
++ vport->auth.dh_pub_key_len =
++ be32_to_cpu(*(uint32_t *)message); /* DH Value Len */
++ message += sizeof(uint32_t);
++
++ if (vport->auth.dh_pub_key_len != 0) {
++ /* A public key is only meaningful for a non-NULL DH group */
++ if (vport->auth.group_id == DH_GROUP_NULL) {
++ *reason = LOGIC_ERR;
++ *explanation = BAD_DHGROUP;
++ lpfc_printf_vlog(vport, KERN_ERR, LOG_SECURITY,
++ "1018 dhchap challenge No Public key "
++ "for non-NULL DH Group.\n");
++ return 1;
++ }
++
++ /* Copy to the vport to save for authentication */
++ if (vport->auth.dh_pub_key != NULL)
++ kfree(vport->auth.dh_pub_key);
++ vport->auth.dh_pub_key = kmalloc(vport->auth.dh_pub_key_len,
++ GFP_KERNEL);
++ if (!vport->auth.dh_pub_key) {
++ *reason = AUTH_ERR;
++ return 1;
++ }
++ memcpy(vport->auth.dh_pub_key, message,
++ vport->auth.dh_pub_key_len);
++ }
++ return 0;
++}
++
++/*
++ * lpfc_unpack_dhchap_reply - parse a received DHCHAP_Reply
++ *
++ * Copies the response value, DH value and (bidirectional) challenge
++ * from @message into fc_req->u.dhchap_success.data, laid out after
++ * the previously stored local challenge, recording each piece's
++ * length in fc_req.
++ *
++ * Returns the number of bytes consumed beyond the local challenge
++ * (response + DH value + challenge lengths).
++ *
++ * NOTE(review): all three lengths come straight off the wire and are
++ * used as memcpy sizes with no bounds check against the fc_req data
++ * buffer -- verify the caller limits the payload size.
++ */
++int
++lpfc_unpack_dhchap_reply(struct lpfc_vport *vport, uint8_t *message,
++ struct fc_auth_req *fc_req)
++{
++ uint32_t rsp_len;
++ uint32_t dh_len;
++ uint32_t challenge_len;
++
++ /* Following is the format of the message DHCHAP_Reply.
++ * uint32_t Response Value Length;
++ * uint8_t Response Value[];
++ * uint32_t DH Value Length;
++ * uint8_t DH Value[];
++ * uint32_t Challenge Value Length;
++ * uint8_t Challenge Value[];
++ */
++
++ rsp_len = be32_to_cpu(*(uint32_t *)message); /* Response Len */
++ message += sizeof(uint32_t);
++ memcpy (fc_req->u.dhchap_success.data + vport->auth.challenge_len,
++ message, rsp_len);
++ fc_req->u.dhchap_success.received_response_len = rsp_len;
++ message += rsp_len;
++
++ dh_len = be32_to_cpu(*(uint32_t *)message); /* DH Len */
++ message += sizeof(uint32_t);
++ memcpy (fc_req->u.dhchap_success.data + vport->auth.challenge_len +
++ rsp_len, message, dh_len);
++ fc_req->u.dhchap_success.received_public_key_len = dh_len;
++ message += dh_len;
++
++ challenge_len = be32_to_cpu(*(uint32_t *)message); /* Challenge Len */
++ message += sizeof(uint32_t);
++ memcpy (fc_req->u.dhchap_success.data + vport->auth.challenge_len
++ + rsp_len + dh_len,
++ message, challenge_len);
++ fc_req->u.dhchap_success.received_challenge_len = challenge_len;
++ message += challenge_len;
++
++ return (rsp_len + dh_len + challenge_len);
++}
++
++/*
++ * lpfc_unpack_dhchap_success - parse a received DHCHAP_Success
++ *
++ * Copies the remote's response value from @message, followed by the
++ * previously saved DH public key, into fc_req->u.dhchap_success.data
++ * after the stored local challenge, recording the lengths in fc_req.
++ *
++ * Returns the total number of bytes placed in the fc_req data buffer
++ * (challenge + response + public key).
++ *
++ * NOTE(review): rsp_len comes straight off the wire and is used as a
++ * memcpy size without validation, like the other unpack routines --
++ * verify the caller limits the payload size.
++ */
++int
++lpfc_unpack_dhchap_success(struct lpfc_vport *vport, uint8_t *message,
++ struct fc_auth_req *fc_req)
++{
++ uint32_t rsp_len = 0;
++
++ /* DHCHAP_Success.
++ * uint32_t responseValueLen;
++ * uint8_t response[];
++ */
++
++ rsp_len = be32_to_cpu(*(uint32_t *)message); /* Response Len */
++ message += sizeof(uint32_t);
++ memcpy(fc_req->u.dhchap_success.data + vport->auth.challenge_len,
++ message, rsp_len);
++ fc_req->u.dhchap_success.received_response_len = rsp_len;
++
++ memcpy(fc_req->u.dhchap_success.data +
++ vport->auth.challenge_len + rsp_len,
++ vport->auth.dh_pub_key, vport->auth.dh_pub_key_len);
++
++ fc_req->u.dhchap_success.received_public_key_len =
++ vport->auth.dh_pub_key_len;
++
++ fc_req->u.dhchap_success.received_challenge_len = 0;
++
++ /* Unreachable duplicate "return 0;" after this statement removed */
++ return (vport->auth.challenge_len + rsp_len +
++ vport->auth.dh_pub_key_len);
++}
++
++/*
++ * lpfc_build_auth_neg - pack an AUTH_Negotiate payload into @message
++ *
++ * Emits the big-endian name header, the single DH-CHAP protocol
++ * entry, and the local hash and DH-group priority lists from
++ * vport->auth.  The protocol-parameters length field is back-patched
++ * once the variable-length lists are written.
++ *
++ * Returns the total payload length in bytes.  @message must be large
++ * enough for the worst case (callers allocate MAX_AUTH_REQ_SIZE).
++ */
++int
++lpfc_build_auth_neg(struct lpfc_vport *vport, uint8_t *message)
++{
++ uint8_t *message_start = message;
++ uint8_t *params_start;
++ uint32_t *params_len;
++ uint32_t len;
++ int i;
++
++ /* Because some of the fields are not static in length
++ * and number we will pack on the fly.This will be expanded
++ * in the future to optionally offer DHCHAP or FCAP or both.
++ * The packing is done in Big Endian byte order DHCHAP_Reply.
++ *
++ * uint16_t nameTag;
++ * uint16_t nameLength;
++ * uint8_t name[8];
++ * uint32_t available; For now we will only offer one
++ protocol ( DHCHAP ) for authentication.
++ * uint32_t potocolParamsLenId#1;
++ * uint32_t protocolId#1; 1 : DHCHAP. The protocol list is
++ * in order of preference.
++ * uint16_t parameter#1Tag 1 : HashList
++ * uint16_t parameter#1Len 2 : Count of how many parameter values
++ * follow in order of preference.
++ * uint16_t parameter#1value#1 5 : MD5 Hash Function
++ * uint16_t parameter#1value#2 6 : SHA-1 Hash Function
++ * uint16_t parameter#2Tag 2 : DHglDList
++ * uint16_t parameter#2Len 1 : Only One is supported now
++ * uint16_t parameter#2value#1 0 : NULL DH-CHAP Algorithm
++ * uint16_t parameter#2value#2 ...
++ * uint32_t protocolParamsLenId#2;
++ * uint32_t protocolId#2; 2 = FCAP
++ * uint16_t parameter#1Tag
++ * uint16_t parameter#1Len
++ * uint16_t parameter#1value#1
++ * uint16_t parameter#1value#2 ...
++ * uint16_t parameter#2Tag
++ * uint16_t parameter#2Len
++ * uint16_t parameter#2value#1
++ * uint16_t parameter#2value#2 ...
++ */
++
++
++ /* Name Tag */
++ *((uint16_t *)message) = cpu_to_be16(NAME_TAG);
++ message += sizeof(uint16_t);
++
++ /* Name Len */
++ *((uint16_t *)message) = cpu_to_be16(NAME_LEN);
++ message += sizeof(uint16_t);
++
++ memcpy(message, vport->fc_portname.u.wwn, sizeof(uint64_t));
++
++ message += sizeof(uint64_t);
++
++ /* Protocols Available */
++ *((uint32_t *)message) = cpu_to_be32(PROTS_NUM);
++ message += sizeof(uint32_t);
++
++ /* First Protocol Params Len -- filled in after the lists below */
++ params_len = (uint32_t *)message;
++ message += sizeof(uint32_t);
++
++ /* Start of first Param */
++ params_start = message;
++
++ /* Protocol Id */
++ *((uint32_t *)message) = cpu_to_be32(FC_DHCHAP);
++ message += sizeof(uint32_t);
++
++ /* Hash List Tag */
++ *((uint16_t *)message) = cpu_to_be16(HASH_LIST_TAG);
++ message += sizeof(uint16_t);
++
++ /* Hash Value Len */
++ *((uint16_t *)message) = cpu_to_be16(vport->auth.hash_len);
++ message += sizeof(uint16_t);
++
++ /* Hash Value each 4 byte words */
++ for (i = 0; i < vport->auth.hash_len; i++) {
++ *((uint32_t *)message) =
++ cpu_to_be32(vport->auth.hash_priority[i]);
++ message += sizeof(uint32_t);
++ }
++
++ /* DHgIDList Tag */
++ *((uint16_t *)message) = cpu_to_be16(DHGID_LIST_TAG);
++ message += sizeof(uint16_t);
++
++ /* DHgIDListValue Len */
++ *((uint16_t *)message) = cpu_to_be16(vport->auth.dh_group_len);
++
++ message += sizeof(uint16_t);
++
++ /* DHgIDList each 4 byte words */
++
++ for (i = 0; i < vport->auth.dh_group_len; i++) {
++ *((uint32_t *)message) =
++ cpu_to_be32(vport->auth.dh_group_priority[i]);
++ message += sizeof(uint32_t);
++ }
++
++ /* Back-patch the parameters length now that it is known */
++ *params_len = cpu_to_be32(message - params_start);
++
++ len = (uint32_t)(message - message_start);
++
++ return len;
++}
++
++/*
++ * lpfc_build_dhchap_challenge - pack a DHCHAP_Challenge payload
++ *
++ * Writes the big-endian name header, chosen hash/DH-group ids, the
++ * locally generated challenge and our DH public key (both taken from
++ * @fc_rsp) into @message.  A copy of the challenge is also saved in
++ * vport->auth for verifying the eventual reply.
++ *
++ * Returns the payload length in bytes, or 0 if the challenge copy
++ * could not be allocated (the caller then sends a zero-length
++ * payload).
++ */
++int
++lpfc_build_dhchap_challenge(struct lpfc_vport *vport, uint8_t *message,
++ struct fc_auth_rsp *fc_rsp)
++{
++ uint8_t *message_start = message;
++
++ /* Because some of the fields are not static in length and number
++ * we will pack on the fly. The packing is done in Big Endian byte
++ * order DHCHAP_Challenge.
++ *
++ * uint16_t nameTag;
++ * uint16_t nameLength;
++ * uint8_t name[8];
++ * uint32_t Hash_Identifier;
++ * uint32_t DH_Group_Identifier;
++ * uint32_t Challenge_Value_Length;
++ * uint8_t Challenge_Value[];
++ * uint32_t DH_Value_Length;
++ * uint8_t DH_Value[];
++ */
++
++ /* Name Tag */
++ *((uint16_t *)message) = cpu_to_be16(NAME_TAG);
++ message += sizeof(uint16_t);
++
++ /* Name Len */
++ *((uint16_t *)message) = cpu_to_be16(NAME_LEN);
++ message += sizeof(uint16_t);
++
++ memcpy(message, vport->fc_portname.u.wwn, NAME_LEN);
++ message += NAME_LEN;
++
++ /* Hash Value each 4 byte words */
++ *((uint32_t *)message) = cpu_to_be32(vport->auth.hash_id);
++ message += sizeof(uint32_t);
++
++ /* DH group id each 4 byte words */
++ *((uint32_t *)message) = cpu_to_be32(vport->auth.group_id);
++ message += sizeof(uint32_t);
++
++ /* Challenge Length */
++ *((uint32_t *)message) = cpu_to_be32(fc_rsp->u.
++ dhchap_challenge.our_challenge_len);
++ message += sizeof(uint32_t);
++
++ /* copy challenge to vport to save */
++ if (vport->auth.challenge)
++ kfree(vport->auth.challenge);
++ vport->auth.challenge_len = fc_rsp->u.
++ dhchap_challenge.our_challenge_len;
++ vport->auth.challenge = kmalloc(vport->auth.challenge_len, GFP_KERNEL);
++
++ if (!vport->auth.challenge)
++ return 0;
++
++ memcpy(vport->auth.challenge, fc_rsp->u.dhchap_challenge.data,
++ fc_rsp->u.dhchap_challenge.our_challenge_len);
++
++ /* Challenge */
++ memcpy(message, fc_rsp->u.dhchap_challenge.data,
++ fc_rsp->u.dhchap_challenge.our_challenge_len);
++ message += fc_rsp->u.dhchap_challenge.our_challenge_len;
++
++ /* Public Key length */
++ *((uint32_t *)message) = cpu_to_be32(fc_rsp->u.
++ dhchap_challenge.our_public_key_len);
++ message += sizeof(uint32_t);
++
++ /* Public Key */
++ memcpy(message, fc_rsp->u.dhchap_challenge.data +
++ fc_rsp->u.dhchap_challenge.our_challenge_len,
++ fc_rsp->u.dhchap_challenge.our_public_key_len);
++ message += fc_rsp->u.dhchap_challenge.our_public_key_len;
++
++ return ((uint32_t)(message - message_start));
++
++}
++
++/*
++ * lpfc_build_dhchap_reply - pack a DHCHAP_Reply payload
++ *
++ * Writes our challenge response and DH public key (from @fc_rsp)
++ * into @message in big-endian order.  When bidirectional
++ * authentication is configured, a counter-challenge is appended and
++ * saved in vport->auth; otherwise a zero challenge length is written.
++ *
++ * Returns the payload length in bytes, or 0 if the counter-challenge
++ * copy could not be allocated.
++ */
++int
++lpfc_build_dhchap_reply(struct lpfc_vport *vport, uint8_t *message,
++ struct fc_auth_rsp *fc_rsp)
++
++{
++ uint8_t *message_start = message;
++
++ /*
++ * Because some of the fields are not static in length and
++ * number we will pack on the fly. The packing is done in
++ * Big Endian byte order DHCHAP_Reply.
++ *
++ * uint32_t ResonseLength;
++ * uint8_t ResponseValue[];
++ * uint32_t DHLength;
++ * uint8_t DHValue[]; Our Public key
++ * uint32_t ChallengeLength; Used for bi-directional authentication
++ * uint8_t ChallengeValue[];
++ *
++ * The combined key ( g^x mod p )^y mod p is used as the last
++ * hash of the password.
++ *
++ * g is the base 2 or 5.
++ * y is our private key.
++ * ( g^y mod p ) is our public key which we send.
++ * ( g^x mod p ) is their public key which we received.
++ */
++ /* Response Value Length */
++ *((uint32_t *)message) = cpu_to_be32(fc_rsp->u.dhchap_reply.
++ our_challenge_rsp_len);
++
++ message += sizeof(uint32_t);
++ /* Response Value */
++ memcpy(message, fc_rsp->u.dhchap_reply.data,
++ fc_rsp->u.dhchap_reply.our_challenge_rsp_len);
++
++ message += fc_rsp->u.dhchap_reply.our_challenge_rsp_len;
++ /* DH Value Length */
++ *((uint32_t *)message) = cpu_to_be32(fc_rsp->u.dhchap_reply.
++ our_public_key_len);
++
++ message += sizeof(uint32_t);
++ /* DH Value */
++ memcpy(message, fc_rsp->u.dhchap_reply.data +
++ fc_rsp->u.dhchap_reply.our_challenge_rsp_len,
++ fc_rsp->u.dhchap_reply.our_public_key_len);
++
++ message += fc_rsp->u.dhchap_reply.our_public_key_len;
++
++ if (vport->auth.bidirectional) {
++
++ /* copy to vport to save */
++ if (vport->auth.challenge)
++ kfree(vport->auth.challenge);
++ vport->auth.challenge_len = fc_rsp->u.dhchap_reply.
++ our_challenge_len;
++ vport->auth.challenge = kmalloc(vport->auth.challenge_len,
++ GFP_KERNEL);
++ if (!vport->auth.challenge)
++ return 0;
++
++ memcpy(vport->auth.challenge, fc_rsp->u.dhchap_reply.data +
++ fc_rsp->u.dhchap_reply.our_challenge_rsp_len +
++ fc_rsp->u.dhchap_reply.our_public_key_len,
++ fc_rsp->u.dhchap_reply.our_challenge_len);
++ /* Challenge Value Length */
++ *((uint32_t *)message) = cpu_to_be32(fc_rsp->u.
++ dhchap_reply.our_challenge_len);
++ message += sizeof(uint32_t);
++ /* Challenge Value */
++ memcpy(message, fc_rsp->u.dhchap_reply.data +
++ fc_rsp->u.dhchap_reply.our_challenge_rsp_len +
++ fc_rsp->u.dhchap_reply.our_public_key_len,
++ fc_rsp->u.dhchap_reply.our_challenge_len);
++
++ message += fc_rsp->u.dhchap_reply.our_challenge_len;
++
++ } else {
++ *((uint32_t *)message) = 0; /* Challenge Len for No
++ bidirectional authentication */
++ message += sizeof(uint32_t); /* Challenge Value Not Present */
++ }
++
++ return ((uint32_t)(message - message_start));
++
++}
++
++/*
++ * lpfc_build_dhchap_success - pack a DHCHAP_Success payload
++ *
++ * Writes the big-endian response length followed by the response
++ * value from @fc_rsp into @message.  A response length of zero yields
++ * a payload of just the 4-byte length word (the "done" case checked
++ * by lpfc_dhchap_authenticate).
++ *
++ * Returns the payload length in bytes.
++ */
++int
++lpfc_build_dhchap_success(struct lpfc_vport *vport, uint8_t *message,
++ struct fc_auth_rsp *fc_rsp)
++{
++ uint8_t *message_start = message;
++
++ /*
++ * Because some of the fields are not static in length and number
++ * we will pack on the fly. The packing is done in Big Endian byte
++ * order DHCHAP_Success.
++ * uint32_t responseValueLen;
++ * uint8_t response[];.
++ */
++
++ *((uint32_t *)message) = cpu_to_be32(fc_rsp->u.
++ dhchap_success.response_len);
++ message += sizeof(uint32_t);
++
++ memcpy(message, fc_rsp->u.dhchap_success.data,
++ fc_rsp->u.dhchap_success.response_len);
++ message += fc_rsp->u.dhchap_success.response_len;
++
++ return ((uint32_t)(message - message_start));
++}
++
+--- /dev/null
++++ b/drivers/scsi/lpfc/lpfc_auth.h
+@@ -0,0 +1,92 @@
++/*******************************************************************
++ * This file is part of the Emulex Linux Device Driver for *
++ * Fibre Channel Host Bus Adapters. *
++ * Copyright (C) 2006-2007 Emulex. All rights reserved. *
++ * EMULEX and SLI are trademarks of Emulex. *
++ * www.emulex.com *
++ * *
++ * This program is free software; you can redistribute it and/or *
++ * modify it under the terms of version 2 of the GNU General *
++ * Public License as published by the Free Software Foundation. *
++ * This program is distributed in the hope that it will be useful. *
++ * ALL EXPRESS OR IMPLIED CONDITIONS, REPRESENTATIONS AND *
++ * WARRANTIES, INCLUDING ANY IMPLIED WARRANTY OF MERCHANTABILITY, *
++ * FITNESS FOR A PARTICULAR PURPOSE, OR NON-INFRINGEMENT, ARE *
++ * DISCLAIMED, EXCEPT TO THE EXTENT THAT SUCH DISCLAIMERS ARE HELD *
++ * TO BE LEGALLY INVALID. See the GNU General Public License for *
++ * more details, a copy of which can be found in the file COPYING *
++ * included with this package. *
++ *******************************************************************/
++
++/* DH-CHAP authentication constants used by lpfc_auth.c */
++#define N_DH_GROUP 4
++#define ELS_CMD_AUTH_BYTE 0x90
++
++/* AUTH ELS message codes */
++#define AUTH_REJECT 0xA
++#define AUTH_NEGOTIATE 0xB
++#define AUTH_DONE 0xC
++
++#define DHCHAP_CHALLENGE 0x10
++#define DHCHAP_REPLY 0x11
++#define DHCHAP_SUCCESS 0x12
++
++#define FCAP_REQUEST 0x13
++#define FCAP_ACK 0x14
++#define FCAP_CONFIRM 0x15
++
++#define PROTS_NUM 0x01
++
++/* name-header tag and length used in every AUTH payload */
++#define NAME_TAG 0x01
++#define NAME_LEN 0x08
++
++#define HASH_LIST_TAG 0x01
++
++#define DHGID_LIST_TAG 0x02
++
++#define HBA_SECURITY 0x20
++
++/* reject reason codes (struct lpfc_auth_reject.reason) */
++#define AUTH_ERR 0x1
++#define LOGIC_ERR 0x2
++
++/* reject reason explanations (struct lpfc_auth_reject.explanation) */
++#define BAD_DHGROUP 0x2
++#define BAD_ALGORITHM 0x3
++#define AUTHENTICATION_FAILED 0x5
++#define BAD_PAYLOAD 0x6
++#define BAD_PROTOCOL 0x7
++#define RESTART 0x8
++
++#define AUTH_VERSION 0x1
++
++#define MAX_AUTH_MESSAGE_SIZE 1024
++
++/* payload of an AUTH_Reject message */
++struct lpfc_auth_reject {
++ uint8_t reason;
++ uint8_t explanation;
++ uint8_t reserved[2];
++} __attribute__ ((packed));
++
++/* common header of every AUTH ELS message */
++struct lpfc_auth_message { /* Structure is in Big Endian format */
++ uint8_t command_code;
++ uint8_t flags;
++ uint8_t message_code;
++ uint8_t protocol_ver;
++ uint32_t message_len;
++ uint32_t trans_id;
++ uint8_t data[0];
++} __attribute__ ((packed));
++
++/* payload builders (lpfc_auth.c) -- return payload length in bytes */
++int lpfc_build_auth_neg(struct lpfc_vport *vport, uint8_t *message);
++int lpfc_build_dhchap_challenge(struct lpfc_vport *vport, uint8_t *message,
++ struct fc_auth_rsp *fc_rsp);
++int lpfc_build_dhchap_reply(struct lpfc_vport *vport, uint8_t *message,
++ struct fc_auth_rsp *fc_rsp);
++int lpfc_build_dhchap_success(struct lpfc_vport *vport, uint8_t *message,
++ struct fc_auth_rsp *fc_rsp);
++
++/* payload parsers (lpfc_auth.c) -- nonzero return means reject */
++int lpfc_unpack_auth_negotiate(struct lpfc_vport *vport, uint8_t *message,
++ uint8_t *reason, uint8_t *explanation);
++int lpfc_unpack_dhchap_challenge(struct lpfc_vport *vport, uint8_t *message,
++ uint8_t *reason, uint8_t *explanation);
++int lpfc_unpack_dhchap_reply(struct lpfc_vport *vport, uint8_t *message,
++ struct fc_auth_req *fc_req);
++int lpfc_unpack_dhchap_success(struct lpfc_vport *vport, uint8_t *message,
++ struct fc_auth_req *fc_req);
+--- a/drivers/scsi/lpfc/lpfc_crtn.h
++++ b/drivers/scsi/lpfc/lpfc_crtn.h
+@@ -21,6 +21,12 @@
+ typedef int (*node_filter)(struct lpfc_nodelist *, void *);
+
+ struct fc_rport;
++int lpfc_issue_els_auth(struct lpfc_vport *, struct lpfc_nodelist *,
++ uint8_t message_code, uint8_t *payload,
++ uint32_t payload_len);
++int lpfc_issue_els_auth_reject(struct lpfc_vport *vport,
++ struct lpfc_nodelist *ndlp,
++ uint8_t reason, uint8_t explanation);
+ void lpfc_dump_mem(struct lpfc_hba *, LPFC_MBOXQ_t *, uint16_t);
+ void lpfc_read_nv(struct lpfc_hba *, LPFC_MBOXQ_t *);
+ void lpfc_config_async(struct lpfc_hba *, LPFC_MBOXQ_t *, uint32_t);
+@@ -80,7 +86,10 @@ void lpfc_cleanup(struct lpfc_vport *);
+ void lpfc_disc_timeout(unsigned long);
+
+ struct lpfc_nodelist *__lpfc_findnode_rpi(struct lpfc_vport *, uint16_t);
++struct lpfc_nodelist *lpfc_findnode_wwnn(struct lpfc_vport *,
++ struct lpfc_name *);
+
++void lpfc_port_auth_failed(struct lpfc_nodelist *);
+ void lpfc_worker_wake_up(struct lpfc_hba *);
+ int lpfc_workq_post_event(struct lpfc_hba *, void *, void *, uint32_t);
+ int lpfc_do_work(void *);
+@@ -95,6 +104,9 @@ void lpfc_more_plogi(struct lpfc_vport *
+ void lpfc_more_adisc(struct lpfc_vport *);
+ void lpfc_end_rscn(struct lpfc_vport *);
+ int lpfc_els_chk_latt(struct lpfc_vport *);
++struct lpfc_iocbq *lpfc_prep_els_iocb(struct lpfc_vport *, uint8_t, uint16_t,
++ uint8_t, struct lpfc_nodelist *, uint32_t,
++ uint32_t);
+ int lpfc_els_abort_flogi(struct lpfc_hba *);
+ int lpfc_initial_flogi(struct lpfc_vport *);
+ int lpfc_initial_fdisc(struct lpfc_vport *);
+@@ -117,6 +129,8 @@ int lpfc_els_rsp_prli_acc(struct lpfc_vp
+ void lpfc_cancel_retry_delay_tmo(struct lpfc_vport *, struct lpfc_nodelist *);
+ void lpfc_els_retry_delay(unsigned long);
+ void lpfc_els_retry_delay_handler(struct lpfc_nodelist *);
++void lpfc_reauth_node(unsigned long);
++void lpfc_reauthentication_handler(struct lpfc_nodelist *);
+ void lpfc_els_unsol_event(struct lpfc_hba *, struct lpfc_sli_ring *,
+ struct lpfc_iocbq *);
+ int lpfc_els_handle_rscn(struct lpfc_vport *);
+@@ -258,7 +272,6 @@ void lpfc_free_sysfs_attr(struct lpfc_vp
+ extern struct device_attribute *lpfc_hba_attrs[];
+ extern struct device_attribute *lpfc_vport_attrs[];
+ extern struct scsi_host_template lpfc_template;
+-extern struct scsi_host_template lpfc_vport_template;
+ extern struct fc_function_template lpfc_transport_functions;
+ extern struct fc_function_template lpfc_vport_transport_functions;
+ extern int lpfc_sli_mode;
+@@ -276,6 +289,22 @@ void destroy_port(struct lpfc_vport *);
+ int lpfc_get_instance(void);
+ void lpfc_host_attrib_init(struct Scsi_Host *);
+
++int lpfc_selective_reset(struct lpfc_hba *);
++int lpfc_security_wait(struct lpfc_hba *);
++int lpfc_get_security_enabled(struct Scsi_Host *);
++void lpfc_security_service_online(struct Scsi_Host *);
++void lpfc_security_service_offline(struct Scsi_Host *);
++void lpfc_security_config(struct Scsi_Host *, int status, void *);
++int lpfc_security_config_wait(struct lpfc_vport *vport);
++void lpfc_dhchap_make_challenge(struct Scsi_Host *, int , void *, uint32_t);
++void lpfc_dhchap_make_response(struct Scsi_Host *, int , void *, uint32_t);
++void lpfc_dhchap_authenticate(struct Scsi_Host *, int , void *, uint32_t);
++int lpfc_start_node_authentication(struct lpfc_nodelist *);
++int lpfc_get_auth_config(struct lpfc_nodelist *, struct lpfc_name *);
++void lpfc_start_discovery(struct lpfc_vport *vport);
++void lpfc_start_authentication(struct lpfc_vport *, struct lpfc_nodelist *);
++int lpfc_rcv_nl_msg(struct Scsi_Host *, void *, uint32_t, uint32_t);
++
+ extern void lpfc_debugfs_initialize(struct lpfc_vport *);
+ extern void lpfc_debugfs_terminate(struct lpfc_vport *);
+ extern void lpfc_debugfs_disc_trc(struct lpfc_vport *, int, char *, uint32_t,
+@@ -284,6 +313,11 @@ extern void lpfc_debugfs_slow_ring_trc(s
+ uint32_t, uint32_t);
+ extern struct lpfc_hbq_init *lpfc_hbq_defs[];
+
++extern uint8_t lpfc_security_service_state;
++extern spinlock_t fc_security_user_lock;
++extern struct list_head fc_security_user_list;
++extern int fc_service_state;
++
+ /* Interface exported by fabric iocb scheduler */
+ void lpfc_fabric_abort_nport(struct lpfc_nodelist *);
+ void lpfc_fabric_abort_hba(struct lpfc_hba *);
+@@ -293,6 +327,7 @@ void lpfc_adjust_queue_depth(struct lpfc
+ void lpfc_ramp_down_queue_handler(struct lpfc_hba *);
+ void lpfc_ramp_up_queue_handler(struct lpfc_hba *);
+ void lpfc_scsi_dev_block(struct lpfc_hba *);
++void lpfc_scsi_dev_rescan(struct lpfc_hba *);
+
+ void
+ lpfc_send_els_failure_event(struct lpfc_hba *, struct lpfc_iocbq *,
+--- a/drivers/scsi/lpfc/lpfc_disc.h
++++ b/drivers/scsi/lpfc/lpfc_disc.h
+@@ -37,6 +37,7 @@ enum lpfc_work_type {
+ LPFC_EVT_KILL,
+ LPFC_EVT_ELS_RETRY,
+ LPFC_EVT_DEV_LOSS,
++ LPFC_EVT_REAUTH,
+ LPFC_EVT_FASTPATH_MGMT_EVT,
+ };
+
+@@ -99,10 +100,12 @@ struct lpfc_nodelist {
+ #define NLP_USG_FREE_ACK_BIT 0x8 /* Indicate ndlp memory free invoked */
+
+ struct timer_list nlp_delayfunc; /* Used for delayed ELS cmds */
++ struct timer_list nlp_reauth_tmr; /* Used for re-authentication */
+ struct fc_rport *rport; /* Corresponding FC transport
+ port structure */
+ struct lpfc_vport *vport;
+ struct lpfc_work_evt els_retry_evt;
++ struct lpfc_work_evt els_reauth_evt;
+ struct lpfc_work_evt dev_loss_evt;
+ unsigned long last_ramp_up_time; /* jiffy of last ramp up */
+ unsigned long last_q_full_time; /* jiffy of last queue full */
+--- a/drivers/scsi/lpfc/lpfc_els.c
++++ b/drivers/scsi/lpfc/lpfc_els.c
+@@ -38,6 +38,9 @@
+ #include "lpfc_crtn.h"
+ #include "lpfc_vport.h"
+ #include "lpfc_debugfs.h"
++#include "lpfc_auth_access.h"
++#include "lpfc_auth.h"
++#include "lpfc_security.h"
+
+ static int lpfc_els_retry(struct lpfc_hba *, struct lpfc_iocbq *,
+ struct lpfc_iocbq *);
+@@ -143,7 +146,7 @@ lpfc_els_chk_latt(struct lpfc_vport *vpo
+ * Pointer to the newly allocated/prepared els iocb data structure
+ * NULL - when els iocb data structure allocation/preparation failed
+ **/
+-static struct lpfc_iocbq *
++struct lpfc_iocbq *
+ lpfc_prep_els_iocb(struct lpfc_vport *vport, uint8_t expectRsp,
+ uint16_t cmdSize, uint8_t retry,
+ struct lpfc_nodelist *ndlp, uint32_t did,
+@@ -653,6 +656,7 @@ lpfc_cmpl_els_flogi(struct lpfc_hba *phb
+ struct lpfc_nodelist *ndlp = cmdiocb->context1;
+ struct lpfc_dmabuf *pcmd = cmdiocb->context2, *prsp;
+ struct serv_parm *sp;
++ struct lpfc_name wwpn;
+ int rc;
+
+ /* Check to see if link went down during discovery */
+@@ -703,7 +707,10 @@ lpfc_cmpl_els_flogi(struct lpfc_hba *phb
+ prsp = list_get_first(&pcmd->list, struct lpfc_dmabuf, list);
+
+ sp = prsp->virt + sizeof(uint32_t);
+-
++ if (sp->cmn.security)
++ ndlp->nlp_flag |= NLP_SC_REQ;
++ else
++ ndlp->nlp_flag &= ~NLP_SC_REQ;
+ /* FLOGI completes successfully */
+ lpfc_printf_vlog(vport, KERN_INFO, LOG_ELS,
+ "0101 FLOGI completes sucessfully "
+@@ -711,6 +718,20 @@ lpfc_cmpl_els_flogi(struct lpfc_hba *phb
+ irsp->un.ulpWord[4], sp->cmn.e_d_tov,
+ sp->cmn.w2.r_a_tov, sp->cmn.edtovResolution);
+
++ if (vport->cfg_enable_auth) {
++ u64_to_wwn(AUTH_FABRIC_WWN, wwpn.u.wwn);
++ if (lpfc_get_auth_config(ndlp, &wwpn))
++ goto flogifail;
++ } else {
++ vport->auth.security_active = 0;
++ if (sp->cmn.security) {
++ lpfc_printf_vlog(vport, KERN_ERR, LOG_SECURITY,
++ "1055 Authentication parameter is "
++ "disabled, but is required by "
++ "the fabric.\n");
++ goto flogifail;
++ }
++ }
+ if (vport->port_state == LPFC_FLOGI) {
+ /*
+ * If Common Service Parameters indicate Nport
+@@ -800,6 +821,10 @@ lpfc_issue_els_flogi(struct lpfc_vport *
+ sp = (struct serv_parm *) pcmd;
+
+ /* Setup CSPs accordingly for Fabric */
++
++ if (vport->cfg_enable_auth)
++ sp->cmn.security = 1;
++
+ sp->cmn.e_d_tov = 0;
+ sp->cmn.w2.r_a_tov = 0;
+ sp->cls1.classValid = 0;
+@@ -965,6 +990,17 @@ lpfc_initial_fdisc(struct lpfc_vport *vp
+ struct lpfc_hba *phba = vport->phba;
+ struct lpfc_nodelist *ndlp;
+
++ if (vport->cfg_enable_auth) {
++ if (lpfc_security_wait(phba)) {
++ lpfc_printf_vlog(vport, KERN_ERR, LOG_SECURITY,
++ "1049 Authentication is enabled but "
++ "authentication service is not "
++ "running\n");
++ vport->auth.auth_mode = FC_AUTHMODE_UNKNOWN;
++ return 0;
++ }
++ }
++
+ /* First look for the Fabric ndlp */
+ ndlp = lpfc_findnode_did(vport, Fabric_DID);
+ if (!ndlp) {
+@@ -2697,6 +2733,17 @@ lpfc_els_retry(struct lpfc_hba *phba, st
+ lpfc_nlp_set_state(vport, ndlp, NLP_STE_NPR_NODE);
+ lpfc_issue_els_logo(vport, ndlp, cmdiocb->retry);
+ return 1;
++ case ELS_CMD_AUTH_NEG:
++ case ELS_CMD_DH_CHA:
++ case ELS_CMD_DH_REP:
++ case ELS_CMD_DH_SUC:
++ ndlp->nlp_prev_state = ndlp->nlp_state;
++ ndlp->nlp_state = NLP_STE_NPR_NODE;
++ lpfc_printf_log(phba, KERN_INFO, LOG_ELS,
++ "0143 Authentication LS_RJT Logical "
++ "busy\n");
++ lpfc_start_authentication(vport, ndlp);
++ return 1;
+ }
+ }
+ /* No retry ELS command <elsCmd> to remote NPORT <did> */
+@@ -5085,6 +5132,363 @@ lpfc_els_flush_all_cmd(struct lpfc_hba
+ return;
+ }
+
++static void
++lpfc_els_rcv_auth_neg(struct lpfc_vport *vport, struct lpfc_iocbq *cmdiocb,
++ struct lpfc_nodelist *ndlp)
++{
++ struct Scsi_Host *shost = lpfc_shost_from_vport(vport);
++ struct lpfc_dmabuf *pcmd = cmdiocb->context2;
++ struct lpfc_auth_message *authcmd;
++ uint8_t reason, explanation;
++ uint32_t message_len;
++ uint32_t trans_id;
++ struct fc_auth_req *fc_req;
++ struct fc_auth_rsp *fc_rsp;
++
++ authcmd = pcmd->virt;
++ message_len = be32_to_cpu(authcmd->message_len);
++ trans_id = be32_to_cpu(authcmd->trans_id);
++
++ lpfc_els_rsp_acc(vport, ELS_CMD_ACC, cmdiocb, ndlp, NULL);
++
++ vport->auth.trans_id = trans_id;
++
++ if (lpfc_unpack_auth_negotiate(vport, authcmd->data,
++ &reason, &explanation)) {
++ lpfc_issue_els_auth_reject(vport, ndlp, reason, explanation);
++ return;
++ }
++ vport->auth.direction = AUTH_DIRECTION_NONE;
++ lpfc_printf_vlog(vport, KERN_WARNING, LOG_SECURITY,
++ "1033 Received auth_negotiate from Nport:x%x\n",
++ ndlp->nlp_DID);
++
++ fc_req = kzalloc(sizeof(struct fc_auth_req), GFP_KERNEL);
++
++ fc_req->local_wwpn = wwn_to_u64(vport->fc_portname.u.wwn);
++ if (ndlp->nlp_type & NLP_FABRIC)
++ fc_req->remote_wwpn = AUTH_FABRIC_WWN;
++ else
++ fc_req->remote_wwpn = wwn_to_u64(ndlp->nlp_portname.u.wwn);
++ fc_req->u.dhchap_challenge.transaction_id = vport->auth.trans_id;
++ fc_req->u.dhchap_challenge.dh_group_id = vport->auth.group_id;
++ fc_req->u.dhchap_challenge.hash_id = vport->auth.hash_id;
++
++ fc_rsp = kzalloc(MAX_AUTH_RSP_SIZE, GFP_KERNEL);
++
++ if (lpfc_fc_security_dhchap_make_challenge(shost,
++ fc_req, sizeof(struct fc_auth_req),
++ fc_rsp, MAX_AUTH_RSP_SIZE)) {
++ kfree(fc_rsp);
++ lpfc_issue_els_auth_reject(vport, ndlp, LOGIC_ERR, 0);
++ }
++
++ kfree(fc_req);
++
++}
++
++static void
++lpfc_els_rcv_chap_chal(struct lpfc_vport *vport, struct lpfc_iocbq *cmdiocb,
++ struct lpfc_nodelist *ndlp)
++{
++
++ struct Scsi_Host *shost = lpfc_shost_from_vport(vport);
++ struct lpfc_dmabuf *pcmd = cmdiocb->context2;
++ struct lpfc_auth_message *authcmd;
++ uint8_t reason, explanation;
++ uint32_t message_len;
++ uint32_t trans_id;
++ struct fc_auth_req *fc_req;
++ struct fc_auth_rsp *fc_rsp;
++ uint32_t fc_req_len;
++
++ authcmd = pcmd->virt;
++ message_len = be32_to_cpu(authcmd->message_len);
++ trans_id = be32_to_cpu(authcmd->trans_id);
++
++ lpfc_els_rsp_acc(vport, ELS_CMD_ACC, cmdiocb, ndlp, NULL);
++
++ if (vport->auth.auth_msg_state != LPFC_AUTH_NEGOTIATE) {
++ lpfc_printf_vlog(vport, KERN_ERR, LOG_SECURITY,
++ "1034 Not Expecting Challenge - Rejecting "
++ "Challenge.\n");
++ lpfc_issue_els_auth_reject(vport, ndlp, AUTH_ERR, BAD_PROTOCOL);
++ return;
++ }
++
++ if (trans_id != vport->auth.trans_id) {
++ lpfc_printf_vlog(vport, KERN_ERR, LOG_SECURITY,
++				"1035 Transaction ID does not match - Rejecting "
++ "Challenge.\n");
++ lpfc_issue_els_auth_reject(vport, ndlp, AUTH_ERR, BAD_PAYLOAD);
++ return;
++ }
++
++ if (lpfc_unpack_dhchap_challenge(vport, authcmd->data,
++ &reason, &explanation)) {
++ lpfc_issue_els_auth_reject(vport, ndlp, reason, explanation);
++ return;
++ }
++ vport->auth.direction = AUTH_DIRECTION_NONE;
++
++ fc_req_len = (sizeof(struct fc_auth_req) +
++ vport->auth.challenge_len +
++ vport->auth.dh_pub_key_len);
++ fc_req = kzalloc(fc_req_len, GFP_KERNEL);
++ fc_req->local_wwpn = wwn_to_u64(vport->fc_portname.u.wwn);
++ if (ndlp->nlp_type & NLP_FABRIC)
++ fc_req->remote_wwpn = AUTH_FABRIC_WWN;
++ else
++ fc_req->remote_wwpn = wwn_to_u64(ndlp->nlp_portname.u.wwn);
++ fc_req->u.dhchap_reply.transaction_id = vport->auth.trans_id;
++ fc_req->u.dhchap_reply.dh_group_id = vport->auth.group_id;
++ fc_req->u.dhchap_reply.hash_id = vport->auth.hash_id;
++ fc_req->u.dhchap_reply.bidirectional = vport->auth.bidirectional;
++ fc_req->u.dhchap_reply.received_challenge_len =
++ vport->auth.challenge_len;
++ fc_req->u.dhchap_reply.received_public_key_len =
++ vport->auth.dh_pub_key_len;
++ memcpy(fc_req->u.dhchap_reply.data, vport->auth.challenge,
++ vport->auth.challenge_len);
++ if (vport->auth.group_id != DH_GROUP_NULL) {
++ memcpy(fc_req->u.dhchap_reply.data + vport->auth.challenge_len,
++ vport->auth.dh_pub_key, vport->auth.dh_pub_key_len);
++ }
++
++ fc_rsp = kzalloc(MAX_AUTH_RSP_SIZE, GFP_KERNEL);
++
++ if (lpfc_fc_security_dhchap_make_response(shost,
++ fc_req, fc_req_len,
++ fc_rsp, MAX_AUTH_RSP_SIZE)) {
++ kfree(fc_rsp);
++ lpfc_issue_els_auth_reject(vport, ndlp, LOGIC_ERR, 0);
++ }
++
++ kfree(fc_req);
++
++}
++
++static void
++lpfc_els_rcv_auth_rjt(struct lpfc_vport *vport, struct lpfc_iocbq *cmdiocb,
++ struct lpfc_nodelist *ndlp)
++{
++
++ struct lpfc_dmabuf *pcmd = cmdiocb->context2;
++ struct lpfc_auth_message *authcmd;
++ uint32_t message_len;
++ uint32_t trans_id;
++ struct lpfc_auth_reject *rjt;
++ struct lpfc_hba *phba = vport->phba;
++
++ authcmd = pcmd->virt;
++ rjt = (struct lpfc_auth_reject *)authcmd->data;
++
++ message_len = be32_to_cpu(authcmd->message_len);
++ trans_id = be32_to_cpu(authcmd->trans_id);
++
++ lpfc_els_rsp_acc(vport, ELS_CMD_ACC, cmdiocb, ndlp, NULL);
++
++ if (vport->auth.auth_state == LPFC_AUTH_SUCCESS) {
++ lpfc_printf_vlog(vport, KERN_ERR, LOG_SECURITY,
++ "1036 Authentication transaction reject - "
++ "re-auth request reason 0x%x exp 0x%x\n",
++ rjt->reason, rjt->explanation);
++ lpfc_port_auth_failed(ndlp);
++ if (vport->auth.auth_msg_state == LPFC_DHCHAP_SUCCESS) {
++ /* start authentication */
++ lpfc_start_authentication(vport, ndlp);
++ }
++ } else {
++ if (rjt->reason == LOGIC_ERR && rjt->explanation == RESTART) {
++ lpfc_printf_vlog(vport, KERN_ERR, LOG_SECURITY,
++ "1037 Authentication transaction "
++ "reject - restarting authentication. "
++ "reason 0x%x exp 0x%x\n",
++ rjt->reason, rjt->explanation);
++ /* restart auth */
++ lpfc_start_authentication(vport, ndlp);
++ } else {
++ lpfc_printf_vlog(vport, KERN_ERR, LOG_SECURITY,
++ "1057 Authentication transaction "
++ "reject. reason 0x%x exp 0x%x\n",
++ rjt->reason, rjt->explanation);
++ vport->auth.auth_msg_state = LPFC_AUTH_REJECT;
++ if (!(phba->sli3_options & LPFC_SLI3_NPIV_ENABLED) &&
++ (phba->link_state != LPFC_CLEAR_LA)) {
++ /* If Auth failed enable link interrupt. */
++ lpfc_issue_clear_la(phba, vport);
++ }
++ }
++ }
++}
++
++static void
++lpfc_els_rcv_chap_reply(struct lpfc_vport *vport, struct lpfc_iocbq *cmdiocb,
++ struct lpfc_nodelist *ndlp)
++{
++
++ struct Scsi_Host *shost = lpfc_shost_from_vport(vport);
++ struct lpfc_dmabuf *pcmd = cmdiocb->context2;
++ struct lpfc_auth_message *authcmd;
++ uint32_t message_len;
++ uint32_t trans_id;
++ struct fc_auth_req *fc_req;
++ struct fc_auth_rsp *fc_rsp;
++ uint32_t data_len;
++
++ authcmd = pcmd->virt;
++ message_len = be32_to_cpu(authcmd->message_len);
++ trans_id = be32_to_cpu(authcmd->trans_id);
++
++ lpfc_els_rsp_acc(vport, ELS_CMD_ACC, cmdiocb, ndlp, NULL);
++
++ fc_req = kzalloc(MAX_AUTH_REQ_SIZE, GFP_KERNEL);
++
++ fc_req->local_wwpn = wwn_to_u64(vport->fc_portname.u.wwn);
++ if (ndlp->nlp_type & NLP_FABRIC)
++ fc_req->remote_wwpn = AUTH_FABRIC_WWN;
++ else
++ fc_req->remote_wwpn = wwn_to_u64(ndlp->nlp_portname.u.wwn);
++
++ if (vport->auth.auth_msg_state != LPFC_DHCHAP_CHALLENGE) {
++ lpfc_printf_vlog(vport, KERN_ERR, LOG_SECURITY,
++ "1039 Not Expecting Reply - rejecting. State "
++ "0x%x\n", vport->auth.auth_state);
++
++ lpfc_issue_els_auth_reject(vport, ndlp, AUTH_ERR, BAD_PROTOCOL);
++ return;
++ }
++
++ if (trans_id != vport->auth.trans_id) {
++ lpfc_printf_vlog(vport, KERN_ERR, LOG_SECURITY,
++ "1040 Bad Reply trans_id- rejecting. "
++ "Trans_id: 0x%x Expecting: 0x%x \n",
++ trans_id, vport->auth.trans_id);
++ lpfc_issue_els_auth_reject(vport, ndlp, AUTH_ERR, BAD_PAYLOAD);
++ return;
++ }
++
++ /* Zero is a valid length to be returned */
++ data_len = lpfc_unpack_dhchap_reply(vport, authcmd->data, fc_req);
++ fc_req->u.dhchap_success.hash_id = vport->auth.hash_id;
++ fc_req->u.dhchap_success.dh_group_id = vport->auth.group_id;
++ fc_req->u.dhchap_success.transaction_id = vport->auth.trans_id;
++ fc_req->u.dhchap_success.our_challenge_len = vport->auth.challenge_len;
++ memcpy(fc_req->u.dhchap_success.data, vport->auth.challenge,
++ vport->auth.challenge_len);
++
++ fc_rsp = kzalloc(MAX_AUTH_RSP_SIZE, GFP_KERNEL);
++
++ if (lpfc_fc_security_dhchap_authenticate(shost, fc_req,
++ (sizeof(struct fc_auth_req) +
++ data_len + vport->auth.challenge_len),
++ fc_rsp, MAX_AUTH_RSP_SIZE)) {
++ kfree(fc_rsp);
++ lpfc_issue_els_auth_reject(vport, ndlp, LOGIC_ERR, 0);
++ }
++
++ kfree(fc_req);
++
++}
++
++static void
++lpfc_els_rcv_chap_suc(struct lpfc_vport *vport, struct lpfc_iocbq *cmdiocb,
++ struct lpfc_nodelist *ndlp)
++{
++
++ struct Scsi_Host *shost = lpfc_shost_from_vport(vport);
++ struct lpfc_dmabuf *pcmd = cmdiocb->context2;
++ struct lpfc_auth_message *authcmd;
++ uint32_t message_len;
++ uint32_t trans_id;
++ struct fc_auth_req *fc_req;
++ struct fc_auth_rsp *fc_rsp;
++ uint32_t data_len;
++
++ authcmd = pcmd->virt;
++ message_len = be32_to_cpu(authcmd->message_len);
++ trans_id = be32_to_cpu(authcmd->trans_id);
++
++ lpfc_els_rsp_acc(vport, ELS_CMD_ACC, cmdiocb, ndlp, NULL);
++
++ if (vport->auth.auth_msg_state != LPFC_DHCHAP_REPLY &&
++ vport->auth.auth_msg_state != LPFC_DHCHAP_SUCCESS_REPLY) {
++ lpfc_issue_els_auth_reject(vport, ndlp, AUTH_ERR, BAD_PROTOCOL);
++ return;
++ }
++
++ if (trans_id != vport->auth.trans_id) {
++ lpfc_issue_els_auth_reject(vport, ndlp, AUTH_ERR, BAD_PAYLOAD);
++ return;
++ }
++
++ if (vport->auth.auth_msg_state == LPFC_DHCHAP_REPLY &&
++ vport->auth.bidirectional) {
++
++ fc_req = kzalloc(MAX_AUTH_REQ_SIZE, GFP_KERNEL);
++ if (!fc_req)
++ return;
++
++ fc_req->local_wwpn = wwn_to_u64(vport->fc_portname.u.wwn);
++ if (ndlp->nlp_type & NLP_FABRIC)
++ fc_req->remote_wwpn = AUTH_FABRIC_WWN;
++ else
++ fc_req->remote_wwpn =
++ wwn_to_u64(ndlp->nlp_portname.u.wwn);
++ fc_req->u.dhchap_success.hash_id = vport->auth.hash_id;
++ fc_req->u.dhchap_success.dh_group_id = vport->auth.group_id;
++ fc_req->u.dhchap_success.transaction_id = vport->auth.trans_id;
++ fc_req->u.dhchap_success.our_challenge_len =
++ vport->auth.challenge_len;
++
++ memcpy(fc_req->u.dhchap_success.data, vport->auth.challenge,
++ vport->auth.challenge_len);
++
++ /* Zero is a valid return length */
++ data_len = lpfc_unpack_dhchap_success(vport,
++ authcmd->data,
++ fc_req);
++
++ fc_rsp = kzalloc(MAX_AUTH_RSP_SIZE, GFP_KERNEL);
++ if (!fc_rsp)
++ return;
++
++ if (lpfc_fc_security_dhchap_authenticate(shost,
++ fc_req, sizeof(struct fc_auth_req) + data_len,
++ fc_rsp, MAX_AUTH_RSP_SIZE)) {
++ kfree(fc_rsp);
++ lpfc_issue_els_auth_reject(vport, ndlp, LOGIC_ERR, 0);
++ }
++
++ kfree(fc_req);
++
++ } else {
++ vport->auth.auth_msg_state = LPFC_DHCHAP_SUCCESS;
++
++ kfree(vport->auth.challenge);
++ vport->auth.challenge = NULL;
++ vport->auth.challenge_len = 0;
++
++ if (vport->auth.auth_state != LPFC_AUTH_SUCCESS) {
++ vport->auth.auth_state = LPFC_AUTH_SUCCESS;
++ lpfc_printf_vlog(vport, KERN_INFO, LOG_SECURITY,
++ "1041 Authentication Successful\n");
++
++ lpfc_start_discovery(vport);
++
++ } else {
++ lpfc_printf_vlog(vport, KERN_INFO, LOG_SECURITY,
++ "1042 Re-Authentication Successful\n");
++ }
++ /* If config requires re-authentication start the timer */
++ vport->auth.last_auth = jiffies;
++ if (vport->auth.reauth_interval)
++ mod_timer(&ndlp->nlp_reauth_tmr, jiffies +
++ vport->auth.reauth_interval * 60 * HZ);
++ }
++ vport->auth.direction |= AUTH_DIRECTION_REMOTE;
++}
++
+ /**
+ * lpfc_send_els_failure_event: Posts an ELS command failure event.
+ * @phba: Pointer to hba context object.
+@@ -5462,6 +5866,48 @@ lpfc_els_unsol_buffer(struct lpfc_hba *p
+ if (newnode)
+ lpfc_nlp_put(ndlp);
+ break;
++ case ELS_CMD_AUTH_RJT:
++ lpfc_debugfs_disc_trc(vport, LPFC_DISC_TRC_ELS_UNSOL,
++ "RCV AUTH_RJT: did:x%x/ste:x%x flg:x%x",
++ did, vport->port_state, ndlp->nlp_flag);
++
++ lpfc_els_rcv_auth_rjt(vport, elsiocb, ndlp);
++ break;
++ case ELS_CMD_AUTH_NEG:
++ lpfc_debugfs_disc_trc(vport, LPFC_DISC_TRC_ELS_UNSOL,
++ "RCV AUTH_NEG: did:x%x/ste:x%x flg:x%x",
++ did, vport->port_state, ndlp->nlp_flag);
++
++ lpfc_els_rcv_auth_neg(vport, elsiocb, ndlp);
++ break;
++ case ELS_CMD_DH_CHA:
++ lpfc_debugfs_disc_trc(vport, LPFC_DISC_TRC_ELS_UNSOL,
++ "RCV DH_CHA: did:x%x/ste:x%x flg:x%x",
++ did, vport->port_state, ndlp->nlp_flag);
++
++ lpfc_els_rcv_chap_chal(vport, elsiocb, ndlp);
++ break;
++ case ELS_CMD_DH_REP:
++ lpfc_debugfs_disc_trc(vport, LPFC_DISC_TRC_ELS_UNSOL,
++ "RCV DH_REP: did:x%x/ste:x%x flg:x%x",
++ did, vport->port_state, ndlp->nlp_flag);
++
++ lpfc_els_rcv_chap_reply(vport, elsiocb, ndlp);
++ break;
++ case ELS_CMD_DH_SUC:
++ lpfc_debugfs_disc_trc(vport, LPFC_DISC_TRC_ELS_UNSOL,
++ "RCV DH_SUC: did:x%x/ste:x%x flg:x%x",
++ did, vport->port_state, ndlp->nlp_flag);
++
++ lpfc_els_rcv_chap_suc(vport, elsiocb, ndlp);
++ break;
++
++ case ELS_CMD_AUTH_DONE:
++ lpfc_debugfs_disc_trc(vport, LPFC_DISC_TRC_ELS_UNSOL,
++ "RCV AUTH_DONE: did:x%x/ste:x%x flg:x%x",
++ did, vport->port_state, ndlp->nlp_flag);
++
++		break;
+ default:
+ lpfc_debugfs_disc_trc(vport, LPFC_DISC_TRC_ELS_UNSOL,
+ "RCV ELS cmd: cmd:x%x did:x%x/ste:x%x",
+@@ -5747,7 +6193,7 @@ lpfc_cmpl_reg_new_vport(struct lpfc_hba
+ } else {
+ if (vport == phba->pport)
+ lpfc_issue_fabric_reglogin(vport);
+- else
++ else if (!vport->cfg_enable_auth)
+ lpfc_do_scr_ns_plogi(phba, vport);
+ }
+
+@@ -5840,6 +6286,9 @@ lpfc_cmpl_els_fdisc(struct lpfc_hba *phb
+ struct lpfc_nodelist *next_np;
+ IOCB_t *irsp = &rspiocb->iocb;
+ struct lpfc_iocbq *piocb;
++ struct lpfc_dmabuf *pcmd = cmdiocb->context2, *prsp;
++ struct serv_parm *sp;
++ struct lpfc_name wwpn;
+
+ lpfc_printf_vlog(vport, KERN_INFO, LOG_ELS,
+ "0123 FDISC completes. x%x/x%x prevDID: x%x\n",
+@@ -5867,11 +6316,26 @@ lpfc_cmpl_els_fdisc(struct lpfc_hba *phb
+ irsp->ulpStatus, irsp->un.ulpWord[4]);
+ goto fdisc_failed;
+ }
+- if (vport->fc_vport->vport_state == FC_VPORT_INITIALIZING)
+- lpfc_vport_set_state(vport, FC_VPORT_FAILED);
+- lpfc_nlp_put(ndlp);
+- /* giving up on FDISC. Cancel discovery timer */
+- lpfc_can_disctmo(vport);
++ prsp = list_get_first(&pcmd->list, struct lpfc_dmabuf, list);
++ sp = prsp->virt + sizeof(uint32_t);
++ if (sp->cmn.security)
++ ndlp->nlp_flag |= NLP_SC_REQ;
++ else
++ ndlp->nlp_flag &= ~NLP_SC_REQ;
++ if (vport->cfg_enable_auth) {
++ u64_to_wwn(AUTH_FABRIC_WWN, wwpn.u.wwn);
++ if (lpfc_get_auth_config(ndlp, &wwpn))
++ goto fdisc_failed;
++ } else {
++ vport->auth.security_active = 0;
++ if (sp->cmn.security) {
++ lpfc_printf_vlog(vport, KERN_ERR, LOG_SECURITY,
++ "1056 Authentication mode is "
++ "disabled, but is required "
++ "by the fabric.\n");
++ goto fdisc_failed;
++ }
++ }
+ spin_lock_irq(shost->host_lock);
+ vport->fc_flag |= FC_FABRIC;
+ if (vport->phba->fc_topology == TOPOLOGY_LOOP)
+@@ -5905,7 +6369,7 @@ lpfc_cmpl_els_fdisc(struct lpfc_hba *phb
+
+ if (vport->fc_flag & FC_VPORT_NEEDS_REG_VPI)
+ lpfc_register_new_vport(phba, vport, ndlp);
+- else
++ else if (!vport->cfg_enable_auth)
+ lpfc_do_scr_ns_plogi(phba, vport);
+ goto out;
+ fdisc_failed:
+@@ -5980,6 +6444,10 @@ lpfc_issue_els_fdisc(struct lpfc_vport *
+ sp->cls2.seqDelivery = 1;
+ sp->cls3.seqDelivery = 1;
+
++ /* Set the security service parameter */
++ if (vport->cfg_enable_auth)
++ sp->cmn.security = 1;
++
+ pcmd += sizeof(uint32_t); /* CSP Word 2 */
+ pcmd += sizeof(uint32_t); /* CSP Word 3 */
+ pcmd += sizeof(uint32_t); /* CSP Word 4 */
+@@ -6474,3 +6942,180 @@ void lpfc_fabric_abort_hba(struct lpfc_h
+ (piocb->iocb_cmpl) (phba, piocb, piocb);
+ }
+ }
++static void
++lpfc_cmpl_els_auth(struct lpfc_hba *phba, struct lpfc_iocbq *cmdiocb,
++ struct lpfc_iocbq *rspiocb)
++{
++ IOCB_t *irsp = &rspiocb->iocb;
++ struct lpfc_vport *vport = cmdiocb->vport;
++ struct lpfc_nodelist *ndlp = (struct lpfc_nodelist *) cmdiocb->context1;
++
++ /* Check to see if link went down during discovery */
++ if (lpfc_els_chk_latt(vport)) {
++ vport->auth.auth_msg_state = LPFC_AUTH_NONE;
++ lpfc_els_free_iocb(phba, cmdiocb);
++ return;
++ }
++
++ if (irsp->ulpStatus) {
++ if (irsp->ulpStatus == IOSTAT_LS_RJT) {
++ lpfc_printf_vlog(vport, KERN_ERR, LOG_ELS,
++ "1043 Authentication LS_RJT\n");
++ }
++ /* Check for retry */
++ if (!lpfc_els_retry(phba, cmdiocb, rspiocb)) {
++ if (irsp->ulpStatus != IOSTAT_LS_RJT) {
++ lpfc_printf_vlog(vport, KERN_ERR, LOG_ELS,
++ "1045 Issue AUTH_NEG failed."
++ "Status:%x\n",
++ irsp->ulpStatus);
++ }
++ if (vport->auth.auth_mode == FC_AUTHMODE_ACTIVE) {
++ lpfc_can_disctmo(vport);
++ lpfc_port_auth_failed(ndlp);
++ }
++ }
++ if (!(phba->sli3_options & LPFC_SLI3_NPIV_ENABLED) &&
++ (phba->link_state != LPFC_CLEAR_LA))
++ lpfc_issue_clear_la(phba, vport);
++ lpfc_els_free_iocb(phba, cmdiocb);
++ return;
++ }
++
++ if (vport->auth.auth_msg_state == LPFC_DHCHAP_SUCCESS ||
++ vport->auth.auth_msg_state == LPFC_DHCHAP_SUCCESS_REPLY) {
++
++ kfree(vport->auth.challenge);
++ vport->auth.challenge = NULL;
++ vport->auth.challenge_len = 0;
++ kfree(vport->auth.dh_pub_key);
++ vport->auth.dh_pub_key = NULL;
++ vport->auth.dh_pub_key_len = 0;
++
++ if (vport->auth.auth_msg_state == LPFC_DHCHAP_SUCCESS) {
++ if (vport->auth.auth_state != LPFC_AUTH_SUCCESS) {
++ lpfc_printf_vlog(vport, KERN_WARNING,
++ LOG_SECURITY, "1046 "
++ "Authentication Successful\n");
++ vport->auth.auth_state = LPFC_AUTH_SUCCESS;
++ lpfc_start_discovery(vport);
++ } else {
++ lpfc_printf_vlog(vport, KERN_WARNING,
++ LOG_SECURITY,
++ "1047 Re-Authentication"
++ " Successful\n");
++ }
++ }
++ /* restart authentication timer */
++ vport->auth.last_auth = jiffies;
++ if (vport->auth.reauth_interval)
++ mod_timer(&ndlp->nlp_reauth_tmr,
++ jiffies +
++ vport->auth.reauth_interval * 60 * HZ);
++ }
++ lpfc_els_free_iocb(phba, cmdiocb);
++}
++
++int
++lpfc_issue_els_auth(struct lpfc_vport *vport,
++ struct lpfc_nodelist *ndlp,
++ uint8_t message_code,
++ uint8_t *payload,
++ uint32_t payload_len)
++{
++ struct lpfc_hba *phba = vport->phba;
++ struct lpfc_iocbq *elsiocb;
++ struct lpfc_auth_message *authreq;
++
++ elsiocb = lpfc_prep_els_iocb(vport, 1,
++ sizeof(struct lpfc_auth_message) + payload_len,
++ 0, ndlp, ndlp->nlp_DID, ELS_CMD_AUTH);
++
++ if (!elsiocb)
++ return 1;
++ authreq = (struct lpfc_auth_message *)
++ (((struct lpfc_dmabuf *) elsiocb->context2)->virt);
++ authreq->command_code = ELS_CMD_AUTH_BYTE;
++ authreq->flags = 0;
++ authreq->message_code = message_code;
++ authreq->protocol_ver = AUTH_VERSION;
++ authreq->message_len = cpu_to_be32(payload_len);
++ authreq->trans_id = cpu_to_be32(vport->auth.trans_id);
++ memcpy(authreq->data, payload, payload_len);
++
++ elsiocb->iocb_cmpl = lpfc_cmpl_els_auth;
++
++ if (lpfc_sli_issue_iocb(phba, &phba->sli.ring[LPFC_ELS_RING],
++ elsiocb, 0) == IOCB_ERROR) {
++ lpfc_els_free_iocb(phba, elsiocb);
++ return 1;
++ }
++
++ return 0;
++}
++
++static void
++lpfc_cmpl_els_auth_reject(struct lpfc_hba *phba, struct lpfc_iocbq *cmdiocb,
++ struct lpfc_iocbq *rspiocb)
++{
++ struct lpfc_vport *vport = cmdiocb->vport;
++ IOCB_t *irsp = &rspiocb->iocb;
++
++ if (irsp->ulpStatus) {
++ /* Check for retry */
++ if (!lpfc_els_retry(phba, cmdiocb, rspiocb)) {
++ lpfc_printf_log(phba, KERN_ERR, LOG_ELS,
++ "1048 Issue AUTH_REJECT failed.\n");
++ }
++ } else
++ vport->port_state = LPFC_VPORT_UNKNOWN;
++
++ lpfc_els_free_iocb(phba, cmdiocb);
++}
++
++int
++lpfc_issue_els_auth_reject(struct lpfc_vport *vport,
++ struct lpfc_nodelist *ndlp,
++ uint8_t reason, uint8_t explanation)
++{
++ struct lpfc_hba *phba = vport->phba;
++ struct lpfc_iocbq *elsiocb;
++ struct lpfc_sli_ring *pring;
++ struct lpfc_sli *psli;
++ struct lpfc_auth_message *authreq;
++ struct lpfc_auth_reject *reject;
++
++ psli = &phba->sli;
++ pring = &psli->ring[LPFC_ELS_RING];
++
++ vport->auth.auth_msg_state = LPFC_AUTH_REJECT;
++
++ elsiocb = lpfc_prep_els_iocb(vport, 1, sizeof(struct lpfc_auth_message)
++ + sizeof(struct lpfc_auth_reject), 0, ndlp,
++ ndlp->nlp_DID, ELS_CMD_AUTH);
++
++ if (!elsiocb)
++ return 1;
++
++ authreq = (struct lpfc_auth_message *)
++ (((struct lpfc_dmabuf *) elsiocb->context2)->virt);
++ authreq->command_code = ELS_CMD_AUTH_BYTE;
++ authreq->flags = 0;
++ authreq->message_code = AUTH_REJECT;
++ authreq->protocol_ver = AUTH_VERSION;
++ reject = (struct lpfc_auth_reject *)authreq->data;
++ memset(reject, 0, sizeof(struct lpfc_auth_reject));
++ reject->reason = reason;
++ reject->explanation = explanation;
++
++ authreq->message_len = cpu_to_be32(sizeof(struct lpfc_auth_reject));
++ authreq->trans_id = cpu_to_be32(vport->auth.trans_id);
++ elsiocb->iocb_cmpl = lpfc_cmpl_els_auth_reject;
++
++ if (lpfc_sli_issue_iocb(phba, pring, elsiocb, 0) == IOCB_ERROR) {
++ lpfc_els_free_iocb(phba, elsiocb);
++ return 1;
++ }
++
++ return 0;
++}
+--- a/drivers/scsi/lpfc/lpfc.h
++++ b/drivers/scsi/lpfc/lpfc.h
+@@ -217,18 +217,81 @@ struct lpfc_stats {
+ uint32_t fcpLocalErr;
+ };
+
++struct lpfc_dmabufext {
++ struct lpfc_dmabuf dma;
++ uint32_t size;
++ uint32_t flag;
++};
++
+ enum sysfs_mbox_state {
+ SMBOX_IDLE,
+ SMBOX_WRITING,
+- SMBOX_READING
++ SMBOX_WRITING_MBEXT,
++ SMBOX_READING_MBEXT,
++ SMBOX_READING,
++ SMBOX_WRITING_BUFF,
++ SMBOX_READING_BUFF
++};
++
++struct lpfc_sysfs_mbox_data {
++ MAILBOX_t mbox;
++ uint32_t mboffset;
++ uint32_t in_ext_wlen;
++ uint32_t out_ext_wlen;
+ };
+
+ struct lpfc_sysfs_mbox {
++ struct lpfc_sysfs_mbox_data mbox_data;
+ enum sysfs_mbox_state state;
+ size_t offset;
+ struct lpfcMboxq * mbox;
++ /* process id of the mgmt application */
++ pid_t pid;
++ struct list_head list;
++ uint8_t * mbext;
++ uint32_t extoff;
++ struct lpfc_dmabuf * txmit_buff;
++ struct lpfc_dmabuf * rcv_buff;
++};
++#define MENLO_DID 0x0000FC0E
++
++enum sysfs_menlo_state {
++ SMENLO_IDLE,
++ SMENLO_WRITING,
++ SMENLO_WRITING_MBEXT,
++ SMENLO_READING
++};
++
++struct lpfc_sysfs_menlo_hdr {
++ uint32_t cmd;
++ uint32_t cmdsize;
++ uint32_t rspsize;
++};
++
++struct lpfc_menlo_genreq64 {
++ size_t offset;
++ struct lpfc_iocbq *cmdiocbq;
++ struct lpfc_iocbq *rspiocbq;
++ struct lpfc_dmabuf *bmp;
++ struct lpfc_dmabufext *indmp;
++ struct ulp_bde64 *cmdbpl;
++ struct lpfc_dmabufext *outdmp;
++ uint32_t timeout;
++ struct list_head inhead;
++ struct list_head outhead;
+ };
+
++struct lpfc_sysfs_menlo {
++ enum sysfs_menlo_state state;
++ /* process id of the mgmt application */
++ struct lpfc_sysfs_menlo_hdr cmdhdr;
++ struct lpfc_menlo_genreq64 cr;
++ struct lpfc_menlo_genreq64 cx;
++ pid_t pid;
++ struct list_head list;
++};
++
++
+ struct lpfc_hba;
+
+
+@@ -261,6 +324,52 @@ enum hba_state {
+ LPFC_HBA_ERROR = -1
+ };
+
++enum auth_state {
++ LPFC_AUTH_UNKNOWN = 0,
++ LPFC_AUTH_SUCCESS = 1,
++ LPFC_AUTH_FAIL = 2,
++};
++enum auth_msg_state {
++ LPFC_AUTH_NONE = 0,
++ LPFC_AUTH_REJECT = 1, /* Sent a Reject */
++ LPFC_AUTH_NEGOTIATE = 2, /* Auth Negotiate */
++ LPFC_DHCHAP_CHALLENGE = 3, /* Challenge */
++ LPFC_DHCHAP_REPLY = 4, /* Reply */
++ LPFC_DHCHAP_SUCCESS_REPLY = 5, /* Success with Reply */
++ LPFC_DHCHAP_SUCCESS = 6, /* Success */
++ LPFC_AUTH_DONE = 7,
++};
++
++struct lpfc_auth {
++ uint8_t auth_mode;
++ uint8_t bidirectional;
++ uint8_t hash_priority[4];
++ uint32_t hash_len;
++ uint8_t dh_group_priority[8];
++ uint32_t dh_group_len;
++ uint32_t reauth_interval;
++
++ uint8_t security_active;
++ enum auth_state auth_state;
++ enum auth_msg_state auth_msg_state;
++ uint32_t trans_id; /* current transaction id. Can be set
++				by incoming transactions as well */
++ uint32_t group_id;
++ uint32_t hash_id;
++ uint32_t direction;
++#define AUTH_DIRECTION_NONE 0
++#define AUTH_DIRECTION_REMOTE 0x1
++#define AUTH_DIRECTION_LOCAL 0x2
++#define AUTH_DIRECTION_BIDI (AUTH_DIRECTION_LOCAL|AUTH_DIRECTION_REMOTE)
++
++ uint8_t *challenge;
++ uint32_t challenge_len;
++ uint8_t *dh_pub_key;
++ uint32_t dh_pub_key_len;
++
++ unsigned long last_auth;
++};
++
+ struct lpfc_vport {
+ struct list_head listentry;
+ struct lpfc_hba *phba;
+@@ -356,6 +465,14 @@ struct lpfc_vport {
+ #define FC_UNLOADING 0x2 /* HBA in process of unloading drvr */
+ char *vname; /* Application assigned name */
+
++ /* Fields used for accessing auth service */
++ struct lpfc_auth auth;
++ uint32_t sc_tran_id;
++ struct list_head sc_response_wait_queue;
++ struct list_head sc_users;
++ struct work_struct sc_online_work;
++ struct work_struct sc_offline_work;
++
+ /* Vport Config Parameters */
+ uint32_t cfg_scan_down;
+ uint32_t cfg_lun_queue_depth;
+@@ -371,6 +488,7 @@ struct lpfc_vport {
+ uint32_t cfg_max_luns;
+ uint32_t cfg_enable_da_id;
+ uint32_t cfg_max_scsicmpl_time;
++ uint32_t cfg_enable_auth;
+
+ uint32_t dev_loss_tmo_changed;
+
+@@ -445,6 +563,7 @@ struct lpfc_hba {
+ struct lpfc_dmabuf slim2p;
+
+ MAILBOX_t *mbox;
++ uint32_t *mbox_ext;
+ uint32_t *inb_ha_copy;
+ uint32_t *inb_counter;
+ uint32_t inb_last_counter;
+@@ -573,7 +692,9 @@ struct lpfc_hba {
+ uint64_t fc4OutputRequests;
+ uint64_t fc4ControlRequests;
+
+- struct lpfc_sysfs_mbox sysfs_mbox;
++ /* List of mailbox commands issued through sysfs */
++ struct list_head sysfs_mbox_list;
++ struct list_head sysfs_menlo_list;
+
+ /* fastpath list. */
+ spinlock_t scsi_buf_list_lock;
+@@ -595,11 +716,13 @@ struct lpfc_hba {
+ struct fc_host_statistics link_stats;
+ enum intr_type_t intr_type;
+ struct msix_entry msix_entries[LPFC_MSIX_VECTORS];
++ struct lpfcdfc_host *dfc_host;
+
+ struct list_head port_list;
+ struct lpfc_vport *pport; /* physical lpfc_vport pointer */
+ uint16_t max_vpi; /* Maximum virtual nports */
+-#define LPFC_MAX_VPI 0xFFFF /* Max number of VPI supported */
++#define LPFC_MAX_VPI 0xFFFF /* Max number of VPI supported */
++#define LPFC_INTR_VPI 100 /* Intermediate VPI supported */
+ unsigned long *vpi_bmask; /* vpi allocation table */
+
+ /* Data structure used by fabric iocb scheduler */
+--- a/drivers/scsi/lpfc/lpfc_hbadisc.c
++++ b/drivers/scsi/lpfc/lpfc_hbadisc.c
+@@ -59,6 +59,47 @@ static uint8_t lpfcAlpaArray[] = {
+
+ static void lpfc_disc_timeout_handler(struct lpfc_vport *);
+ static void lpfc_disc_flush_list(struct lpfc_vport *vport);
++void
++lpfc_start_discovery(struct lpfc_vport *vport)
++{
++ struct lpfc_hba *phba = vport->phba;
++ struct lpfc_vport **vports;
++ int i;
++
++ if (vport->auth.security_active &&
++ vport->auth.auth_state != LPFC_AUTH_SUCCESS) {
++ lpfc_printf_vlog(vport, KERN_ERR, LOG_DISCOVERY,
++ "0285 Authentication not complete.\n");
++ return;
++ }
++ if (vport->port_type == LPFC_NPIV_PORT) {
++ lpfc_do_scr_ns_plogi(phba, vport);
++ return;
++ }
++
++ vports = lpfc_create_vport_work_array(phba);
++ if (vports != NULL)
++ for (i = 0; i <= phba->max_vpi && vports[i] != NULL; i++) {
++ if (vports[i]->port_type == LPFC_PHYSICAL_PORT)
++ continue;
++ if (phba->fc_topology == TOPOLOGY_LOOP) {
++ lpfc_vport_set_state(vports[i],
++ FC_VPORT_LINKDOWN);
++ continue;
++ }
++ if (phba->link_flag & LS_NPIV_FAB_SUPPORTED)
++ lpfc_initial_fdisc(vports[i]);
++ else {
++ lpfc_vport_set_state(vports[i],
++ FC_VPORT_NO_FABRIC_SUPP);
++ lpfc_printf_vlog(vports[i], KERN_ERR, LOG_ELS,
++ "0259 No NPIV Fabric "
++ "support\n");
++ }
++ }
++ lpfc_destroy_vport_work_array(phba, vports);
++ lpfc_do_scr_ns_plogi(phba, vport);
++}
+
+ void
+ lpfc_terminate_rport_io(struct fc_rport *rport)
+@@ -416,6 +457,15 @@ lpfc_work_list_done(struct lpfc_hba *phb
+ */
+ lpfc_nlp_put(ndlp);
+ break;
++ case LPFC_EVT_REAUTH:
++ ndlp = (struct lpfc_nodelist *) (evtp->evt_arg1);
++ lpfc_reauthentication_handler(ndlp);
++ free_evt = 0; /* evt is part of ndlp */
++ /* decrement the node reference count held
++ * for this queued work
++ */
++ lpfc_nlp_put(ndlp);
++ break;
+ case LPFC_EVT_DEV_LOSS:
+ ndlp = (struct lpfc_nodelist *)(evtp->evt_arg1);
+ lpfc_dev_loss_tmo_handler(ndlp);
+@@ -648,6 +698,9 @@ lpfc_cleanup_rpis(struct lpfc_vport *vpo
+ continue;
+ if (ndlp->nlp_state == NLP_STE_UNUSED_NODE)
+ continue;
++ /* Stop re-authentication timer of all nodes. */
++ del_timer_sync(&ndlp->nlp_reauth_tmr);
++
+ if ((phba->sli3_options & LPFC_SLI3_VPORT_TEARDOWN) ||
+ ((vport->port_type == LPFC_NPIV_PORT) &&
+ (ndlp->nlp_DID == NameServer_DID)))
+@@ -697,6 +750,23 @@ lpfc_linkdown_port(struct lpfc_vport *vp
+
+ lpfc_port_link_failure(vport);
+
++ vport->auth.auth_state = LPFC_AUTH_UNKNOWN;
++ vport->auth.auth_msg_state = LPFC_AUTH_NONE;
++}
++
++void
++lpfc_port_auth_failed(struct lpfc_nodelist *ndlp)
++{
++ struct lpfc_vport *vport = ndlp->vport;
++
++ vport->auth.auth_state = LPFC_AUTH_FAIL;
++ vport->auth.auth_msg_state = LPFC_AUTH_NONE;
++ lpfc_nlp_set_state(vport, ndlp, NLP_STE_NPR_NODE);
++ if (ndlp->nlp_type & NLP_FABRIC) {
++ lpfc_port_link_failure(vport);
++ lpfc_vport_set_state(vport, FC_VPORT_FAILED);
++ lpfc_issue_els_logo(vport, ndlp, 0);
++ }
+ }
+
+ int
+@@ -801,7 +871,6 @@ lpfc_linkup_port(struct lpfc_vport *vpor
+ return;
+
+ fc_host_post_event(shost, fc_get_event_number(), FCH_EVT_LINKUP, 0);
+-
+ spin_lock_irq(shost->host_lock);
+ vport->fc_flag &= ~(FC_PT2PT | FC_PT2PT_PLOGI | FC_ABORT_DISCOVERY |
+ FC_RSCN_MODE | FC_NLP_MORE | FC_RSCN_DISCOVERY);
+@@ -1424,8 +1493,6 @@ lpfc_mbx_cmpl_fabric_reg_login(struct lp
+ MAILBOX_t *mb = &pmb->mb;
+ struct lpfc_dmabuf *mp = (struct lpfc_dmabuf *) (pmb->context1);
+ struct lpfc_nodelist *ndlp;
+- struct lpfc_vport **vports;
+- int i;
+
+ ndlp = (struct lpfc_nodelist *) pmb->context2;
+ pmb->context1 = NULL;
+@@ -1463,33 +1530,9 @@ lpfc_mbx_cmpl_fabric_reg_login(struct lp
+ ndlp->nlp_type |= NLP_FABRIC;
+ lpfc_nlp_set_state(vport, ndlp, NLP_STE_UNMAPPED_NODE);
+
+- if (vport->port_state == LPFC_FABRIC_CFG_LINK) {
+- vports = lpfc_create_vport_work_array(phba);
+- if (vports != NULL)
+- for(i = 0;
+- i <= phba->max_vpi && vports[i] != NULL;
+- i++) {
+- if (vports[i]->port_type == LPFC_PHYSICAL_PORT)
+- continue;
+- if (phba->fc_topology == TOPOLOGY_LOOP) {
+- lpfc_vport_set_state(vports[i],
+- FC_VPORT_LINKDOWN);
+- continue;
+- }
+- if (phba->link_flag & LS_NPIV_FAB_SUPPORTED)
+- lpfc_initial_fdisc(vports[i]);
+- else {
+- lpfc_vport_set_state(vports[i],
+- FC_VPORT_NO_FABRIC_SUPP);
+- lpfc_printf_vlog(vport, KERN_ERR,
+- LOG_ELS,
+- "0259 No NPIV "
+- "Fabric support\n");
+- }
+- }
+- lpfc_destroy_vport_work_array(phba, vports);
+- lpfc_do_scr_ns_plogi(phba, vport);
+- }
++ if (vport->port_state == LPFC_FABRIC_CFG_LINK &&
++ !vport->cfg_enable_auth)
++ lpfc_start_discovery(vport);
+
+ lpfc_mbuf_free(phba, mp->virt, mp->phys);
+ kfree(mp);
+@@ -1894,9 +1937,13 @@ lpfc_enable_node(struct lpfc_vport *vpor
+ sizeof (struct lpfc_nodelist) - sizeof (struct list_head));
+ INIT_LIST_HEAD(&ndlp->els_retry_evt.evt_listp);
+ INIT_LIST_HEAD(&ndlp->dev_loss_evt.evt_listp);
++ INIT_LIST_HEAD(&ndlp->els_reauth_evt.evt_listp);
+ init_timer(&ndlp->nlp_delayfunc);
+ ndlp->nlp_delayfunc.function = lpfc_els_retry_delay;
+ ndlp->nlp_delayfunc.data = (unsigned long)ndlp;
++ init_timer(&ndlp->nlp_reauth_tmr);
++ ndlp->nlp_reauth_tmr.function = lpfc_reauth_node;
++ ndlp->nlp_reauth_tmr.data = (unsigned long)ndlp;
+ ndlp->nlp_DID = did;
+ ndlp->vport = vport;
+ ndlp->nlp_sid = NLP_NO_SID;
+@@ -2264,9 +2311,12 @@ lpfc_cleanup_node(struct lpfc_vport *vpo
+
+ ndlp->nlp_last_elscmd = 0;
+ del_timer_sync(&ndlp->nlp_delayfunc);
++ del_timer_sync(&ndlp->nlp_reauth_tmr);
+
+ list_del_init(&ndlp->els_retry_evt.evt_listp);
+ list_del_init(&ndlp->dev_loss_evt.evt_listp);
++ if (!list_empty(&ndlp->els_reauth_evt.evt_listp))
++ list_del_init(&ndlp->els_reauth_evt.evt_listp);
+
+ lpfc_unreg_rpi(vport, ndlp);
+
+@@ -3073,7 +3123,14 @@ lpfc_filter_by_wwpn(struct lpfc_nodelist
+ sizeof(ndlp->nlp_portname)) == 0;
+ }
+
+-static struct lpfc_nodelist *
++static int
++lpfc_filter_by_wwnn(struct lpfc_nodelist *ndlp, void *param)
++{
++ return memcmp(&ndlp->nlp_nodename, param,
++ sizeof(ndlp->nlp_nodename)) == 0;
++}
++
++struct lpfc_nodelist *
+ __lpfc_find_node(struct lpfc_vport *vport, node_filter filter, void *param)
+ {
+ struct lpfc_nodelist *ndlp;
+@@ -3086,6 +3143,22 @@ __lpfc_find_node(struct lpfc_vport *vpor
+ }
+
+ /*
++ * Search node lists for a remote port matching filter criteria
++ * Caller needs to hold host_lock before calling this routine.
++ */
++struct lpfc_nodelist *
++lpfc_find_node(struct lpfc_vport *vport, node_filter filter, void *param)
++{
++ struct Scsi_Host *shost = lpfc_shost_from_vport(vport);
++ struct lpfc_nodelist *ndlp;
++
++ spin_lock_irq(shost->host_lock);
++ ndlp = __lpfc_find_node(vport, filter, param);
++ spin_unlock_irq(shost->host_lock);
++ return ndlp;
++}
++
++/*
+ * This routine looks up the ndlp lists for the given RPI. If rpi found it
+ * returns the node list element pointer else return NULL.
+ */
+@@ -3111,6 +3184,21 @@ lpfc_findnode_wwpn(struct lpfc_vport *vp
+ return ndlp;
+ }
+
++/*
++ * This routine looks up the ndlp lists for the given WWNN. If WWNN found it
++ * returns the node list element pointer else return NULL.
++ */
++struct lpfc_nodelist *
++lpfc_findnode_wwnn(struct lpfc_vport *vport, struct lpfc_name *wwnn)
++{
++ struct Scsi_Host *shost = lpfc_shost_from_vport(vport);
++ struct lpfc_nodelist *ndlp;
++
++ spin_lock_irq(shost->host_lock);
++ ndlp = __lpfc_find_node(vport, lpfc_filter_by_wwnn, wwnn);
++ spin_unlock_irq(shost->host_lock);
++ return ndlp;
++}
+ void
+ lpfc_nlp_init(struct lpfc_vport *vport, struct lpfc_nodelist *ndlp,
+ uint32_t did)
+@@ -3118,9 +3206,13 @@ lpfc_nlp_init(struct lpfc_vport *vport,
+ memset(ndlp, 0, sizeof (struct lpfc_nodelist));
+ INIT_LIST_HEAD(&ndlp->els_retry_evt.evt_listp);
+ INIT_LIST_HEAD(&ndlp->dev_loss_evt.evt_listp);
++ INIT_LIST_HEAD(&ndlp->els_reauth_evt.evt_listp);
+ init_timer(&ndlp->nlp_delayfunc);
+ ndlp->nlp_delayfunc.function = lpfc_els_retry_delay;
+ ndlp->nlp_delayfunc.data = (unsigned long)ndlp;
++ init_timer(&ndlp->nlp_reauth_tmr);
++ ndlp->nlp_reauth_tmr.function = lpfc_reauth_node;
++ ndlp->nlp_reauth_tmr.data = (unsigned long)ndlp;
+ ndlp->nlp_DID = did;
+ ndlp->vport = vport;
+ ndlp->nlp_sid = NLP_NO_SID;
+--- a/drivers/scsi/lpfc/lpfc_hw.h
++++ b/drivers/scsi/lpfc/lpfc_hw.h
+@@ -64,6 +64,7 @@
+ #define SLI3_IOCB_CMD_SIZE 128
+ #define SLI3_IOCB_RSP_SIZE 64
+
++#define BUF_SZ_4K 4096
+
+ /* Common Transport structures and definitions */
+
+@@ -350,7 +351,8 @@ struct csp {
+
+ uint16_t huntgroup:1; /* FC Word 1, bit 23 */
+ uint16_t simplex:1; /* FC Word 1, bit 22 */
+- uint16_t word1Reserved1:3; /* FC Word 1, bit 21:19 */
++ uint16_t security:1; /* FC Word 1, bit 21 */
++ uint16_t word1Reserved1:2; /* FC Word 1, bit 20:19 */
+ uint16_t dhd:1; /* FC Word 1, bit 18 */
+ uint16_t contIncSeqCnt:1; /* FC Word 1, bit 17 */
+ uint16_t payloadlength:1; /* FC Word 1, bit 16 */
+@@ -367,7 +369,8 @@ struct csp {
+ uint16_t payloadlength:1; /* FC Word 1, bit 16 */
+ uint16_t contIncSeqCnt:1; /* FC Word 1, bit 17 */
+ uint16_t dhd:1; /* FC Word 1, bit 18 */
+- uint16_t word1Reserved1:3; /* FC Word 1, bit 21:19 */
++ uint16_t word1Reserved1:2; /* FC Word 1, bit 20:19 */
++ uint16_t security:1; /* FC Word 1, bit 21 */
+ uint16_t simplex:1; /* FC Word 1, bit 22 */
+ uint16_t huntgroup:1; /* FC Word 1, bit 23 */
+ #endif
+@@ -506,6 +509,17 @@ struct serv_parm { /* Structure is in Bi
+ #define ELS_CMD_SCR 0x62000000
+ #define ELS_CMD_RNID 0x78000000
+ #define ELS_CMD_LIRR 0x7A000000
++/*
++ * ELS commands for authentication
++ * ELS_CMD_AUTH<<24 | AUTH_NEGOTIATE<<8 | AUTH_VERSION
++ */
++#define ELS_CMD_AUTH 0x90000000
++#define ELS_CMD_AUTH_RJT 0x90000A01
++#define ELS_CMD_AUTH_NEG 0x90000B01
++#define ELS_CMD_AUTH_DONE 0x90000C01
++#define ELS_CMD_DH_CHA 0x90001001
++#define ELS_CMD_DH_REP 0x90001101
++#define ELS_CMD_DH_SUC 0x90001201
+ #else /* __LITTLE_ENDIAN_BITFIELD */
+ #define ELS_CMD_MASK 0xffff
+ #define ELS_RSP_MASK 0xff
+@@ -542,6 +556,17 @@ struct serv_parm { /* Structure is in Bi
+ #define ELS_CMD_SCR 0x62
+ #define ELS_CMD_RNID 0x78
+ #define ELS_CMD_LIRR 0x7A
++/*
++ * ELS commands for authentication
++ * ELS_CMD_AUTH | AUTH_NEGOTIATE<<16 | AUTH_VERSION<<24
++ */
++#define ELS_CMD_AUTH 0x00000090
++#define ELS_CMD_AUTH_RJT 0x010A0090
++#define ELS_CMD_AUTH_NEG 0x010B0090
++#define ELS_CMD_AUTH_DONE 0x010C0090
++#define ELS_CMD_DH_CHA 0x01100090
++#define ELS_CMD_DH_REP 0x01110090
++#define ELS_CMD_DH_SUC 0x01120090
+ #endif
+
+ /*
+@@ -1319,6 +1344,9 @@ typedef struct { /* FireFly BIU registe
+ #define MBX_HEARTBEAT 0x31
+ #define MBX_WRITE_VPARMS 0x32
+ #define MBX_ASYNCEVT_ENABLE 0x33
++#define MBX_READ_EVENT_LOG_STATUS 0x37
++#define MBX_READ_EVENT_LOG 0x38
++#define MBX_WRITE_EVENT_LOG 0x39
+
+ #define MBX_PORT_CAPABILITIES 0x3B
+ #define MBX_PORT_IOV_CONTROL 0x3C
+@@ -1457,6 +1485,7 @@ typedef struct { /* FireFly BIU registe
+ #define MBXERR_BAD_RCV_LENGTH 14
+ #define MBXERR_DMA_ERROR 15
+ #define MBXERR_ERROR 16
++#define MBXERR_UNKNOWN_CMD 18
+ #define MBX_NOT_FINISHED 255
+
+ #define MBX_BUSY 0xffffff /* Attempted cmd to busy Mailbox */
+@@ -1624,6 +1653,13 @@ typedef struct {
+ } un;
+ } BIU_DIAG_VAR;
+
++/* Structure for MB command READ_EVENT_LOG (0x38) */
++typedef struct {
++ uint32_t rsvd1;
++ uint32_t offset;
++ struct ulp_bde64 rcv_bde64;
++}READ_EVENT_LOG_VAR;
++
+ /* Structure for MB Command INIT_LINK (05) */
+
+ typedef struct {
+@@ -2744,6 +2780,10 @@ typedef struct {
+ /* Union of all Mailbox Command types */
+ #define MAILBOX_CMD_WSIZE 32
+ #define MAILBOX_CMD_SIZE (MAILBOX_CMD_WSIZE * sizeof(uint32_t))
++#define MAILBOX_EXT_WSIZE 512
++#define MAILBOX_EXT_SIZE (MAILBOX_EXT_WSIZE * sizeof(uint32_t))
++#define MAILBOX_HBA_EXT_OFFSET 0x100
++#define MAILBOX_MAX_XMIT_SIZE 1024
+
+ typedef union {
+ uint32_t varWords[MAILBOX_CMD_WSIZE - 1]; /* first word is type/
+@@ -2783,6 +2823,7 @@ typedef union {
+ UNREG_VPI_VAR varUnregVpi; /* cmd = 0x97 (UNREG_VPI) */
+ ASYNCEVT_ENABLE_VAR varCfgAsyncEvent; /*cmd = x33 (CONFIG_ASYNC) */
+ struct config_msi_var varCfgMSI;/* cmd = x30 (CONFIG_MSI) */
++ READ_EVENT_LOG_VAR varRdEventLog; /* cmd = 0x38 (READ_EVENT_LOG) */
+ } MAILVARIANTS;
+
+ /*
+@@ -3364,14 +3405,16 @@ typedef struct _IOCB { /* IOCB structure
+ #define SLI1_SLIM_SIZE (4 * 1024)
+
+ /* Up to 498 IOCBs will fit into 16k
+- * 256 (MAILBOX_t) + 140 (PCB_t) + ( 32 (IOCB_t) * 498 ) = < 16384
++ * 256 (MAILBOX_t) + 512 mailbox extension +
++ * 140 (PCB_t) + ( 32 (IOCB_t) * 498 ) = < 16384
+ */
+ #define SLI2_SLIM_SIZE (64 * 1024)
+
+ /* Maximum IOCBs that will fit in SLI2 slim */
+ #define MAX_SLI2_IOCB 498
+ #define MAX_SLIM_IOCB_SIZE (SLI2_SLIM_SIZE - \
+- (sizeof(MAILBOX_t) + sizeof(PCB_t)))
++ (sizeof(MAILBOX_t) + sizeof(PCB_t) + \
++ sizeof(uint32_t) * MAILBOX_EXT_WSIZE))
+
+ /* HBQ entries are 4 words each = 4k */
+ #define LPFC_TOTAL_HBQ_SIZE (sizeof(struct lpfc_hbq_entry) * \
+@@ -3379,6 +3422,7 @@ typedef struct _IOCB { /* IOCB structure
+
+ struct lpfc_sli2_slim {
+ MAILBOX_t mbx;
++ uint32_t mbx_ext_words[MAILBOX_EXT_WSIZE];
+ PCB_t pcb;
+ IOCB_t IOCBs[MAX_SLIM_IOCB_SIZE];
+ };
+--- a/drivers/scsi/lpfc/lpfc_init.c
++++ b/drivers/scsi/lpfc/lpfc_init.c
+@@ -44,7 +44,22 @@
+ #include "lpfc_crtn.h"
+ #include "lpfc_vport.h"
+ #include "lpfc_version.h"
+-
++#include "lpfc_auth_access.h"
++#include "lpfc_security.h"
++#include <net/sock.h>
++#include <linux/netlink.h>
++
++/* vendor ID used in SCSI netlink calls */
++#define LPFC_NL_VENDOR_ID (SCSI_NL_VID_TYPE_PCI | PCI_VENDOR_ID_EMULEX)
++const char *security_work_q_name = "fc_sc_wq";
++extern struct workqueue_struct *security_work_q;
++extern struct list_head fc_security_user_list;
++extern int fc_service_state;
++void lpfc_fc_sc_security_online(struct work_struct *work);
++void lpfc_fc_sc_security_offline(struct work_struct *work);
++int lpfc_fc_queue_security_work(struct lpfc_vport *, struct work_struct *);
++void lpfc_rcv_nl_event(struct notifier_block *, unsigned long , void *);
++#include "lpfc_ioctl.h"
+ static int lpfc_parse_vpd(struct lpfc_hba *, uint8_t *, int);
+ static void lpfc_get_hba_model_desc(struct lpfc_hba *, uint8_t *, uint8_t *);
+ static int lpfc_post_rcv_buf(struct lpfc_hba *);
+@@ -54,6 +69,26 @@ static struct scsi_transport_template *l
+ static DEFINE_IDR(lpfc_hba_index);
+
+ /**
++ * lpfc_hba_max_vpi - Get the maximum supported VPI for an HBA
++ * @device: The PCI device ID for this HBA
++ *
++ * Description:
++ * This routine will return the maximum supported VPI limit for each HBA. In
++ * most cases the maximum VPI limit will be 0xFFFF, which indicates that the
++ * driver supports whatever the HBA can support. In some cases the driver
++ * supports fewer VPIs than the HBA supports.
++ */
++static inline uint16_t
++lpfc_hba_max_vpi(unsigned short device)
++{
++ if ((device == PCI_DEVICE_ID_HELIOS) ||
++ (device == PCI_DEVICE_ID_ZEPHYR))
++ return LPFC_INTR_VPI;
++ else
++ return LPFC_MAX_VPI;
++}
++
++/**
+ * lpfc_config_port_prep: Perform lpfc initialization prior to config port.
+ * @phba: pointer to lpfc hba data structure.
+ *
+@@ -444,9 +479,20 @@ lpfc_config_port_post(struct lpfc_hba *p
+ /* Set up error attention (ERATT) polling timer */
+ mod_timer(&phba->eratt_poll, jiffies + HZ * LPFC_ERATT_POLL_INTERVAL);
+
++ if (vport->cfg_enable_auth) {
++ if (lpfc_security_service_state == SECURITY_OFFLINE) {
++ lpfc_printf_log(vport->phba, KERN_ERR, LOG_SECURITY,
++ "1000 Authentication is enabled but "
++ "authentication service is not running\n");
++ vport->auth.auth_mode = FC_AUTHMODE_UNKNOWN;
++ phba->link_state = LPFC_HBA_ERROR;
++ mempool_free(pmb, phba->mbox_mem_pool);
++ return 0;
++ }
++ }
++
+ lpfc_init_link(phba, pmb, phba->cfg_topology, phba->cfg_link_speed);
+ pmb->mbox_cmpl = lpfc_sli_def_mbox_cmpl;
+- lpfc_set_loopback_flag(phba);
+ rc = lpfc_sli_issue_mbox(phba, pmb, MBX_NOWAIT);
+ if (rc != MBX_SUCCESS) {
+ lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
+@@ -886,8 +932,7 @@ lpfc_handle_eratt(struct lpfc_hba *phba)
+ fc_host_post_vendor_event(shost, fc_get_event_number(),
+ sizeof(temp_event_data),
+ (char *) &temp_event_data,
+- SCSI_NL_VID_TYPE_PCI
+- | PCI_VENDOR_ID_EMULEX);
++ LPFC_NL_VENDOR_ID);
+
+ spin_lock_irq(&phba->hbalock);
+ phba->over_temp_state = HBA_OVER_TEMP;
+@@ -909,7 +954,7 @@ lpfc_handle_eratt(struct lpfc_hba *phba)
+ shost = lpfc_shost_from_vport(vport);
+ fc_host_post_vendor_event(shost, fc_get_event_number(),
+ sizeof(event_data), (char *) &event_data,
+- SCSI_NL_VID_TYPE_PCI | PCI_VENDOR_ID_EMULEX);
++ LPFC_NL_VENDOR_ID);
+
+ lpfc_offline_eratt(phba);
+ }
+@@ -1675,8 +1720,15 @@ lpfc_cleanup(struct lpfc_vport *vport)
+ void
+ lpfc_stop_vport_timers(struct lpfc_vport *vport)
+ {
++ struct fc_security_request *fc_sc_req;
+ del_timer_sync(&vport->els_tmofunc);
+ del_timer_sync(&vport->fc_fdmitmo);
++ while (!list_empty(&vport->sc_response_wait_queue)) {
++ fc_sc_req = list_get_first(&vport->sc_response_wait_queue,
++ struct fc_security_request, rlist);
++ del_timer_sync(&fc_sc_req->timer);
++ kfree(fc_sc_req);
++ }
+ lpfc_can_disctmo(vport);
+ return;
+ }
+@@ -1963,12 +2015,7 @@ lpfc_create_port(struct lpfc_hba *phba,
+ struct Scsi_Host *shost;
+ int error = 0;
+
+- if (dev != &phba->pcidev->dev)
+- shost = scsi_host_alloc(&lpfc_vport_template,
+- sizeof(struct lpfc_vport));
+- else
+- shost = scsi_host_alloc(&lpfc_template,
+- sizeof(struct lpfc_vport));
++ shost = scsi_host_alloc(&lpfc_template, sizeof(struct lpfc_vport));
+ if (!shost)
+ goto out;
+
+@@ -2017,6 +2064,15 @@ lpfc_create_port(struct lpfc_hba *phba,
+ error = scsi_add_host(shost, dev);
+ if (error)
+ goto out_put_shost;
++ vport->auth.challenge = NULL;
++ vport->auth.challenge_len = 0;
++ vport->auth.dh_pub_key = NULL;
++ vport->auth.dh_pub_key_len = 0;
++
++ INIT_WORK(&vport->sc_online_work, lpfc_fc_sc_security_online);
++ INIT_WORK(&vport->sc_offline_work, lpfc_fc_sc_security_offline);
++ INIT_LIST_HEAD(&vport->sc_users);
++ INIT_LIST_HEAD(&vport->sc_response_wait_queue);
+
+ spin_lock_irq(&phba->hbalock);
+ list_add_tail(&vport->listentry, &phba->port_list);
+@@ -2387,7 +2443,7 @@ lpfc_pci_probe_one(struct pci_dev *pdev,
+ * establish the host.
+ */
+ lpfc_get_cfgparam(phba);
+- phba->max_vpi = LPFC_MAX_VPI;
++ phba->max_vpi = lpfc_hba_max_vpi(phba->pcidev->device);
+
+ /* Initialize timers used by driver */
+ init_timer(&phba->hb_tmofunc);
+@@ -2453,6 +2509,8 @@ lpfc_pci_probe_one(struct pci_dev *pdev,
+
+ memset(phba->slim2p.virt, 0, SLI2_SLIM_SIZE);
+ phba->mbox = phba->slim2p.virt + offsetof(struct lpfc_sli2_slim, mbx);
++ phba->mbox_ext = (phba->slim2p.virt +
++ offsetof(struct lpfc_sli2_slim, mbx_ext_words));
+ phba->pcb = (phba->slim2p.virt + offsetof(struct lpfc_sli2_slim, pcb));
+ phba->IOCBs = (phba->slim2p.virt +
+ offsetof(struct lpfc_sli2_slim, IOCBs));
+@@ -2548,11 +2606,30 @@ lpfc_pci_probe_one(struct pci_dev *pdev,
+ /* Initialize list to save ELS buffers */
+ INIT_LIST_HEAD(&phba->elsbuf);
+
++ /* Initialize list of sysfs mailbox commands */
++ INIT_LIST_HEAD(&phba->sysfs_mbox_list);
++ /* Initialize list of sysfs menlo commands */
++ INIT_LIST_HEAD(&phba->sysfs_menlo_list);
++
+ vport = lpfc_create_port(phba, phba->brd_no, &phba->pcidev->dev);
+ if (!vport)
+ goto out_kthread_stop;
+
+ shost = lpfc_shost_from_vport(vport);
++
++ if ((lpfc_get_security_enabled)(shost)) {
++ unsigned long flags;
++ /* Triggers fcauthd to register if it is running */
++ fc_host_post_event(shost, fc_get_event_number(),
++ FCH_EVT_PORT_ONLINE, shost->host_no);
++ spin_lock_irqsave(&fc_security_user_lock, flags);
++ list_add_tail(&vport->sc_users, &fc_security_user_list);
++ spin_unlock_irqrestore(&fc_security_user_lock, flags);
++ if (fc_service_state == FC_SC_SERVICESTATE_ONLINE) {
++ lpfc_fc_queue_security_work(vport,
++ &vport->sc_online_work);
++ }
++ }
+ phba->pport = vport;
+ lpfc_debugfs_initialize(vport);
+
+@@ -2610,6 +2687,14 @@ lpfc_pci_probe_one(struct pci_dev *pdev,
+ phba->intr_type = INTx;
+ }
+
++ phba->dfc_host = lpfcdfc_host_add(pdev, shost, phba);
++ if (!phba->dfc_host) {
++ lpfc_printf_log(phba, KERN_ERR, LOG_LIBDFC,
++ "1201 Failed to allocate dfc_host \n");
++ error = -ENOMEM;
++ goto out_free_irq;
++ }
++
+ if (lpfc_alloc_sysfs_attr(vport)) {
+ lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
+ "1476 Failed to allocate sysfs attr\n");
+@@ -2658,6 +2743,8 @@ out_remove_device:
+ vport->load_flag |= FC_UNLOADING;
+ spin_unlock_irq(shost->host_lock);
+ out_free_irq:
++ if (phba->dfc_host)
++ lpfcdfc_host_del(phba->dfc_host);
+ lpfc_stop_phba_timers(phba);
+ phba->pport->work_port_events = 0;
+
+@@ -2720,6 +2807,12 @@ lpfc_pci_remove_one(struct pci_dev *pdev
+ struct lpfc_hba *phba = vport->phba;
+ int bars = pci_select_bars(pdev, IORESOURCE_MEM);
+
++ /* In case PCI channel permanently disabled, rescan SCSI devices */
++ if (pdev->error_state == pci_channel_io_perm_failure)
++ lpfc_scsi_dev_rescan(phba);
++ lpfcdfc_host_del(phba->dfc_host);
++ phba->dfc_host = NULL;
++
+ spin_lock_irq(&phba->hbalock);
+ vport->load_flag |= FC_UNLOADING;
+ spin_unlock_irq(&phba->hbalock);
+@@ -3076,12 +3169,34 @@ lpfc_init(void)
+ return -ENOMEM;
+ }
+ }
++ error = scsi_nl_add_driver(LPFC_NL_VENDOR_ID, &lpfc_template,
++ lpfc_rcv_nl_msg, lpfc_rcv_nl_event);
++ if (error)
++ goto out_release_transport;
++ security_work_q = create_singlethread_workqueue(security_work_q_name);
++ if (!security_work_q)
++ goto out_nl_remove_driver;
++ INIT_LIST_HEAD(&fc_security_user_list);
+ error = pci_register_driver(&lpfc_driver);
+- if (error) {
+- fc_release_transport(lpfc_transport_template);
+- if (lpfc_enable_npiv)
+- fc_release_transport(lpfc_vport_transport_template);
+- }
++ if (error)
++ goto out_destroy_workqueue;
++ error = lpfc_cdev_init();
++ if (error)
++ goto out_pci_unregister;
++
++ return error;
++
++out_pci_unregister:
++ pci_unregister_driver(&lpfc_driver);
++out_destroy_workqueue:
++ destroy_workqueue(security_work_q);
++ security_work_q = NULL;
++out_nl_remove_driver:
++ scsi_nl_remove_driver(LPFC_NL_VENDOR_ID);
++out_release_transport:
++ fc_release_transport(lpfc_transport_template);
++ if (lpfc_enable_npiv)
++ fc_release_transport(lpfc_vport_transport_template);
+
+ return error;
+ }
+@@ -3097,9 +3212,14 @@ static void __exit
+ lpfc_exit(void)
+ {
+ pci_unregister_driver(&lpfc_driver);
++ if (security_work_q)
++ destroy_workqueue(security_work_q);
++ security_work_q = NULL;
++ scsi_nl_remove_driver(LPFC_NL_VENDOR_ID);
+ fc_release_transport(lpfc_transport_template);
+ if (lpfc_enable_npiv)
+ fc_release_transport(lpfc_vport_transport_template);
++ lpfc_cdev_exit();
+ }
+
+ module_init(lpfc_init);
+--- /dev/null
++++ b/drivers/scsi/lpfc/lpfc_ioctl.c
+@@ -0,0 +1,2519 @@
++/*******************************************************************
++ * This file is part of the Emulex Linux Device Driver for *
++ * Fibre Channel Host Bus Adapters. *
++ * Copyright (C) 2006-2008 Emulex. All rights reserved. *
++ * EMULEX and SLI are trademarks of Emulex. *
++ * www.emulex.com *
++ * *
++ * This program is free software; you can redistribute it and/or *
++ * modify it under the terms of version 2 of the GNU General *
++ * Public License as published by the Free Software Foundation. *
++ * This program is distributed in the hope that it will be useful. *
++ * ALL EXPRESS OR IMPLIED CONDITIONS, REPRESENTATIONS AND *
++ * WARRANTIES, INCLUDING ANY IMPLIED WARRANTY OF MERCHANTABILITY, *
++ * FITNESS FOR A PARTICULAR PURPOSE, OR NON-INFRINGEMENT, ARE *
++ * DISCLAIMED, EXCEPT TO THE EXTENT THAT SUCH DISCLAIMERS ARE HELD *
++ * TO BE LEGALLY INVALID. See the GNU General Public License for *
++ * more details, a copy of which can be found in the file COPYING *
++ * included with this package. *
++ *******************************************************************/
++
++#include <linux/delay.h>
++#include <linux/blkdev.h>
++#include <linux/interrupt.h>
++#include <linux/pci.h>
++
++#include <scsi/scsi_host.h>
++#include <scsi/scsi_transport_fc.h>
++
++#include "lpfc_hw.h"
++#include "lpfc_sli.h"
++#include "lpfc_nl.h"
++#include "lpfc_disc.h"
++#include "lpfc_scsi.h"
++#include "lpfc.h"
++#include "lpfc_crtn.h"
++#include "lpfc_ioctl.h"
++#include "lpfc_logmsg.h"
++#include "lpfc_vport.h"
++
++
++struct lpfcdfc_event {
++ struct list_head node;
++ int ref;
++ wait_queue_head_t wq;
++
++ /* Event type and waiter identifiers */
++ uint32_t type_mask;
++ uint32_t req_id;
++ uint32_t reg_id;
++
++ /* next two flags are here for the auto-delete logic */
++ unsigned long wait_time_stamp;
++ int waiting;
++
++ /* seen and not seen events */
++ struct list_head events_to_get;
++ struct list_head events_to_see;
++};
++
++struct event_data {
++ struct list_head node;
++ uint32_t type;
++ uint32_t immed_dat;
++ void * data;
++ uint32_t len;
++};
++
++
++/* values for a_topology */
++#define LNK_LOOP 0x1
++#define LNK_PUBLIC_LOOP 0x2
++#define LNK_FABRIC 0x3
++#define LNK_PT2PT 0x4
++
++/* values for a_linkState */
++#define LNK_DOWN 0x1
++#define LNK_UP 0x2
++#define LNK_FLOGI 0x3
++#define LNK_DISCOVERY 0x4
++#define LNK_REDISCOVERY 0x5
++#define LNK_READY 0x6
++
++struct lpfcdfc_host {
++ struct list_head node;
++ int inst;
++ struct lpfc_hba * phba;
++ struct lpfc_vport *vport;
++ struct Scsi_Host * host;
++ struct pci_dev * dev;
++ void (*base_ct_unsol_event)(struct lpfc_hba *,
++ struct lpfc_sli_ring *,
++ struct lpfc_iocbq *);
++ /* Threads waiting for async event */
++ struct list_head ev_waiters;
++ uint32_t blocked;
++ uint32_t ref_count;
++};
++
++
++
++
++static void lpfc_ioctl_timeout_iocb_cmpl(struct lpfc_hba *,
++ struct lpfc_iocbq *, struct lpfc_iocbq *);
++
++static struct lpfc_dmabufext *
++dfc_cmd_data_alloc(struct lpfc_hba *, char *,
++ struct ulp_bde64 *, uint32_t);
++static int dfc_cmd_data_free(struct lpfc_hba *, struct lpfc_dmabufext *);
++static int dfc_rsp_data_copy(struct lpfc_hba *, uint8_t *,
++ struct lpfc_dmabufext *,
++ uint32_t);
++static int lpfc_issue_ct_rsp(struct lpfc_hba *, uint32_t, struct lpfc_dmabuf *,
++ struct lpfc_dmabufext *);
++
++static struct lpfcdfc_host * lpfcdfc_host_from_hba(struct lpfc_hba *);
++
++static DEFINE_MUTEX(lpfcdfc_lock);
++
++static struct list_head lpfcdfc_hosts = LIST_HEAD_INIT(lpfcdfc_hosts);
++
++static int lpfcdfc_major = 0;
++
++static int
++lpfc_ioctl_hba_rnid(struct lpfc_hba * phba,
++ struct lpfcCmdInput * cip,
++ void *dataout)
++{
++ struct nport_id idn;
++ struct lpfc_sli *psli;
++ struct lpfc_iocbq *cmdiocbq = NULL;
++ struct lpfc_iocbq *rspiocbq = NULL;
++ RNID *prsp;
++ uint32_t *pcmd;
++ uint32_t *psta;
++ IOCB_t *rsp;
++ struct lpfc_sli_ring *pring;
++ void *context2;
++ int i0;
++ int rtnbfrsiz;
++ struct lpfc_nodelist *pndl;
++ int rc = 0;
++
++ psli = &phba->sli;
++ pring = &psli->ring[LPFC_ELS_RING];
++
++ if (copy_from_user((uint8_t *) &idn, (void __user *) cip->lpfc_arg1,
++ sizeof(struct nport_id))) {
++ rc = EIO;
++ return rc;
++ }
++
++ if (idn.idType == LPFC_WWNN_TYPE)
++ pndl = lpfc_findnode_wwnn(phba->pport,
++ (struct lpfc_name *) idn.wwpn);
++ else
++ pndl = lpfc_findnode_wwpn(phba->pport,
++ (struct lpfc_name *) idn.wwpn);
++
++ if (!pndl || !NLP_CHK_NODE_ACT(pndl))
++ return ENODEV;
++
++ for (i0 = 0;
++ i0 < 10 && (pndl->nlp_flag & NLP_ELS_SND_MASK) == NLP_RNID_SND;
++ i0++) {
++ mdelay(1000);
++ }
++
++ if (i0 == 10) {
++ pndl->nlp_flag &= ~NLP_RNID_SND;
++ return EBUSY;
++ }
++
++ cmdiocbq = lpfc_prep_els_iocb(phba->pport, 1, (2 * sizeof(uint32_t)), 0,
++ pndl, pndl->nlp_DID, ELS_CMD_RNID);
++ if (!cmdiocbq)
++ return ENOMEM;
++
++ /*
++ * Context2 is used by prep/free to locate cmd and rsp buffers,
++ * but context2 is also used by iocb_wait to hold a rspiocb ptr.
++ * The rsp iocbq can be returned from the completion routine for
++ * iocb_wait, so save the prep/free value locally. It will be
++ * restored after returning from iocb_wait.
++ */
++ context2 = cmdiocbq->context2;
++
++ if ((rspiocbq = lpfc_sli_get_iocbq(phba)) == NULL) {
++ rc = ENOMEM;
++ goto sndrndqwt;
++ }
++ rsp = &(rspiocbq->iocb);
++
++ pcmd = (uint32_t *) (((struct lpfc_dmabuf *) cmdiocbq->context2)->virt);
++ *pcmd++ = ELS_CMD_RNID;
++
++ memset((void *) pcmd, 0, sizeof (RNID));
++ ((RNID *) pcmd)->Format = 0;
++ ((RNID *) pcmd)->Format = RNID_TOPOLOGY_DISC;
++ cmdiocbq->context1 = NULL;
++ cmdiocbq->context2 = NULL;
++ cmdiocbq->iocb_flag |= LPFC_IO_LIBDFC;
++
++ pndl->nlp_flag |= NLP_RNID_SND;
++ cmdiocbq->iocb.ulpTimeout = (phba->fc_ratov * 2) + 3 ;
++
++ rc = lpfc_sli_issue_iocb_wait(phba, pring, cmdiocbq, rspiocbq,
++ (phba->fc_ratov * 2) + LPFC_DRVR_TIMEOUT);
++ pndl->nlp_flag &= ~NLP_RNID_SND;
++ cmdiocbq->context2 = context2;
++
++ if (rc == IOCB_TIMEDOUT) {
++ lpfc_sli_release_iocbq(phba, rspiocbq);
++ cmdiocbq->context1 = NULL;
++ cmdiocbq->iocb_cmpl = lpfc_ioctl_timeout_iocb_cmpl;
++ return EIO;
++ }
++
++ if (rc != IOCB_SUCCESS) {
++ rc = EIO;
++ goto sndrndqwt;
++ }
++
++ if (rsp->ulpStatus == IOSTAT_SUCCESS) {
++ struct lpfc_dmabuf *buf_ptr1, *buf_ptr;
++ buf_ptr1 = (struct lpfc_dmabuf *)(cmdiocbq->context2);
++ buf_ptr = list_entry(buf_ptr1->list.next, struct lpfc_dmabuf,
++ list);
++ psta = (uint32_t*)buf_ptr->virt;
++ prsp = (RNID *) (psta + 1); /* then rnid response data */
++ rtnbfrsiz = prsp->CommonLen + prsp->SpecificLen +
++ sizeof (uint32_t);
++ memcpy((uint8_t *) dataout, (uint8_t *) psta, rtnbfrsiz);
++
++ if (rtnbfrsiz > cip->lpfc_outsz)
++ rtnbfrsiz = cip->lpfc_outsz;
++ if (copy_to_user
++ ((void __user *) cip->lpfc_arg2, (uint8_t *) & rtnbfrsiz,
++ sizeof (int)))
++ rc = EIO;
++ } else if (rsp->ulpStatus == IOSTAT_LS_RJT) {
++ uint8_t ls_rjt[8];
++ uint32_t *ls_rjtrsp;
++
++ ls_rjtrsp = (uint32_t*)(ls_rjt + 4);
++
++ /* construct the LS_RJT payload */
++ ls_rjt[0] = 0x01;
++ ls_rjt[1] = 0x00;
++ ls_rjt[2] = 0x00;
++ ls_rjt[3] = 0x00;
++
++ *ls_rjtrsp = be32_to_cpu(rspiocbq->iocb.un.ulpWord[4]);
++ rtnbfrsiz = 8;
++ memcpy((uint8_t *) dataout, (uint8_t *) ls_rjt, rtnbfrsiz);
++ if (copy_to_user
++ ((void __user *) cip->lpfc_arg2, (uint8_t *) & rtnbfrsiz,
++ sizeof (int)))
++ rc = EIO;
++ } else
++ rc = EACCES;
++
++sndrndqwt:
++ if (cmdiocbq)
++ lpfc_els_free_iocb(phba, cmdiocbq);
++
++ if (rspiocbq)
++ lpfc_sli_release_iocbq(phba, rspiocbq);
++
++ return rc;
++}
++
++static void
++lpfc_ioctl_timeout_iocb_cmpl(struct lpfc_hba * phba,
++ struct lpfc_iocbq * cmd_iocb_q,
++ struct lpfc_iocbq * rsp_iocb_q)
++{
++ struct lpfc_timedout_iocb_ctxt *iocb_ctxt = cmd_iocb_q->context1;
++
++ if (!iocb_ctxt) {
++ if (cmd_iocb_q->context2)
++ lpfc_els_free_iocb(phba, cmd_iocb_q);
++ else
++ lpfc_sli_release_iocbq(phba,cmd_iocb_q);
++ return;
++ }
++
++ if (iocb_ctxt->outdmp)
++ dfc_cmd_data_free(phba, iocb_ctxt->outdmp);
++
++ if (iocb_ctxt->indmp)
++ dfc_cmd_data_free(phba, iocb_ctxt->indmp);
++
++ if (iocb_ctxt->mp) {
++ lpfc_mbuf_free(phba,
++ iocb_ctxt->mp->virt,
++ iocb_ctxt->mp->phys);
++ kfree(iocb_ctxt->mp);
++ }
++
++ if (iocb_ctxt->bmp) {
++ lpfc_mbuf_free(phba,
++ iocb_ctxt->bmp->virt,
++ iocb_ctxt->bmp->phys);
++ kfree(iocb_ctxt->bmp);
++ }
++
++ lpfc_sli_release_iocbq(phba,cmd_iocb_q);
++
++ if (iocb_ctxt->rspiocbq)
++ lpfc_sli_release_iocbq(phba, iocb_ctxt->rspiocbq);
++
++ kfree(iocb_ctxt);
++}
++
++
++static int
++lpfc_ioctl_send_els(struct lpfc_hba * phba,
++ struct lpfcCmdInput * cip, void *dataout)
++{
++ struct lpfc_sli *psli = &phba->sli;
++ struct lpfc_sli_ring *pring = &psli->ring[LPFC_ELS_RING];
++ struct lpfc_iocbq *cmdiocbq, *rspiocbq;
++ struct lpfc_dmabufext *pcmdext = NULL, *prspext = NULL;
++ struct lpfc_nodelist *pndl;
++ struct ulp_bde64 *bpl;
++ IOCB_t *rsp;
++ struct lpfc_dmabuf *pcmd, *prsp, *pbuflist = NULL;
++ uint16_t rpi = 0;
++ struct nport_id destID;
++ int rc = 0;
++ uint32_t cmdsize;
++ uint32_t rspsize;
++ uint32_t elscmd;
++ int iocb_status;
++
++ elscmd = *(uint32_t *)cip->lpfc_arg2;
++ cmdsize = cip->lpfc_arg4;
++ rspsize = cip->lpfc_outsz;
++
++ if (copy_from_user((uint8_t *)&destID, (void __user *)cip->lpfc_arg1,
++ sizeof(struct nport_id)))
++ return EIO;
++
++ if ((rspiocbq = lpfc_sli_get_iocbq(phba)) == NULL)
++ return ENOMEM;
++
++ rsp = &rspiocbq->iocb;
++
++ if (destID.idType == 0)
++ pndl = lpfc_findnode_wwpn(phba->pport,
++ (struct lpfc_name *)&destID.wwpn);
++ else {
++ destID.d_id = (destID.d_id & Mask_DID);
++ pndl = lpfc_findnode_did(phba->pport, destID.d_id);
++ }
++
++ if (!pndl || !NLP_CHK_NODE_ACT(pndl)) {
++ if (destID.idType == 0) {
++ lpfc_sli_release_iocbq(phba, rspiocbq);
++ return ENODEV;
++ }
++ if (!pndl) {
++ pndl = kmalloc(sizeof (struct lpfc_nodelist),
++ GFP_KERNEL);
++ if (!pndl) {
++ lpfc_sli_release_iocbq(phba, rspiocbq);
++ return ENODEV;
++ }
++ lpfc_nlp_init(phba->pport, pndl, destID.d_id);
++ lpfc_nlp_set_state(phba->pport, pndl, NLP_STE_NPR_NODE);
++ } else {
++ pndl = lpfc_enable_node(phba->pport, pndl,
++ NLP_STE_NPR_NODE);
++ if (!pndl) {
++ lpfc_sli_release_iocbq(phba, rspiocbq);
++ return ENODEV;
++ }
++ }
++ } else {
++ lpfc_nlp_get(pndl);
++ rpi = pndl->nlp_rpi;
++ }
++
++ cmdiocbq = lpfc_prep_els_iocb(phba->pport, 1, cmdsize, 0, pndl,
++ pndl->nlp_DID, elscmd);
++
++ /* release the new pndl once the iocb complete */
++ lpfc_nlp_put(pndl);
++
++ if (cmdiocbq == NULL) {
++ lpfc_sli_release_iocbq(phba, rspiocbq);
++ return EIO;
++ }
++
++ pcmd = (struct lpfc_dmabuf *) cmdiocbq->context2;
++ prsp = (struct lpfc_dmabuf *) pcmd->list.next;
++
++ /*
++ * If we exceed the size of the allocated mbufs we need to
++ * free them and allocate our own.
++ */
++ if ((cmdsize > LPFC_BPL_SIZE) || (rspsize > LPFC_BPL_SIZE)) {
++ lpfc_mbuf_free(phba, pcmd->virt, pcmd->phys);
++ kfree(pcmd);
++ lpfc_mbuf_free(phba, prsp->virt, prsp->phys);
++ kfree(prsp);
++ cmdiocbq->context2 = NULL;
++
++ pbuflist = (struct lpfc_dmabuf *) cmdiocbq->context3;
++ bpl = (struct ulp_bde64 *) pbuflist->virt;
++ pcmdext = dfc_cmd_data_alloc(phba, cip->lpfc_arg2,
++ bpl, cmdsize);
++ if (!pcmdext) {
++ lpfc_els_free_iocb(phba, cmdiocbq);
++ lpfc_sli_release_iocbq(phba, rspiocbq);
++ return ENOMEM;
++ }
++ bpl += pcmdext->flag;
++ prspext = dfc_cmd_data_alloc(phba, NULL, bpl, rspsize);
++ if (!prspext) {
++ dfc_cmd_data_free(phba, pcmdext);
++ lpfc_els_free_iocb(phba, cmdiocbq);
++ lpfc_sli_release_iocbq(phba, rspiocbq);
++ return ENOMEM;
++ }
++ } else {
++ /* Copy the command from user space */
++ if (copy_from_user((uint8_t *) pcmd->virt,
++ (void __user *) cip->lpfc_arg2,
++ cmdsize)) {
++ lpfc_els_free_iocb(phba, cmdiocbq);
++ lpfc_sli_release_iocbq(phba, rspiocbq);
++ return EIO;
++ }
++ }
++
++ cmdiocbq->iocb.ulpContext = rpi;
++ cmdiocbq->iocb_flag |= LPFC_IO_LIBDFC;
++ cmdiocbq->context1 = NULL;
++ cmdiocbq->context2 = NULL;
++
++ iocb_status = lpfc_sli_issue_iocb_wait(phba, pring, cmdiocbq, rspiocbq,
++ (phba->fc_ratov*2) + LPFC_DRVR_TIMEOUT);
++ rc = iocb_status;
++
++ if (rc == IOCB_SUCCESS) {
++ if (rsp->ulpStatus == IOSTAT_SUCCESS) {
++ if (rspsize < (rsp->un.ulpWord[0] & 0xffffff)) {
++ rc = ERANGE;
++ } else {
++ rspsize = rsp->un.ulpWord[0] & 0xffffff;
++ if (pbuflist) {
++ if (dfc_rsp_data_copy(
++ phba,
++ (uint8_t *) cip->lpfc_dataout,
++ prspext,
++ rspsize)) {
++ rc = EIO;
++ } else {
++ cip->lpfc_outsz = 0;
++ }
++ } else {
++ if (copy_to_user( (void __user *)
++ cip->lpfc_dataout,
++ (uint8_t *) prsp->virt,
++ rspsize)) {
++ rc = EIO;
++ } else {
++ cip->lpfc_outsz = 0;
++ }
++ }
++ }
++ } else if (rsp->ulpStatus == IOSTAT_LS_RJT) {
++ uint8_t ls_rjt[8];
++
++ /* construct the LS_RJT payload */
++ ls_rjt[0] = 0x01;
++ ls_rjt[1] = 0x00;
++ ls_rjt[2] = 0x00;
++ ls_rjt[3] = 0x00;
++ memcpy(&ls_rjt[4], (uint8_t *) &rsp->un.ulpWord[4],
++ sizeof(uint32_t));
++
++ if (rspsize < 8)
++ rc = ERANGE;
++ else
++ rspsize = 8;
++
++ memcpy(dataout, ls_rjt, rspsize);
++ } else
++ rc = EIO;
++
++ if (copy_to_user((void __user *)cip->lpfc_arg3,
++ (uint8_t *)&rspsize, sizeof(uint32_t)))
++ rc = EIO;
++ } else {
++ rc = EIO;
++ }
++
++ if (pbuflist) {
++ dfc_cmd_data_free(phba, pcmdext);
++ dfc_cmd_data_free(phba, prspext);
++ } else
++ cmdiocbq->context2 = (uint8_t *) pcmd;
++
++ if (iocb_status != IOCB_TIMEDOUT)
++ lpfc_els_free_iocb(phba, cmdiocbq);
++
++ lpfc_sli_release_iocbq(phba, rspiocbq);
++ return rc;
++}
++
++static int
++lpfc_ioctl_send_mgmt_rsp(struct lpfc_hba * phba,
++ struct lpfcCmdInput * cip)
++{
++ struct Scsi_Host *shost = lpfc_shost_from_vport(phba->pport);
++ struct ulp_bde64 *bpl;
++ struct lpfc_dmabuf *bmp = NULL;
++ struct lpfc_dmabufext *indmp = NULL;
++ uint32_t tag = (uint32_t)cip->lpfc_flag; /* XRI for XMIT_SEQUENCE */
++ unsigned long reqbfrcnt = (unsigned long)cip->lpfc_arg2;
++ int rc = 0;
++ unsigned long iflag;
++
++ if (!reqbfrcnt || (reqbfrcnt > (80 * BUF_SZ_4K))) {
++ rc = ERANGE;
++ return rc;
++ }
++
++ bmp = kmalloc(sizeof (struct lpfc_dmabuf), GFP_KERNEL);
++ if (!bmp) {
++ rc = ENOMEM;
++ goto send_mgmt_rsp_exit;
++ }
++ spin_lock_irqsave(shost->host_lock, iflag);
++ bmp->virt = lpfc_mbuf_alloc(phba, 0, &bmp->phys);
++ spin_unlock_irqrestore(shost->host_lock, iflag); /* remove */
++ if (!bmp->virt) {
++ rc = ENOMEM;
++ goto send_mgmt_rsp_free_bmp;
++ }
++
++ INIT_LIST_HEAD(&bmp->list);
++ bpl = (struct ulp_bde64 *) bmp->virt;
++
++ indmp = dfc_cmd_data_alloc(phba, cip->lpfc_arg1, bpl, reqbfrcnt);
++ if (!indmp) {
++ rc = ENOMEM;
++ goto send_mgmt_rsp_free_bmpvirt;
++ }
++ rc = lpfc_issue_ct_rsp(phba, tag, bmp, indmp);
++ if (rc) {
++ if (rc == IOCB_TIMEDOUT)
++ rc = ETIMEDOUT;
++ else if (rc == IOCB_ERROR)
++ rc = EACCES;
++ }
++
++ dfc_cmd_data_free(phba, indmp);
++send_mgmt_rsp_free_bmpvirt:
++ lpfc_mbuf_free(phba, bmp->virt, bmp->phys);
++send_mgmt_rsp_free_bmp:
++ kfree(bmp);
++send_mgmt_rsp_exit:
++ return rc;
++}
++
++static int
++lpfc_ioctl_send_mgmt_cmd(struct lpfc_hba * phba,
++ struct lpfcCmdInput * cip, void *dataout)
++{
++ struct Scsi_Host *shost = lpfc_shost_from_vport(phba->pport);
++ struct lpfc_nodelist *pndl = NULL;
++ struct ulp_bde64 *bpl = NULL;
++ struct lpfc_name findwwn;
++ uint32_t finddid, timeout;
++ struct lpfc_iocbq *cmdiocbq = NULL, *rspiocbq = NULL;
++ struct lpfc_dmabufext *indmp = NULL, *outdmp = NULL;
++ IOCB_t *cmd = NULL, *rsp = NULL;
++ struct lpfc_dmabuf *bmp = NULL;
++ struct lpfc_sli *psli = NULL;
++ struct lpfc_sli_ring *pring = NULL;
++ int i0 = 0, rc = 0, reqbfrcnt, snsbfrcnt;
++ struct lpfc_timedout_iocb_ctxt *iocb_ctxt;
++
++ psli = &phba->sli;
++ pring = &psli->ring[LPFC_ELS_RING];
++
++ if (!(psli->sli_flag & LPFC_SLI2_ACTIVE)) {
++ rc = EACCES;
++ goto send_mgmt_cmd_exit;
++ }
++
++ reqbfrcnt = cip->lpfc_arg4;
++ snsbfrcnt = cip->lpfc_arg5;
++
++ if (!reqbfrcnt || !snsbfrcnt
++ || (reqbfrcnt + snsbfrcnt > 80 * BUF_SZ_4K)) {
++ rc = ERANGE;
++ goto send_mgmt_cmd_exit;
++ }
++
++ if (phba->pport->port_state != LPFC_VPORT_READY) {
++ rc = ENODEV;
++ goto send_mgmt_cmd_exit;
++ }
++
++ if (cip->lpfc_cmd == LPFC_HBA_SEND_MGMT_CMD) {
++ rc = copy_from_user(&findwwn, (void __user *)cip->lpfc_arg3,
++ sizeof(struct lpfc_name));
++ if (rc) {
++ rc = EIO;
++ goto send_mgmt_cmd_exit;
++ }
++ pndl = lpfc_findnode_wwpn(phba->pport, &findwwn);
++ /* Do additional get to pndl found so that at the end of the
++ * function we can do unconditional lpfc_nlp_put on it.
++ */
++ if (pndl && NLP_CHK_NODE_ACT(pndl))
++ lpfc_nlp_get(pndl);
++ } else {
++ finddid = (uint32_t)(unsigned long)cip->lpfc_arg3;
++ pndl = lpfc_findnode_did(phba->pport, finddid);
++ if (!pndl || !NLP_CHK_NODE_ACT(pndl)) {
++ if (phba->pport->fc_flag & FC_FABRIC) {
++ if (!pndl) {
++ pndl = kmalloc(sizeof
++ (struct lpfc_nodelist),
++ GFP_KERNEL);
++ if (!pndl) {
++ rc = ENODEV;
++ goto send_mgmt_cmd_exit;
++ }
++ lpfc_nlp_init(phba->pport, pndl,
++ finddid);
++ lpfc_nlp_set_state(phba->pport,
++ pndl, NLP_STE_PLOGI_ISSUE);
++ /* Indicate free ioctl allocated
++ * memory for ndlp after it's done
++ */
++ NLP_SET_FREE_REQ(pndl);
++ } else
++ lpfc_enable_node(phba->pport,
++ pndl, NLP_STE_PLOGI_ISSUE);
++
++ if (lpfc_issue_els_plogi(phba->pport,
++ pndl->nlp_DID, 0)) {
++ rc = ENODEV;
++ goto send_mgmt_cmd_free_pndl_exit;
++ }
++
++ /* Allow the node to complete discovery */
++ while (i0++ < 4) {
++ if (pndl->nlp_state ==
++ NLP_STE_UNMAPPED_NODE)
++ break;
++ msleep(500);
++ }
++
++ if (i0 == 4) {
++ rc = ENODEV;
++ goto send_mgmt_cmd_free_pndl_exit;
++ }
++ } else {
++ rc = ENODEV;
++ goto send_mgmt_cmd_exit;
++ }
++ } else
++ /* Do additional get to pndl found so at the end of
++ * the function we can do unconditional lpfc_nlp_put.
++ */
++ lpfc_nlp_get(pndl);
++ }
++
++ if (!pndl || !NLP_CHK_NODE_ACT(pndl)) {
++ rc = ENODEV;
++ goto send_mgmt_cmd_exit;
++ }
++
++ if (pndl->nlp_flag & NLP_ELS_SND_MASK) {
++ rc = ENODEV;
++ goto send_mgmt_cmd_free_pndl_exit;
++ }
++
++ spin_lock_irq(shost->host_lock);
++ cmdiocbq = lpfc_sli_get_iocbq(phba);
++ if (!cmdiocbq) {
++ rc = ENOMEM;
++ spin_unlock_irq(shost->host_lock);
++ goto send_mgmt_cmd_free_pndl_exit;
++ }
++ cmd = &cmdiocbq->iocb;
++
++ rspiocbq = lpfc_sli_get_iocbq(phba);
++ if (!rspiocbq) {
++ rc = ENOMEM;
++ goto send_mgmt_cmd_free_cmdiocbq;
++ }
++ spin_unlock_irq(shost->host_lock);
++
++ rsp = &rspiocbq->iocb;
++
++ bmp = kmalloc(sizeof (struct lpfc_dmabuf), GFP_KERNEL);
++ if (!bmp) {
++ rc = ENOMEM;
++ spin_lock_irq(shost->host_lock);
++ goto send_mgmt_cmd_free_rspiocbq;
++ }
++
++ spin_lock_irq(shost->host_lock);
++ bmp->virt = lpfc_mbuf_alloc(phba, 0, &bmp->phys);
++ if (!bmp->virt) {
++ rc = ENOMEM;
++ goto send_mgmt_cmd_free_bmp;
++ }
++ spin_unlock_irq(shost->host_lock);
++
++ INIT_LIST_HEAD(&bmp->list);
++ bpl = (struct ulp_bde64 *) bmp->virt;
++ indmp = dfc_cmd_data_alloc(phba, cip->lpfc_arg1, bpl, reqbfrcnt);
++ if (!indmp) {
++ rc = ENOMEM;
++ spin_lock_irq(shost->host_lock);
++ goto send_mgmt_cmd_free_bmpvirt;
++ }
++
++ /* flag contains total number of BPLs for xmit */
++ bpl += indmp->flag;
++
++ outdmp = dfc_cmd_data_alloc(phba, NULL, bpl, snsbfrcnt);
++ if (!outdmp) {
++ rc = ENOMEM;
++ spin_lock_irq(shost->host_lock);
++ goto send_mgmt_cmd_free_indmp;
++ }
++
++ cmd->un.genreq64.bdl.ulpIoTag32 = 0;
++ cmd->un.genreq64.bdl.addrHigh = putPaddrHigh(bmp->phys);
++ cmd->un.genreq64.bdl.addrLow = putPaddrLow(bmp->phys);
++ cmd->un.genreq64.bdl.bdeFlags = BUFF_TYPE_BLP_64;
++ cmd->un.genreq64.bdl.bdeSize =
++ (outdmp->flag + indmp->flag) * sizeof (struct ulp_bde64);
++ cmd->ulpCommand = CMD_GEN_REQUEST64_CR;
++ cmd->un.genreq64.w5.hcsw.Fctl = (SI | LA);
++ cmd->un.genreq64.w5.hcsw.Dfctl = 0;
++ cmd->un.genreq64.w5.hcsw.Rctl = FC_UNSOL_CTL;
++ cmd->un.genreq64.w5.hcsw.Type = FC_COMMON_TRANSPORT_ULP;
++ cmd->ulpBdeCount = 1;
++ cmd->ulpLe = 1;
++ cmd->ulpClass = CLASS3;
++ cmd->ulpContext = pndl->nlp_rpi;
++ cmd->ulpOwner = OWN_CHIP;
++ cmdiocbq->vport = phba->pport;
++ cmdiocbq->context1 = NULL;
++ cmdiocbq->context2 = NULL;
++ cmdiocbq->iocb_flag |= LPFC_IO_LIBDFC;
++
++ if (cip->lpfc_flag == 0 )
++ timeout = phba->fc_ratov * 2 ;
++ else
++ timeout = cip->lpfc_flag;
++
++ cmd->ulpTimeout = timeout;
++
++ rc = lpfc_sli_issue_iocb_wait(phba, pring, cmdiocbq, rspiocbq,
++ timeout + LPFC_DRVR_TIMEOUT);
++
++ if (rc == IOCB_TIMEDOUT) {
++ lpfc_sli_release_iocbq(phba, rspiocbq);
++ iocb_ctxt = kmalloc(sizeof(struct lpfc_timedout_iocb_ctxt),
++ GFP_KERNEL);
++ if (!iocb_ctxt) {
++ rc = EACCES;
++ goto send_mgmt_cmd_free_pndl_exit;
++ }
++
++ cmdiocbq->context1 = iocb_ctxt;
++ cmdiocbq->context2 = NULL;
++ iocb_ctxt->rspiocbq = NULL;
++ iocb_ctxt->mp = NULL;
++ iocb_ctxt->bmp = bmp;
++ iocb_ctxt->outdmp = outdmp;
++ iocb_ctxt->lpfc_cmd = NULL;
++ iocb_ctxt->indmp = indmp;
++
++ cmdiocbq->iocb_cmpl = lpfc_ioctl_timeout_iocb_cmpl;
++ rc = EACCES;
++ goto send_mgmt_cmd_free_pndl_exit;
++ }
++
++ if (rc != IOCB_SUCCESS) {
++ rc = EACCES;
++ goto send_mgmt_cmd_free_outdmp;
++ }
++
++ if (rsp->ulpStatus) {
++ if (rsp->ulpStatus == IOSTAT_LOCAL_REJECT) {
++ switch (rsp->un.ulpWord[4] & 0xff) {
++ case IOERR_SEQUENCE_TIMEOUT:
++ rc = ETIMEDOUT;
++ break;
++ case IOERR_INVALID_RPI:
++ rc = EFAULT;
++ break;
++ default:
++ rc = EACCES;
++ break;
++ }
++ goto send_mgmt_cmd_free_outdmp;
++ }
++ } else
++ outdmp->flag = rsp->un.genreq64.bdl.bdeSize;
++
++ /* Copy back response data */
++ if (outdmp->flag > snsbfrcnt) {
++ rc = ERANGE;
++ lpfc_printf_log(phba, KERN_INFO, LOG_LIBDFC,
++ "1209 C_CT Request error Data: x%x x%x\n",
++ outdmp->flag, BUF_SZ_4K);
++ goto send_mgmt_cmd_free_outdmp;
++ }
++
++ /* copy back size of response, and response itself */
++ memcpy(dataout, &outdmp->flag, sizeof (int));
++ rc = dfc_rsp_data_copy (phba, cip->lpfc_arg2, outdmp, outdmp->flag);
++ if (rc)
++ rc = EIO;
++
++send_mgmt_cmd_free_outdmp:
++ spin_lock_irq(shost->host_lock);
++ dfc_cmd_data_free(phba, outdmp);
++send_mgmt_cmd_free_indmp:
++ dfc_cmd_data_free(phba, indmp);
++send_mgmt_cmd_free_bmpvirt:
++ lpfc_mbuf_free(phba, bmp->virt, bmp->phys);
++send_mgmt_cmd_free_bmp:
++ kfree(bmp);
++send_mgmt_cmd_free_rspiocbq:
++ lpfc_sli_release_iocbq(phba, rspiocbq);
++send_mgmt_cmd_free_cmdiocbq:
++ lpfc_sli_release_iocbq(phba, cmdiocbq);
++ spin_unlock_irq(shost->host_lock);
++send_mgmt_cmd_free_pndl_exit:
++ lpfc_nlp_put(pndl);
++send_mgmt_cmd_exit:
++ return rc;
++}
++
++static inline struct lpfcdfc_event *
++lpfcdfc_event_new(uint32_t ev_mask,
++ int ev_reg_id,
++ uint32_t ev_req_id)
++{
++ struct lpfcdfc_event * evt = kzalloc(sizeof(*evt), GFP_KERNEL);
++ if (evt == NULL)
++ return NULL;
++
++ INIT_LIST_HEAD(&evt->events_to_get);
++ INIT_LIST_HEAD(&evt->events_to_see);
++ evt->type_mask = ev_mask;
++ evt->req_id = ev_req_id;
++ evt->reg_id = ev_reg_id;
++ evt->wait_time_stamp = jiffies;
++ init_waitqueue_head(&evt->wq);
++
++ return evt;
++}
++
++static inline void lpfcdfc_event_free(struct lpfcdfc_event * evt)
++{
++ struct event_data * ed;
++
++ list_del(&evt->node);
++
++ while(!list_empty(&evt->events_to_get)) {
++ ed = list_entry(evt->events_to_get.next, typeof(*ed), node);
++ list_del(&ed->node);
++ kfree(ed->data);
++ kfree(ed);
++ }
++
++ while(!list_empty(&evt->events_to_see)) {
++ ed = list_entry(evt->events_to_see.next, typeof(*ed), node);
++ list_del(&ed->node);
++ kfree(ed->data);
++ kfree(ed);
++ }
++
++ kfree(evt);
++}
++
++#define lpfcdfc_event_ref(evt) evt->ref++
++
++#define lpfcdfc_event_unref(evt) \
++ if (--evt->ref < 0) \
++ lpfcdfc_event_free(evt);
++
++static int
++lpfc_ioctl_hba_get_event(struct lpfc_hba * phba,
++ struct lpfcCmdInput * cip,
++ void **dataout, int *data_size)
++{
++ uint32_t ev_mask = ((uint32_t)(unsigned long)cip->lpfc_arg3 &
++ FC_REG_EVENT_MASK);
++ int ev_reg_id = (uint32_t) cip->lpfc_flag;
++ uint32_t ev_req_id = 0;
++ struct lpfcdfc_host * dfchba;
++ struct lpfcdfc_event * evt;
++ struct event_data * evt_dat = NULL;
++ int ret_val = 0;
++
++ /* All other events supported through NET_LINK_EVENTs */
++ if (ev_mask != FC_REG_CT_EVENT)
++ return ENOENT;
++
++ mutex_lock(&lpfcdfc_lock);
++ list_for_each_entry(dfchba, &lpfcdfc_hosts, node)
++ if (dfchba->phba == phba)
++ break;
++ mutex_unlock(&lpfcdfc_lock);
++
++ BUG_ON(&dfchba->node == &lpfcdfc_hosts);
++
++ if ((ev_mask == FC_REG_CT_EVENT) &&
++ copy_from_user(&ev_req_id, (void __user *)cip->lpfc_arg2,
++ sizeof (uint32_t)))
++ return EIO;
++
++ mutex_lock(&lpfcdfc_lock);
++ list_for_each_entry(evt, &dfchba->ev_waiters, node)
++ if (evt->reg_id == ev_reg_id) {
++ if(list_empty(&evt->events_to_get))
++ break;
++ lpfcdfc_event_ref(evt);
++ evt->wait_time_stamp = jiffies;
++ evt_dat = list_entry(evt->events_to_get.prev,
++ struct event_data, node);
++ list_del(&evt_dat->node);
++ break;
++ }
++ mutex_unlock(&lpfcdfc_lock);
++
++ if (evt_dat == NULL)
++ return ENOENT;
++
++ BUG_ON((ev_mask & evt_dat->type) == 0);
++
++ if (evt_dat->len > cip->lpfc_outsz)
++ evt_dat->len = cip->lpfc_outsz;
++
++ if (copy_to_user((void __user *)cip->lpfc_arg2, &evt_dat->immed_dat,
++ sizeof (uint32_t)) ||
++ copy_to_user((void __user *)cip->lpfc_arg1, &evt_dat->len,
++ sizeof (uint32_t))) {
++ ret_val = EIO;
++ goto error_get_event_exit;
++ }
++
++ if (evt_dat->len > 0) {
++ *data_size = evt_dat->len;
++ *dataout = kmalloc(*data_size, GFP_KERNEL);
++ if (*dataout)
++ memcpy(*dataout, evt_dat->data, *data_size);
++ else
++ *data_size = 0;
++
++ } else
++ *data_size = 0;
++ ret_val = 0;
++
++error_get_event_exit:
++
++ kfree(evt_dat->data);
++ kfree(evt_dat);
++ mutex_lock(&lpfcdfc_lock);
++ lpfcdfc_event_unref(evt);
++ mutex_unlock(&lpfcdfc_lock);
++
++ return ret_val;
++}
++
++static int
++lpfc_ioctl_hba_set_event(struct lpfc_hba * phba,
++ struct lpfcCmdInput * cip)
++{
++ uint32_t ev_mask = ((uint32_t)(unsigned long)cip->lpfc_arg3 &
++ FC_REG_EVENT_MASK);
++ int ev_reg_id = cip->lpfc_flag;
++ uint32_t ev_req_id = 0;
++
++ struct lpfcdfc_host * dfchba;
++ struct lpfcdfc_event * evt;
++
++ int ret_val = 0;
++
++ /* All other events supported through NET_LINK_EVENTs */
++ if (ev_mask != FC_REG_CT_EVENT)
++ return ENOENT;
++
++ mutex_lock(&lpfcdfc_lock);
++ list_for_each_entry(dfchba, &lpfcdfc_hosts, node) {
++ if (dfchba->phba == phba)
++ break;
++ }
++ mutex_unlock(&lpfcdfc_lock);
++ BUG_ON(&dfchba->node == &lpfcdfc_hosts);
++
++ if (ev_mask == FC_REG_CT_EVENT)
++ ev_req_id = ((uint32_t)(unsigned long)cip->lpfc_arg2);
++
++ mutex_lock(&lpfcdfc_lock);
++ list_for_each_entry(evt, &dfchba->ev_waiters, node) {
++ if (evt->reg_id == ev_reg_id) {
++ lpfcdfc_event_ref(evt);
++ evt->wait_time_stamp = jiffies;
++ break;
++ }
++ }
++ mutex_unlock(&lpfcdfc_lock);
++
++ if (&evt->node == &dfchba->ev_waiters) {
++ /* no event waiting struct yet - first call */
++ evt = lpfcdfc_event_new(ev_mask, ev_reg_id, ev_req_id);
++ if (evt == NULL)
++ return ENOMEM;
++
++ mutex_lock(&lpfcdfc_lock);
++ list_add(&evt->node, &dfchba->ev_waiters);
++ lpfcdfc_event_ref(evt);
++ mutex_unlock(&lpfcdfc_lock);
++ }
++
++ evt->waiting = 1;
++ if (wait_event_interruptible(evt->wq,
++ (!list_empty(&evt->events_to_see) ||
++ dfchba->blocked))) {
++ mutex_lock(&lpfcdfc_lock);
++ lpfcdfc_event_unref(evt); /* release ref */
++ lpfcdfc_event_unref(evt); /* delete */
++ mutex_unlock(&lpfcdfc_lock);
++ return EINTR;
++ }
++
++ mutex_lock(&lpfcdfc_lock);
++ if (dfchba->blocked) {
++ lpfcdfc_event_unref(evt);
++ lpfcdfc_event_unref(evt);
++ mutex_unlock(&lpfcdfc_lock);
++ return ENODEV;
++ }
++ mutex_unlock(&lpfcdfc_lock);
++
++ evt->wait_time_stamp = jiffies;
++ evt->waiting = 0;
++
++ BUG_ON(list_empty(&evt->events_to_see));
++
++ mutex_lock(&lpfcdfc_lock);
++ list_move(evt->events_to_see.prev, &evt->events_to_get);
++ lpfcdfc_event_unref(evt); /* release ref */
++ mutex_unlock(&lpfcdfc_lock);
++
++ return ret_val;
++}
++
++static int
++lpfc_ioctl_loopback_mode(struct lpfc_hba *phba,
++ struct lpfcCmdInput *cip, void *dataout)
++{
++ struct Scsi_Host *shost;
++ struct lpfc_sli *psli = &phba->sli;
++ struct lpfc_sli_ring *pring = &psli->ring[LPFC_FCP_RING];
++ uint32_t link_flags = cip->lpfc_arg4;
++ uint32_t timeout = cip->lpfc_arg5 * 100;
++ struct lpfc_vport **vports;
++ LPFC_MBOXQ_t *pmboxq;
++ int mbxstatus;
++ int i = 0;
++ int rc = 0;
++
++ if ((phba->link_state == LPFC_HBA_ERROR) ||
++ (psli->sli_flag & LPFC_BLOCK_MGMT_IO) ||
++ (!(psli->sli_flag & LPFC_SLI2_ACTIVE)))
++ return EACCES;
++
++ if ((pmboxq = mempool_alloc(phba->mbox_mem_pool,GFP_KERNEL)) == 0)
++ return ENOMEM;
++
++ vports = lpfc_create_vport_work_array(phba);
++ if (vports != NULL) {
++ for(i = 0; i <= phba->max_vpi && vports[i] != NULL; i++){
++ shost = lpfc_shost_from_vport(vports[i]);
++ scsi_block_requests(shost);
++ }
++ lpfc_destroy_vport_work_array(phba, vports);
++ }
++ else {
++ shost = lpfc_shost_from_vport(phba->pport);
++ scsi_block_requests(shost);
++ }
++
++ while (pring->txcmplq_cnt) {
++ if (i++ > 500) /* wait up to 5 seconds */
++ break;
++
++ mdelay(10);
++ }
++
++ memset((void *)pmboxq, 0, sizeof (LPFC_MBOXQ_t));
++ pmboxq->mb.mbxCommand = MBX_DOWN_LINK;
++ pmboxq->mb.mbxOwner = OWN_HOST;
++
++ mbxstatus = lpfc_sli_issue_mbox_wait(phba, pmboxq, LPFC_MBOX_TMO);
++
++ if ((mbxstatus == MBX_SUCCESS) && (pmboxq->mb.mbxStatus == 0)) {
++
++ /* wait for link down before proceeding */
++ i = 0;
++ while (phba->link_state != LPFC_LINK_DOWN) {
++ if (i++ > timeout) {
++ rc = ETIMEDOUT;
++ goto loopback_mode_exit;
++ }
++ msleep(10);
++ }
++
++ memset((void *)pmboxq, 0, sizeof (LPFC_MBOXQ_t));
++ if (link_flags == INTERNAL_LOOP_BACK)
++ pmboxq->mb.un.varInitLnk.link_flags = FLAGS_LOCAL_LB;
++ else
++ pmboxq->mb.un.varInitLnk.link_flags =
++ FLAGS_TOPOLOGY_MODE_LOOP;
++
++ pmboxq->mb.mbxCommand = MBX_INIT_LINK;
++ pmboxq->mb.mbxOwner = OWN_HOST;
++
++ mbxstatus = lpfc_sli_issue_mbox_wait(phba, pmboxq,
++ LPFC_MBOX_TMO);
++
++ if ((mbxstatus != MBX_SUCCESS) || (pmboxq->mb.mbxStatus))
++ rc = ENODEV;
++ else {
++ phba->link_flag |= LS_LOOPBACK_MODE;
++ /* wait for the link attention interrupt */
++ msleep(100);
++
++ i = 0;
++ while (phba->link_state != LPFC_HBA_READY) {
++ if (i++ > timeout) {
++ rc = ETIMEDOUT;
++ break;
++ }
++ msleep(10);
++ }
++ }
++ } else
++ rc = ENODEV;
++
++loopback_mode_exit:
++ vports = lpfc_create_vport_work_array(phba);
++ if (vports != NULL) {
++ for(i = 0; i <= phba->max_vpi && vports[i] != NULL; i++){
++ shost = lpfc_shost_from_vport(vports[i]);
++ scsi_unblock_requests(shost);
++ }
++ lpfc_destroy_vport_work_array(phba, vports);
++ }
++ else {
++ shost = lpfc_shost_from_vport(phba->pport);
++ scsi_unblock_requests(shost);
++ }
++
++ /*
++ * Let SLI layer release mboxq if mbox command completed after timeout.
++ */
++ if (mbxstatus != MBX_TIMEOUT)
++ mempool_free( pmboxq, phba->mbox_mem_pool);
++
++ return rc;
++}
++
++static int lpfcdfc_loop_self_reg(struct lpfc_hba *phba, uint16_t * rpi)
++{
++ LPFC_MBOXQ_t *mbox;
++ struct lpfc_dmabuf *dmabuff;
++ int status;
++
++ mbox = mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL);
++ if (mbox == NULL)
++ return ENOMEM;
++
++ status = lpfc_reg_login(phba, 0, phba->pport->fc_myDID,
++ (uint8_t *)&phba->pport->fc_sparam, mbox, 0);
++ if (status) {
++ mempool_free(mbox, phba->mbox_mem_pool);
++ return ENOMEM;
++ }
++
++ dmabuff = (struct lpfc_dmabuf *) mbox->context1;
++ mbox->context1 = NULL;
++ status = lpfc_sli_issue_mbox_wait(phba, mbox, LPFC_MBOX_TMO);
++
++ if ((status != MBX_SUCCESS) || (mbox->mb.mbxStatus)) {
++ lpfc_mbuf_free(phba, dmabuff->virt, dmabuff->phys);
++ kfree(dmabuff);
++ if (status != MBX_TIMEOUT)
++ mempool_free(mbox, phba->mbox_mem_pool);
++ return ENODEV;
++ }
++
++ *rpi = mbox->mb.un.varWords[0];
++
++ lpfc_mbuf_free(phba, dmabuff->virt, dmabuff->phys);
++ kfree(dmabuff);
++ mempool_free(mbox, phba->mbox_mem_pool);
++
++ return 0;
++}
++
++static int lpfcdfc_loop_self_unreg(struct lpfc_hba *phba, uint16_t rpi)
++{
++ LPFC_MBOXQ_t * mbox;
++ int status;
++
++ /* Allocate mboxq structure */
++ mbox = mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL);
++ if (mbox == NULL)
++ return ENOMEM;
++
++ lpfc_unreg_login(phba, 0, rpi, mbox);
++ status = lpfc_sli_issue_mbox_wait(phba, mbox, LPFC_MBOX_TMO);
++
++ if ((status != MBX_SUCCESS) || (mbox->mb.mbxStatus)) {
++ if (status != MBX_TIMEOUT)
++ mempool_free(mbox, phba->mbox_mem_pool);
++ return EIO;
++ }
++
++ mempool_free(mbox, phba->mbox_mem_pool);
++ return 0;
++}
++
++
++static int lpfcdfc_loop_get_xri(struct lpfc_hba *phba, uint16_t rpi,
++ uint16_t *txxri, uint16_t * rxxri)
++{
++ struct lpfc_sli *psli = &phba->sli;
++ struct lpfc_sli_ring *pring = &psli->ring[LPFC_ELS_RING];
++
++ struct lpfcdfc_host * dfchba;
++ struct lpfcdfc_event * evt;
++
++ struct lpfc_iocbq *cmdiocbq, *rspiocbq;
++ IOCB_t *cmd, *rsp;
++
++ struct lpfc_dmabuf * dmabuf;
++ struct ulp_bde64 *bpl = NULL;
++ struct lpfc_sli_ct_request *ctreq = NULL;
++
++ int ret_val = 0;
++
++ *txxri = 0;
++ *rxxri = 0;
++
++ mutex_lock(&lpfcdfc_lock);
++ list_for_each_entry(dfchba, &lpfcdfc_hosts, node) {
++ if (dfchba->phba == phba)
++ break;
++ }
++ mutex_unlock(&lpfcdfc_lock);
++ BUG_ON(&dfchba->node == &lpfcdfc_hosts);
++
++ evt = lpfcdfc_event_new(FC_REG_CT_EVENT, current->pid,
++ SLI_CT_ELX_LOOPBACK);
++ if (evt == NULL)
++ return ENOMEM;
++
++ mutex_lock(&lpfcdfc_lock);
++ list_add(&evt->node, &dfchba->ev_waiters);
++ lpfcdfc_event_ref(evt);
++ mutex_unlock(&lpfcdfc_lock);
++
++ cmdiocbq = lpfc_sli_get_iocbq(phba);
++ rspiocbq = lpfc_sli_get_iocbq(phba);
++
++ dmabuf = kmalloc(sizeof (struct lpfc_dmabuf), GFP_KERNEL);
++ if (dmabuf) {
++ dmabuf->virt = lpfc_mbuf_alloc(phba, 0, &dmabuf->phys);
++ INIT_LIST_HEAD(&dmabuf->list);
++ bpl = (struct ulp_bde64 *) dmabuf->virt;
++ memset(bpl, 0, sizeof(*bpl));
++ ctreq = (struct lpfc_sli_ct_request *)(bpl + 1);
++ bpl->addrHigh =
++ le32_to_cpu(putPaddrHigh(dmabuf->phys + sizeof(*bpl)));
++ bpl->addrLow =
++ le32_to_cpu(putPaddrLow(dmabuf->phys + sizeof(*bpl)));
++ bpl->tus.f.bdeFlags = BUFF_TYPE_BDE_64;
++ bpl->tus.f.bdeSize = ELX_LOOPBACK_HEADER_SZ;
++ bpl->tus.w = le32_to_cpu(bpl->tus.w);
++ }
++
++ if (cmdiocbq == NULL || rspiocbq == NULL ||
++ dmabuf == NULL || bpl == NULL || ctreq == NULL) {
++ ret_val = ENOMEM;
++ goto err_get_xri_exit;
++ }
++
++ cmd = &cmdiocbq->iocb;
++ rsp = &rspiocbq->iocb;
++
++ memset(ctreq, 0, ELX_LOOPBACK_HEADER_SZ);
++
++ ctreq->RevisionId.bits.Revision = SLI_CT_REVISION;
++ ctreq->RevisionId.bits.InId = 0;
++ ctreq->FsType = SLI_CT_ELX_LOOPBACK;
++ ctreq->FsSubType = 0;
++ ctreq->CommandResponse.bits.CmdRsp = ELX_LOOPBACK_XRI_SETUP;
++ ctreq->CommandResponse.bits.Size = 0;
++
++
++ cmd->un.xseq64.bdl.addrHigh = putPaddrHigh(dmabuf->phys);
++ cmd->un.xseq64.bdl.addrLow = putPaddrLow(dmabuf->phys);
++ cmd->un.xseq64.bdl.bdeFlags = BUFF_TYPE_BLP_64;
++ cmd->un.xseq64.bdl.bdeSize = sizeof(*bpl);
++
++ cmd->un.xseq64.w5.hcsw.Fctl = LA;
++ cmd->un.xseq64.w5.hcsw.Dfctl = 0;
++ cmd->un.xseq64.w5.hcsw.Rctl = FC_UNSOL_CTL;
++ cmd->un.xseq64.w5.hcsw.Type = FC_COMMON_TRANSPORT_ULP;
++
++ cmd->ulpCommand = CMD_XMIT_SEQUENCE64_CR;
++ cmd->ulpBdeCount = 1;
++ cmd->ulpLe = 1;
++ cmd->ulpClass = CLASS3;
++ cmd->ulpContext = rpi;
++
++ cmdiocbq->iocb_flag |= LPFC_IO_LIBDFC;
++ cmdiocbq->vport = phba->pport;
++
++ ret_val = lpfc_sli_issue_iocb_wait(phba, pring, cmdiocbq, rspiocbq,
++ (phba->fc_ratov * 2)
++ + LPFC_DRVR_TIMEOUT);
++ if (ret_val)
++ goto err_get_xri_exit;
++
++ *txxri = rsp->ulpContext;
++
++ evt->waiting = 1;
++ evt->wait_time_stamp = jiffies;
++ ret_val = wait_event_interruptible_timeout(
++ evt->wq, !list_empty(&evt->events_to_see),
++ ((phba->fc_ratov * 2) + LPFC_DRVR_TIMEOUT) * HZ);
++ if (list_empty(&evt->events_to_see))
++ ret_val = (ret_val) ? EINTR : ETIMEDOUT;
++ else {
++ ret_val = IOCB_SUCCESS;
++ mutex_lock(&lpfcdfc_lock);
++ list_move(evt->events_to_see.prev, &evt->events_to_get);
++ mutex_unlock(&lpfcdfc_lock);
++ *rxxri = (list_entry(evt->events_to_get.prev,
++ typeof(struct event_data),
++ node))->immed_dat;
++ }
++ evt->waiting = 0;
++
++err_get_xri_exit:
++ mutex_lock(&lpfcdfc_lock);
++ lpfcdfc_event_unref(evt); /* release ref */
++ lpfcdfc_event_unref(evt); /* delete */
++ mutex_unlock(&lpfcdfc_lock);
++
++ if(dmabuf) {
++ if(dmabuf->virt)
++ lpfc_mbuf_free(phba, dmabuf->virt, dmabuf->phys);
++ kfree(dmabuf);
++ }
++
++ if (cmdiocbq && (ret_val != IOCB_TIMEDOUT))
++ lpfc_sli_release_iocbq(phba, cmdiocbq);
++ if (rspiocbq)
++ lpfc_sli_release_iocbq(phba, rspiocbq);
++
++ return ret_val;
++}
++
++static int lpfcdfc_loop_post_rxbufs(struct lpfc_hba *phba, uint16_t rxxri,
++ size_t len)
++{
++ struct lpfc_sli *psli = &phba->sli;
++ struct lpfc_sli_ring *pring = &psli->ring[LPFC_ELS_RING];
++ struct lpfc_iocbq *cmdiocbq;
++ IOCB_t *cmd = NULL;
++ struct list_head head, *curr, *next;
++ struct lpfc_dmabuf *rxbmp;
++ struct lpfc_dmabuf *dmp;
++ struct lpfc_dmabuf *mp[2] = {NULL, NULL};
++ struct ulp_bde64 *rxbpl = NULL;
++ uint32_t num_bde;
++ struct lpfc_dmabufext *rxbuffer = NULL;
++ int ret_val = 0;
++ int i = 0;
++
++ cmdiocbq = lpfc_sli_get_iocbq(phba);
++ rxbmp = kmalloc(sizeof (struct lpfc_dmabuf), GFP_KERNEL);
++ if (rxbmp != NULL) {
++ rxbmp->virt = lpfc_mbuf_alloc(phba, 0, &rxbmp->phys);
++ INIT_LIST_HEAD(&rxbmp->list);
++ rxbpl = (struct ulp_bde64 *) rxbmp->virt;
++ rxbuffer = dfc_cmd_data_alloc(phba, NULL, rxbpl, len);
++ }
++
++ if(cmdiocbq == NULL || rxbmp == NULL ||
++ rxbpl == NULL || rxbuffer == NULL) {
++ ret_val = ENOMEM;
++ goto err_post_rxbufs_exit;
++ }
++
++ /* Queue buffers for the receive exchange */
++ num_bde = (uint32_t)rxbuffer->flag;
++ dmp = &rxbuffer->dma;
++
++ cmd = &cmdiocbq->iocb;
++ i = 0;
++
++ INIT_LIST_HEAD(&head);
++ list_add_tail(&head, &dmp->list);
++ list_for_each_safe(curr, next, &head) {
++ mp[i] = list_entry(curr, struct lpfc_dmabuf, list);
++ list_del(curr);
++
++ if (phba->sli3_options & LPFC_SLI3_HBQ_ENABLED) {
++ mp[i]->buffer_tag = lpfc_sli_get_buffer_tag(phba);
++ cmd->un.quexri64cx.buff.bde.addrHigh =
++ putPaddrHigh(mp[i]->phys);
++ cmd->un.quexri64cx.buff.bde.addrLow =
++ putPaddrLow(mp[i]->phys);
++ cmd->un.quexri64cx.buff.bde.tus.f.bdeSize =
++ ((struct lpfc_dmabufext *)mp[i])->size;
++ cmd->un.quexri64cx.buff.buffer_tag = mp[i]->buffer_tag;
++ cmd->ulpCommand = CMD_QUE_XRI64_CX;
++ cmd->ulpPU = 0;
++ cmd->ulpLe = 1;
++ cmd->ulpBdeCount = 1;
++ cmd->unsli3.que_xri64cx_ext_words.ebde_count = 0;
++
++ } else {
++ cmd->un.cont64[i].addrHigh = putPaddrHigh(mp[i]->phys);
++ cmd->un.cont64[i].addrLow = putPaddrLow(mp[i]->phys);
++ cmd->un.cont64[i].tus.f.bdeSize =
++ ((struct lpfc_dmabufext *)mp[i])->size;
++ cmd->ulpBdeCount = ++i;
++
++ if ((--num_bde > 0) && (i < 2))
++ continue;
++
++ cmd->ulpCommand = CMD_QUE_XRI_BUF64_CX;
++ cmd->ulpLe = 1;
++ }
++
++ cmd->ulpClass = CLASS3;
++ cmd->ulpContext = rxxri;
++
++ ret_val = lpfc_sli_issue_iocb(phba, pring, cmdiocbq, 0);
++
++ if (ret_val == IOCB_ERROR) {
++ dfc_cmd_data_free(phba, (struct lpfc_dmabufext *)mp[0]);
++ if (mp[1])
++ dfc_cmd_data_free(phba,
++ (struct lpfc_dmabufext *)mp[1]);
++ dmp = list_entry(next, struct lpfc_dmabuf, list);
++ ret_val = EIO;
++ goto err_post_rxbufs_exit;
++ }
++
++ lpfc_sli_ringpostbuf_put(phba, pring, mp[0]);
++ if (mp[1]) {
++ lpfc_sli_ringpostbuf_put(phba, pring, mp[1]);
++ mp[1] = NULL;
++ }
++
++ /* The iocb was freed by lpfc_sli_issue_iocb */
++ if ((cmdiocbq = lpfc_sli_get_iocbq(phba)) == NULL) {
++ dmp = list_entry(next, struct lpfc_dmabuf, list);
++ ret_val = EIO;
++ goto err_post_rxbufs_exit;
++ }
++ cmd = &cmdiocbq->iocb;
++ i = 0;
++ }
++ list_del(&head);
++
++err_post_rxbufs_exit:
++
++ if(rxbmp) {
++ if(rxbmp->virt)
++ lpfc_mbuf_free(phba, rxbmp->virt, rxbmp->phys);
++ kfree(rxbmp);
++ }
++
++ if (cmdiocbq)
++ lpfc_sli_release_iocbq(phba, cmdiocbq);
++
++ return ret_val;
++}
++static int
++lpfc_ioctl_loopback_test(struct lpfc_hba *phba,
++ struct lpfcCmdInput *cip, void *dataout)
++{
++ struct lpfcdfc_host * dfchba;
++ struct lpfcdfc_event * evt;
++ struct event_data * evdat;
++
++ struct lpfc_sli *psli = &phba->sli;
++ struct lpfc_sli_ring *pring = &psli->ring[LPFC_ELS_RING];
++ uint32_t size = cip->lpfc_outsz;
++ uint32_t full_size = size + ELX_LOOPBACK_HEADER_SZ;
++ size_t segment_len = 0, segment_offset = 0, current_offset = 0;
++ uint16_t rpi;
++ struct lpfc_iocbq *cmdiocbq, *rspiocbq;
++ IOCB_t *cmd, *rsp;
++ struct lpfc_sli_ct_request *ctreq;
++ struct lpfc_dmabuf *txbmp;
++ struct ulp_bde64 *txbpl = NULL;
++ struct lpfc_dmabufext *txbuffer = NULL;
++ struct list_head head;
++ struct lpfc_dmabuf *curr;
++ uint16_t txxri, rxxri;
++ uint32_t num_bde;
++ uint8_t *ptr = NULL, *rx_databuf = NULL;
++ int rc;
++
++ if ((phba->link_state == LPFC_HBA_ERROR) ||
++ (psli->sli_flag & LPFC_BLOCK_MGMT_IO) ||
++ (!(psli->sli_flag & LPFC_SLI2_ACTIVE)))
++ return EACCES;
++
++ if (!lpfc_is_link_up(phba) || !(phba->link_flag & LS_LOOPBACK_MODE))
++ return EACCES;
++
++ if ((size == 0) || (size > 80 * BUF_SZ_4K))
++ return ERANGE;
++
++ mutex_lock(&lpfcdfc_lock);
++ list_for_each_entry(dfchba, &lpfcdfc_hosts, node) {
++ if (dfchba->phba == phba)
++ break;
++ }
++ mutex_unlock(&lpfcdfc_lock);
++ BUG_ON(&dfchba->node == &lpfcdfc_hosts);
++
++ rc = lpfcdfc_loop_self_reg(phba, &rpi);
++ if (rc)
++ return rc;
++
++ rc = lpfcdfc_loop_get_xri(phba, rpi, &txxri, &rxxri);
++ if (rc) {
++ lpfcdfc_loop_self_unreg(phba, rpi);
++ return rc;
++ }
++
++ rc = lpfcdfc_loop_post_rxbufs(phba, rxxri, full_size);
++ if (rc) {
++ lpfcdfc_loop_self_unreg(phba, rpi);
++ return rc;
++ }
++
++ evt = lpfcdfc_event_new(FC_REG_CT_EVENT, current->pid,
++ SLI_CT_ELX_LOOPBACK);
++ if (evt == NULL) {
++ lpfcdfc_loop_self_unreg(phba, rpi);
++ return ENOMEM;
++ }
++
++ mutex_lock(&lpfcdfc_lock);
++ list_add(&evt->node, &dfchba->ev_waiters);
++ lpfcdfc_event_ref(evt);
++ mutex_unlock(&lpfcdfc_lock);
++
++ cmdiocbq = lpfc_sli_get_iocbq(phba);
++ rspiocbq = lpfc_sli_get_iocbq(phba);
++ txbmp = kmalloc(sizeof (struct lpfc_dmabuf), GFP_KERNEL);
++
++ if (txbmp) {
++ txbmp->virt = lpfc_mbuf_alloc(phba, 0, &txbmp->phys);
++ INIT_LIST_HEAD(&txbmp->list);
++ txbpl = (struct ulp_bde64 *) txbmp->virt;
++ if (txbpl)
++ txbuffer = dfc_cmd_data_alloc(phba, NULL,
++ txbpl, full_size);
++ }
++
++ if (cmdiocbq == NULL || rspiocbq == NULL
++ || txbmp == NULL || txbpl == NULL || txbuffer == NULL) {
++ rc = ENOMEM;
++ goto err_loopback_test_exit;
++ }
++
++ cmd = &cmdiocbq->iocb;
++ rsp = &rspiocbq->iocb;
++
++ INIT_LIST_HEAD(&head);
++ list_add_tail(&head, &txbuffer->dma.list);
++ list_for_each_entry(curr, &head, list) {
++ segment_len = ((struct lpfc_dmabufext *)curr)->size;
++ if (current_offset == 0) {
++ ctreq = curr->virt;
++ memset(ctreq, 0, ELX_LOOPBACK_HEADER_SZ);
++ ctreq->RevisionId.bits.Revision = SLI_CT_REVISION;
++ ctreq->RevisionId.bits.InId = 0;
++ ctreq->FsType = SLI_CT_ELX_LOOPBACK;
++ ctreq->FsSubType = 0;
++ ctreq->CommandResponse.bits.CmdRsp = ELX_LOOPBACK_DATA ;
++ ctreq->CommandResponse.bits.Size = size;
++ segment_offset = ELX_LOOPBACK_HEADER_SZ;
++ } else
++ segment_offset = 0;
++
++ BUG_ON(segment_offset >= segment_len);
++ if (copy_from_user (curr->virt + segment_offset,
++ (void __user *)cip->lpfc_arg1
++ + current_offset,
++ segment_len - segment_offset)) {
++ rc = EIO;
++ list_del(&head);
++ goto err_loopback_test_exit;
++ }
++
++ current_offset += segment_len - segment_offset;
++ BUG_ON(current_offset > size);
++ }
++ list_del(&head);
++
++ /* Build the XMIT_SEQUENCE iocb */
++
++ num_bde = (uint32_t)txbuffer->flag;
++
++ cmd->un.xseq64.bdl.addrHigh = putPaddrHigh(txbmp->phys);
++ cmd->un.xseq64.bdl.addrLow = putPaddrLow(txbmp->phys);
++ cmd->un.xseq64.bdl.bdeFlags = BUFF_TYPE_BLP_64;
++ cmd->un.xseq64.bdl.bdeSize = (num_bde * sizeof(struct ulp_bde64));
++
++ cmd->un.xseq64.w5.hcsw.Fctl = (LS | LA);
++ cmd->un.xseq64.w5.hcsw.Dfctl = 0;
++ cmd->un.xseq64.w5.hcsw.Rctl = FC_UNSOL_CTL;
++ cmd->un.xseq64.w5.hcsw.Type = FC_COMMON_TRANSPORT_ULP;
++
++ cmd->ulpCommand = CMD_XMIT_SEQUENCE64_CX;
++ cmd->ulpBdeCount = 1;
++ cmd->ulpLe = 1;
++ cmd->ulpClass = CLASS3;
++ cmd->ulpContext = txxri;
++
++ cmdiocbq->iocb_flag |= LPFC_IO_LIBDFC;
++ cmdiocbq->vport = phba->pport;
++
++ rc = lpfc_sli_issue_iocb_wait(phba, pring, cmdiocbq, rspiocbq,
++ (phba->fc_ratov * 2) + LPFC_DRVR_TIMEOUT);
++
++ if ((rc != IOCB_SUCCESS) || (rsp->ulpStatus != IOCB_SUCCESS)) {
++ rc = EIO;
++ goto err_loopback_test_exit;
++ }
++
++ evt->waiting = 1;
++ rc = wait_event_interruptible_timeout(
++ evt->wq, !list_empty(&evt->events_to_see),
++ ((phba->fc_ratov * 2) + LPFC_DRVR_TIMEOUT) * HZ);
++ evt->waiting = 0;
++ if (list_empty(&evt->events_to_see))
++ rc = (rc) ? EINTR : ETIMEDOUT;
++ else {
++ ptr = dataout;
++ mutex_lock(&lpfcdfc_lock);
++ list_move(evt->events_to_see.prev, &evt->events_to_get);
++ evdat = list_entry(evt->events_to_get.prev,
++ typeof(*evdat), node);
++ mutex_unlock(&lpfcdfc_lock);
++ rx_databuf = evdat->data;
++ if (evdat->len != full_size) {
++ lpfc_printf_log(phba, KERN_ERR, LOG_LIBDFC,
++ "1603 Loopback test did not receive expected "
++ "data length. actual length 0x%x expected "
++ "length 0x%x\n",
++ evdat->len, full_size);
++ rc = EIO;
++ }
++ else if (rx_databuf == NULL)
++ rc = EIO;
++ else {
++ rx_databuf += ELX_LOOPBACK_HEADER_SZ;
++ memcpy(ptr, rx_databuf, size);
++ rc = IOCB_SUCCESS;
++ }
++ }
++
++err_loopback_test_exit:
++ lpfcdfc_loop_self_unreg(phba, rpi);
++
++ mutex_lock(&lpfcdfc_lock);
++ lpfcdfc_event_unref(evt); /* release ref */
++ lpfcdfc_event_unref(evt); /* delete */
++ mutex_unlock(&lpfcdfc_lock);
++
++ if (cmdiocbq != NULL)
++ lpfc_sli_release_iocbq(phba, cmdiocbq);
++
++ if (rspiocbq != NULL)
++ lpfc_sli_release_iocbq(phba, rspiocbq);
++
++ if (txbmp != NULL) {
++ if (txbpl != NULL) {
++ if (txbuffer != NULL)
++ dfc_cmd_data_free(phba, txbuffer);
++ lpfc_mbuf_free(phba, txbmp->virt, txbmp->phys);
++ }
++ kfree(txbmp);
++ }
++ return rc;
++}
++
++static int
++dfc_rsp_data_copy(struct lpfc_hba * phba,
++ uint8_t * outdataptr, struct lpfc_dmabufext * mlist,
++ uint32_t size)
++{
++ struct lpfc_dmabufext *mlast = NULL;
++ int cnt, offset = 0;
++ struct list_head head, *curr, *next;
++
++ if (!mlist)
++ return 0;
++
++ list_add_tail(&head, &mlist->dma.list);
++
++ list_for_each_safe(curr, next, &head) {
++ mlast = list_entry(curr, struct lpfc_dmabufext , dma.list);
++ if (!size)
++ break;
++
++		/* We copy chunks of 4K */
++ if (size > BUF_SZ_4K)
++ cnt = BUF_SZ_4K;
++ else
++ cnt = size;
++
++ if (outdataptr) {
++ pci_dma_sync_single_for_device(phba->pcidev,
++ mlast->dma.phys, LPFC_BPL_SIZE, PCI_DMA_TODEVICE);
++
++ /* Copy data to user space */
++ if (copy_to_user
++ ((void __user *) (outdataptr + offset),
++ (uint8_t *) mlast->dma.virt, cnt))
++ return 1;
++ }
++ offset += cnt;
++ size -= cnt;
++ }
++ list_del(&head);
++ return 0;
++}
++
++static int
++lpfc_issue_ct_rsp(struct lpfc_hba * phba, uint32_t tag,
++ struct lpfc_dmabuf * bmp,
++ struct lpfc_dmabufext * inp)
++{
++ struct lpfc_sli *psli;
++ IOCB_t *icmd;
++ struct lpfc_iocbq *ctiocb;
++ struct lpfc_sli_ring *pring;
++ uint32_t num_entry;
++ int rc = 0;
++
++ psli = &phba->sli;
++ pring = &psli->ring[LPFC_ELS_RING];
++ num_entry = inp->flag;
++ inp->flag = 0;
++
++ /* Allocate buffer for command iocb */
++ ctiocb = lpfc_sli_get_iocbq(phba);
++ if (!ctiocb) {
++ rc = ENOMEM;
++ goto issue_ct_rsp_exit;
++ }
++ icmd = &ctiocb->iocb;
++
++ icmd->un.xseq64.bdl.ulpIoTag32 = 0;
++ icmd->un.xseq64.bdl.addrHigh = putPaddrHigh(bmp->phys);
++ icmd->un.xseq64.bdl.addrLow = putPaddrLow(bmp->phys);
++ icmd->un.xseq64.bdl.bdeFlags = BUFF_TYPE_BLP_64;
++ icmd->un.xseq64.bdl.bdeSize = (num_entry * sizeof (struct ulp_bde64));
++ icmd->un.xseq64.w5.hcsw.Fctl = (LS | LA);
++ icmd->un.xseq64.w5.hcsw.Dfctl = 0;
++ icmd->un.xseq64.w5.hcsw.Rctl = FC_SOL_CTL;
++ icmd->un.xseq64.w5.hcsw.Type = FC_COMMON_TRANSPORT_ULP;
++
++ pci_dma_sync_single_for_device(phba->pcidev, bmp->phys, LPFC_BPL_SIZE,
++ PCI_DMA_TODEVICE);
++
++ /* Fill in rest of iocb */
++ icmd->ulpCommand = CMD_XMIT_SEQUENCE64_CX;
++ icmd->ulpBdeCount = 1;
++ icmd->ulpLe = 1;
++ icmd->ulpClass = CLASS3;
++ icmd->ulpContext = (ushort) tag;
++ icmd->ulpTimeout = phba->fc_ratov * 2;
++
++ /* Xmit CT response on exchange <xid> */
++ lpfc_printf_log(phba, KERN_INFO, LOG_ELS,
++ "1200 Xmit CT response on exchange x%x Data: x%x x%x\n",
++ icmd->ulpContext, icmd->ulpIoTag, phba->link_state);
++
++ ctiocb->iocb_cmpl = NULL;
++ ctiocb->iocb_flag |= LPFC_IO_LIBDFC;
++ ctiocb->vport = phba->pport;
++ rc = lpfc_sli_issue_iocb_wait(phba, pring, ctiocb, NULL,
++ phba->fc_ratov * 2 + LPFC_DRVR_TIMEOUT);
++
++ if (rc == IOCB_TIMEDOUT) {
++ ctiocb->context1 = NULL;
++ ctiocb->context2 = NULL;
++ ctiocb->iocb_cmpl = lpfc_ioctl_timeout_iocb_cmpl;
++ return rc;
++ }
++
++ /* Calling routine takes care of IOCB_ERROR => EIO translation */
++ if (rc != IOCB_SUCCESS)
++ rc = IOCB_ERROR;
++
++ lpfc_sli_release_iocbq(phba, ctiocb);
++issue_ct_rsp_exit:
++ return rc;
++}
++
++
++static void
++lpfcdfc_ct_unsol_event(struct lpfc_hba * phba,
++ struct lpfc_sli_ring * pring,
++ struct lpfc_iocbq * piocbq)
++{
++ struct lpfcdfc_host * dfchba = lpfcdfc_host_from_hba(phba);
++ uint32_t evt_req_id = 0;
++ uint32_t cmd;
++ uint32_t len;
++ struct lpfc_dmabuf *dmabuf = NULL;
++ struct lpfcdfc_event * evt;
++ struct event_data * evt_dat = NULL;
++ struct lpfc_iocbq * iocbq;
++ size_t offset = 0;
++ struct list_head head;
++ struct ulp_bde64 * bde;
++ dma_addr_t dma_addr;
++ int i;
++ struct lpfc_dmabuf *bdeBuf1 = piocbq->context2;
++ struct lpfc_dmabuf *bdeBuf2 = piocbq->context3;
++ struct lpfc_hbq_entry *hbqe;
++
++ BUG_ON(&dfchba->node == &lpfcdfc_hosts);
++ INIT_LIST_HEAD(&head);
++ if (piocbq->iocb.ulpBdeCount == 0 ||
++ piocbq->iocb.un.cont64[0].tus.f.bdeSize == 0)
++ goto error_unsol_ct_exit;
++
++ if (phba->sli3_options & LPFC_SLI3_HBQ_ENABLED)
++ dmabuf = bdeBuf1;
++ else {
++ dma_addr = getPaddr(piocbq->iocb.un.cont64[0].addrHigh,
++ piocbq->iocb.un.cont64[0].addrLow);
++ dmabuf = lpfc_sli_ringpostbuf_get(phba, pring, dma_addr);
++ }
++ BUG_ON(dmabuf == NULL);
++ evt_req_id = ((struct lpfc_sli_ct_request *)(dmabuf->virt))->FsType;
++ cmd = ((struct lpfc_sli_ct_request *)
++ (dmabuf->virt))->CommandResponse.bits.CmdRsp;
++ len = ((struct lpfc_sli_ct_request *)
++ (dmabuf->virt))->CommandResponse.bits.Size;
++ if (!(phba->sli3_options & LPFC_SLI3_HBQ_ENABLED))
++ lpfc_sli_ringpostbuf_put(phba, pring, dmabuf);
++
++ mutex_lock(&lpfcdfc_lock);
++ list_for_each_entry(evt, &dfchba->ev_waiters, node) {
++ if (!(evt->type_mask & FC_REG_CT_EVENT) ||
++ evt->req_id != evt_req_id)
++ continue;
++
++ lpfcdfc_event_ref(evt);
++
++ if ((evt_dat = kzalloc(sizeof(*evt_dat), GFP_KERNEL)) == NULL) {
++ lpfcdfc_event_unref(evt);
++ break;
++ }
++
++ mutex_unlock(&lpfcdfc_lock);
++
++ INIT_LIST_HEAD(&head);
++ list_add_tail(&head, &piocbq->list);
++ if (phba->sli3_options & LPFC_SLI3_HBQ_ENABLED) {
++ /* take accumulated byte count from the last iocbq */
++ iocbq = list_entry(head.prev, typeof(*iocbq), list);
++ evt_dat->len = iocbq->iocb.unsli3.rcvsli3.acc_len;
++ } else {
++ list_for_each_entry(iocbq, &head, list) {
++ for (i = 0; i < iocbq->iocb.ulpBdeCount; i++)
++ evt_dat->len +=
++ iocbq->iocb.un.cont64[i].tus.f.bdeSize;
++ }
++ }
++
++
++ evt_dat->data = kzalloc(evt_dat->len, GFP_KERNEL);
++ if (evt_dat->data == NULL) {
++ kfree (evt_dat);
++ mutex_lock(&lpfcdfc_lock);
++ lpfcdfc_event_unref(evt);
++ mutex_unlock(&lpfcdfc_lock);
++ goto error_unsol_ct_exit;
++ }
++
++ list_for_each_entry(iocbq, &head, list) {
++ if (phba->sli3_options & LPFC_SLI3_HBQ_ENABLED) {
++ bdeBuf1 = iocbq->context2;
++ bdeBuf2 = iocbq->context3;
++ }
++ for (i = 0; i < iocbq->iocb.ulpBdeCount; i++) {
++ int size = 0;
++ if (phba->sli3_options &
++ LPFC_SLI3_HBQ_ENABLED) {
++ BUG_ON(i>1);
++ if (i == 0) {
++ hbqe = (struct lpfc_hbq_entry *)
++ &iocbq->iocb.un.ulpWord[0];
++ size = hbqe->bde.tus.f.bdeSize;
++ dmabuf = bdeBuf1;
++ } else if (i == 1) {
++ hbqe = (struct lpfc_hbq_entry *)
++ &iocbq->iocb.unsli3.
++ sli3Words[4];
++ size = hbqe->bde.tus.f.bdeSize;
++ dmabuf = bdeBuf2;
++ }
++ if ((offset + size) > evt_dat->len)
++ size = evt_dat->len - offset;
++ } else {
++ size = iocbq->iocb.un.cont64[i].
++ tus.f.bdeSize;
++ bde = &iocbq->iocb.un.cont64[i];
++ dma_addr = getPaddr(bde->addrHigh,
++ bde->addrLow);
++ dmabuf = lpfc_sli_ringpostbuf_get(phba,
++ pring, dma_addr);
++ }
++ if (dmabuf == NULL) {
++ kfree (evt_dat->data);
++ kfree (evt_dat);
++ mutex_lock(&lpfcdfc_lock);
++ lpfcdfc_event_unref(evt);
++ mutex_unlock(&lpfcdfc_lock);
++ goto error_unsol_ct_exit;
++ }
++ memcpy ((char *)(evt_dat->data) + offset,
++ dmabuf->virt, size);
++ offset += size;
++ if (evt_req_id != SLI_CT_ELX_LOOPBACK &&
++ !(phba->sli3_options &
++ LPFC_SLI3_HBQ_ENABLED))
++ lpfc_sli_ringpostbuf_put(phba, pring,
++ dmabuf);
++ else {
++ switch (cmd) {
++ case ELX_LOOPBACK_DATA:
++ dfc_cmd_data_free(phba,
++ (struct lpfc_dmabufext *)
++ dmabuf);
++ break;
++ case ELX_LOOPBACK_XRI_SETUP:
++ if (!(phba->sli3_options &
++ LPFC_SLI3_HBQ_ENABLED))
++ lpfc_post_buffer(phba,
++ pring,
++ 1);
++ else
++ lpfc_in_buf_free(phba,
++ dmabuf);
++ break;
++ default:
++ if (!(phba->sli3_options &
++ LPFC_SLI3_HBQ_ENABLED))
++ lpfc_post_buffer(phba,
++ pring,
++ 1);
++ break;
++ }
++ }
++ }
++ }
++
++ mutex_lock(&lpfcdfc_lock);
++ evt_dat->immed_dat = piocbq->iocb.ulpContext;
++ evt_dat->type = FC_REG_CT_EVENT;
++ list_add(&evt_dat->node, &evt->events_to_see);
++ wake_up_interruptible(&evt->wq);
++ lpfcdfc_event_unref(evt);
++ if (evt_req_id == SLI_CT_ELX_LOOPBACK)
++ break;
++ }
++ mutex_unlock(&lpfcdfc_lock);
++
++error_unsol_ct_exit:
++ if(!list_empty(&head))
++ list_del(&head);
++ if (evt_req_id != SLI_CT_ELX_LOOPBACK &&
++ dfchba->base_ct_unsol_event != NULL)
++ (dfchba->base_ct_unsol_event)(phba, pring, piocbq);
++
++ return;
++}
++
++
++struct lpfc_dmabufext *
++__dfc_cmd_data_alloc(struct lpfc_hba * phba,
++ char *indataptr, struct ulp_bde64 * bpl, uint32_t size,
++ int nocopydata)
++{
++ struct lpfc_dmabufext *mlist = NULL;
++ struct lpfc_dmabufext *dmp;
++ int cnt, offset = 0, i = 0;
++ struct pci_dev *pcidev;
++
++ pcidev = phba->pcidev;
++
++ while (size) {
++ /* We get chunks of 4K */
++ if (size > BUF_SZ_4K)
++ cnt = BUF_SZ_4K;
++ else
++ cnt = size;
++
++ /* allocate struct lpfc_dmabufext buffer header */
++ dmp = kmalloc(sizeof (struct lpfc_dmabufext), GFP_KERNEL);
++ if (dmp == 0)
++ goto out;
++
++ INIT_LIST_HEAD(&dmp->dma.list);
++
++ /* Queue it to a linked list */
++ if (mlist)
++ list_add_tail(&dmp->dma.list, &mlist->dma.list);
++ else
++ mlist = dmp;
++
++ /* allocate buffer */
++ dmp->dma.virt = dma_alloc_coherent(&pcidev->dev,
++ cnt,
++ &(dmp->dma.phys),
++ GFP_KERNEL);
++
++ if (dmp->dma.virt == NULL)
++ goto out;
++
++ dmp->size = cnt;
++
++ if (indataptr || nocopydata) {
++ if (indataptr)
++ /* Copy data from user space in */
++ if (copy_from_user ((uint8_t *) dmp->dma.virt,
++ (void __user *) (indataptr + offset),
++ cnt)) {
++ goto out;
++ }
++
++ pci_dma_sync_single_for_device(phba->pcidev,
++ dmp->dma.phys, LPFC_BPL_SIZE, PCI_DMA_TODEVICE);
++
++ } else
++ memset((uint8_t *)dmp->dma.virt, 0, cnt);
++ bpl->tus.f.bdeFlags = BUFF_TYPE_BDE_64;
++
++ /* build buffer ptr list for IOCB */
++ bpl->addrLow = le32_to_cpu(putPaddrLow(dmp->dma.phys));
++ bpl->addrHigh = le32_to_cpu(putPaddrHigh(dmp->dma.phys));
++ bpl->tus.f.bdeSize = (ushort) cnt;
++ bpl->tus.w = le32_to_cpu(bpl->tus.w);
++ bpl++;
++
++ i++;
++ offset += cnt;
++ size -= cnt;
++ }
++
++ mlist->flag = i;
++ return mlist;
++out:
++ dfc_cmd_data_free(phba, mlist);
++ return NULL;
++}
++
++static struct lpfc_dmabufext *
++dfc_cmd_data_alloc(struct lpfc_hba * phba,
++ char *indataptr, struct ulp_bde64 * bpl, uint32_t size)
++{
++ /* if indataptr is null it is a rsp buffer. */
++ return __dfc_cmd_data_alloc(phba, indataptr, bpl, size,
++ 0 /* don't copy user data */);
++}
++
++int
++__dfc_cmd_data_free(struct lpfc_hba * phba, struct lpfc_dmabufext * mlist)
++{
++ return dfc_cmd_data_free(phba, mlist);
++}
++static int
++dfc_cmd_data_free(struct lpfc_hba * phba, struct lpfc_dmabufext * mlist)
++{
++ struct lpfc_dmabufext *mlast;
++ struct pci_dev *pcidev;
++ struct list_head head, *curr, *next;
++
++ if ((!mlist) || (!lpfc_is_link_up(phba) &&
++ (phba->link_flag & LS_LOOPBACK_MODE))) {
++ return 0;
++ }
++
++ pcidev = phba->pcidev;
++ list_add_tail(&head, &mlist->dma.list);
++
++ list_for_each_safe(curr, next, &head) {
++ mlast = list_entry(curr, struct lpfc_dmabufext , dma.list);
++ if (mlast->dma.virt)
++ dma_free_coherent(&pcidev->dev,
++ mlast->size,
++ mlast->dma.virt,
++ mlast->dma.phys);
++ kfree(mlast);
++ }
++ return 0;
++}
++
++
++/* The only reason we need this reverse find is that we
++ * are bent on keeping original calling conventions.
++ */
++static struct lpfcdfc_host *
++lpfcdfc_host_from_hba(struct lpfc_hba * phba)
++{
++ struct lpfcdfc_host * dfchba;
++
++ mutex_lock(&lpfcdfc_lock);
++ list_for_each_entry(dfchba, &lpfcdfc_hosts, node) {
++ if (dfchba->phba == phba)
++ break;
++ }
++ mutex_unlock(&lpfcdfc_lock);
++
++ return dfchba;
++}
++
++struct lpfcdfc_host *
++lpfcdfc_host_add (struct pci_dev * dev,
++ struct Scsi_Host * host,
++ struct lpfc_hba * phba)
++{
++ struct lpfcdfc_host * dfchba = NULL;
++ struct lpfc_sli_ring_mask * prt = NULL;
++
++ dfchba = kzalloc(sizeof(*dfchba), GFP_KERNEL);
++ if (dfchba == NULL)
++ return NULL;
++
++ dfchba->inst = phba->brd_no;
++ dfchba->phba = phba;
++ dfchba->vport = phba->pport;
++ dfchba->host = host;
++ dfchba->dev = dev;
++ dfchba->blocked = 0;
++
++ spin_lock_irq(&phba->hbalock);
++ prt = phba->sli.ring[LPFC_ELS_RING].prt;
++ dfchba->base_ct_unsol_event = prt[2].lpfc_sli_rcv_unsol_event;
++ prt[2].lpfc_sli_rcv_unsol_event = lpfcdfc_ct_unsol_event;
++ prt[3].lpfc_sli_rcv_unsol_event = lpfcdfc_ct_unsol_event;
++ spin_unlock_irq(&phba->hbalock);
++ mutex_lock(&lpfcdfc_lock);
++ list_add_tail(&dfchba->node, &lpfcdfc_hosts);
++ INIT_LIST_HEAD(&dfchba->ev_waiters);
++ mutex_unlock(&lpfcdfc_lock);
++
++ return dfchba;
++}
++
++
++void
++lpfcdfc_host_del (struct lpfcdfc_host * dfchba)
++{
++ struct Scsi_Host * host;
++ struct lpfc_hba * phba = NULL;
++ struct lpfc_sli_ring_mask * prt = NULL;
++ struct lpfcdfc_event * evt;
++
++ mutex_lock(&lpfcdfc_lock);
++ dfchba->blocked = 1;
++
++ list_for_each_entry(evt, &dfchba->ev_waiters, node) {
++ wake_up_interruptible(&evt->wq);
++ }
++
++ while (dfchba->ref_count) {
++ mutex_unlock(&lpfcdfc_lock);
++ msleep(2000);
++ mutex_lock(&lpfcdfc_lock);
++ }
++
++ if (dfchba->dev->driver) {
++ host = pci_get_drvdata(dfchba->dev);
++ if ((host != NULL) &&
++ (struct lpfc_vport *)host->hostdata == dfchba->vport) {
++ phba = dfchba->phba;
++ mutex_unlock(&lpfcdfc_lock);
++ spin_lock_irq(&phba->hbalock);
++ prt = phba->sli.ring[LPFC_ELS_RING].prt;
++ prt[2].lpfc_sli_rcv_unsol_event =
++ dfchba->base_ct_unsol_event;
++ prt[3].lpfc_sli_rcv_unsol_event =
++ dfchba->base_ct_unsol_event;
++ spin_unlock_irq(&phba->hbalock);
++ mutex_lock(&lpfcdfc_lock);
++ }
++ }
++ list_del_init(&dfchba->node);
++ mutex_unlock(&lpfcdfc_lock);
++ kfree (dfchba);
++}
++
++/*
++ * Retrieve lpfc_hba * matching instance (board no)
++ * If found return lpfc_hba *
++ * If not found return NULL
++ */
++static struct lpfcdfc_host *
++lpfcdfc_get_phba_by_inst(int inst)
++{
++ struct Scsi_Host * host = NULL;
++ struct lpfcdfc_host * dfchba;
++
++ mutex_lock(&lpfcdfc_lock);
++ list_for_each_entry(dfchba, &lpfcdfc_hosts, node) {
++ if (dfchba->inst == inst) {
++ if (dfchba->dev->driver) {
++ host = pci_get_drvdata(dfchba->dev);
++ if ((host != NULL) &&
++ (struct lpfc_vport *)host->hostdata ==
++ dfchba->vport) {
++ mutex_unlock(&lpfcdfc_lock);
++ BUG_ON(dfchba->phba->brd_no != inst);
++ return dfchba;
++ }
++ }
++ mutex_unlock(&lpfcdfc_lock);
++ return NULL;
++ }
++ }
++ mutex_unlock(&lpfcdfc_lock);
++
++ return NULL;
++}
++
++static int
++lpfcdfc_do_ioctl(struct lpfcCmdInput *cip)
++{
++ struct lpfcdfc_host * dfchba = NULL;
++ struct lpfc_hba *phba = NULL;
++ int rc;
++ uint32_t total_mem;
++ void *dataout;
++
++
++ /* Some ioctls are per module and do not need phba */
++ switch (cip->lpfc_cmd) {
++ case LPFC_GET_DFC_REV:
++ break;
++ default:
++ dfchba = lpfcdfc_get_phba_by_inst(cip->lpfc_brd);
++ if (dfchba == NULL)
++ return EINVAL;
++ phba = dfchba->phba;
++ break;
++ };
++
++ if (phba)
++ lpfc_printf_log(phba, KERN_INFO, LOG_LIBDFC,
++ "1601 libdfc ioctl entry Data: x%x x%lx x%lx x%x\n",
++ cip->lpfc_cmd, (unsigned long) cip->lpfc_arg1,
++ (unsigned long) cip->lpfc_arg2, cip->lpfc_outsz);
++ mutex_lock(&lpfcdfc_lock);
++ if (dfchba && dfchba->blocked) {
++ mutex_unlock(&lpfcdfc_lock);
++ return EINVAL;
++ }
++ if (dfchba)
++ dfchba->ref_count++;
++ mutex_unlock(&lpfcdfc_lock);
++ if (cip->lpfc_outsz >= BUF_SZ_4K) {
++
++ /*
++ * Allocate memory for ioctl data. If buffer is bigger than 64k,
++ * then we allocate 64k and re-use that buffer over and over to
++ * xfer the whole block. This is because Linux kernel has a
++ * problem allocating more than 120k of kernel space memory. Saw
++ * problem with GET_FCPTARGETMAPPING...
++ */
++ if (cip->lpfc_outsz <= (64 * 1024))
++ total_mem = cip->lpfc_outsz;
++ else
++ total_mem = 64 * 1024;
++ } else {
++ /* Allocate memory for ioctl data */
++ total_mem = BUF_SZ_4K;
++ }
++
++ /*
++ * For LPFC_HBA_GET_EVENT allocate memory which is needed to store
++ * event info. Allocating maximum possible buffer size (64KB) can fail
++ * some times under heavy IO.
++ */
++ if (cip->lpfc_cmd == LPFC_HBA_GET_EVENT) {
++ dataout = NULL;
++ } else {
++ dataout = kmalloc(total_mem, GFP_KERNEL);
++
++ if (!dataout && dfchba != NULL) {
++ mutex_lock(&lpfcdfc_lock);
++ if (dfchba)
++ dfchba->ref_count--;
++ mutex_unlock(&lpfcdfc_lock);
++ return ENOMEM;
++ }
++ }
++
++ switch (cip->lpfc_cmd) {
++
++ case LPFC_GET_DFC_REV:
++ ((struct DfcRevInfo *) dataout)->a_Major = DFC_MAJOR_REV;
++ ((struct DfcRevInfo *) dataout)->a_Minor = DFC_MINOR_REV;
++ cip->lpfc_outsz = sizeof (struct DfcRevInfo);
++ rc = 0;
++ break;
++
++ case LPFC_SEND_ELS:
++ rc = lpfc_ioctl_send_els(phba, cip, dataout);
++ break;
++
++ case LPFC_HBA_SEND_MGMT_RSP:
++ rc = lpfc_ioctl_send_mgmt_rsp(phba, cip);
++ break;
++
++ case LPFC_HBA_SEND_MGMT_CMD:
++ case LPFC_CT:
++ rc = lpfc_ioctl_send_mgmt_cmd(phba, cip, dataout);
++ break;
++
++ case LPFC_HBA_GET_EVENT:
++ rc = lpfc_ioctl_hba_get_event(phba, cip, &dataout, &total_mem);
++ if ((total_mem) && (copy_to_user ((void __user *)
++ cip->lpfc_dataout, (uint8_t *) dataout, total_mem)))
++ rc = EIO;
++ /* This is to prevent copy_to_user at end of the function. */
++ cip->lpfc_outsz = 0;
++ break;
++
++ case LPFC_HBA_SET_EVENT:
++ rc = lpfc_ioctl_hba_set_event(phba, cip);
++ break;
++
++ case LPFC_LOOPBACK_MODE:
++ rc = lpfc_ioctl_loopback_mode(phba, cip, dataout);
++ break;
++
++ case LPFC_LOOPBACK_TEST:
++ rc = lpfc_ioctl_loopback_test(phba, cip, dataout);
++ break;
++
++ case LPFC_HBA_RNID:
++ rc = lpfc_ioctl_hba_rnid(phba, cip, dataout);
++ break;
++
++ default:
++ rc = EINVAL;
++ break;
++ }
++
++ if (phba)
++ lpfc_printf_log(phba, KERN_INFO, LOG_LIBDFC,
++ "1602 libdfc ioctl exit Data: x%x x%x x%lx\n",
++ rc, cip->lpfc_outsz, (unsigned long) cip->lpfc_dataout);
++ /* Copy data to user space config method */
++ if (rc == 0) {
++ if (cip->lpfc_outsz) {
++ if (copy_to_user
++ ((void __user *) cip->lpfc_dataout,
++ (uint8_t *) dataout, cip->lpfc_outsz)) {
++ rc = EIO;
++ }
++ }
++ }
++
++ kfree(dataout);
++ mutex_lock(&lpfcdfc_lock);
++ if (dfchba)
++ dfchba->ref_count--;
++ mutex_unlock(&lpfcdfc_lock);
++
++ return rc;
++}
++
++static int
++lpfcdfc_ioctl(struct inode *inode,
++ struct file *file, unsigned int cmd, unsigned long arg)
++{
++ int rc;
++ struct lpfcCmdInput *ci;
++
++ if (!arg)
++ return -EINVAL;
++
++ ci = (struct lpfcCmdInput *) kmalloc(sizeof (struct lpfcCmdInput),
++ GFP_KERNEL);
++
++ if (!ci)
++ return -ENOMEM;
++
++ if ((rc = copy_from_user
++ ((uint8_t *) ci, (void __user *) arg,
++ sizeof (struct lpfcCmdInput)))) {
++ kfree(ci);
++ return -EIO;
++ }
++
++ rc = lpfcdfc_do_ioctl(ci);
++
++ kfree(ci);
++ return -rc;
++}
++
++#ifdef CONFIG_COMPAT
++static long
++lpfcdfc_compat_ioctl(struct file * file, unsigned int cmd, unsigned long arg)
++{
++ struct lpfcCmdInput32 arg32;
++ struct lpfcCmdInput arg64;
++ int ret;
++
++ if(copy_from_user(&arg32, (void __user *)arg,
++ sizeof(struct lpfcCmdInput32)))
++ return -EFAULT;
++
++ arg64.lpfc_brd = arg32.lpfc_brd;
++ arg64.lpfc_ring = arg32.lpfc_ring;
++ arg64.lpfc_iocb = arg32.lpfc_iocb;
++ arg64.lpfc_flag = arg32.lpfc_flag;
++ arg64.lpfc_arg1 = (void *)(unsigned long) arg32.lpfc_arg1;
++ arg64.lpfc_arg2 = (void *)(unsigned long) arg32.lpfc_arg2;
++ arg64.lpfc_arg3 = (void *)(unsigned long) arg32.lpfc_arg3;
++ arg64.lpfc_dataout = (void *)(unsigned long) arg32.lpfc_dataout;
++ arg64.lpfc_cmd = arg32.lpfc_cmd;
++ arg64.lpfc_outsz = arg32.lpfc_outsz;
++ arg64.lpfc_arg4 = arg32.lpfc_arg4;
++ arg64.lpfc_arg5 = arg32.lpfc_arg5;
++
++ ret = lpfcdfc_do_ioctl(&arg64);
++
++ arg32.lpfc_brd = arg64.lpfc_brd;
++ arg32.lpfc_ring = arg64.lpfc_ring;
++ arg32.lpfc_iocb = arg64.lpfc_iocb;
++ arg32.lpfc_flag = arg64.lpfc_flag;
++ arg32.lpfc_arg1 = (u32)(unsigned long) arg64.lpfc_arg1;
++ arg32.lpfc_arg2 = (u32)(unsigned long) arg64.lpfc_arg2;
++ arg32.lpfc_arg3 = (u32)(unsigned long) arg64.lpfc_arg3;
++ arg32.lpfc_dataout = (u32)(unsigned long) arg64.lpfc_dataout;
++ arg32.lpfc_cmd = arg64.lpfc_cmd;
++ arg32.lpfc_outsz = arg64.lpfc_outsz;
++ arg32.lpfc_arg4 = arg64.lpfc_arg4;
++ arg32.lpfc_arg5 = arg64.lpfc_arg5;
++
++ if(copy_to_user((void __user *)arg, &arg32,
++ sizeof(struct lpfcCmdInput32)))
++ return -EFAULT;
++
++ return -ret;
++}
++#endif
++
++static struct file_operations lpfc_fops = {
++ .owner = THIS_MODULE,
++ .ioctl = lpfcdfc_ioctl,
++#ifdef CONFIG_COMPAT
++ .compat_ioctl = lpfcdfc_compat_ioctl,
++#endif
++};
++
++int
++lpfc_cdev_init(void)
++{
++
++ lpfcdfc_major = register_chrdev(0, LPFC_CHAR_DEV_NAME, &lpfc_fops);
++ if (lpfcdfc_major < 0) {
++ printk(KERN_ERR "%s:%d Unable to register \"%s\" device.\n",
++ __func__, __LINE__, LPFC_CHAR_DEV_NAME);
++ return lpfcdfc_major;
++ }
++
++ return 0;
++}
++
++void
++lpfc_cdev_exit(void)
++{
++ unregister_chrdev(lpfcdfc_major, LPFC_CHAR_DEV_NAME);
++}
+--- /dev/null
++++ b/drivers/scsi/lpfc/lpfc_ioctl.h
+@@ -0,0 +1,184 @@
++/*******************************************************************
++ * This file is part of the Emulex Linux Device Driver for *
++ * Fibre Channel Host Bus Adapters. *
++ * Copyright (C) 2006 Emulex. All rights reserved. *
++ * EMULEX and SLI are trademarks of Emulex. *
++ * www.emulex.com *
++ * *
++ * This program is free software; you can redistribute it and/or *
++ * modify it under the terms of version 2 of the GNU General *
++ * Public License as published by the Free Software Foundation. *
++ * This program is distributed in the hope that it will be useful. *
++ * ALL EXPRESS OR IMPLIED CONDITIONS, REPRESENTATIONS AND *
++ * WARRANTIES, INCLUDING ANY IMPLIED WARRANTY OF MERCHANTABILITY, *
++ * FITNESS FOR A PARTICULAR PURPOSE, OR NON-INFRINGEMENT, ARE *
++ * DISCLAIMED, EXCEPT TO THE EXTENT THAT SUCH DISCLAIMERS ARE HELD *
++ * TO BE LEGALLY INVALID. See the GNU General Public License for *
++ * more details, a copy of which can be found in the file COPYING *
++ * included with this package. *
++ *******************************************************************/
++
++#define DFC_MAJOR_REV 81
++#define DFC_MINOR_REV 0
++
++#define LPFC_MAX_EVENT 128
++
++#define LPFC_CT 0x42 /* Send CT passthru command */
++#define LPFC_HBA_RNID 0x52 /* Send an RNID request */
++#define LPFC_HBA_REFRESHINFO 0x56 /* Do a refresh of the stats */
++#define LPFC_SEND_ELS 0x57 /* Send out an ELS command */
++#define LPFC_HBA_SET_EVENT 0x59 /* Set FCP event(s) */
++#define LPFC_HBA_GET_EVENT 0x5a /* Get FCP event(s) */
++#define LPFC_HBA_SEND_MGMT_CMD 0x5b /* Send a management command */
++#define LPFC_HBA_SEND_MGMT_RSP 0x5c /* Send a management response */
++
++#define LPFC_GET_DFC_REV 0x68 /* Get the rev of the ioctl
++ driver */
++#define LPFC_LOOPBACK_TEST 0x72 /* Run Loopback test */
++#define LPFC_LOOPBACK_MODE 0x73 /* Enter Loopback mode */
++/* LPFC_LAST_IOCTL_USED 0x73 Last LPFC Ioctl used */
++
++#define INTERNAL_LOOP_BACK 0x1
++#define EXTERNAL_LOOP_BACK 0x2
++
++/* the DfcRevInfo structure */
++struct DfcRevInfo {
++ uint32_t a_Major;
++ uint32_t a_Minor;
++} ;
++
++#define LPFC_WWPN_TYPE 0
++#define LPFC_PORTID_TYPE 1
++#define LPFC_WWNN_TYPE 2
++
++struct nport_id {
++ uint32_t idType; /* 0 - wwpn, 1 - d_id, 2 - wwnn */
++ uint32_t d_id;
++ uint8_t wwpn[8];
++};
++
++#define LPFC_EVENT_LIP_OCCURRED 1
++#define LPFC_EVENT_LINK_UP 2
++#define LPFC_EVENT_LINK_DOWN 3
++#define LPFC_EVENT_LIP_RESET_OCCURRED 4
++#define LPFC_EVENT_RSCN 5
++#define LPFC_EVENT_PROPRIETARY 0xFFFF
++
++struct lpfc_hba_event_info {
++ uint32_t event_code;
++ uint32_t port_id;
++ union {
++ uint32_t rscn_event_info;
++ uint32_t pty_event_info;
++ } event;
++};
++
++
++#define LPFC_CHAR_DEV_NAME "lpfcdfc"
++
++/*
++ * Diagnostic (DFC) Command & Input structures: (LPFC)
++ */
++struct lpfcCmdInput {
++ short lpfc_brd;
++ short lpfc_ring;
++ short lpfc_iocb;
++ short lpfc_flag;
++ void *lpfc_arg1;
++ void *lpfc_arg2;
++ void *lpfc_arg3;
++ char *lpfc_dataout;
++ uint32_t lpfc_cmd;
++ uint32_t lpfc_outsz;
++ uint32_t lpfc_arg4;
++ uint32_t lpfc_arg5;
++};
++/* Used for ioctl command */
++#define LPFC_DFC_CMD_IOCTL_MAGIC 0xFC
++#define LPFC_DFC_CMD_IOCTL _IOWR(LPFC_DFC_CMD_IOCTL_MAGIC, 0x1,\
++ struct lpfcCmdInput)
++
++#ifdef CONFIG_COMPAT
++/* 32 bit version */
++struct lpfcCmdInput32 {
++ short lpfc_brd;
++ short lpfc_ring;
++ short lpfc_iocb;
++ short lpfc_flag;
++ u32 lpfc_arg1;
++ u32 lpfc_arg2;
++ u32 lpfc_arg3;
++ u32 lpfc_dataout;
++ uint32_t lpfc_cmd;
++ uint32_t lpfc_outsz;
++ uint32_t lpfc_arg4;
++ uint32_t lpfc_arg5;
++};
++#endif
++
++#define SLI_CT_ELX_LOOPBACK 0x10
++
++enum ELX_LOOPBACK_CMD {
++ ELX_LOOPBACK_XRI_SETUP,
++ ELX_LOOPBACK_DATA,
++};
++
++
++struct lpfc_link_info {
++ uint32_t a_linkEventTag;
++ uint32_t a_linkUp;
++ uint32_t a_linkDown;
++ uint32_t a_linkMulti;
++ uint32_t a_DID;
++ uint8_t a_topology;
++ uint8_t a_linkState;
++ uint8_t a_alpa;
++ uint8_t a_alpaCnt;
++ uint8_t a_alpaMap[128];
++ uint8_t a_wwpName[8];
++ uint8_t a_wwnName[8];
++};
++
++enum lpfc_host_event_code {
++ LPFCH_EVT_LIP = 0x1,
++ LPFCH_EVT_LINKUP = 0x2,
++ LPFCH_EVT_LINKDOWN = 0x3,
++ LPFCH_EVT_LIPRESET = 0x4,
++ LPFCH_EVT_RSCN = 0x5,
++ LPFCH_EVT_ADAPTER_CHANGE = 0x103,
++ LPFCH_EVT_PORT_UNKNOWN = 0x200,
++ LPFCH_EVT_PORT_OFFLINE = 0x201,
++ LPFCH_EVT_PORT_ONLINE = 0x202,
++ LPFCH_EVT_PORT_FABRIC = 0x204,
++ LPFCH_EVT_LINK_UNKNOWN = 0x500,
++ LPFCH_EVT_VENDOR_UNIQUE = 0xffff,
++};
++
++#define ELX_LOOPBACK_HEADER_SZ \
++ (size_t)(&((struct lpfc_sli_ct_request *)NULL)->un)
++
++struct lpfc_host_event {
++ uint32_t seq_num;
++ enum lpfc_host_event_code event_code;
++ uint32_t data;
++};
++
++struct lpfc_timedout_iocb_ctxt {
++ struct lpfc_iocbq *rspiocbq;
++ struct lpfc_dmabuf *mp;
++ struct lpfc_dmabuf *bmp;
++ struct lpfc_scsi_buf *lpfc_cmd;
++ struct lpfc_dmabufext *outdmp;
++ struct lpfc_dmabufext *indmp;
++};
++
++#ifdef __KERNEL__
++struct lpfcdfc_host;
++
++/* Initialize/Un-initialize char device */
++int lpfc_cdev_init(void);
++void lpfc_cdev_exit(void);
++void lpfcdfc_host_del(struct lpfcdfc_host *);
++struct lpfcdfc_host *lpfcdfc_host_add(struct pci_dev *, struct Scsi_Host *,
++ struct lpfc_hba *);
++#endif /* __KERNEL__ */
+--- a/drivers/scsi/lpfc/lpfc_logmsg.h
++++ b/drivers/scsi/lpfc/lpfc_logmsg.h
+@@ -32,6 +32,7 @@
+ #define LOG_FCP_ERROR 0x1000 /* log errors, not underruns */
+ #define LOG_LIBDFC 0x2000 /* Libdfc events */
+ #define LOG_VPORT 0x4000 /* NPIV events */
++#define LOG_SECURITY 0x8000 /* FC Security */
+ #define LOG_ALL_MSG 0xffff /* LOG all messages */
+
+ #define lpfc_printf_vlog(vport, level, mask, fmt, arg...) \
+--- a/drivers/scsi/lpfc/lpfc_mbox.c
++++ b/drivers/scsi/lpfc/lpfc_mbox.c
+@@ -1083,7 +1083,7 @@ lpfc_config_port(struct lpfc_hba *phba,
+ phba->pcb->feature = FEATURE_INITIAL_SLI2;
+
+ /* Setup Mailbox pointers */
+- phba->pcb->mailBoxSize = sizeof(MAILBOX_t);
++ phba->pcb->mailBoxSize = sizeof(MAILBOX_t) + MAILBOX_EXT_SIZE;
+ offset = (uint8_t *)phba->mbox - (uint8_t *)phba->slim2p.virt;
+ pdma_addr = phba->slim2p.phys + offset;
+ phba->pcb->mbAddrHigh = putPaddrHigh(pdma_addr);
+--- /dev/null
++++ b/drivers/scsi/lpfc/lpfc_menlo.c
+@@ -0,0 +1,1174 @@
++/*******************************************************************
++ * This file is part of the Emulex Linux Device Driver for *
++ * Fibre Channel Host Bus Adapters. *
++ * Copyright (C) 2007-2008 Emulex. All rights reserved. *
++ * EMULEX and SLI are trademarks of Emulex. *
++ * www.emulex.com *
++ * *
++ * This program is free software; you can redistribute it and/or *
++ * modify it under the terms of version 2 of the GNU General *
++ * Public License as published by the Free Software Foundation. *
++ * This program is distributed in the hope that it will be useful. *
++ * ALL EXPRESS OR IMPLIED CONDITIONS, REPRESENTATIONS AND *
++ * WARRANTIES, INCLUDING ANY IMPLIED WARRANTY OF MERCHANTABILITY, *
++ * FITNESS FOR A PARTICULAR PURPOSE, OR NON-INFRINGEMENT, ARE *
++ * DISCLAIMED, EXCEPT TO THE EXTENT THAT SUCH DISCLAIMERS ARE HELD *
++ * TO BE LEGALLY INVALID. See the GNU General Public License for *
++ * more details, a copy of which can be found in the file COPYING *
++ * included with this package. *
++ *******************************************************************/
++
++#include <linux/ctype.h>
++#include <linux/delay.h>
++#include <linux/pci.h>
++#include <linux/interrupt.h>
++
++#include <scsi/scsi.h>
++#include <scsi/scsi_device.h>
++#include <scsi/scsi_host.h>
++#include <scsi/scsi_tcq.h>
++#include <scsi/scsi_transport_fc.h>
++
++#include "lpfc_hw.h"
++#include "lpfc_sli.h"
++#include "lpfc_nl.h"
++#include "lpfc_disc.h"
++#include "lpfc_scsi.h"
++#include "lpfc.h"
++#include "lpfc_logmsg.h"
++#include "lpfc_version.h"
++#include "lpfc_compat.h"
++#include "lpfc_crtn.h"
++#include "lpfc_vport.h"
++
++#define MENLO_CMD_FW_DOWNLOAD 0x00000002
++
++static void lpfc_menlo_iocb_timeout_cmpl(struct lpfc_hba *,
++ struct lpfc_iocbq *, struct lpfc_iocbq *);
++
++extern int
++__dfc_cmd_data_free(struct lpfc_hba * phba, struct lpfc_dmabufext * mlist);
++
++extern struct lpfc_dmabufext *
++__dfc_cmd_data_alloc(struct lpfc_hba * phba,
++ char *indataptr, struct ulp_bde64 * bpl, uint32_t size,
++ int nocopydata);
++/*
++ * The size for the menlo interface is set at 336k because it only uses
++ * one bpl. A bpl can contain 85 BDE descriptors. Each BDE can represent
++ * up to 4k. I used 84 BDE entries to do this calculation because the
++ * 1st sysfs_menlo_write is for just the cmd header which is 12 bytes.
++ * size = PAGE_SZ * (sizeof(bpl) / sizeof(BDE)) -1;
++ */
++#define SYSFS_MENLO_ATTR_SIZE 344064
++typedef struct menlo_get_cmd
++{
++ uint32_t code; /* Command code */
++ uint32_t context; /* Context */
++ uint32_t length; /* Max response length */
++} menlo_get_cmd_t;
++
++typedef struct menlo_init_rsp
++{
++ uint32_t code;
++ uint32_t bb_credit; /* Menlo FC BB Credit */
++ uint32_t frame_size; /* Menlo FC receive frame size */
++ uint32_t fw_version; /* Menlo firmware version */
++ uint32_t reset_status; /* Reason for previous reset */
++
++#define MENLO_RESET_STATUS_NORMAL 0
++#define MENLO_RESET_STATUS_PANIC 1
++
++ uint32_t maint_status; /* Menlo Maintenance Mode status at link up */
++
++
++#define MENLO_MAINTENANCE_MODE_DISABLE 0
++#define MENLO_MAINTENANCE_MODE_ENABLE 1
++ uint32_t fw_type;
++ uint32_t fru_data_valid; /* 0=invalid, 1=valid */
++} menlo_init_rsp_t;
++
++#define MENLO_CMD_GET_INIT 0x00000007
++#define MENLO_FW_TYPE_OPERATIONAL 0xABCD0001
++#define MENLO_FW_TYPE_GOLDEN 0xABCD0002
++#define MENLO_FW_TYPE_DIAG 0xABCD0003
++
++void
++BE_swap32_buffer(void *srcp, uint32_t cnt)
++{
++ uint32_t *src = srcp;
++ uint32_t *dest = srcp;
++ uint32_t ldata;
++ int i;
++
++ for (i = 0; i < (int)cnt; i += sizeof (uint32_t)) {
++ ldata = *src;
++ ldata = cpu_to_le32(ldata);
++ *dest = ldata;
++ src++;
++ dest++;
++ }
++}
++
++
++static int
++lpfc_alloc_menlo_genrequest64(struct lpfc_hba * phba,
++ struct lpfc_menlo_genreq64 *sysfs_menlo,
++ struct lpfc_sysfs_menlo_hdr *cmdhdr)
++{
++ struct Scsi_Host *shost = lpfc_shost_from_vport(phba->pport);
++ struct ulp_bde64 *bpl = NULL;
++ IOCB_t *cmd = NULL, *rsp = NULL;
++ struct lpfc_sli *psli = NULL;
++ struct lpfc_sli_ring *pring = NULL;
++ int rc = 0;
++ uint32_t cmdsize;
++ uint32_t rspsize;
++
++ psli = &phba->sli;
++ pring = &psli->ring[LPFC_ELS_RING];
++
++ if (!(psli->sli_flag & LPFC_SLI2_ACTIVE)) {
++ rc = EACCES;
++ goto send_menlomgmt_cmd_exit;
++ }
++
++ if (!sysfs_menlo) {
++ rc = EINVAL;
++ goto send_menlomgmt_cmd_exit;
++ }
++
++ cmdsize = cmdhdr->cmdsize;
++ rspsize = cmdhdr->rspsize;
++
++ if (!cmdsize || !rspsize || (cmdsize + rspsize > 80 * BUF_SZ_4K)) {
++ rc = ERANGE;
++ goto send_menlomgmt_cmd_exit;
++ }
++
++ spin_lock_irq(shost->host_lock);
++ sysfs_menlo->cmdiocbq = lpfc_sli_get_iocbq(phba);
++ if (!sysfs_menlo->cmdiocbq) {
++ rc = ENOMEM;
++ spin_unlock_irq(shost->host_lock);
++ lpfc_printf_log(phba, KERN_INFO, LOG_LIBDFC,
++ "1202 alloc_menlo_genreq64: couldn't alloc cmdiocbq\n");
++ goto send_menlomgmt_cmd_exit;
++ }
++ cmd = &sysfs_menlo->cmdiocbq->iocb;
++
++ sysfs_menlo->rspiocbq = lpfc_sli_get_iocbq(phba);
++ if (!sysfs_menlo->rspiocbq) {
++ rc = ENOMEM;
++ spin_unlock_irq(shost->host_lock);
++ lpfc_printf_log(phba, KERN_INFO, LOG_LIBDFC,
++ "1203 alloc_menlo_genreq64: couldn't alloc rspiocbq\n");
++ goto send_menlomgmt_cmd_exit;
++ }
++ spin_unlock_irq(shost->host_lock);
++
++ rsp = &sysfs_menlo->rspiocbq->iocb;
++
++
++ sysfs_menlo->bmp = kmalloc(sizeof (struct lpfc_dmabuf), GFP_KERNEL);
++ if (!sysfs_menlo->bmp) {
++ rc = ENOMEM;
++ lpfc_printf_log(phba, KERN_INFO, LOG_LIBDFC,
++ "1204 alloc_menlo_genreq64: couldn't alloc bmp\n");
++ goto send_menlomgmt_cmd_exit;
++ }
++
++ spin_lock_irq(shost->host_lock);
++ sysfs_menlo->bmp->virt = lpfc_mbuf_alloc(phba, 0,
++ &sysfs_menlo->bmp->phys);
++ if (!sysfs_menlo->bmp->virt) {
++ rc = ENOMEM;
++ spin_unlock_irq(shost->host_lock);
++ lpfc_printf_log(phba, KERN_INFO, LOG_LIBDFC,
++ "1205 alloc_menlo_genreq64: couldn't alloc bpl\n");
++ goto send_menlomgmt_cmd_exit;
++ }
++ spin_unlock_irq(shost->host_lock);
++
++ INIT_LIST_HEAD(&sysfs_menlo->bmp->list);
++ bpl = (struct ulp_bde64 *) sysfs_menlo->bmp->virt;
++ memset((uint8_t*)bpl, 0 , 1024);
++ sysfs_menlo->indmp = __dfc_cmd_data_alloc(phba, NULL, bpl, cmdsize, 1);
++ if (!sysfs_menlo->indmp) {
++ rc = ENOMEM;
++ lpfc_printf_log(phba, KERN_INFO, LOG_LIBDFC,
++ "1206 alloc_menlo_genreq64: couldn't alloc cmdbuf\n");
++ goto send_menlomgmt_cmd_exit;
++ }
++ sysfs_menlo->cmdbpl = bpl;
++ INIT_LIST_HEAD(&sysfs_menlo->inhead);
++ list_add_tail(&sysfs_menlo->inhead, &sysfs_menlo->indmp->dma.list);
++
++ /* flag contains total number of BPLs for xmit */
++
++ bpl += sysfs_menlo->indmp->flag;
++
++ sysfs_menlo->outdmp = __dfc_cmd_data_alloc(phba, NULL, bpl, rspsize, 0);
++ if (!sysfs_menlo->outdmp) {
++ rc = ENOMEM;
++ lpfc_printf_log(phba, KERN_INFO, LOG_LIBDFC,
++ "1207 alloc_menlo_genreq64: couldn't alloc rspbuf\n");
++ goto send_menlomgmt_cmd_exit;
++ }
++ INIT_LIST_HEAD(&sysfs_menlo->outhead);
++ list_add_tail(&sysfs_menlo->outhead, &sysfs_menlo->outdmp->dma.list);
++
++ cmd->un.genreq64.bdl.ulpIoTag32 = 0;
++ cmd->un.genreq64.bdl.addrHigh = putPaddrHigh(sysfs_menlo->bmp->phys);
++ cmd->un.genreq64.bdl.addrLow = putPaddrLow(sysfs_menlo->bmp->phys);
++ cmd->un.genreq64.bdl.bdeFlags = BUFF_TYPE_BLP_64;
++ cmd->un.genreq64.bdl.bdeSize =
++ (sysfs_menlo->outdmp->flag + sysfs_menlo->indmp->flag)
++ * sizeof(struct ulp_bde64);
++ cmd->ulpCommand = CMD_GEN_REQUEST64_CR;
++ cmd->un.genreq64.w5.hcsw.Fctl = (SI | LA);
++ cmd->un.genreq64.w5.hcsw.Dfctl = 0;
++ cmd->un.genreq64.w5.hcsw.Rctl = FC_FCP_CMND;
++ cmd->un.genreq64.w5.hcsw.Type = MENLO_TRANSPORT_TYPE; /* 0xfe */
++ cmd->un.ulpWord[4] = MENLO_DID; /* 0x0000FC0E */
++ cmd->ulpBdeCount = 1;
++ cmd->ulpClass = CLASS3;
++ cmd->ulpContext = MENLO_CONTEXT; /* 0 */
++ cmd->ulpOwner = OWN_CHIP;
++ cmd->ulpPU = MENLO_PU; /* 3 */
++ cmd->ulpLe = 1; /* Le: last element of the sequence */
++ sysfs_menlo->cmdiocbq->vport = phba->pport;
++ sysfs_menlo->cmdiocbq->context1 = NULL;
++ sysfs_menlo->cmdiocbq->iocb_flag |= LPFC_IO_LIBDFC;
++ /* We want the firmware to timeout before we do */
++ cmd->ulpTimeout = MENLO_TIMEOUT - 5;
++
++ sysfs_menlo->timeout = cmd->ulpTimeout;
++
++send_menlomgmt_cmd_exit:
++ return rc;
++}
++
++/*
++ * sysfs_menlo_genreq_free - release everything attached to one GEN_REQUEST64.
++ * @phba: HBA owning the DMA pools and the iocb free list.
++ * @sysfs_menlo: request descriptor whose buffers and iocbs are torn down.
++ *
++ * Safe to call on a partially built request: each member is tested before
++ * release and NULLed afterwards, so a repeat call is a no-op.
++ */
++void
++sysfs_menlo_genreq_free(struct lpfc_hba *phba,
++ struct lpfc_menlo_genreq64 *sysfs_menlo)
++{
++ /* Unhook the command/response buffer chains before freeing them. */
++ if ( !list_empty(&sysfs_menlo->outhead))
++ list_del_init( &sysfs_menlo->outhead);
++
++ if (!list_empty(&sysfs_menlo->inhead))
++ list_del_init( &sysfs_menlo->inhead);
++
++ if (sysfs_menlo->outdmp) {
++ __dfc_cmd_data_free(phba, sysfs_menlo->outdmp);
++ sysfs_menlo->outdmp = NULL;
++ }
++ if (sysfs_menlo->indmp) {
++ __dfc_cmd_data_free(phba, sysfs_menlo->indmp);
++ sysfs_menlo->indmp = NULL;
++ }
++ if (sysfs_menlo->bmp) {
++ /* BPL page came from the mbuf pool; descriptor from kmalloc. */
++ lpfc_mbuf_free(phba, sysfs_menlo->bmp->virt,
++ sysfs_menlo->bmp->phys);
++ kfree(sysfs_menlo->bmp);
++ sysfs_menlo->bmp = NULL;
++ }
++ if (sysfs_menlo->rspiocbq) {
++ lpfc_sli_release_iocbq(phba, sysfs_menlo->rspiocbq);
++ sysfs_menlo->rspiocbq = NULL;
++ }
++
++ if (sysfs_menlo->cmdiocbq) {
++ lpfc_sli_release_iocbq(phba, sysfs_menlo->cmdiocbq);
++ sysfs_menlo->cmdiocbq = NULL;
++ }
++}
++
++/*
++ * sysfs_menlo_idle - retire a per-process menlo context.
++ * @phba: HBA whose sysfs_menlo_list holds the context.
++ * @sysfs_menlo: context to unlink and free.
++ *
++ * Unlinks the context under hbalock, frees both GEN_REQUEST64 halves under
++ * the host lock, then releases the context itself.  Caller must not hold
++ * either lock on entry.
++ */
++static void
++sysfs_menlo_idle(struct lpfc_hba *phba,
++ struct lpfc_sysfs_menlo *sysfs_menlo)
++{
++ struct Scsi_Host *shost = lpfc_shost_from_vport(phba->pport);
++
++ spin_lock_irq(&phba->hbalock);
++ list_del_init(&sysfs_menlo->list);
++ spin_unlock_irq(&phba->hbalock);
++ spin_lock_irq(shost->host_lock);
++
++ /* cr = initial CR exchange, cx = follow-up CX exchange (fw download). */
++ if (sysfs_menlo->cr.cmdiocbq)
++ sysfs_menlo_genreq_free(phba, &sysfs_menlo->cr);
++ if (sysfs_menlo->cx.cmdiocbq)
++ sysfs_menlo_genreq_free(phba, &sysfs_menlo->cx);
++
++ spin_unlock_irq(shost->host_lock);
++ kfree(sysfs_menlo);
++}
++
++/*
++ * lpfc_menlo_iocb_timeout_cmpl - late completion for a timed-out menlo IOCB.
++ *
++ * Installed after lpfc_sli_issue_iocb_wait() reports IOCB_TIMEDOUT; by then
++ * the waiter has given up, so the context (stashed in context3) is simply
++ * logged and retired here.
++ */
++static void
++lpfc_menlo_iocb_timeout_cmpl(struct lpfc_hba *phba,
++ struct lpfc_iocbq *cmdq,
++ struct lpfc_iocbq *rspq)
++{
++ struct lpfc_sysfs_menlo *ctx = (struct lpfc_sysfs_menlo *)cmdq->context3;
++
++ lpfc_printf_log(phba, KERN_INFO, LOG_LIBDFC,
++ "1241 Menlo IOCB timeout: deleting %p\n",
++ cmdq->context3);
++ sysfs_menlo_idle(phba, ctx);
++}
++
++/*
++ * lpfc_menlo_iocb_cmpl - async completion for the MENLO_CMD_GET_INIT probe.
++ *
++ * Runs when lpfc_check_menlo_cfg() issued the command without waiting.  On
++ * success it byte-swaps the response buffer and reports the firmware type
++ * (operational / golden / diagnostic) plus FRU validity.  The per-process
++ * context travels in cmdq->context2 and is always retired at the end.
++ */
++static void
++lpfc_menlo_iocb_cmpl(struct lpfc_hba *phba,
++ struct lpfc_iocbq *cmdq,
++ struct lpfc_iocbq *rspq)
++{
++ struct lpfc_sysfs_menlo * sysfs_menlo =
++ (struct lpfc_sysfs_menlo *)cmdq->context2;
++ struct lpfc_dmabufext *mlast = NULL;
++ IOCB_t *rsp = NULL;
++ IOCB_t *cmd = NULL;
++ uint32_t * tmpptr = NULL;
++ menlo_init_rsp_t *mlorsp = NULL;
++
++ lpfc_printf_log(phba, KERN_INFO, LOG_LIBDFC,
++ "1254 Menlo IOCB complete: %p\n",
++ cmdq->context2);
++ rsp = &rspq->iocb;
++ cmd = &cmdq->iocb;
++ if ( !sysfs_menlo ) {
++ lpfc_printf_log(phba, KERN_INFO, LOG_LIBDFC,
++ "1255 Menlo IOCB complete:NULL CTX \n");
++ return;
++ }
++ if ( rsp->ulpStatus ) {
++ /* Firmware rejected the exchange: dump the command IOCB and
++ * the (swapped-back) command payload for diagnosis. */
++ lpfc_printf_log(phba, KERN_INFO, LOG_LIBDFC,
++ "1242 iocb async cmpl: ulpStatus 0x%x "
++ "ulpWord[4] 0x%x\n",
++ rsp->ulpStatus, rsp->un.ulpWord[4]);
++ lpfc_printf_log(phba, KERN_INFO, LOG_LIBDFC,
++ "1260 cr:%.08x %.08x %.08x %.08x "
++ "%.08x %.08x %.08x %.08x\n",
++ cmd->un.ulpWord[0], cmd->un.ulpWord[1],
++ cmd->un.ulpWord[2], cmd->un.ulpWord[3],
++ cmd->un.ulpWord[4], cmd->un.ulpWord[5],
++ *(uint32_t *)&cmd->un1, *((uint32_t *)&cmd->un1 + 1));
++ mlast = list_get_first(&sysfs_menlo->cr.inhead,
++ struct lpfc_dmabufext,
++ dma.list);
++ if (!mlast) {
++ lpfc_printf_log(phba, KERN_ERR, LOG_LIBDFC,
++ "1231 bad bpl:\n");
++ goto lpfc_menlo_iocb_cmpl_ext;
++ }
++ tmpptr = ( uint32_t *) mlast->dma.virt;
++ BE_swap32_buffer ((uint8_t *) tmpptr,
++ sizeof( menlo_get_cmd_t));
++ lpfc_printf_log(phba, KERN_INFO, LOG_LIBDFC,
++ "1261 cmd:%.08x %.08x %.08x\n",
++ *tmpptr, *(tmpptr+1), *(tmpptr+2));
++ goto lpfc_menlo_iocb_cmpl_ext;
++ }
++
++ /* Success path: first response buffer holds the init response. */
++ mlast = list_get_first(&sysfs_menlo->cr.outhead,
++ struct lpfc_dmabufext,
++ dma.list);
++ if (!mlast) {
++ lpfc_printf_log(phba, KERN_INFO, LOG_LIBDFC,
++ "1256 bad bpl:\n");
++ goto lpfc_menlo_iocb_cmpl_ext;
++ }
++ mlorsp = ( menlo_init_rsp_t *) mlast->dma.virt;
++ BE_swap32_buffer ((uint8_t *) mlorsp,
++ sizeof( menlo_init_rsp_t));
++
++ if (mlorsp->code != 0) {
++ lpfc_printf_log (phba, KERN_ERR, LOG_LINK_EVENT,
++ "1243 Menlo command error. code=%d.\n", mlorsp->code);
++ goto lpfc_menlo_iocb_cmpl_ext;
++
++ }
++
++ /* Classify the running firmware; anything but operational is loud. */
++ switch (mlorsp->fw_type)
++ {
++ case MENLO_FW_TYPE_OPERATIONAL: /* Menlo Operational */
++ break;
++ case MENLO_FW_TYPE_GOLDEN: /* Menlo Golden */
++ lpfc_printf_log (phba, KERN_ERR, LOG_LINK_EVENT,
++ "1246 FCoE chip is running golden firmware. "
++ "Update FCoE chip firmware immediately %x\n",
++ mlorsp->fw_type);
++ break;
++ case MENLO_FW_TYPE_DIAG: /* Menlo Diag */
++ lpfc_printf_log (phba, KERN_ERR, LOG_LINK_EVENT,
++ "1247 FCoE chip is running diagnostic "
++ "firmware. Operational use suspended. %x\n",
++ mlorsp->fw_type);
++ break;
++ default:
++ lpfc_printf_log (phba, KERN_ERR, LOG_LINK_EVENT,
++ "1248 FCoE chip is running unknown "
++ "firmware x%x.\n", mlorsp->fw_type);
++ break;
++ }
++ if (!mlorsp->fru_data_valid
++ && (mlorsp->fw_type == MENLO_FW_TYPE_OPERATIONAL)
++ && (!mlorsp->maint_status))
++ lpfc_printf_log (phba, KERN_ERR, LOG_LINK_EVENT,
++ "1249 Invalid FRU data found on adapter."
++ "Return adapter to Emulex for repair\n");
++
++lpfc_menlo_iocb_cmpl_ext:
++ sysfs_menlo_idle(phba, (struct lpfc_sysfs_menlo *)cmdq->context2);
++}
++
++/*
++ * lpfc_get_sysfs_menlo - find (or optionally create) the calling process's
++ * menlo context.
++ * @phba: HBA whose sysfs_menlo_list is searched.
++ * @create: when non-zero, allocate and enqueue a fresh context on a miss.
++ *
++ * Contexts are keyed by current->pid so concurrent userspace writers do not
++ * trample each other.  Returns NULL on miss (create == 0) or allocation
++ * failure.
++ */
++static struct lpfc_sysfs_menlo *
++lpfc_get_sysfs_menlo(struct lpfc_hba *phba, uint8_t create)
++{
++ struct lpfc_sysfs_menlo *entry;
++ pid_t pid = current->pid;
++
++ spin_lock_irq(&phba->hbalock);
++ list_for_each_entry(entry, &phba->sysfs_menlo_list, list) {
++ if (entry->pid == pid) {
++ spin_unlock_irq(&phba->hbalock);
++ return entry;
++ }
++ }
++ spin_unlock_irq(&phba->hbalock);
++
++ if (!create)
++ return NULL;
++
++ entry = kzalloc(sizeof(*entry), GFP_KERNEL);
++ if (!entry)
++ return NULL;
++ entry->state = SMENLO_IDLE;
++ entry->pid = pid;
++
++ spin_lock_irq(&phba->hbalock);
++ list_add_tail(&entry->list, &phba->sysfs_menlo_list);
++ spin_unlock_irq(&phba->hbalock);
++ return entry;
++}
++
++/*
++ * lpfc_menlo_write - stage a management command for the FCoE (Menlo) chip.
++ * @phba: HBA owning the per-process menlo context list.
++ * @buf: chunk of command data; at off 0 it must be a
++ * struct lpfc_sysfs_menlo_hdr.
++ * @off: byte offset of this chunk within the overall command stream.
++ * @count: bytes in this chunk; off, count and buf must be 4-byte aligned.
++ *
++ * The first write (off == 0) carries the header and allocates the context
++ * plus GEN_REQUEST64 resources (an extra CX exchange for firmware
++ * download); later writes copy payload into the pre-allocated 4K DMA
++ * buffers at the matching offset.  Returns @count on success or a
++ * negative errno, tearing the context down on most failures.
++ */
++static ssize_t
++lpfc_menlo_write(struct lpfc_hba *phba,
++ char *buf, loff_t off, size_t count)
++{
++ struct lpfc_sysfs_menlo *sysfs_menlo;
++ struct lpfc_dmabufext *mlast = NULL;
++ struct lpfc_sysfs_menlo_hdr cmdhdrCR;
++ struct lpfc_menlo_genreq64 *genreq = NULL;
++ loff_t temp_off = 0;
++ struct ulp_bde64 *bpl = NULL;
++ int mlastcnt = 0;
++ uint32_t * tmpptr = NULL;
++ uint32_t addr_high = 0;
++ uint32_t addr_low = 0;
++ int hdr_offset = sizeof(struct lpfc_sysfs_menlo_hdr);
++
++ if (off % 4 || count % 4 || (unsigned long)buf % 4)
++ return -EINVAL;
++
++ if (count == 0)
++ return 0;
++
++ if (off == 0) {
++ /* First chunk: must be exactly one command header. */
++ ssize_t rc;
++ struct lpfc_sysfs_menlo_hdr *cmdhdr =
++ (struct lpfc_sysfs_menlo_hdr *)buf;
++ lpfc_printf_log(phba, KERN_INFO, LOG_LIBDFC,
++ "1208 menlo_write: cmd %x cmdsz %d rspsz %d\n",
++ cmdhdr->cmd, cmdhdr->cmdsize,
++ cmdhdr->rspsize);
++ if (count != sizeof(struct lpfc_sysfs_menlo_hdr)) {
++ lpfc_printf_log(phba, KERN_ERR, LOG_LIBDFC,
++ "1210 Invalid cmd size: cmd %x "
++ "cmdsz %d rspsz %d\n",
++ cmdhdr->cmd, cmdhdr->cmdsize,
++ cmdhdr->rspsize);
++ return -EINVAL;
++ }
++
++ sysfs_menlo = lpfc_get_sysfs_menlo(phba, 1);
++ if (!sysfs_menlo)
++ return -ENOMEM;
++ sysfs_menlo->cmdhdr = *cmdhdr;
++ if (cmdhdr->cmd == MENLO_CMD_FW_DOWNLOAD) {
++ /* Firmware download needs a second (CX) exchange for
++ * the payload; the CR exchange carries only the
++ * header. */
++ sysfs_menlo->cmdhdr.cmdsize
++ -= sizeof(struct lpfc_sysfs_menlo_hdr);
++
++ rc = lpfc_alloc_menlo_genrequest64(phba,
++ &sysfs_menlo->cx,
++ &sysfs_menlo->cmdhdr);
++ if (rc != 0) {
++ lpfc_printf_log(phba, KERN_ERR, LOG_LIBDFC,
++ "1211 genreq alloc failed: %d\n",
++ (int) rc);
++ sysfs_menlo_idle(phba,sysfs_menlo);
++ return -ENOMEM;
++ }
++ cmdhdrCR.cmd = cmdhdr->cmd;
++ cmdhdrCR.cmdsize = sizeof(struct lpfc_sysfs_menlo_hdr);
++ cmdhdrCR.rspsize = 4;
++ } else
++ cmdhdrCR = *cmdhdr;
++
++ rc = lpfc_alloc_menlo_genrequest64(phba,
++ &sysfs_menlo->cr,&cmdhdrCR);
++ if (rc != 0) {
++ lpfc_printf_log(phba, KERN_ERR, LOG_LIBDFC,
++ "1223 menlo_write: couldn't alloc genreq %d\n",
++ (int) rc);
++ sysfs_menlo_idle(phba,sysfs_menlo);
++ return -ENOMEM;
++ }
++ } else {
++ /* Continuation chunk: the context must already exist. */
++ sysfs_menlo = lpfc_get_sysfs_menlo(phba, 0);
++ if (!sysfs_menlo)
++ return -EAGAIN;
++ lpfc_printf_log(phba, KERN_INFO, LOG_LIBDFC,
++ "1212 menlo_write: sysfs_menlo %p cmd %x cmdsz %d"
++ " rspsz %d cr-off %d cx-off %d count %d\n",
++ sysfs_menlo,
++ sysfs_menlo->cmdhdr.cmd,
++ sysfs_menlo->cmdhdr.cmdsize,
++ sysfs_menlo->cmdhdr.rspsize,
++ (int)sysfs_menlo->cr.offset,
++ (int)sysfs_menlo->cx.offset,
++ (int)count);
++ }
++
++ if ((count + sysfs_menlo->cr.offset) > sysfs_menlo->cmdhdr.cmdsize) {
++ if ( sysfs_menlo->cmdhdr.cmdsize != 4) {
++ lpfc_printf_log(phba, KERN_ERR, LOG_LIBDFC,
++ "1213 FCoE cmd overflow: off %d + cnt %d > cmdsz %d\n",
++ (int)sysfs_menlo->cr.offset,
++ (int)count,
++ (int)sysfs_menlo->cmdhdr.cmdsize);
++ sysfs_menlo_idle(phba, sysfs_menlo);
++ return -ERANGE;
++ }
++ }
++
++ spin_lock_irq(&phba->hbalock);
++ if (sysfs_menlo->cmdhdr.cmd == MENLO_CMD_FW_DOWNLOAD)
++ genreq = &sysfs_menlo->cx;
++ else
++ genreq = &sysfs_menlo->cr;
++
++ if (off == 0) {
++ /* Header chunk for fw download goes into the CR exchange. */
++ if (sysfs_menlo->cmdhdr.cmd == MENLO_CMD_FW_DOWNLOAD) {
++ tmpptr = NULL;
++ genreq = &sysfs_menlo->cr;
++
++ if (!mlast) {
++ mlast = list_get_first(&genreq->inhead,
++ struct lpfc_dmabufext,
++ dma.list);
++ }
++ if (mlast) {
++ bpl = genreq->cmdbpl;
++ memcpy((uint8_t *) mlast->dma.virt, buf, count);
++ genreq->offset += count;
++ tmpptr = (uint32_t *)mlast->dma.virt;
++ lpfc_printf_log(phba, KERN_INFO, LOG_LIBDFC,
++ "1258 cmd %x cmdsz %d rspsz %d "
++ "copied %d addrL:%x addrH:%x\n",
++ *tmpptr,
++ *(tmpptr+1),
++ *(tmpptr+2),
++ (int)count,
++ bpl->addrLow,bpl->addrHigh);
++ } else {
++ lpfc_printf_log(phba, KERN_ERR, LOG_LIBDFC,
++ "1230 Could not find buffer for FCoE"
++ " cmd:off %d indmp %p %d\n", (int)off,
++ genreq->indmp,(int)count);
++ }
++ }
++
++ sysfs_menlo->state = SMENLO_WRITING;
++ spin_unlock_irq(&phba->hbalock);
++ return count;
++ } else {
++ ssize_t adj_off = off - sizeof(struct lpfc_sysfs_menlo_hdr);
++ int found = 0;
++ if (sysfs_menlo->state != SMENLO_WRITING ||
++ genreq->offset != adj_off) {
++ spin_unlock_irq(&phba->hbalock);
++ sysfs_menlo_idle(phba, sysfs_menlo);
++ return -EAGAIN;
++ }
++ /* Walk the 4K buffers to the one covering this offset. */
++ mlast = NULL;
++ temp_off = sizeof(struct lpfc_sysfs_menlo_hdr);
++ if (genreq->indmp) {
++ list_for_each_entry(mlast,
++ &genreq->inhead, dma.list) {
++ if (temp_off == off)
++ break;
++ else
++ temp_off += BUF_SZ_4K;
++ mlastcnt++;
++ }
++ }
++ /*
++ * NOTE(review): if indmp is NULL or the walk above runs off
++ * the end without a break, mlast is not a valid element here
++ * (it is the list_for_each_entry cursor, or NULL) -- TODO
++ * confirm the state/offset checks make that impossible before
++ * the dereference below.
++ */
++ addr_low = le32_to_cpu( putPaddrLow(mlast->dma.phys) );
++ addr_high = le32_to_cpu( putPaddrHigh(mlast->dma.phys) );
++ bpl = genreq->cmdbpl;
++ bpl += mlastcnt;
++ if (bpl->addrLow != addr_low || bpl->addrHigh != addr_high) {
++ /* BPL entry does not match the buffer we walked to;
++ * fall back to a linear search by DMA address. */
++ mlast = NULL;
++ list_for_each_entry(mlast,
++ &genreq->inhead, dma.list) {
++
++ addr_low = le32_to_cpu(
++ putPaddrLow(mlast->dma.phys) );
++ addr_high = le32_to_cpu(
++ putPaddrHigh(mlast->dma.phys) );
++ if (bpl->addrLow == addr_low
++ && bpl->addrHigh == addr_high) {
++ found = 1;
++ break;
++ }
++ if ( mlastcnt < 3 )
++ lpfc_printf_log(phba, KERN_INFO, LOG_LIBDFC,
++ "1234 menlo_write: off:%d "
++ " mlastcnt:%d addl:%x addl:%x "
++ " addrh:%x addrh:%x mlast:%p\n",
++ (int)genreq->offset,
++ mlastcnt,
++ bpl->addrLow,
++ addr_low,
++ bpl->addrHigh,
++ addr_high,mlast);
++ }
++ } else
++ found = 1;
++
++ if (!found) {
++ lpfc_printf_log(phba, KERN_ERR, LOG_LIBDFC,
++ "1235 Could not find buffer for FCoE"
++ " cmd: off:%d poff:%d cnt:%d"
++ " mlastcnt:%d addl:%x addh:%x mdsz:%d \n",
++ (int)genreq->offset,
++ (int)off,
++ (int)count,
++ mlastcnt,
++ bpl->addrLow,
++ bpl->addrHigh,
++ (int)sysfs_menlo->cmdhdr.cmdsize);
++ mlast = NULL;
++ }
++
++ }
++
++ if (mlast) {
++ if (sysfs_menlo->cmdhdr.cmd == MENLO_CMD_FW_DOWNLOAD ) {
++ /* Strip the header from the first payload chunk;
++ * record the per-buffer size in its BPL entry. */
++ bpl = genreq->cmdbpl;
++ bpl += mlastcnt;
++ tmpptr = (uint32_t *)mlast->dma.virt;
++ if ( genreq->offset < hdr_offset ) {
++ memcpy((uint8_t *) mlast->dma.virt,
++ buf+hdr_offset,
++ count-hdr_offset);
++ bpl->tus.f.bdeSize = (ushort)count-hdr_offset;
++ mlast->size = (ushort)count-hdr_offset;
++ } else {
++ memcpy((uint8_t *) mlast->dma.virt, buf, count);
++ bpl->tus.f.bdeSize = (ushort)count;
++ mlast->size = (ushort)count;
++ }
++ bpl->tus.f.bdeFlags = BUFF_TYPE_BDE_64;
++ bpl->tus.w = le32_to_cpu(bpl->tus.w);
++
++ } else
++ memcpy((uint8_t *) mlast->dma.virt, buf, count);
++
++ /* The stripped header still needs to go out on the CR
++ * exchange for firmware download. */
++ if (sysfs_menlo->cmdhdr.cmd == MENLO_CMD_FW_DOWNLOAD
++ && genreq->offset < hdr_offset) {
++ if (sysfs_menlo->cr.indmp
++ && sysfs_menlo->cr.indmp->dma.virt) {
++ mlast = sysfs_menlo->cr.indmp;
++ memcpy((uint8_t *) mlast->dma.virt,
++ buf, hdr_offset);
++ tmpptr = (uint32_t *)mlast->dma.virt;
++ lpfc_printf_log(phba, KERN_INFO, LOG_LIBDFC,
++ "1237 cmd %x cmd1 %x cmd2 %x "
++ "copied %d\n",
++ *tmpptr,
++ *(tmpptr+1),
++ *(tmpptr+2),
++ hdr_offset);
++ }
++ }
++ genreq->offset += count;
++ } else {
++ spin_unlock_irq(&phba->hbalock);
++ sysfs_menlo_idle(phba,sysfs_menlo);
++ return -ERANGE;
++ }
++
++ spin_unlock_irq(&phba->hbalock);
++ return count;
++
++}
++
++
++/*
++ * sysfs_menlo_write - sysfs bin_attribute .write hook; resolves the HBA
++ * behind the kobject and forwards to lpfc_menlo_write().
++ */
++static ssize_t
++sysfs_menlo_write(struct kobject *kobj, struct bin_attribute *bin_attr,
++ char *buf, loff_t off, size_t count)
++{
++ struct Scsi_Host *shost;
++ struct lpfc_vport *vport;
++
++ shost = class_to_shost(container_of(kobj, struct device, kobj));
++ vport = (struct lpfc_vport *) shost->hostdata;
++ return lpfc_menlo_write(vport->phba, buf, off, count);
++}
++
++
++/*
++ * sysfs_menlo_issue_iocb_wait - issue the staged GEN_REQUEST64 and block
++ * until it completes or times out.
++ * @phba: HBA to issue on (ELS ring).
++ * @req: exchange holding cmd/rsp iocbs and the timeout.
++ * @sysfs_menlo: owning context; on timeout it is handed to the late
++ * completion handler (via context3), which will free it.
++ *
++ * Returns 0 on success, -EACCES on timeout (context ownership transferred),
++ * or another negative errno mapped from the IOCB status.
++ * NOTE: near-duplicate of sysfs_menlo_issue_iocb(); only the wait/async
++ * issue call and log ids differ.
++ */
++static ssize_t
++sysfs_menlo_issue_iocb_wait(struct lpfc_hba *phba,
++ struct lpfc_menlo_genreq64 *req,
++ struct lpfc_sysfs_menlo *sysfs_menlo)
++{
++ struct lpfc_sli *psli = NULL;
++ struct lpfc_sli_ring *pring = NULL;
++ int rc = 0;
++ IOCB_t *rsp = NULL;
++ struct lpfc_iocbq *cmdiocbq = NULL;
++
++ psli = &phba->sli;
++ pring = &psli->ring[LPFC_ELS_RING];
++ rsp = &req->rspiocbq->iocb;
++ cmdiocbq = req->cmdiocbq;
++
++ rc = lpfc_sli_issue_iocb_wait(phba, pring, req->cmdiocbq, req->rspiocbq,
++ req->timeout);
++
++ if (rc == IOCB_TIMEDOUT) {
++
++ /* Hand the context to the late-completion handler; the
++ * caller must not free it after seeing -EACCES. */
++ cmdiocbq->context2 = NULL;
++ cmdiocbq->context3 = sysfs_menlo;
++ cmdiocbq->iocb_cmpl = lpfc_menlo_iocb_timeout_cmpl;
++ lpfc_printf_log(phba, KERN_WARNING, LOG_LIBDFC,
++ "1227 FCoE IOCB TMO: handler set for %p\n",
++ cmdiocbq->context3);
++ return -EACCES;
++ }
++
++ if (rc != IOCB_SUCCESS) {
++ rc = -EFAULT;
++ lpfc_printf_log(phba, KERN_ERR, LOG_LIBDFC,
++ "1216 FCoE IOCB failed: off %d rc=%d \n",
++ (int)req->offset, rc);
++ goto sysfs_menlo_issue_iocb_wait_exit;
++ }
++
++ if (rsp->ulpStatus) {
++ if (rsp->ulpStatus == IOSTAT_LOCAL_REJECT) {
++ /* Map the local-reject reason to an errno. */
++ switch (rsp->un.ulpWord[4] & 0xff) {
++ case IOERR_SEQUENCE_TIMEOUT:
++ rc = -ETIMEDOUT;
++ break;
++ case IOERR_INVALID_RPI:
++ rc = -EFAULT;
++ break;
++ default:
++ rc = -EFAULT;
++ break;
++ }
++ lpfc_printf_log(phba, KERN_INFO, LOG_LIBDFC,
++ "1217 mlo_issueIocb:2 off %d rc=%d "
++ "ulpWord[4] 0x%x\n",
++ (int)req->offset, rc, rsp->un.ulpWord[4]);
++ }
++ }
++sysfs_menlo_issue_iocb_wait_exit:
++ return rc;
++}
++
++
++/*
++ * sysfs_menlo_issue_iocb - issue the staged GEN_REQUEST64 asynchronously.
++ * @phba: HBA to issue on (ELS ring).
++ * @req: exchange holding cmd/rsp iocbs.
++ * @sysfs_menlo: owning context; passed to lpfc_menlo_iocb_cmpl() via
++ * context2, which frees it on completion.
++ *
++ * Returns 0 when the IOCB was queued; the response is examined in the
++ * completion handler.  NOTE: near-duplicate of
++ * sysfs_menlo_issue_iocb_wait(); only the issue call and log ids differ.
++ */
++static ssize_t
++sysfs_menlo_issue_iocb(struct lpfc_hba *phba, struct lpfc_menlo_genreq64 *req,
++ struct lpfc_sysfs_menlo *sysfs_menlo)
++{
++ struct lpfc_sli *psli = NULL;
++ struct lpfc_sli_ring *pring = NULL;
++ int rc = 0;
++ IOCB_t *rsp = NULL;
++ struct lpfc_iocbq *cmdiocbq = NULL;
++
++ psli = &phba->sli;
++ pring = &psli->ring[LPFC_ELS_RING];
++ rsp = &req->rspiocbq->iocb;
++ cmdiocbq = req->cmdiocbq;
++ /* Async completion owns the context from here on success. */
++ cmdiocbq->context2 = sysfs_menlo;
++ cmdiocbq->iocb_cmpl = lpfc_menlo_iocb_cmpl;
++ lpfc_printf_log(phba, KERN_WARNING, LOG_LIBDFC,
++ "1257 lpfc_menlo_issue_iocb: handler set for %p\n",
++ cmdiocbq->context3);
++
++ rc = lpfc_sli_issue_iocb(phba, pring, req->cmdiocbq, 0);
++
++ if (rc == IOCB_TIMEDOUT) {
++
++ cmdiocbq->context2 = NULL;
++ cmdiocbq->context3 = sysfs_menlo;
++ cmdiocbq->iocb_cmpl = lpfc_menlo_iocb_timeout_cmpl;
++ lpfc_printf_log(phba, KERN_WARNING, LOG_LIBDFC,
++ "1228 FCoE IOCB TMO: handler set for %p\n",
++ cmdiocbq->context3);
++ return -EACCES;
++ }
++
++ if (rc != IOCB_SUCCESS) {
++ rc = -EFAULT;
++ lpfc_printf_log(phba, KERN_ERR, LOG_LIBDFC,
++ "1238 FCoE IOCB failed: off %d rc=%d \n",
++ (int)req->offset, rc);
++ goto sysfs_menlo_issue_iocb_exit;
++ }
++
++ if (rsp->ulpStatus) {
++ if (rsp->ulpStatus == IOSTAT_LOCAL_REJECT) {
++ /* Map the local-reject reason to an errno. */
++ switch (rsp->un.ulpWord[4] & 0xff) {
++ case IOERR_SEQUENCE_TIMEOUT:
++ rc = -ETIMEDOUT;
++ break;
++ case IOERR_INVALID_RPI:
++ rc = -EFAULT;
++ break;
++ default:
++ rc = -EFAULT;
++ break;
++ }
++ lpfc_printf_log(phba, KERN_INFO, LOG_LIBDFC,
++ "1239 mlo_issueIocb:2 off %d rc=%d "
++ "ulpWord[4] 0x%x\n",
++ (int)req->offset, rc, rsp->un.ulpWord[4]);
++ }
++ }
++sysfs_menlo_issue_iocb_exit:
++ return rc;
++}
++
++/*
++ * lpfc_menlo_read - issue the staged Menlo command and stream its response.
++ * @phba: HBA owning the per-process menlo context.
++ * @buf: destination for response bytes.
++ * @off: read offset into the response stream.
++ * @count: bytes requested; off, count and buf must be 4-byte aligned.
++ * @wait: non-zero issues synchronously; zero issues asynchronously and
++ * returns at once (the completion handler frees the context).
++ *
++ * On the first read of a context in SMENLO_WRITING state the staged CR
++ * exchange is issued (plus a follow-up CX exchange for firmware download),
++ * then response data is copied out of the 4K DMA buffers.  The context is
++ * retired once the whole response is consumed or on error.
++ */
++static ssize_t
++lpfc_menlo_read(struct lpfc_hba *phba, char *buf, loff_t off, size_t count,
++ int wait)
++{
++ struct lpfc_sli *psli = NULL;
++ struct lpfc_sli_ring *pring = NULL;
++ int rc = 0;
++ struct lpfc_sysfs_menlo *sysfs_menlo;
++ struct lpfc_dmabufext *mlast = NULL;
++ loff_t temp_off = 0;
++ struct lpfc_menlo_genreq64 *genreq = NULL;
++ IOCB_t *cmd = NULL, *rsp = NULL;
++ uint32_t * uptr = NULL;
++
++
++ psli = &phba->sli;
++ pring = &psli->ring[LPFC_ELS_RING];
++
++ if (off > SYSFS_MENLO_ATTR_SIZE)
++ return -ERANGE;
++
++ if ((count + off) > SYSFS_MENLO_ATTR_SIZE)
++ count = SYSFS_MENLO_ATTR_SIZE - off;
++
++ if (off % 4 || count % 4 || (unsigned long)buf % 4)
++ return -EINVAL;
++
++ if (off && count == 0)
++ return 0;
++
++ sysfs_menlo = lpfc_get_sysfs_menlo(phba, 0);
++
++ if (!sysfs_menlo)
++ return -EPERM;
++
++ if (!(psli->sli_flag & LPFC_SLI2_ACTIVE)) {
++ sysfs_menlo_idle(phba, sysfs_menlo);
++ lpfc_printf_log(phba, KERN_ERR, LOG_LIBDFC,
++ "1214 Can not issue FCoE cmd,"
++ " SLI not active: off %d rc= -EACCESS\n",
++ (int)off);
++ return -EACCES;
++ }
++
++
++ if ((phba->link_state < LPFC_LINK_UP)
++ && !(psli->sli_flag & LPFC_MENLO_MAINT)
++ && wait) {
++ rc = -EPERM;
++ lpfc_printf_log(phba, KERN_ERR, LOG_LIBDFC,
++ "1215 Can not issue FCoE cmd:"
++ " not ready or not in maint mode"
++ " off %d rc=%d \n",
++ (int)off, rc);
++ spin_lock_irq(&phba->hbalock);
++ goto lpfc_menlo_read_err_exit;
++ }
++
++ /* First read after the write phase: fire the staged command. */
++ if (off == 0 && sysfs_menlo->state == SMENLO_WRITING) {
++ if (sysfs_menlo->cmdhdr.cmd == MENLO_CMD_FW_DOWNLOAD) {
++ spin_lock_irq(&phba->hbalock);
++ genreq = &sysfs_menlo->cr;
++ spin_unlock_irq(&phba->hbalock);
++ }
++ if ( wait )
++ rc = sysfs_menlo_issue_iocb_wait(phba,
++ &sysfs_menlo->cr,
++ sysfs_menlo);
++ else {
++ /* Async: completion handler owns and frees ctx. */
++ rc = sysfs_menlo_issue_iocb(phba,
++ &sysfs_menlo->cr,
++ sysfs_menlo);
++ return rc;
++ }
++
++ spin_lock_irq(&phba->hbalock);
++ if (rc < 0) {
++ lpfc_printf_log(phba, KERN_WARNING, LOG_LIBDFC,
++ "1224 FCoE iocb failed: off %d rc=%d \n",
++ (int)off, rc);
++ /* -EACCES == timeout: ctx now owned by the late
++ * completion handler, must not free it here. */
++ if (rc != -EACCES)
++ goto lpfc_menlo_read_err_exit;
++ else {
++ spin_unlock_irq(&phba->hbalock);
++ return rc;
++ }
++ }
++
++ if (sysfs_menlo->cmdhdr.cmd == MENLO_CMD_FW_DOWNLOAD) {
++ cmd = &sysfs_menlo->cx.cmdiocbq->iocb;
++ rsp = &sysfs_menlo->cr.rspiocbq->iocb;
++ mlast = list_get_first(&sysfs_menlo->cr.outhead,
++ struct lpfc_dmabufext,
++ dma.list);
++ /*
++ * NOTE(review): unlike the other call sites, this
++ * list_get_first() result is dereferenced without a
++ * NULL check -- TODO confirm cr.outhead can never be
++ * empty on this path.
++ */
++ if ( *((uint32_t *) mlast->dma.virt) != 0 ) {
++ memcpy(buf,(uint8_t *) mlast->dma.virt, count);
++ goto lpfc_menlo_read_err_exit;
++ }
++ mlast = NULL;
++
++ /* Chain the CX exchange onto the CR's exchange id. */
++ cmd->ulpCommand = CMD_GEN_REQUEST64_CX;
++ cmd->ulpContext = rsp->ulpContext;
++ cmd->ulpPU = 1; /* RelOffset */
++ cmd->un.ulpWord[4] = 0; /* offset 0 */
++
++ spin_unlock_irq(&phba->hbalock);
++ rc = sysfs_menlo_issue_iocb_wait(phba, &sysfs_menlo->cx,
++ sysfs_menlo);
++ spin_lock_irq(&phba->hbalock);
++ if (rc < 0) {
++ uptr = (uint32_t *) rsp;
++
++ lpfc_printf_log(phba, KERN_INFO, LOG_LIBDFC,
++ "1225 menlo_read: off %d rc=%d "
++ "rspxri %d cmdxri %d \n",
++ (int)off, rc, rsp->ulpContext,
++ cmd->ulpContext);
++ uptr = (uint32_t *)
++ &sysfs_menlo->cr.cmdiocbq->iocb;
++ lpfc_printf_log(phba, KERN_INFO, LOG_LIBDFC,
++ "1236 cr:%.08x %.08x %.08x %.08x "
++ "%.08x %.08x %.08x %.08x %.08x\n",
++ *uptr, *(uptr+1), *(uptr+2),
++ *(uptr+3), *(uptr+4), *(uptr+5),
++ *(uptr+6), *(uptr+7), *(uptr+8));
++ uptr = (uint32_t *)rsp;
++ lpfc_printf_log(phba, KERN_INFO, LOG_LIBDFC,
++ "1232 cr-rsp:%.08x %.08x %.08x %.08x "
++ "%.08x %.08x %.08x %.08x %.08x\n",
++ *uptr, *(uptr+1), *(uptr+2),
++ *(uptr+3), *(uptr+4), *(uptr+5),
++ *(uptr+6), *(uptr+7), *(uptr+8));
++ uptr = (uint32_t *)cmd;
++ lpfc_printf_log(phba, KERN_INFO, LOG_LIBDFC,
++ "1233 cx:%.08x %.08x %.08x %.08x "
++ "%.08x %.08x %.08x %.08x %.08x\n",
++ *uptr, *(uptr+1), *(uptr+2),
++ *(uptr+3), *(uptr+4), *(uptr+5),
++ *(uptr+6), *(uptr+7), *(uptr+8));
++ if (rc != -EACCES)
++ goto lpfc_menlo_read_err_exit;
++ else {
++ spin_unlock_irq(&phba->hbalock);
++ return rc;
++ }
++ }
++ }
++ sysfs_menlo->state = SMENLO_READING;
++ sysfs_menlo->cr.offset = 0;
++
++ } else
++ spin_lock_irq(&phba->hbalock);
++
++ if (sysfs_menlo->cmdhdr.cmd == MENLO_CMD_FW_DOWNLOAD)
++ genreq = &sysfs_menlo->cx;
++ else
++ genreq = &sysfs_menlo->cr;
++
++ /* Copy back response data */
++ if (sysfs_menlo->cmdhdr.rspsize > count) {
++ lpfc_printf_log(phba, KERN_INFO, LOG_LIBDFC,
++ "1218 MloMgnt Rqst err Data: x%x %d %d %d %d\n",
++ genreq->outdmp->flag,
++ sysfs_menlo->cmdhdr.rspsize,
++ (int)count, (int)off, (int)genreq->offset);
++ }
++
++ if (phba->sli.sli_flag & LPFC_BLOCK_MGMT_IO) {
++ rc = -EAGAIN;
++ lpfc_printf_log(phba, KERN_INFO, LOG_LIBDFC,
++ "1219 menlo_read:4 off %d rc=%d \n",
++ (int)off, rc);
++ goto lpfc_menlo_read_err_exit;
++ }
++ else if ( sysfs_menlo->state != SMENLO_READING) {
++ rc = -EAGAIN;
++ lpfc_printf_log(phba, KERN_INFO, LOG_LIBDFC,
++ "1220 menlo_read:5 off %d reg off %d rc=%d state %x\n",
++ (int)off,(int)genreq->offset, sysfs_menlo->state, rc);
++ goto lpfc_menlo_read_err_exit;
++ }
++ /* Walk the 4K response buffers to the one covering @off. */
++ temp_off = 0;
++ mlast = NULL;
++ list_for_each_entry(mlast, &genreq->outhead, dma.list) {
++ if (temp_off == off)
++ break;
++ else
++ temp_off += BUF_SZ_4K;
++ }
++ if (mlast)
++ memcpy(buf,(uint8_t *) mlast->dma.virt, count);
++ else {
++ rc = -ERANGE;
++ lpfc_printf_log(phba, KERN_INFO, LOG_LIBDFC,
++ "1221 menlo_read:6 off %d rc=%d \n",
++ (int)off, rc);
++ goto lpfc_menlo_read_err_exit;
++ }
++ genreq->offset += count;
++
++
++ /* Whole response delivered: retire the context, return count. */
++ if (genreq->offset >= sysfs_menlo->cmdhdr.rspsize) {
++ lpfc_printf_log(phba, KERN_INFO, LOG_LIBDFC,
++ "1222 menlo_read: done off %d rc=%d"
++ " cnt %d rsp_code %x\n",
++ (int)off, rc, (int)count,*((uint32_t *)buf));
++ rc = count;
++ goto lpfc_menlo_read_err_exit;
++ }
++
++ if (count >= sysfs_menlo->cmdhdr.rspsize)
++ rc = sysfs_menlo->cmdhdr.rspsize;
++ else /* Can there be a > 4k response */
++ rc = count;
++ if (genreq->offset < sysfs_menlo->cmdhdr.rspsize) {
++ spin_unlock_irq(&phba->hbalock);
++ return rc;
++ }
++
++lpfc_menlo_read_err_exit:
++ spin_unlock_irq(&phba->hbalock);
++ sysfs_menlo_idle(phba,sysfs_menlo);
++ return rc;
++}
++
++
++/*
++ * sysfs_menlo_read - sysfs bin_attribute .read hook; resolves the HBA
++ * behind the kobject and forwards to lpfc_menlo_read() in waiting mode.
++ */
++static ssize_t
++sysfs_menlo_read(struct kobject *kobj, struct bin_attribute *bin_attr,
++ char *buf, loff_t off, size_t count)
++{
++ struct Scsi_Host *shost;
++ struct lpfc_vport *vport;
++
++ shost = class_to_shost(container_of(kobj, struct device, kobj));
++ vport = (struct lpfc_vport *) shost->hostdata;
++ return lpfc_menlo_read(vport->phba, buf, off, count, 1);
++}
++/* When set, lpfc_check_menlo_cfg() returns immediately (a non-blocking
++ * issue_iocb path would be required -- see the guard in that function). */
++int need_non_blocking = 0;
++/*
++ * lpfc_check_menlo_cfg - query the FCoE (Menlo) chip for its firmware state.
++ * @phba: HBA to interrogate.
++ *
++ * Stages a MENLO_CMD_GET_INIT command through the sysfs menlo write path
++ * (header first, then the byte-swapped payload) and issues it via the
++ * non-waiting read path; lpfc_menlo_iocb_cmpl() reports the firmware type
++ * when the exchange completes.  All errors are logged and swallowed.
++ */
++void lpfc_check_menlo_cfg(struct lpfc_hba *phba)
++{
++ uint32_t cmd_size;
++ uint32_t rsp_size;
++ menlo_get_cmd_t *cmd = NULL;
++ menlo_init_rsp_t *rsp = NULL;
++ int rc = 0;
++
++ lpfc_printf_log (phba, KERN_INFO, LOG_LINK_EVENT,
++ "1253 Checking FCoE chip firmware.\n");
++ if ( need_non_blocking ) /* Need non blocking issue_iocb */
++ return;
++
++ cmd_size = sizeof (menlo_get_cmd_t);
++ cmd = kmalloc(cmd_size, GFP_KERNEL);
++ if (!cmd ) {
++ lpfc_printf_log (phba, KERN_ERR, LOG_LINK_EVENT,
++ "1240 Unable to allocate command buffer memory.\n");
++ return;
++ }
++
++ rsp_size = sizeof (menlo_init_rsp_t);
++ rsp = kmalloc(rsp_size, GFP_KERNEL);
++ if (!rsp ) {
++ lpfc_printf_log (phba, KERN_ERR, LOG_LINK_EVENT,
++ "1244 Unable to allocate response buffer memory.\n");
++ /* Fix: was kfree(rsp) -- a NULL no-op that leaked cmd. */
++ kfree(cmd);
++ return;
++ }
++
++ memset(cmd,0, cmd_size);
++ memset(rsp,0, rsp_size);
++
++ /* Write 1: the command header (native byte order). */
++ cmd->code = MENLO_CMD_GET_INIT;
++ cmd->context = cmd_size;
++ cmd->length = rsp_size;
++ rc = lpfc_menlo_write (phba, (char *) cmd, 0, cmd_size);
++ if ( rc != cmd_size ) {
++ lpfc_printf_log (phba, KERN_ERR, LOG_LINK_EVENT,
++ "1250 Menlo command error. code=%d.\n", rc);
++
++ kfree (cmd);
++ kfree (rsp);
++ return;
++ }
++ /* Write 2: the payload, byte-swapped for the chip. */
++ cmd->code = MENLO_CMD_GET_INIT;
++ cmd->context = 0;
++ cmd->length = rsp_size;
++ BE_swap32_buffer ((uint8_t *) cmd, cmd_size);
++ rc = lpfc_menlo_write (phba, (char *) cmd, cmd_size, cmd_size);
++ if ( rc != cmd_size ) {
++ lpfc_printf_log (phba, KERN_ERR, LOG_LINK_EVENT,
++ "1251 Menlo command error. code=%d.\n", rc);
++
++ kfree (cmd);
++ kfree (rsp);
++ return;
++ }
++ /* Issue asynchronously (wait == 0); completion handler reports. */
++ rc = lpfc_menlo_read (phba, (char *) rsp, 0, rsp_size,0);
++ if ( rc && rc != rsp_size ) {
++ lpfc_printf_log (phba, KERN_ERR, LOG_LINK_EVENT,
++ "1252 Menlo command error. code=%d.\n", rc);
++
++ }
++ kfree (cmd);
++ kfree (rsp);
++ return;
++}
++
++/*
++ * Binary sysfs attribute "menlo": userspace management channel to the FCoE
++ * (Menlo) chip.  Writes stage a command (header chunk first, then payload);
++ * reads issue the staged command and stream back the response.
++ */
++struct bin_attribute sysfs_menlo_attr = {
++ .attr = {
++ .name = "menlo",
++ .mode = S_IRUSR | S_IWUSR,
++ .owner = THIS_MODULE,
++ },
++ .size = SYSFS_MENLO_ATTR_SIZE,
++ .read = sysfs_menlo_read,
++ .write = sysfs_menlo_write,
++};
+--- a/drivers/scsi/lpfc/lpfc_scsi.c
++++ b/drivers/scsi/lpfc/lpfc_scsi.c
+@@ -42,7 +42,6 @@
+
+ #define LPFC_RESET_WAIT 2
+ #define LPFC_ABORT_WAIT 2
+-
+ /**
+ * lpfc_update_stats: Update statistical data for the command completion.
+ * @phba: Pointer to HBA object.
+@@ -336,6 +335,22 @@ lpfc_scsi_dev_block(struct lpfc_hba *phb
+ lpfc_destroy_vport_work_array(phba, vports);
+ }
+
++/*
++ * lpfc_scsi_dev_rescan - trigger a SCSI scan on every active vport's host.
++ * @phba: HBA whose vports are rescanned.
++ */
++void
++lpfc_scsi_dev_rescan(struct lpfc_hba *phba)
++{
++ struct lpfc_vport **vports = lpfc_create_vport_work_array(phba);
++ int idx;
++
++ if (vports != NULL) {
++ for (idx = 0; idx <= phba->max_vpi && vports[idx] != NULL;
++ idx++)
++ scsi_scan_host(lpfc_shost_from_vport(vports[idx]));
++ }
++ lpfc_destroy_vport_work_array(phba, vports);
++}
++
+ /*
+ * This routine allocates a scsi buffer, which contains all the necessary
+ * information needed to initiate a SCSI I/O. The non-DMAable buffer region
+@@ -1841,22 +1856,3 @@ struct scsi_host_template lpfc_template
+ .max_sectors = 0xFFFF,
+ };
+
+-struct scsi_host_template lpfc_vport_template = {
+- .module = THIS_MODULE,
+- .name = LPFC_DRIVER_NAME,
+- .info = lpfc_info,
+- .queuecommand = lpfc_queuecommand,
+- .eh_abort_handler = lpfc_abort_handler,
+- .eh_device_reset_handler= lpfc_device_reset_handler,
+- .eh_bus_reset_handler = lpfc_bus_reset_handler,
+- .slave_alloc = lpfc_slave_alloc,
+- .slave_configure = lpfc_slave_configure,
+- .slave_destroy = lpfc_slave_destroy,
+- .scan_finished = lpfc_scan_finished,
+- .this_id = -1,
+- .sg_tablesize = LPFC_DEFAULT_SG_SEG_CNT,
+- .cmd_per_lun = LPFC_CMD_PER_LUN,
+- .use_clustering = ENABLE_CLUSTERING,
+- .shost_attrs = lpfc_vport_attrs,
+- .max_sectors = 0xFFFF,
+-};
+--- /dev/null
++++ b/drivers/scsi/lpfc/lpfc_security.c
+@@ -0,0 +1,339 @@
++/*******************************************************************
++ * This file is part of the Emulex Linux Device Driver for *
++ * Fibre Channel Host Bus Adapters. *
++ * Copyright (C) 2006-2008 Emulex. All rights reserved. *
++ * EMULEX and SLI are trademarks of Emulex. *
++ * www.emulex.com *
++ * *
++ * This program is free software; you can redistribute it and/or *
++ * modify it under the terms of version 2 of the GNU General *
++ * Public License as published by the Free Software Foundation. *
++ * This program is distributed in the hope that it will be useful. *
++ * ALL EXPRESS OR IMPLIED CONDITIONS, REPRESENTATIONS AND *
++ * WARRANTIES, INCLUDING ANY IMPLIED WARRANTY OF MERCHANTABILITY, *
++ * FITNESS FOR A PARTICULAR PURPOSE, OR NON-INFRINGEMENT, ARE *
++ * DISCLAIMED, EXCEPT TO THE EXTENT THAT SUCH DISCLAIMERS ARE HELD *
++ * TO BE LEGALLY INVALID. See the GNU General Public License for *
++ * more details, a copy of which can be found in the file COPYING *
++ * included with this package. *
++ *******************************************************************/
++
++#include <linux/delay.h>
++#include <linux/pci.h>
++#include <linux/interrupt.h>
++
++#include <scsi/scsi_tcq.h>
++#include <scsi/scsi_transport_fc.h>
++
++#include "lpfc_hw.h"
++#include "lpfc_sli.h"
++#include "lpfc_nl.h"
++#include "lpfc_disc.h"
++#include "lpfc.h"
++#include "lpfc_crtn.h"
++#include "lpfc_logmsg.h"
++#include "lpfc_security.h"
++#include "lpfc_auth_access.h"
++#include "lpfc_vport.h"
++
++/* Whether the userspace authentication service is currently reachable. */
++uint8_t lpfc_security_service_state = SECURITY_OFFLINE;
++
++/*
++ * lpfc_security_service_online - record that the auth service connected.
++ * @shost: SCSI host whose vport configuration is examined.
++ *
++ * If authentication is enabled but the mode was never learned and the HBA
++ * sits in error state, reset it so discovery can retry with the service
++ * now available.
++ */
++void
++lpfc_security_service_online(struct Scsi_Host *shost)
++{
++ struct lpfc_vport *vport = (struct lpfc_vport *)shost->hostdata;
++
++ lpfc_security_service_state = SECURITY_ONLINE;
++ if (vport->cfg_enable_auth &&
++ vport->auth.auth_mode == FC_AUTHMODE_UNKNOWN &&
++ vport->phba->link_state == LPFC_HBA_ERROR)
++ lpfc_selective_reset(vport->phba);
++}
++
++/*
++ * lpfc_security_service_offline - record that the auth service went away.
++ * @shost: unused; kept for symmetry with the online callback.
++ */
++void
++lpfc_security_service_offline(struct Scsi_Host *shost)
++{
++ lpfc_security_service_state = SECURITY_OFFLINE;
++}
++
++void
++lpfc_security_config(struct Scsi_Host *shost, int status, void *rsp)
++{
++ struct fc_auth_rsp *auth_rsp = (struct fc_auth_rsp *)rsp;
++ struct lpfc_vport *vport = (struct lpfc_vport *)shost->hostdata;
++ struct lpfc_nodelist *ndlp;
++ uint32_t old_interval, new_interval;
++ unsigned long new_jiffies, temp_jiffies;
++ uint8_t last_auth_mode;
++
++ if (status)
++ return;
++ ndlp = lpfc_findnode_did(vport, Fabric_DID);
++ if (!ndlp || !NLP_CHK_NODE_ACT(ndlp))
++ return;
++
++ vport->auth.bidirectional =
++ auth_rsp->u.dhchap_security_config.bidirectional;
++ memcpy(&vport->auth.hash_priority[0],
++ &auth_rsp->u.dhchap_security_config.hash_priority[0],
++ sizeof(vport->auth.hash_priority));
++ vport->auth.hash_len = auth_rsp->u.dhchap_security_config.hash_len;
++ memcpy(&vport->auth.dh_group_priority[0],
++ &auth_rsp->u.dhchap_security_config.
++ dh_group_priority[0],
++ sizeof(vport->auth.dh_group_priority));
++ vport->auth.dh_group_len =
++ auth_rsp->u.dhchap_security_config.dh_group_len;
++ old_interval = vport->auth.reauth_interval;
++ vport->auth.reauth_interval =
++ auth_rsp->u.dhchap_security_config.reauth_interval;
++ new_interval = vport->auth.reauth_interval;
++ /*
++ * If interval changed we need to adjust the running timer
++ * If enabled then start timer now.
++ * If disabled then stop the timer.
++ * If changed to shorter than elapsed time, then set to fire now
++ * If changed to longer than elapsed time, extend the timer.
++ */
++ if (old_interval != new_interval &&
++ vport->auth.auth_state == LPFC_AUTH_SUCCESS) {
++ new_jiffies = msecs_to_jiffies(new_interval * 60000);
++ del_timer_sync(&ndlp->nlp_reauth_tmr);
++ if (old_interval == 0)
++ temp_jiffies = jiffies + new_jiffies;
++ if (new_interval == 0)
++ temp_jiffies = 0;
++ else if (new_jiffies < (jiffies - vport->auth.last_auth))
++ temp_jiffies = jiffies + msecs_to_jiffies(1);
++ else
++ temp_jiffies = jiffies + (new_jiffies -
++ (jiffies - vport->auth.last_auth));
++ if (temp_jiffies)
++ mod_timer(&ndlp->nlp_reauth_tmr, temp_jiffies);
++ }
++ last_auth_mode = vport->auth.auth_mode;
++ vport->auth.auth_mode =
++ auth_rsp->u.dhchap_security_config.auth_mode;
++ lpfc_printf_vlog(vport, KERN_INFO, LOG_SECURITY,
++ "1025 Received security config local_wwpn:"
++ "%llX remote_wwpn:%llX \nmode:0x%x "
++ "hash(%d):%x:%x:%x:%x bidir:0x%x "
++ "dh_group(%d):%x:%x:%x:%x:%x:%x:%x:%x "
++ "reauth_interval:0x%x\n",
++ (unsigned long long)auth_rsp->local_wwpn,
++ (unsigned long long)auth_rsp->remote_wwpn,
++ auth_rsp->u.dhchap_security_config.auth_mode,
++ auth_rsp->u.dhchap_security_config.hash_len,
++ auth_rsp->u.dhchap_security_config.hash_priority[0],
++ auth_rsp->u.dhchap_security_config.hash_priority[1],
++ auth_rsp->u.dhchap_security_config.hash_priority[2],
++ auth_rsp->u.dhchap_security_config.hash_priority[3],
++ auth_rsp->u.dhchap_security_config.bidirectional,
++ auth_rsp->u.dhchap_security_config.dh_group_len,
++ auth_rsp->u.dhchap_security_config.dh_group_priority[0],
++ auth_rsp->u.dhchap_security_config.dh_group_priority[1],
++ auth_rsp->u.dhchap_security_config.dh_group_priority[2],
++ auth_rsp->u.dhchap_security_config.dh_group_priority[3],
++ auth_rsp->u.dhchap_security_config.dh_group_priority[4],
++ auth_rsp->u.dhchap_security_config.dh_group_priority[5],
++ auth_rsp->u.dhchap_security_config.dh_group_priority[6],
++ auth_rsp->u.dhchap_security_config.dh_group_priority[7],
++ auth_rsp->u.dhchap_security_config.reauth_interval);
++ kfree(auth_rsp);
++ if (vport->auth.auth_mode == FC_AUTHMODE_ACTIVE)
++ vport->auth.security_active = 1;
++ else if (vport->auth.auth_mode == FC_AUTHMODE_PASSIVE) {
++ if (ndlp->nlp_flag & NLP_SC_REQ)
++ vport->auth.security_active = 1;
++ else {
++ lpfc_printf_vlog(vport, KERN_INFO, LOG_SECURITY,
++ "1038 Authentication not "
++ "required by the fabric. "
++ "Disabled.\n");
++ vport->auth.security_active = 0;
++ }
++ } else {
++ vport->auth.security_active = 0;
++ /*
++ * If the switch requires authentication and authentication
++ * is disabled for this HBA/Fabric port, fail the
++ * discovery.
++ */
++ if (ndlp->nlp_flag & NLP_SC_REQ) {
++ lpfc_printf_vlog(vport, KERN_ERR, LOG_SECURITY,
++ "1050 Authentication mode is "
++ "disabled, but is required by "
++ "the fabric.\n");
++ lpfc_vport_set_state(vport, FC_VPORT_FAILED);
++ /* Cancel discovery timer */
++ lpfc_can_disctmo(vport);
++ }
++ }
++ if (last_auth_mode == FC_AUTHMODE_UNKNOWN) {
++ if (vport->auth.security_active)
++ lpfc_start_authentication(vport, ndlp);
++ else
++ lpfc_start_discovery(vport);
++ }
++}
++
++int
++lpfc_get_security_enabled(struct Scsi_Host *shost)
++{
++ struct lpfc_vport *vport = (struct lpfc_vport *) shost->hostdata;
++
++ return(vport->cfg_enable_auth);
++}
++
++int
++lpfc_security_wait(struct lpfc_hba *phba)
++{
++ int i = 0;
++ if (lpfc_security_service_state == SECURITY_ONLINE)
++ return 0;
++ lpfc_printf_log(phba, KERN_WARNING, LOG_SECURITY,
++ "1058 Waiting for authentication service...\n");
++ while (lpfc_security_service_state == SECURITY_OFFLINE) {
++ i++;
++ if (i > SECURITY_WAIT_TMO * 2)
++ return -ETIMEDOUT;
++ /* Delay for half of a second */
++ msleep(500);
++ }
++ lpfc_printf_log(phba, KERN_WARNING, LOG_SECURITY,
++ "1059 Authentication service online.\n");
++ return 0;
++}
++
++int
++lpfc_security_config_wait(struct lpfc_vport *vport)
++{
++ int i = 0;
++
++ while (vport->auth.auth_mode == FC_AUTHMODE_UNKNOWN) {
++ i++;
++ if (i > 120) {
++ return -ETIMEDOUT;
++ }
++ /* Delay for half of a second */
++ msleep(500);
++ }
++ return 0;
++}
++
++void
++lpfc_reauth_node(unsigned long ptr)
++{
++ struct lpfc_nodelist *ndlp = (struct lpfc_nodelist *) ptr;
++ struct lpfc_vport *vport = ndlp->vport;
++ struct lpfc_hba *phba = vport->phba;
++ unsigned long flags;
++ struct lpfc_work_evt *evtp = &ndlp->els_reauth_evt;
++
++ ndlp = (struct lpfc_nodelist *) ptr;
++ phba = ndlp->vport->phba;
++
++ spin_lock_irqsave(&phba->hbalock, flags);
++ if (!list_empty(&evtp->evt_listp)) {
++ spin_unlock_irqrestore(&phba->hbalock, flags);
++ return;
++ }
++
++ /* We need to hold the node resource by incrementing the reference
++ * count until this queued work is done
++ */
++ evtp->evt_arg1 = lpfc_nlp_get(ndlp);
++ if (evtp->evt_arg1) {
++ evtp->evt = LPFC_EVT_REAUTH;
++ list_add_tail(&evtp->evt_listp, &phba->work_list);
++ lpfc_worker_wake_up(phba);
++ }
++ spin_unlock_irqrestore(&phba->hbalock, flags);
++ return;
++}
++
++void
++lpfc_reauthentication_handler(struct lpfc_nodelist *ndlp)
++{
++ struct lpfc_vport *vport = ndlp->vport;
++ if (vport->auth.auth_msg_state != LPFC_DHCHAP_SUCCESS)
++ return;
++
++ if (lpfc_start_node_authentication(ndlp)) {
++ lpfc_printf_vlog(vport, KERN_ERR, LOG_SECURITY,
++ "1029 Reauthentication Failure\n");
++ if (vport->auth.auth_state == LPFC_AUTH_SUCCESS)
++ lpfc_port_auth_failed(ndlp);
++ }
++}
++
++/*
++ * This function will kick start authentication for a node.
++ * This is used for re-authentication of a node or a user
++ * initiated node authentication.
++ */
++int
++lpfc_start_node_authentication(struct lpfc_nodelist *ndlp)
++{
++ struct lpfc_vport *vport;
++ int ret;
++
++ vport = ndlp->vport;
++ /* If there is an authentication timer running, cancel it */
++ del_timer_sync(&ndlp->nlp_reauth_tmr);
++ ret = lpfc_get_auth_config(ndlp, &ndlp->nlp_portname);
++ if (ret)
++ return ret;
++ ret = lpfc_security_config_wait(vport);
++ if (ret) {
++ lpfc_printf_vlog(vport, KERN_ERR, LOG_SECURITY,
++ "1032 Start Authentication: get config "
++ "timed out.\n");
++ return ret;
++ }
++ return 0;
++}
++
++int
++lpfc_get_auth_config(struct lpfc_nodelist *ndlp, struct lpfc_name *rwwn)
++{
++ struct lpfc_vport *vport;
++ struct fc_auth_req auth_req;
++ struct fc_auth_rsp *auth_rsp;
++ struct Scsi_Host *shost;
++ int ret;
++
++ vport = ndlp->vport;
++ shost = lpfc_shost_from_vport(vport);
++
++ auth_req.local_wwpn = wwn_to_u64(vport->fc_portname.u.wwn);
++ if (ndlp->nlp_type & NLP_FABRIC)
++ auth_req.remote_wwpn = AUTH_FABRIC_WWN;
++ else
++ auth_req.remote_wwpn = wwn_to_u64(rwwn->u.wwn);
++ if (lpfc_security_service_state == SECURITY_OFFLINE) {
++ lpfc_printf_vlog(vport, KERN_ERR, LOG_SECURITY,
++ "1053 Start Authentication: "
++ "Security service offline.\n");
++ return -EINVAL;
++ }
++ auth_rsp = kmalloc(sizeof(struct fc_auth_rsp), GFP_KERNEL);
++ if (!auth_rsp) {
++ lpfc_printf_vlog(vport, KERN_ERR, LOG_SECURITY,
++ "1028 Start Authentication: No buffers\n");
++ return -ENOMEM;
++ }
++ vport->auth.auth_mode = FC_AUTHMODE_UNKNOWN;
++ ret = lpfc_fc_security_get_config(shost, &auth_req,
++ sizeof(struct fc_auth_req),
++ auth_rsp,
++ sizeof(struct fc_auth_rsp));
++ if (ret) {
++ lpfc_printf_vlog(vport, KERN_ERR, LOG_SECURITY,
++ "1031 Start Authentication: Get config "
++ "failed.\n");
++ kfree(auth_rsp);
++ return ret;
++ }
++ return 0;
++}
+--- /dev/null
++++ b/drivers/scsi/lpfc/lpfc_security.h
+@@ -0,0 +1,24 @@
++/*******************************************************************
++ * This file is part of the Emulex Linux Device Driver for *
++ * Fibre Channel Host Bus Adapters. *
++ * Copyright (C) 2006-2007 Emulex. All rights reserved. *
++ * EMULEX and SLI are trademarks of Emulex. *
++ * www.emulex.com *
++ * *
++ * This program is free software; you can redistribute it and/or *
++ * modify it under the terms of version 2 of the GNU General *
++ * Public License as published by the Free Software Foundation. *
++ * This program is distributed in the hope that it will be useful. *
++ * ALL EXPRESS OR IMPLIED CONDITIONS, REPRESENTATIONS AND *
++ * WARRANTIES, INCLUDING ANY IMPLIED WARRANTY OF MERCHANTABILITY, *
++ * FITNESS FOR A PARTICULAR PURPOSE, OR NON-INFRINGEMENT, ARE *
++ * DISCLAIMED, EXCEPT TO THE EXTENT THAT SUCH DISCLAIMERS ARE HELD *
++ * TO BE LEGALLY INVALID. See the GNU General Public License for *
++ * more details, a copy of which can be found in the file COPYING *
++ * included with this package. *
++ *******************************************************************/
++
++#define SECURITY_OFFLINE 0x0
++#define SECURITY_ONLINE 0x1
++
++#define SECURITY_WAIT_TMO 30 /* seconds to wait for the auth service */
+--- a/drivers/scsi/lpfc/lpfc_sli.c
++++ b/drivers/scsi/lpfc/lpfc_sli.c
+@@ -796,7 +796,7 @@ lpfc_sli_hbq_to_firmware(struct lpfc_hba
+ hbqe->bde.addrHigh = le32_to_cpu(putPaddrHigh(physaddr));
+ hbqe->bde.addrLow = le32_to_cpu(putPaddrLow(physaddr));
+ hbqe->bde.tus.f.bdeSize = hbq_buf->size;
+- hbqe->bde.tus.f.bdeFlags = 0;
++ hbqe->bde.tus.f.bdeFlags = BUFF_TYPE_BDE_64;
+ hbqe->bde.tus.w = le32_to_cpu(hbqe->bde.tus.w);
+ hbqe->buffer_tag = le32_to_cpu(hbq_buf->tag);
+ /* Sync SLIM */
+@@ -1051,6 +1051,9 @@ lpfc_sli_chk_mbx_command(uint8_t mbxComm
+ case MBX_REG_VPI:
+ case MBX_UNREG_VPI:
+ case MBX_HEARTBEAT:
++ case MBX_READ_EVENT_LOG_STATUS:
++ case MBX_READ_EVENT_LOG:
++ case MBX_WRITE_EVENT_LOG:
+ case MBX_PORT_CAPABILITIES:
+ case MBX_PORT_IOV_CONTROL:
+ ret = mbxCommand;
+@@ -3546,9 +3549,35 @@ lpfc_sli_issue_mbox(struct lpfc_hba *phb
+ mb->mbxOwner = OWN_CHIP;
+
+ if (psli->sli_flag & LPFC_SLI2_ACTIVE) {
+- /* First copy command data to host SLIM area */
++ /* Populate mbox extension offset word. */
++ if (pmbox->in_ext_byte_len || pmbox->out_ext_byte_len) {
++ *(((uint32_t *)mb) + pmbox->mbox_offset_word)
++ = (uint8_t *)phba->mbox_ext
++ - (uint8_t *)phba->mbox;
++ }
++
++ /* Copy the mailbox extension data */
++ if (pmbox->in_ext_byte_len && pmbox->context2) {
++ lpfc_sli_pcimem_bcopy(pmbox->context2,
++ (uint8_t*)phba->mbox_ext,
++ pmbox->in_ext_byte_len);
++ }
++ /* Copy command data to host SLIM area */
+ lpfc_sli_pcimem_bcopy(mb, phba->mbox, MAILBOX_CMD_SIZE);
++
+ } else {
++ /* Populate mbox extension offset word. */
++ if (pmbox->in_ext_byte_len || pmbox->out_ext_byte_len)
++ *(((uint32_t *)mb) + pmbox->mbox_offset_word)
++ = MAILBOX_HBA_EXT_OFFSET;
++
++ /* Copy the mailbox extension data */
++ if (pmbox->in_ext_byte_len && pmbox->context2) {
++ lpfc_memcpy_to_slim(phba->MBslimaddr +
++ MAILBOX_HBA_EXT_OFFSET,
++ pmbox->context2, pmbox->in_ext_byte_len);
++
++ }
+ if (mb->mbxCommand == MBX_CONFIG_PORT) {
+ /* copy command data into host mbox for cmpl */
+ lpfc_sli_pcimem_bcopy(mb, phba->mbox, MAILBOX_CMD_SIZE);
+@@ -3658,15 +3687,22 @@ lpfc_sli_issue_mbox(struct lpfc_hba *phb
+ if (psli->sli_flag & LPFC_SLI2_ACTIVE) {
+ /* copy results back to user */
+ lpfc_sli_pcimem_bcopy(phba->mbox, mb, MAILBOX_CMD_SIZE);
++ /* Copy the mailbox extension data */
++ if (pmbox->out_ext_byte_len && pmbox->context2) {
++ lpfc_sli_pcimem_bcopy(phba->mbox_ext,
++ pmbox->context2,
++ pmbox->out_ext_byte_len);
++ }
+ } else {
+ /* First copy command data */
+ lpfc_memcpy_from_slim(mb, phba->MBslimaddr,
+ MAILBOX_CMD_SIZE);
+- if ((mb->mbxCommand == MBX_DUMP_MEMORY) &&
+- pmbox->context2) {
+- lpfc_memcpy_from_slim((void *)pmbox->context2,
+- phba->MBslimaddr + DMP_RSP_OFFSET,
+- mb->un.varDmp.word_cnt);
++ /* Copy the mailbox extension data */
++ if (pmbox->out_ext_byte_len && pmbox->context2) {
++ lpfc_memcpy_from_slim(pmbox->context2,
++ phba->MBslimaddr +
++ MAILBOX_HBA_EXT_OFFSET,
++ pmbox->out_ext_byte_len);
+ }
+ }
+
+@@ -5395,6 +5431,15 @@ lpfc_sp_intr_handler(int irq, void *dev_
+ if (pmb->mbox_cmpl) {
+ lpfc_sli_pcimem_bcopy(mbox, pmbox,
+ MAILBOX_CMD_SIZE);
++ /* Copy the mailbox extension data */
++ if (pmb->out_ext_byte_len &&
++ pmb->context2) {
++ lpfc_sli_pcimem_bcopy(
++ phba->mbox_ext,
++ pmb->context2,
++ pmb->out_ext_byte_len);
++ }
++
+ }
+ if (pmb->mbox_flag & LPFC_MBX_IMED_UNREG) {
+ pmb->mbox_flag &= ~LPFC_MBX_IMED_UNREG;
+--- a/drivers/scsi/lpfc/lpfc_sli.h
++++ b/drivers/scsi/lpfc/lpfc_sli.h
+@@ -88,6 +88,9 @@ typedef struct lpfcMboxq {
+
+ void (*mbox_cmpl) (struct lpfc_hba *, struct lpfcMboxq *);
+ uint8_t mbox_flag;
++ uint16_t in_ext_byte_len;
++ uint16_t out_ext_byte_len;
++ uint8_t mbox_offset_word;
+
+ } LPFC_MBOXQ_t;
+
+--- a/drivers/scsi/lpfc/lpfc_version.h
++++ b/drivers/scsi/lpfc/lpfc_version.h
+@@ -18,7 +18,7 @@
+ * included with this package. *
+ *******************************************************************/
+
+-#define LPFC_DRIVER_VERSION "8.2.8"
++#define LPFC_DRIVER_VERSION "8.2.8.1"
+
+ #define LPFC_DRIVER_NAME "lpfc"
+ #define LPFC_SP_DRIVER_HANDLER_NAME "lpfc:sp"
+--- a/drivers/scsi/lpfc/lpfc_vport.c
++++ b/drivers/scsi/lpfc/lpfc_vport.c
+@@ -42,6 +42,7 @@
+ #include "lpfc_crtn.h"
+ #include "lpfc_version.h"
+ #include "lpfc_vport.h"
++#include "lpfc_auth_access.h"
+
+ inline void lpfc_vport_set_state(struct lpfc_vport *vport,
+ enum fc_vport_state new_state)
+@@ -394,6 +395,21 @@ lpfc_vport_create(struct fc_vport *fc_vp
+ goto error_out;
+ }
+
++ shost = lpfc_shost_from_vport(vport);
++
++ if ((lpfc_get_security_enabled)(shost)) {
++ spin_lock_irq(&fc_security_user_lock);
++
++ list_add_tail(&vport->sc_users, &fc_security_user_list);
++
++ spin_unlock_irq(&fc_security_user_lock);
++
++ if (fc_service_state == FC_SC_SERVICESTATE_ONLINE) {
++ lpfc_fc_queue_security_work(vport,
++ &vport->sc_online_work);
++ }
++ }
++
+ *(struct lpfc_vport **)fc_vport->dd_data = vport;
+ vport->fc_vport = fc_vport;
+
+--- a/drivers/scsi/lpfc/Makefile
++++ b/drivers/scsi/lpfc/Makefile
+@@ -1,7 +1,7 @@
+ #/*******************************************************************
+ # * This file is part of the Emulex Linux Device Driver for *
+ # * Fibre Channel Host Bus Adapters. *
+-# * Copyright (C) 2004-2006 Emulex. All rights reserved. *
++# * Copyright (C) 2004-2008 Emulex. All rights reserved. *
+ # * EMULEX and SLI are trademarks of Emulex. *
+ # * www.emulex.com *
+ # * *
+@@ -28,4 +28,5 @@ obj-$(CONFIG_SCSI_LPFC) := lpfc.o
+
+ lpfc-objs := lpfc_mem.o lpfc_sli.o lpfc_ct.o lpfc_els.o lpfc_hbadisc.o \
+ lpfc_init.o lpfc_mbox.o lpfc_nportdisc.o lpfc_scsi.o lpfc_attr.o \
+- lpfc_vport.o lpfc_debugfs.o
++ lpfc_vport.o lpfc_debugfs.o lpfc_security.o lpfc_auth_access.o \
++ lpfc_auth.o lpfc_ioctl.o lpfc_menlo.o