From: Jamie Wellnitz <jamie.wellnitz@emulex.com>
Subject: Update lpfc to 8.2.8.1
References: bnc#420767

This patch adds several features to lpfc 8.2.8, including FC (DH-CHAP)
authentication and additional management ioctls, and bumps the driver
version to 8.2.8.1. An illustrative usage sketch of the new sysfs
authentication interface follows the diffstat below.

Signed-off-by: Hannes Reinecke <hare@suse.de>

---
 drivers/scsi/lpfc/Makefile           |    5
 drivers/scsi/lpfc/lpfc.h             |  129 +
 drivers/scsi/lpfc/lpfc_attr.c        |  976 ++++++++++++-
 drivers/scsi/lpfc/lpfc_auth.c        |  838 +++++++++++
 drivers/scsi/lpfc/lpfc_auth.h        |   92 +
 drivers/scsi/lpfc/lpfc_auth_access.c |  598 ++++++++
 drivers/scsi/lpfc/lpfc_auth_access.h |  245 +++
 drivers/scsi/lpfc/lpfc_crtn.h        |   37
 drivers/scsi/lpfc/lpfc_disc.h        |    3
 drivers/scsi/lpfc/lpfc_els.c         |  663 +++++++++
 drivers/scsi/lpfc/lpfc_hbadisc.c     |  154 +-
 drivers/scsi/lpfc/lpfc_hw.h          |   52
 drivers/scsi/lpfc/lpfc_init.c        |  154 +-
 drivers/scsi/lpfc/lpfc_ioctl.c       | 2519 +++++++++++++++++++++++++++++++++++
 drivers/scsi/lpfc/lpfc_ioctl.h       |  184 ++
 drivers/scsi/lpfc/lpfc_logmsg.h      |    1
 drivers/scsi/lpfc/lpfc_mbox.c        |    2
 drivers/scsi/lpfc/lpfc_menlo.c       | 1174 ++++++++++++++++
 drivers/scsi/lpfc/lpfc_scsi.c        |   36
 drivers/scsi/lpfc/lpfc_security.c    |  339 ++++
 drivers/scsi/lpfc/lpfc_security.h    |   24
 drivers/scsi/lpfc/lpfc_sli.c         |   59
 drivers/scsi/lpfc/lpfc_sli.h         |    3
 drivers/scsi/lpfc/lpfc_version.h     |    2
 drivers/scsi/lpfc/lpfc_vport.c       |   16
 25 files changed, 8149 insertions(+), 156 deletions(-)

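The diff below wires the new DH-CHAP machinery into per-host sysfs attributes
(lpfc_enable_auth, lpfc_authenticate, auth_state, auth_dir, and so on). As a
rough, unofficial illustration of that interface (it is not part of the
patch), a user-space program might drive it as follows; the
/sys/class/scsi_host/host0 path, a port with lpfc_enable_auth set to 1, and
treating the all-Fs WWPN as the fabric (AUTH_FABRIC_WWN) are assumptions,
not documented ABI:

#include <stdio.h>
#include <string.h>

int main(void)
{
	char state[64] = "";
	/* writing a bare 16-hex-digit WWPN (no colons) kicks off DH-CHAP;
	 * lpfc_parse_wwn() in the hunks below expects exactly that format */
	FILE *f = fopen("/sys/class/scsi_host/host0/lpfc_authenticate", "w");

	if (!f || fputs("ffffffffffffffff", f) == EOF)
		perror("lpfc_authenticate");
	if (f)
		fclose(f);

	/* auth_state reports Not Authenticated / Authenticating /
	 * Authenticated / Failed, per lpfc_auth_state_show() */
	f = fopen("/sys/class/scsi_host/host0/auth_state", "r");
	if (f && fgets(state, sizeof(state), f))
		printf("auth_state: %s", state);
	if (f)
		fclose(f);
	return 0;
}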
38--- a/drivers/scsi/lpfc/lpfc_attr.c
39+++ b/drivers/scsi/lpfc/lpfc_attr.c
40@@ -41,6 +41,7 @@
41 #include "lpfc_compat.h"
42 #include "lpfc_crtn.h"
43 #include "lpfc_vport.h"
44+#include "lpfc_auth_access.h"
45
46 #define LPFC_DEF_DEVLOSS_TMO 30
47 #define LPFC_MIN_DEVLOSS_TMO 1
48@@ -50,6 +51,15 @@
49 #define LPFC_LINK_SPEED_BITMAP 0x00000117
50 #define LPFC_LINK_SPEED_STRING "0, 1, 2, 4, 8"
51
52+extern struct bin_attribute sysfs_menlo_attr;
53+
54+/*
55+ * Write key size should be multiple of 4. If write key is changed
56+ * make sure that library write key is also changed.
57+ */
58+#define LPFC_REG_WRITE_KEY_SIZE 4
59+#define LPFC_REG_WRITE_KEY "EMLX"
60+
61 /**
62 * lpfc_jedec_to_ascii: Hex to ascii convertor according to JEDEC rules.
63 * @incr: integer to convert.
64@@ -551,7 +561,7 @@ lpfc_do_offline(struct lpfc_hba *phba, u
65 * -EIO reset not configured or error posting the event
66 * zero for success
67 **/
68-static int
69+int
70 lpfc_selective_reset(struct lpfc_hba *phba)
71 {
72 struct completion online_compl;
73@@ -1080,6 +1090,141 @@ lpfc_poll_store(struct device *dev, stru
74 return strlen(buf);
75 }
76
77+static ssize_t
78+lpfc_auth_state_show(struct device *dev, struct device_attribute *attr,
79+ char *buf)
80+{
81+ struct Scsi_Host *shost = class_to_shost(dev);
82+ struct lpfc_vport *vport = (struct lpfc_vport *) shost->hostdata;
83+ switch (vport->auth.auth_state) {
84+ case LPFC_AUTH_UNKNOWN:
85+ if (vport->auth.auth_msg_state == LPFC_AUTH_NEGOTIATE ||
86+ vport->auth.auth_msg_state == LPFC_DHCHAP_CHALLENGE ||
87+ vport->auth.auth_msg_state == LPFC_DHCHAP_REPLY ||
88+ vport->auth.auth_msg_state == LPFC_DHCHAP_SUCCESS_REPLY)
89+ return snprintf(buf, PAGE_SIZE, "Authenticating\n");
90+ else
91+ return snprintf(buf, PAGE_SIZE, "Not Authenticated\n");
92+ case LPFC_AUTH_FAIL:
93+ return snprintf(buf, PAGE_SIZE, "Failed\n");
94+ case LPFC_AUTH_SUCCESS:
95+ if (vport->auth.auth_msg_state == LPFC_AUTH_NEGOTIATE ||
96+ vport->auth.auth_msg_state == LPFC_DHCHAP_CHALLENGE ||
97+ vport->auth.auth_msg_state == LPFC_DHCHAP_REPLY ||
98+ vport->auth.auth_msg_state == LPFC_DHCHAP_SUCCESS_REPLY)
99+ return snprintf(buf, PAGE_SIZE, "Authenticating\n");
100+ else if (vport->auth.auth_msg_state == LPFC_DHCHAP_SUCCESS)
101+ return snprintf(buf, PAGE_SIZE, "Authenticated\n");
102+ }
103+ return snprintf(buf, PAGE_SIZE, "Unknown\n");
104+}
105+
106+static ssize_t
107+lpfc_auth_dir_show(struct device *dev, struct device_attribute *attr,
108+ char *buf)
109+{
110+ struct Scsi_Host *shost = class_to_shost(dev);
111+ struct lpfc_vport *vport = (struct lpfc_vport *) shost->hostdata;
112+ if (!vport->cfg_enable_auth ||
113+ vport->auth.auth_state != LPFC_AUTH_SUCCESS)
114+ return snprintf(buf, PAGE_SIZE, "Unknown\n");
115+ if (vport->auth.direction == AUTH_DIRECTION_LOCAL)
116+ return snprintf(buf, PAGE_SIZE, "Local Authenticated\n");
117+ else if (vport->auth.direction == AUTH_DIRECTION_REMOTE)
118+ return snprintf(buf, PAGE_SIZE, "Remote Authenticated\n");
119+ else if (vport->auth.direction == AUTH_DIRECTION_BIDI)
120+ return snprintf(buf, PAGE_SIZE, "Bidi Authentication\n");
121+ return snprintf(buf, PAGE_SIZE, "Unknown\n");
122+}
123+
124+static ssize_t
125+lpfc_auth_protocol_show(struct device *dev, struct device_attribute *attr,
126+ char *buf)
127+{
128+ struct Scsi_Host *shost = class_to_shost(dev);
129+ struct lpfc_vport *vport = (struct lpfc_vport *) shost->hostdata;
130+ if (vport->cfg_enable_auth &&
131+ vport->auth.auth_state == LPFC_AUTH_SUCCESS)
132+ return snprintf(buf, PAGE_SIZE, "1 (DH-CHAP)\n");
133+ else
134+ return snprintf(buf, PAGE_SIZE, "Unknown\n");
135+}
136+
137+static ssize_t
138+lpfc_auth_dhgroup_show(struct device *dev, struct device_attribute *attr,
139+ char *buf)
140+{
141+ struct Scsi_Host *shost = class_to_shost(dev);
142+ struct lpfc_vport *vport = (struct lpfc_vport *) shost->hostdata;
143+ if (!vport->cfg_enable_auth ||
144+ vport->auth.auth_state != LPFC_AUTH_SUCCESS)
145+ return snprintf(buf, PAGE_SIZE, "Unknown\n");
146+ switch (vport->auth.group_id) {
147+ case DH_GROUP_NULL:
148+ return snprintf(buf, PAGE_SIZE, "0 (NULL)\n");
149+ case DH_GROUP_1024:
150+ return snprintf(buf, PAGE_SIZE, "1 (1024)\n");
151+ case DH_GROUP_1280:
152+ return snprintf(buf, PAGE_SIZE, "2 (1280)\n");
153+ case DH_GROUP_1536:
154+ return snprintf(buf, PAGE_SIZE, "3 (1536)\n");
155+ case DH_GROUP_2048:
156+ return snprintf(buf, PAGE_SIZE, "4 (2048)\n");
157+ }
158+ return snprintf(buf, PAGE_SIZE, "%d (Unrecognized)\n",
159+ vport->auth.group_id);
160+}
161+
162+static ssize_t
163+lpfc_auth_hash_show(struct device *dev, struct device_attribute *attr,
164+ char *buf)
165+{
166+ struct Scsi_Host *shost = class_to_shost(dev);
167+ struct lpfc_vport *vport = (struct lpfc_vport *) shost->hostdata;
168+ if (!vport->cfg_enable_auth ||
169+ vport->auth.auth_state != LPFC_AUTH_SUCCESS)
170+ return snprintf(buf, PAGE_SIZE, "Unknown\n");
171+ switch (vport->auth.hash_id) {
172+ case FC_SP_HASH_MD5:
173+ return snprintf(buf, PAGE_SIZE, "5 (MD5)\n");
174+ case FC_SP_HASH_SHA1:
175+ return snprintf(buf, PAGE_SIZE, "6 (SHA1)\n");
176+ }
177+ return snprintf(buf, PAGE_SIZE, "%d (Unrecognized)\n",
178+ vport->auth.hash_id);
179+}
180+static ssize_t
181+lpfc_auth_last_show(struct device *dev, struct device_attribute *attr,
182+ char *buf)
183+{
184+ struct Scsi_Host *shost = class_to_shost(dev);
185+ struct lpfc_vport *vport = (struct lpfc_vport *) shost->hostdata;
186+ struct timeval last_time;
187+ if (!vport->cfg_enable_auth || vport->auth.last_auth == 0)
188+ return snprintf(buf, PAGE_SIZE, "%d\n", -1);
189+ jiffies_to_timeval((jiffies - vport->auth.last_auth), &last_time);
190+ return snprintf(buf, PAGE_SIZE, "%ld\n", last_time.tv_sec);
191+}
192+
193+static ssize_t
194+lpfc_auth_next_show(struct device *dev, struct device_attribute *attr,
195+ char *buf)
196+{
197+ struct Scsi_Host *shost = class_to_shost(dev);
198+ struct lpfc_vport *vport = (struct lpfc_vport *) shost->hostdata;
199+ unsigned long next_jiff;
200+ struct timeval next_time;
201+ if (!vport->cfg_enable_auth ||
202+ vport->auth.last_auth == 0 ||
203+ vport->auth.reauth_interval == 0)
204+ return snprintf(buf, PAGE_SIZE, "%d\n", -1);
205+ /* calculate the amount of time left until next auth */
206+ next_jiff = (msecs_to_jiffies(vport->auth.reauth_interval * 60000) +
207+ vport->auth.last_auth) - jiffies;
208+ jiffies_to_timeval(next_jiff, &next_time);
209+ return snprintf(buf, PAGE_SIZE, "%ld\n", next_time.tv_sec);
210+}
211+
212 /**
213 * lpfc_param_show: Return a cfg attribute value in decimal.
214 *
215@@ -1512,7 +1657,38 @@ static DEVICE_ATTR(max_xri, S_IRUGO, lpf
216 static DEVICE_ATTR(used_xri, S_IRUGO, lpfc_used_xri_show, NULL);
217 static DEVICE_ATTR(npiv_info, S_IRUGO, lpfc_npiv_info_show, NULL);
218 static DEVICE_ATTR(lpfc_temp_sensor, S_IRUGO, lpfc_temp_sensor_show, NULL);
219+static DEVICE_ATTR(auth_state, S_IRUGO, lpfc_auth_state_show, NULL);
220+static DEVICE_ATTR(auth_dir, S_IRUGO, lpfc_auth_dir_show, NULL);
221+static DEVICE_ATTR(auth_protocol, S_IRUGO, lpfc_auth_protocol_show, NULL);
222+static DEVICE_ATTR(auth_dhgroup, S_IRUGO, lpfc_auth_dhgroup_show, NULL);
223+static DEVICE_ATTR(auth_hash, S_IRUGO, lpfc_auth_hash_show, NULL);
224+static DEVICE_ATTR(auth_last, S_IRUGO, lpfc_auth_last_show, NULL);
225+static DEVICE_ATTR(auth_next, S_IRUGO, lpfc_auth_next_show, NULL);
226+
227+static int
228+lpfc_parse_wwn(const char *ns, uint8_t *nm)
229+{
230+ unsigned int i, j;
231+ memset(nm, 0, 8);
232+
233+ /* Validate and store the new name */
234+ for (i = 0, j = 0; i < 16; i++) {
235+ if ((*ns >= 'a') && (*ns <= 'f'))
236+ j = ((j << 4) | ((*ns++ - 'a') + 10));
237+ else if ((*ns >= 'A') && (*ns <= 'F'))
238+ j = ((j << 4) | ((*ns++ - 'A') + 10));
239+ else if ((*ns >= '0') && (*ns <= '9'))
240+ j = ((j << 4) | (*ns++ - '0'));
241+ else
242+ return -EINVAL;
243+ if (i % 2) {
244+ nm[i/2] = j & 0xff;
245+ j = 0;
246+ }
247+ }
248
249+ return 0;
250+}
251
252 static char *lpfc_soft_wwn_key = "C99G71SL8032A";
253
254@@ -1908,6 +2084,87 @@ lpfc_vport_param_store(nodev_tmo)
255
256 static DEVICE_ATTR(lpfc_nodev_tmo, S_IRUGO | S_IWUSR,
257 lpfc_nodev_tmo_show, lpfc_nodev_tmo_store);
258+static ssize_t
259+lpfc_authenticate(struct device *dev, struct device_attribute *attr,
260+ const char *buf, size_t count)
261+{
262+ struct Scsi_Host *shost = class_to_shost(dev);
263+ struct lpfc_vport *vport = (struct lpfc_vport *)shost->hostdata;
264+ struct lpfc_hba *phba = vport->phba;
265+ struct lpfc_nodelist *ndlp;
266+ int status;
267+ struct lpfc_name wwpn;
268+
269+ if (lpfc_parse_wwn(buf, wwpn.u.wwn))
270+ return -EINVAL;
271+
272+ if (vport->port_state == LPFC_VPORT_FAILED) {
273+ lpfc_issue_lip(shost);
274+ return strlen(buf);
275+ }
276+ if ((vport->fc_flag & FC_OFFLINE_MODE) ||
277+ (phba->sli.sli_flag & LPFC_BLOCK_MGMT_IO) ||
278+ (!vport->cfg_enable_auth))
279+ return -EPERM;
280+
281+ /* If vport already in the middle of authentication do not restart */
282+ if ((vport->auth.auth_msg_state == LPFC_AUTH_NEGOTIATE) ||
283+ (vport->auth.auth_msg_state == LPFC_DHCHAP_CHALLENGE) ||
284+ (vport->auth.auth_msg_state == LPFC_DHCHAP_REPLY))
285+ return -EAGAIN;
286+
287+ if (wwn_to_u64(wwpn.u.wwn) == AUTH_FABRIC_WWN)
288+ ndlp = lpfc_findnode_did(vport, Fabric_DID);
289+ else
290+ ndlp = lpfc_findnode_wwnn(vport, &wwpn);
291+ if (!ndlp || !NLP_CHK_NODE_ACT(ndlp))
292+ return -EPERM;
293+ status = lpfc_start_node_authentication(ndlp);
294+ if (status)
295+ return status;
296+ return strlen(buf);
297+}
298+static DEVICE_ATTR(lpfc_authenticate, S_IRUGO | S_IWUSR, NULL,
299+ lpfc_authenticate);
300+
301+static ssize_t
302+lpfc_update_auth_config(struct device *dev, struct device_attribute *attr,
303+ const char *buf, size_t count)
304+{
305+ struct Scsi_Host *shost = class_to_shost(dev);
306+ struct lpfc_vport *vport = (struct lpfc_vport *)shost->hostdata;
307+ struct lpfc_hba *phba = vport->phba;
308+ struct lpfc_nodelist *ndlp;
309+ struct lpfc_name wwpn;
310+ int status;
311+
312+ if (lpfc_parse_wwn(buf, wwpn.u.wwn))
313+ return -EINVAL;
314+
315+ if ((vport->fc_flag & FC_OFFLINE_MODE) ||
316+ (phba->sli.sli_flag & LPFC_BLOCK_MGMT_IO) ||
317+ (!vport->cfg_enable_auth))
318+ return -EPERM;
319+
320+ /* If vport already in the middle of authentication do not restart */
321+ if ((vport->auth.auth_msg_state == LPFC_AUTH_NEGOTIATE) ||
322+ (vport->auth.auth_msg_state == LPFC_DHCHAP_CHALLENGE) ||
323+ (vport->auth.auth_msg_state == LPFC_DHCHAP_REPLY))
324+ return -EAGAIN;
325+
326+ if (wwn_to_u64(wwpn.u.wwn) == AUTH_FABRIC_WWN)
327+ ndlp = lpfc_findnode_did(vport, Fabric_DID);
328+ else
329+ ndlp = lpfc_findnode_wwnn(vport, &wwpn);
330+ if (!ndlp || !NLP_CHK_NODE_ACT(ndlp))
331+ return -EPERM;
332+ status = lpfc_get_auth_config(ndlp, &wwpn);
333+ if (status)
334+ return -EPERM;
335+ return strlen(buf);
336+}
337+static DEVICE_ATTR(lpfc_update_auth_config, S_IRUGO | S_IWUSR,
338+ NULL, lpfc_update_auth_config);
339
340 /*
341 # lpfc_devloss_tmo: If set, it will hold all I/O errors on devices that
342@@ -2753,6 +3010,48 @@ LPFC_ATTR_R(use_msi, 2, 0, 2, "Use Messa
343 "MSI-X (2), if possible");
344
345 /*
346+# lpfc_enable_auth: controls FC Authentication.
347+# 0 = Authentication OFF
348+# 1 = Authentication ON
349+# Value range [0,1]. Default value is 0.
350+*/
351+static int lpfc_enable_auth;
352+module_param(lpfc_enable_auth, int, 0);
353+MODULE_PARM_DESC(lpfc_enable_auth, "Enable FC Authentication");
354+lpfc_vport_param_show(enable_auth);
355+lpfc_vport_param_init(enable_auth, 0, 0, 1);
356+static int
357+lpfc_enable_auth_set(struct lpfc_vport *vport, int val)
358+{
359+ if (val == vport->cfg_enable_auth)
360+ return 0;
361+ if (val == 0) {
362+ spin_lock_irq(&fc_security_user_lock);
363+ list_del(&vport->sc_users);
364+ spin_unlock_irq(&fc_security_user_lock);
365+ vport->cfg_enable_auth = val;
366+ lpfc_fc_queue_security_work(vport,
367+ &vport->sc_offline_work);
368+ return 0;
369+ } else if (val == 1) {
370+ spin_lock_irq(&fc_security_user_lock);
371+ list_add_tail(&vport->sc_users, &fc_security_user_list);
372+ spin_unlock_irq(&fc_security_user_lock);
373+ vport->cfg_enable_auth = val;
374+ lpfc_fc_queue_security_work(vport,
375+ &vport->sc_online_work);
376+ return 0;
377+ }
378+ lpfc_printf_vlog(vport, KERN_ERR, LOG_INIT,
379+ "0431 lpfc_enable_auth attribute cannot be set to %d, "
380+ "allowed range is [0, 1]\n", val);
381+ return -EINVAL;
382+}
383+lpfc_vport_param_store(enable_auth);
384+static DEVICE_ATTR(lpfc_enable_auth, S_IRUGO | S_IWUSR,
385+ lpfc_enable_auth_show, lpfc_enable_auth_store);
386+
387+/*
388 # lpfc_enable_hba_reset: Allow or prevent HBA resets to the hardware.
389 # 0 = HBA resets disabled
390 # 1 = HBA resets enabled (default)
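/*
 * Aside (not part of the patch): a minimal sketch of how the new
 * lpfc_enable_auth knob above might be exercised from user space.  It can
 * also be set for all ports at load time ("modprobe lpfc
 * lpfc_enable_auth=1"); the /sys/class/scsi_host/host%d path below is an
 * assumption about where the per-host attribute lands.
 */
#include <stdio.h>

int lpfc_set_enable_auth(int host, int on)
{
	char path[96];
	FILE *f;

	snprintf(path, sizeof(path),
		 "/sys/class/scsi_host/host%d/lpfc_enable_auth", host);
	f = fopen(path, "w");
	if (!f)
		return -1;
	/* lpfc_enable_auth_set() accepts only 0 or 1 */
	fprintf(f, "%d", on ? 1 : 0);
	return fclose(f);
}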
391@@ -2825,6 +3124,16 @@ struct device_attribute *lpfc_hba_attrs[
392 &dev_attr_lpfc_poll,
393 &dev_attr_lpfc_poll_tmo,
394 &dev_attr_lpfc_use_msi,
395+ &dev_attr_lpfc_enable_auth,
396+ &dev_attr_lpfc_authenticate,
397+ &dev_attr_lpfc_update_auth_config,
398+ &dev_attr_auth_state,
399+ &dev_attr_auth_dir,
400+ &dev_attr_auth_protocol,
401+ &dev_attr_auth_dhgroup,
402+ &dev_attr_auth_hash,
403+ &dev_attr_auth_last,
404+ &dev_attr_auth_next,
405 &dev_attr_lpfc_soft_wwnn,
406 &dev_attr_lpfc_soft_wwpn,
407 &dev_attr_lpfc_soft_wwn_enable,
408@@ -2855,6 +3164,14 @@ struct device_attribute *lpfc_vport_attr
409 &dev_attr_nport_evt_cnt,
410 &dev_attr_npiv_info,
411 &dev_attr_lpfc_enable_da_id,
412+ &dev_attr_auth_state,
413+ &dev_attr_auth_dir,
414+ &dev_attr_auth_protocol,
415+ &dev_attr_auth_dhgroup,
416+ &dev_attr_auth_hash,
417+ &dev_attr_auth_last,
418+ &dev_attr_auth_next,
419+
420 &dev_attr_lpfc_max_scsicmpl_time,
421 &dev_attr_lpfc_stat_data_ctrl,
422 NULL,
423@@ -2888,21 +3205,23 @@ sysfs_ctlreg_write(struct kobject *kobj,
424 struct lpfc_vport *vport = (struct lpfc_vport *) shost->hostdata;
425 struct lpfc_hba *phba = vport->phba;
426
427- if ((off + count) > FF_REG_AREA_SIZE)
428+ if ((off + count) > FF_REG_AREA_SIZE + LPFC_REG_WRITE_KEY_SIZE)
429 return -ERANGE;
430
431- if (count == 0) return 0;
432+ if (count <= LPFC_REG_WRITE_KEY_SIZE)
433+ return 0;
434
435 if (off % 4 || count % 4 || (unsigned long)buf % 4)
436 return -EINVAL;
437
438- if (!(vport->fc_flag & FC_OFFLINE_MODE)) {
439- return -EPERM;
440- }
441+ /* This is to protect HBA registers from accidental writes. */
442+ if (memcmp(buf, LPFC_REG_WRITE_KEY, LPFC_REG_WRITE_KEY_SIZE))
443+ return -EINVAL;
444
445 spin_lock_irq(&phba->hbalock);
446- for (buf_off = 0; buf_off < count; buf_off += sizeof(uint32_t))
447- writel(*((uint32_t *)(buf + buf_off)),
448+ for (buf_off = 0; buf_off < count - LPFC_REG_WRITE_KEY_SIZE;
449+ buf_off += sizeof(uint32_t))
450+ writel(*((uint32_t *)(buf + buf_off + LPFC_REG_WRITE_KEY_SIZE)),
451 phba->ctrl_regs_memmap_p + off + buf_off);
452
453 spin_unlock_irq(&phba->hbalock);
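/*
 * Aside (not part of the patch): with the change above, user-space writes
 * to the "ctlreg" bin attribute must start with the 4-byte
 * LPFC_REG_WRITE_KEY ("EMLX"); the key is stripped and the remaining
 * 32-bit words are written to the control registers at the file offset.
 * A minimal sketch, assuming the attribute sits under
 * /sys/class/scsi_host/host0 and using an illustrative offset of 0x100:
 */
#include <fcntl.h>
#include <stdint.h>
#include <string.h>
#include <unistd.h>

int main(void)
{
	uint8_t buf[8];
	uint32_t val = 0;		/* register word to write */
	int fd = open("/sys/class/scsi_host/host0/ctlreg", O_WRONLY);

	if (fd < 0)
		return 1;
	memcpy(buf, "EMLX", 4);		/* LPFC_REG_WRITE_KEY */
	memcpy(buf + 4, &val, 4);	/* payload follows the key */
	/* offset and total length must both stay 4-byte aligned */
	if (pwrite(fd, buf, sizeof(buf), 0x100) != (ssize_t)sizeof(buf))
		return 1;
	return close(fd);
}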
454@@ -2971,21 +3290,211 @@ static struct bin_attribute sysfs_ctlreg
455 .write = sysfs_ctlreg_write,
456 };
457
458+static struct lpfc_sysfs_mbox *
459+lpfc_get_sysfs_mbox(struct lpfc_hba *phba, uint8_t create)
460+{
461+ struct lpfc_sysfs_mbox *sysfs_mbox;
462+ pid_t pid;
463+
464+ pid = current->pid;
465+
466+ spin_lock_irq(&phba->hbalock);
467+ list_for_each_entry(sysfs_mbox, &phba->sysfs_mbox_list, list) {
468+ if (sysfs_mbox->pid == pid) {
469+ spin_unlock_irq(&phba->hbalock);
470+ return sysfs_mbox;
471+ }
472+ }
473+ if (!create) {
474+ spin_unlock_irq(&phba->hbalock);
475+ return NULL;
476+ }
477+ spin_unlock_irq(&phba->hbalock);
478+ sysfs_mbox = kzalloc(sizeof(struct lpfc_sysfs_mbox),
479+ GFP_KERNEL);
480+ if (!sysfs_mbox)
481+ return NULL;
482+ sysfs_mbox->state = SMBOX_IDLE;
483+ sysfs_mbox->pid = pid;
484+ spin_lock_irq(&phba->hbalock);
485+ list_add_tail(&sysfs_mbox->list, &phba->sysfs_mbox_list);
486+
487+ spin_unlock_irq(&phba->hbalock);
488+ return sysfs_mbox;
489+
490+}
491 /**
492 * sysfs_mbox_idle: frees the sysfs mailbox.
493 * @phba: lpfc_hba pointer
494 **/
495 static void
496-sysfs_mbox_idle(struct lpfc_hba *phba)
497+sysfs_mbox_idle(struct lpfc_hba *phba,
498+ struct lpfc_sysfs_mbox *sysfs_mbox)
499 {
500- phba->sysfs_mbox.state = SMBOX_IDLE;
501- phba->sysfs_mbox.offset = 0;
502-
503- if (phba->sysfs_mbox.mbox) {
504- mempool_free(phba->sysfs_mbox.mbox,
505+ list_del_init(&sysfs_mbox->list);
506+ if (sysfs_mbox->mbox) {
507+ mempool_free(sysfs_mbox->mbox,
508 phba->mbox_mem_pool);
509- phba->sysfs_mbox.mbox = NULL;
510 }
511+
512+ if (sysfs_mbox->mbext)
513+ kfree(sysfs_mbox->mbext);
514+
515+ /* If txmit buffer allocated free txmit buffer */
516+ if (sysfs_mbox->txmit_buff) {
517+ if (sysfs_mbox->txmit_buff->virt)
518+ __lpfc_mbuf_free(phba,
519+ sysfs_mbox->txmit_buff->virt,
520+ sysfs_mbox->txmit_buff->phys);
521+ kfree(sysfs_mbox->txmit_buff);
522+ }
523+
524+ /* If rcv buffer allocated free rcv buffer */
525+ if (sysfs_mbox->rcv_buff) {
526+ if (sysfs_mbox->rcv_buff->virt)
527+ __lpfc_mbuf_free(phba,
528+ sysfs_mbox->rcv_buff->virt,
529+ sysfs_mbox->rcv_buff->phys);
530+ kfree(sysfs_mbox->rcv_buff);
531+ }
532+
533+ kfree(sysfs_mbox);
534+}
535+
536+static size_t
537+lpfc_syfs_mbox_copy_rcv_buff(struct lpfc_hba *phba,
538+ struct lpfc_sysfs_mbox *sysfs_mbox,
539+ char *buf, loff_t off, size_t count)
540+{
541+ uint32_t size;
542+ spin_lock_irq(&phba->hbalock);
543+ if (!sysfs_mbox->mbox) {
544+ sysfs_mbox_idle(phba, sysfs_mbox);
545+ spin_unlock_irq(&phba->hbalock);
546+ return -EAGAIN;
547+ }
548+
549+ if (sysfs_mbox->mbox->mb.mbxCommand == MBX_READ_EVENT_LOG)
550+ size = sysfs_mbox->mbox->mb.un.
551+ varRdEventLog.rcv_bde64.tus.f.bdeSize;
552+ else
553+ size = sysfs_mbox->mbox->mb.un.
554+ varBIUdiag.un.s2.rcv_bde64.tus.f.bdeSize;
555+
556+
557+ if ((count + off) > size) {
558+ sysfs_mbox_idle(phba, sysfs_mbox);
559+ spin_unlock_irq(&phba->hbalock);
560+ return -ERANGE;
561+ }
562+ if (count > LPFC_BPL_SIZE) {
563+ sysfs_mbox_idle(phba, sysfs_mbox);
564+ spin_unlock_irq(&phba->hbalock);
565+ return -ERANGE;
566+ }
567+ if (sysfs_mbox->extoff != off) {
568+ sysfs_mbox_idle(phba, sysfs_mbox);
569+ spin_unlock_irq(&phba->hbalock);
570+ return -EAGAIN;
571+ }
572+
573+ memcpy(buf, (uint8_t *) sysfs_mbox->rcv_buff->virt + off, count);
574+ sysfs_mbox->extoff = off + count;
575+
576+ if (sysfs_mbox->extoff >= size)
577+ sysfs_mbox_idle(phba, sysfs_mbox);
578+
579+ spin_unlock_irq(&phba->hbalock);
580+
581+ return count;
582+}
583+
584+static size_t
585+lpfc_syfs_mbox_copy_extdata(struct lpfc_hba *phba,
586+ struct lpfc_sysfs_mbox * sysfs_mbox,
587+ char *buf, loff_t off, size_t count)
588+{
589+ uint32_t size;
590+
591+ spin_lock_irq(&phba->hbalock);
592+ if (!sysfs_mbox->mbox) {
593+ sysfs_mbox_idle(phba, sysfs_mbox);
594+ spin_unlock_irq(&phba->hbalock);
595+ return -EAGAIN;
596+ }
597+
598+ size = sysfs_mbox->mbox_data.out_ext_wlen * sizeof(uint32_t);
599+
600+ if ((count + off) > size) {
601+ sysfs_mbox_idle(phba, sysfs_mbox);
602+ spin_unlock_irq(&phba->hbalock);
603+ return -ERANGE;
604+ }
605+
606+ if (size > MAILBOX_EXT_SIZE) {
607+ sysfs_mbox_idle(phba, sysfs_mbox);
608+ spin_unlock_irq(&phba->hbalock);
609+ return -ERANGE;
610+ }
611+
612+ if (sysfs_mbox->extoff != off) {
613+ sysfs_mbox_idle(phba, sysfs_mbox);
614+ spin_unlock_irq(&phba->hbalock);
615+ return -EAGAIN;
616+ }
617+
618+ memcpy(buf, (uint8_t *) sysfs_mbox->mbext + off, count);
619+ sysfs_mbox->extoff = off + count;
620+
621+ if (sysfs_mbox->extoff >= size)
622+ sysfs_mbox_idle(phba, sysfs_mbox);
623+
624+ spin_unlock_irq(&phba->hbalock);
625+
626+ return count;
627+}
628+
629+static size_t
630+lpfc_syfs_mbox_copy_txmit_buff(struct lpfc_hba *phba,
631+ struct lpfc_sysfs_mbox *sysfs_mbox,
632+ char *buf, loff_t off, size_t count)
633+{
634+ uint32_t size;
635+ spin_lock_irq(&phba->hbalock);
636+ if (!sysfs_mbox->mbox ||
637+ (sysfs_mbox->offset != sizeof(struct lpfc_sysfs_mbox_data))) {
638+ sysfs_mbox_idle(phba, sysfs_mbox);
639+ spin_unlock_irq(&phba->hbalock);
640+ return -EAGAIN;
641+ }
642+
643+ size = sysfs_mbox->mbox->mb.un.varBIUdiag.un.s2.xmit_bde64.
644+ tus.f.bdeSize;
645+
646+ if ((count + off) > size) {
647+ sysfs_mbox_idle(phba, sysfs_mbox);
648+ spin_unlock_irq(&phba->hbalock);
649+ return -ERANGE;
650+ }
651+
652+ if (size > LPFC_BPL_SIZE) {
653+ sysfs_mbox_idle(phba, sysfs_mbox);
654+ spin_unlock_irq(&phba->hbalock);
655+ return -ERANGE;
656+ }
657+
658+ if (sysfs_mbox->extoff != off) {
659+ sysfs_mbox_idle(phba, sysfs_mbox);
660+ spin_unlock_irq(&phba->hbalock);
661+ return -EAGAIN;
662+ }
663+
664+ memcpy((uint8_t *) sysfs_mbox->txmit_buff->virt + off, buf, count);
665+ sysfs_mbox->extoff = off + count;
666+
667+ spin_unlock_irq(&phba->hbalock);
668+
669+ return count;
670 }
671
672 /**
673@@ -3018,6 +3527,9 @@ sysfs_mbox_write(struct kobject *kobj, s
674 struct lpfc_vport *vport = (struct lpfc_vport *) shost->hostdata;
675 struct lpfc_hba *phba = vport->phba;
676 struct lpfcMboxq *mbox = NULL;
677+ struct lpfc_sysfs_mbox *sysfs_mbox;
678+ uint8_t *ext;
679+ uint32_t size;
680
681 if ((count + off) > MAILBOX_CMD_SIZE)
682 return -ERANGE;
683@@ -3029,34 +3541,232 @@ sysfs_mbox_write(struct kobject *kobj, s
684 return 0;
685
686 if (off == 0) {
687+ sysfs_mbox = lpfc_get_sysfs_mbox(phba, 1);
688+ if (sysfs_mbox == NULL)
689+ return -ENOMEM;
690+ /*
691+ * If sysfs expects the buffer to be read and the
692+ * application does not know how to do it, use a
693+ * different context.
694+ */
695+ if (sysfs_mbox->state == SMBOX_READING_BUFF ||
696+ sysfs_mbox->state == SMBOX_READING_MBEXT) {
697+ spin_lock_irq(&phba->hbalock);
698+ sysfs_mbox_idle(phba, sysfs_mbox);
699+ spin_unlock_irq(&phba->hbalock);
700+ sysfs_mbox = lpfc_get_sysfs_mbox(phba, 1);
701+ if (sysfs_mbox == NULL)
702+ return -ENOMEM;
703+ }
704+ } else {
705+ sysfs_mbox = lpfc_get_sysfs_mbox(phba, 0);
706+ if (sysfs_mbox == NULL)
707+ return -EAGAIN;
708+ }
709+ spin_lock_irq(&phba->hbalock);
710+ if (sysfs_mbox->state == SMBOX_WRITING_MBEXT) {
711+ if (!sysfs_mbox->mbox ||
712+ (sysfs_mbox->offset !=
713+ sizeof(struct lpfc_sysfs_mbox_data))) {
714+ sysfs_mbox_idle(phba, sysfs_mbox);
715+ spin_unlock_irq(&phba->hbalock);
716+ return -EAGAIN;
717+ }
718+
719+ size = sysfs_mbox->mbox_data.in_ext_wlen * sizeof(uint32_t);
720+
721+ if ((count + sysfs_mbox->extoff) > size) {
722+ sysfs_mbox_idle(phba, sysfs_mbox);
723+ spin_unlock_irq(&phba->hbalock);
724+ return -ERANGE;
725+ }
726+
727+ if (size > MAILBOX_EXT_SIZE) {
728+ sysfs_mbox_idle(phba, sysfs_mbox);
729+ spin_unlock_irq(&phba->hbalock);
730+ return -ERANGE;
731+ }
732+
733+ if (!sysfs_mbox->mbext) {
734+ spin_unlock_irq(&phba->hbalock);
735+
736+ ext = kzalloc(MAILBOX_EXT_SIZE, GFP_KERNEL);
737+ if (!ext) {
738+ spin_lock_irq(&phba->hbalock);
739+ sysfs_mbox_idle(phba, sysfs_mbox);
740+ spin_unlock_irq(&phba->hbalock);
741+ return -ENOMEM;
742+ }
743+
744+ spin_lock_irq(&phba->hbalock);
745+ sysfs_mbox->mbext = ext;
746+ }
747+
748+ if (sysfs_mbox->extoff != off) {
749+ sysfs_mbox_idle(phba, sysfs_mbox);
750+ spin_unlock_irq(&phba->hbalock);
751+ return -EAGAIN;
752+ }
753+
754+ memcpy((uint8_t *) sysfs_mbox->mbext + off, buf, count);
755+ sysfs_mbox->extoff = off + count;
756+
757+ spin_unlock_irq(&phba->hbalock);
758+
759+ return count;
760+ }
761+
762+ spin_unlock_irq(&phba->hbalock);
763+
764+ if (sysfs_mbox->state == SMBOX_WRITING_BUFF)
765+ return lpfc_syfs_mbox_copy_txmit_buff(phba,
766+ sysfs_mbox, buf, off, count);
767+
768+ if ((count + off) > sizeof(struct lpfc_sysfs_mbox_data)) {
769+ spin_lock_irq(&phba->hbalock);
770+ sysfs_mbox_idle(phba, sysfs_mbox);
771+ spin_unlock_irq(&phba->hbalock);
772+ return -ERANGE;
773+ }
774+
775+ if (off == 0) {
776 mbox = mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL);
777- if (!mbox)
778+ if (!mbox) {
779+ spin_lock_irq(&phba->hbalock);
780+ sysfs_mbox_idle(phba, sysfs_mbox);
781+ spin_unlock_irq(&phba->hbalock);
782 return -ENOMEM;
783+ }
784 memset(mbox, 0, sizeof (LPFC_MBOXQ_t));
785 }
786
787 spin_lock_irq(&phba->hbalock);
788
789 if (off == 0) {
790- if (phba->sysfs_mbox.mbox)
791+ if (sysfs_mbox->mbox)
792 mempool_free(mbox, phba->mbox_mem_pool);
793 else
794- phba->sysfs_mbox.mbox = mbox;
795- phba->sysfs_mbox.state = SMBOX_WRITING;
796+ sysfs_mbox->mbox = mbox;
797+ sysfs_mbox->state = SMBOX_WRITING;
798 } else {
799- if (phba->sysfs_mbox.state != SMBOX_WRITING ||
800- phba->sysfs_mbox.offset != off ||
801- phba->sysfs_mbox.mbox == NULL) {
802- sysfs_mbox_idle(phba);
803+ if (sysfs_mbox->state != SMBOX_WRITING ||
804+ sysfs_mbox->offset != off ||
805+ sysfs_mbox->mbox == NULL) {
806+ sysfs_mbox_idle(phba, sysfs_mbox);
807 spin_unlock_irq(&phba->hbalock);
808 return -EAGAIN;
809 }
810 }
811
812- memcpy((uint8_t *) & phba->sysfs_mbox.mbox->mb + off,
813+ memcpy((uint8_t *) & sysfs_mbox->mbox_data + off,
814 buf, count);
815
816- phba->sysfs_mbox.offset = off + count;
817+ sysfs_mbox->offset = off + count;
818+
819+ if (sysfs_mbox->offset == sizeof(struct lpfc_sysfs_mbox_data)) {
820+ memcpy((uint8_t *) & sysfs_mbox->mbox->mb,
821+ (uint8_t *) &sysfs_mbox->mbox_data.mbox,
822+ sizeof(MAILBOX_t));
823+ }
824+
825+ if ((sysfs_mbox->offset == sizeof(struct lpfc_sysfs_mbox_data)) &&
826+ (sysfs_mbox->mbox_data.in_ext_wlen ||
827+ sysfs_mbox->mbox_data.out_ext_wlen)) {
828+
829+ if (!sysfs_mbox->mbext) {
830+ spin_unlock_irq(&phba->hbalock);
831+
832+ ext = kzalloc(MAILBOX_EXT_SIZE, GFP_KERNEL);
833+ if (!ext) {
834+ spin_lock_irq(&phba->hbalock);
835+ sysfs_mbox_idle(phba, sysfs_mbox);
836+ spin_unlock_irq(&phba->hbalock);
837+ return -ENOMEM;
838+ }
839+
840+ spin_lock_irq(&phba->hbalock);
841+ sysfs_mbox->mbext = ext;
842+ }
843+ }
844+
845+ if ((sysfs_mbox->offset == sizeof(struct lpfc_sysfs_mbox_data)) &&
846+ (sysfs_mbox->mbox_data.in_ext_wlen)) {
847+ sysfs_mbox->state = SMBOX_WRITING_MBEXT;
848+ }
849+
850+ if ((sysfs_mbox->offset == sizeof(struct lpfc_sysfs_mbox_data)) &&
851+ (sysfs_mbox->mbox->mb.mbxCommand == MBX_RUN_BIU_DIAG64)) {
852+ sysfs_mbox->state = SMBOX_WRITING_BUFF;
853+ spin_unlock_irq(&phba->hbalock);
854+
855+ /* Allocate txmit buffer */
856+ sysfs_mbox->txmit_buff =
857+ kzalloc(sizeof(struct lpfc_dmabuf), GFP_KERNEL);
858+ if (!sysfs_mbox->txmit_buff) {
859+ spin_lock_irq(&phba->hbalock);
860+ sysfs_mbox_idle(phba, sysfs_mbox);
861+ spin_unlock_irq(&phba->hbalock);
862+ return -ENOMEM;
863+ }
864+ INIT_LIST_HEAD(&sysfs_mbox->txmit_buff->list);
865+ sysfs_mbox->txmit_buff->virt =
866+ lpfc_mbuf_alloc(phba, 0,
867+ &(sysfs_mbox->txmit_buff->phys));
868+ if (!sysfs_mbox->txmit_buff->virt) {
869+ spin_lock_irq(&phba->hbalock);
870+ sysfs_mbox_idle(phba, sysfs_mbox);
871+ spin_unlock_irq(&phba->hbalock);
872+ return -ENOMEM;
873+ }
874+
875+ /* Allocate rcv buffer */
876+ sysfs_mbox->rcv_buff =
877+ kzalloc(sizeof(struct lpfc_dmabuf), GFP_KERNEL);
878+ if (!sysfs_mbox->rcv_buff) {
879+ spin_lock_irq(&phba->hbalock);
880+ sysfs_mbox_idle(phba, sysfs_mbox);
881+ spin_unlock_irq(&phba->hbalock);
882+ return -ENOMEM;
883+ }
884+ INIT_LIST_HEAD(&sysfs_mbox->rcv_buff->list);
885+ sysfs_mbox->rcv_buff->virt =
886+ lpfc_mbuf_alloc(phba, 0,
887+ &(sysfs_mbox->rcv_buff->phys));
888+ if (!sysfs_mbox->rcv_buff->virt) {
889+ spin_lock_irq(&phba->hbalock);
890+ sysfs_mbox_idle(phba, sysfs_mbox);
891+ spin_unlock_irq(&phba->hbalock);
892+ return -ENOMEM;
893+ }
894+ return count;
895+ }
896+ if ((sysfs_mbox->offset == sizeof(struct lpfc_sysfs_mbox_data)) &&
897+ (sysfs_mbox->mbox->mb.mbxCommand == MBX_READ_EVENT_LOG)) {
898+ sysfs_mbox->state = SMBOX_WRITING;
899+ spin_unlock_irq(&phba->hbalock);
900+
901+
902+ /* Allocate rcv buffer */
903+ sysfs_mbox->rcv_buff =
904+ kzalloc(sizeof(struct lpfc_dmabuf), GFP_KERNEL);
905+ if (!sysfs_mbox->rcv_buff) {
906+ spin_lock_irq(&phba->hbalock);
907+ sysfs_mbox_idle(phba, sysfs_mbox);
908+ spin_unlock_irq(&phba->hbalock);
909+ return -ENOMEM;
910+ }
911+ INIT_LIST_HEAD(&sysfs_mbox->rcv_buff->list);
912+ sysfs_mbox->rcv_buff->virt =
913+ lpfc_mbuf_alloc(phba, 0,
914+ &(sysfs_mbox->rcv_buff->phys));
915+ if (!sysfs_mbox->rcv_buff->virt) {
916+ spin_lock_irq(&phba->hbalock);
917+ sysfs_mbox_idle(phba, sysfs_mbox);
918+ spin_unlock_irq(&phba->hbalock);
919+ return -ENOMEM;
920+ }
921+ return count;
922+ }
923
924 spin_unlock_irq(&phba->hbalock);
925
926@@ -3095,6 +3805,42 @@ sysfs_mbox_read(struct kobject *kobj, st
927 struct lpfc_vport *vport = (struct lpfc_vport *) shost->hostdata;
928 struct lpfc_hba *phba = vport->phba;
929 int rc;
930+ int wait_4_menlo_maint = 0;
931+ struct lpfc_sysfs_mbox *sysfs_mbox;
932+ ssize_t ret;
933+ sysfs_mbox = lpfc_get_sysfs_mbox(phba, 0);
934+
935+ if (!sysfs_mbox)
936+ return -EPERM;
937+
938+ /*
939+ * If sysfs expects the buffer to be written and the
940+ * application does not know how to do it, fail the
941+ * mailbox command.
942+ */
943+ if ((sysfs_mbox->state == SMBOX_WRITING_BUFF) &&
944+ (sysfs_mbox->extoff == 0)) {
945+ spin_lock_irq(&phba->hbalock);
946+ sysfs_mbox_idle(phba, sysfs_mbox);
947+ spin_unlock_irq(&phba->hbalock);
948+ return -EINVAL;
949+ }
950+ if (sysfs_mbox->state == SMBOX_READING_BUFF) {
951+ ret = lpfc_syfs_mbox_copy_rcv_buff(phba, sysfs_mbox,
952+ buf, off, count);
953+ lpfc_printf_log(phba, KERN_INFO, LOG_INIT,
954+ "1245 mbox: cmd 0x%x, 0x%x ret %x\n",
955+ sysfs_mbox->mbox->mb.mbxCommand,
956+ sysfs_mbox->mbox->mb.un.varWords[0],
957+ (uint32_t)ret);
958+ return ret;
959+ }
960+
961+ if (sysfs_mbox->state == SMBOX_READING_MBEXT) {
962+ ret = lpfc_syfs_mbox_copy_extdata(phba, sysfs_mbox,
963+ buf, off, count);
964+ return ret;
965+ }
966
967 if (off > MAILBOX_CMD_SIZE)
968 return -ERANGE;
969@@ -3111,16 +3857,18 @@ sysfs_mbox_read(struct kobject *kobj, st
970 spin_lock_irq(&phba->hbalock);
971
972 if (phba->over_temp_state == HBA_OVER_TEMP) {
973- sysfs_mbox_idle(phba);
974+ sysfs_mbox_idle(phba, sysfs_mbox);
975 spin_unlock_irq(&phba->hbalock);
976 return -EACCES;
977 }
978
979 if (off == 0 &&
980- phba->sysfs_mbox.state == SMBOX_WRITING &&
981- phba->sysfs_mbox.offset >= 2 * sizeof(uint32_t)) {
982+ ((sysfs_mbox->state == SMBOX_WRITING) ||
983+ (sysfs_mbox->state == SMBOX_WRITING_MBEXT) ||
984+ (sysfs_mbox->state == SMBOX_WRITING_BUFF) ) &&
985+ sysfs_mbox->offset >= 2 * sizeof(uint32_t)) {
986
987- switch (phba->sysfs_mbox.mbox->mb.mbxCommand) {
988+ switch (sysfs_mbox->mbox->mb.mbxCommand) {
989 /* Offline only */
990 case MBX_INIT_LINK:
991 case MBX_DOWN_LINK:
992@@ -3133,12 +3881,11 @@ sysfs_mbox_read(struct kobject *kobj, st
993 case MBX_RUN_DIAGS:
994 case MBX_RESTART:
995 case MBX_SET_MASK:
996- case MBX_SET_DEBUG:
997 if (!(vport->fc_flag & FC_OFFLINE_MODE)) {
998 printk(KERN_WARNING "mbox_read:Command 0x%x "
999 "is illegal in on-line state\n",
1000- phba->sysfs_mbox.mbox->mb.mbxCommand);
1001- sysfs_mbox_idle(phba);
1002+ sysfs_mbox->mbox->mb.mbxCommand);
1003+ sysfs_mbox_idle(phba,sysfs_mbox);
1004 spin_unlock_irq(&phba->hbalock);
1005 return -EPERM;
1006 }
1007@@ -3160,11 +3907,63 @@ sysfs_mbox_read(struct kobject *kobj, st
1008 case MBX_LOAD_EXP_ROM:
1009 case MBX_BEACON:
1010 case MBX_DEL_LD_ENTRY:
1011- case MBX_SET_VARIABLE:
1012+ case MBX_SET_DEBUG:
1013 case MBX_WRITE_WWN:
1014+ case MBX_READ_EVENT_LOG_STATUS:
1015+ case MBX_WRITE_EVENT_LOG:
1016 case MBX_PORT_CAPABILITIES:
1017 case MBX_PORT_IOV_CONTROL:
1018 break;
1019+ case MBX_SET_VARIABLE:
1020+ lpfc_printf_log(phba, KERN_INFO, LOG_INIT,
1021+ "1226 mbox: set_variable 0x%x, 0x%x\n",
1022+ sysfs_mbox->mbox->mb.un.varWords[0],
1023+ sysfs_mbox->mbox->mb.un.varWords[1]);
1024+ if ((sysfs_mbox->mbox->mb.un.varWords[0] ==
1025+ SETVAR_MLOMNT) &&
1026+ (sysfs_mbox->mbox->mb.un.varWords[1] == 1)) {
1027+ wait_4_menlo_maint = 1;
1028+ phba->wait_4_mlo_maint_flg = 1;
1029+ }
1030+ break;
1031+ case MBX_RUN_BIU_DIAG64:
1032+ if (sysfs_mbox->mbox->mb.un.varBIUdiag.un.s2.
1033+ xmit_bde64.tus.f.bdeSize) {
1034+ sysfs_mbox->mbox->mb.un.varBIUdiag.un.s2.
1035+ xmit_bde64.addrHigh =
1036+ putPaddrHigh(sysfs_mbox->
1037+ txmit_buff->phys);
1038+ sysfs_mbox->mbox->mb.un.varBIUdiag.un.s2.
1039+ xmit_bde64.addrLow =
1040+ putPaddrLow(sysfs_mbox->
1041+ txmit_buff->phys);
1042+ }
1043+
1044+ if (sysfs_mbox->mbox->mb.un.varBIUdiag.un.s2.
1045+ rcv_bde64.tus.f.bdeSize) {
1046+ sysfs_mbox->mbox->mb.un.varBIUdiag.un.s2.
1047+ rcv_bde64.addrHigh =
1048+ putPaddrHigh(sysfs_mbox->
1049+ rcv_buff->phys);
1050+ sysfs_mbox->mbox->mb.un.varBIUdiag.un.s2.
1051+ rcv_bde64.addrLow =
1052+ putPaddrLow(sysfs_mbox->rcv_buff->phys);
1053+ }
1054+ break;
1055+ case MBX_READ_EVENT_LOG:
1056+
1057+ if (sysfs_mbox->mbox->mb.un.varRdEventLog.
1058+ rcv_bde64.tus.f.bdeSize) {
1059+ sysfs_mbox->mbox->mb.un.varRdEventLog.
1060+ rcv_bde64.addrHigh =
1061+ putPaddrHigh(sysfs_mbox->
1062+ rcv_buff->phys);
1063+ sysfs_mbox->mbox->mb.un.varRdEventLog.
1064+ rcv_bde64.addrLow =
1065+ putPaddrLow(sysfs_mbox->rcv_buff->phys);
1066+ }
1067+ break;
1068+
1069 case MBX_READ_SPARM64:
1070 case MBX_READ_LA:
1071 case MBX_READ_LA64:
1072@@ -3173,38 +3972,51 @@ sysfs_mbox_read(struct kobject *kobj, st
1073 case MBX_CONFIG_PORT:
1074 case MBX_RUN_BIU_DIAG:
1075 printk(KERN_WARNING "mbox_read: Illegal Command 0x%x\n",
1076- phba->sysfs_mbox.mbox->mb.mbxCommand);
1077- sysfs_mbox_idle(phba);
1078+ sysfs_mbox->mbox->mb.mbxCommand);
1079+ sysfs_mbox_idle(phba,sysfs_mbox);
1080 spin_unlock_irq(&phba->hbalock);
1081 return -EPERM;
1082 default:
1083 printk(KERN_WARNING "mbox_read: Unknown Command 0x%x\n",
1084- phba->sysfs_mbox.mbox->mb.mbxCommand);
1085- sysfs_mbox_idle(phba);
1086+ sysfs_mbox->mbox->mb.mbxCommand);
1087+ sysfs_mbox_idle(phba,sysfs_mbox);
1088 spin_unlock_irq(&phba->hbalock);
1089 return -EPERM;
1090 }
1091
1092+ if (sysfs_mbox->mbox_data.in_ext_wlen ||
1093+ sysfs_mbox->mbox_data.out_ext_wlen) {
1094+ sysfs_mbox->mbox->context2 = sysfs_mbox->mbext;
1095+ sysfs_mbox->mbox->in_ext_byte_len =
1096+ sysfs_mbox->mbox_data.in_ext_wlen *
1097+ sizeof(uint32_t);
1098+ sysfs_mbox->mbox->out_ext_byte_len =
1099+ sysfs_mbox->mbox_data.out_ext_wlen *
1100+ sizeof(uint32_t);
1101+ sysfs_mbox->mbox->mbox_offset_word =
1102+ sysfs_mbox->mbox_data.mboffset;
1103+ }
1104+
1105 /* If HBA encountered an error attention, allow only DUMP
1106 * or RESTART mailbox commands until the HBA is restarted.
1107 */
1108 if (phba->pport->stopped &&
1109- phba->sysfs_mbox.mbox->mb.mbxCommand != MBX_DUMP_MEMORY &&
1110- phba->sysfs_mbox.mbox->mb.mbxCommand != MBX_RESTART &&
1111- phba->sysfs_mbox.mbox->mb.mbxCommand != MBX_WRITE_VPARMS &&
1112- phba->sysfs_mbox.mbox->mb.mbxCommand != MBX_WRITE_WWN)
1113+ sysfs_mbox->mbox->mb.mbxCommand != MBX_DUMP_MEMORY &&
1114+ sysfs_mbox->mbox->mb.mbxCommand != MBX_RESTART &&
1115+ sysfs_mbox->mbox->mb.mbxCommand != MBX_WRITE_VPARMS &&
1116+ sysfs_mbox->mbox->mb.mbxCommand != MBX_WRITE_WWN)
1117 lpfc_printf_log(phba, KERN_WARNING, LOG_MBOX,
1118 "1259 mbox: Issued mailbox cmd "
1119 "0x%x while in stopped state.\n",
1120- phba->sysfs_mbox.mbox->mb.mbxCommand);
1121+ sysfs_mbox->mbox->mb.mbxCommand);
1122
1123- phba->sysfs_mbox.mbox->vport = vport;
1124+ sysfs_mbox->mbox->vport = vport;
1125
1126 /* Don't allow mailbox commands to be sent when blocked
1127 * or when in the middle of discovery
1128 */
1129 if (phba->sli.sli_flag & LPFC_BLOCK_MGMT_IO) {
1130- sysfs_mbox_idle(phba);
1131+ sysfs_mbox_idle(phba,sysfs_mbox);
1132 spin_unlock_irq(&phba->hbalock);
1133 return -EAGAIN;
1134 }
1135@@ -3214,43 +4026,86 @@ sysfs_mbox_read(struct kobject *kobj, st
1136
1137 spin_unlock_irq(&phba->hbalock);
1138 rc = lpfc_sli_issue_mbox (phba,
1139- phba->sysfs_mbox.mbox,
1140+ sysfs_mbox->mbox,
1141 MBX_POLL);
1142 spin_lock_irq(&phba->hbalock);
1143
1144 } else {
1145 spin_unlock_irq(&phba->hbalock);
1146 rc = lpfc_sli_issue_mbox_wait (phba,
1147- phba->sysfs_mbox.mbox,
1148+ sysfs_mbox->mbox,
1149 lpfc_mbox_tmo_val(phba,
1150- phba->sysfs_mbox.mbox->mb.mbxCommand) * HZ);
1151+ sysfs_mbox->mbox->mb.mbxCommand) * HZ);
1152 spin_lock_irq(&phba->hbalock);
1153 }
1154
1155 if (rc != MBX_SUCCESS) {
1156 if (rc == MBX_TIMEOUT) {
1157- phba->sysfs_mbox.mbox = NULL;
1158+ sysfs_mbox->mbox = NULL;
1159 }
1160- sysfs_mbox_idle(phba);
1161+ sysfs_mbox_idle(phba,sysfs_mbox);
1162 spin_unlock_irq(&phba->hbalock);
1163 return (rc == MBX_TIMEOUT) ? -ETIME : -ENODEV;
1164 }
1165- phba->sysfs_mbox.state = SMBOX_READING;
1166+ if (wait_4_menlo_maint) {
1167+ lpfc_printf_log(phba, KERN_WARNING, LOG_LIBDFC,
1168+ "1229 waiting for menlo mnt\n");
1169+ spin_unlock_irq(&phba->hbalock);
1170+ if (phba->wait_4_mlo_maint_flg)
1171+ wait_event_interruptible_timeout(
1172+ phba->wait_4_mlo_m_q,
1173+ phba->wait_4_mlo_maint_flg == 0,
1174+ 60 * HZ);
1175+ spin_lock_irq(&phba->hbalock);
1176+ if (phba->wait_4_mlo_maint_flg) {
1177+ sysfs_mbox_idle(phba,sysfs_mbox);
1178+ phba->wait_4_mlo_maint_flg = 0;
1179+ spin_unlock_irq(&phba->hbalock);
1180+ return -EINTR;
1181+ } else
1182+ spin_unlock_irq(&phba->hbalock);
1183+
1184+ spin_lock_irq(&phba->hbalock);
1185+ if (phba->wait_4_mlo_maint_flg != 0) {
1186+ sysfs_mbox_idle(phba,sysfs_mbox);
1187+ phba->wait_4_mlo_maint_flg = 0;
1188+ spin_unlock_irq(&phba->hbalock);
1189+ return -ETIME;
1190+ }
1191+
1192+ }
1193+ sysfs_mbox->state = SMBOX_READING;
1194 }
1195- else if (phba->sysfs_mbox.offset != off ||
1196- phba->sysfs_mbox.state != SMBOX_READING) {
1197- printk(KERN_WARNING "mbox_read: Bad State\n");
1198- sysfs_mbox_idle(phba);
1199+ else if (sysfs_mbox->offset != off ||
1200+ sysfs_mbox->state != SMBOX_READING) {
1201+ sysfs_mbox_idle(phba,sysfs_mbox);
1202 spin_unlock_irq(&phba->hbalock);
1203 return -EAGAIN;
1204 }
1205
1206- memcpy(buf, (uint8_t *) & phba->sysfs_mbox.mbox->mb + off, count);
1207+ memcpy(buf, (uint8_t *) & sysfs_mbox->mbox->mb + off, count);
1208+
1209+ sysfs_mbox->offset = off + count;
1210
1211- phba->sysfs_mbox.offset = off + count;
1212+ if ((sysfs_mbox->offset == MAILBOX_CMD_SIZE) &&
1213+ ((sysfs_mbox->mbox->mb.mbxCommand == MBX_RUN_BIU_DIAG64) ||
1214+ (sysfs_mbox->mbox->mb.mbxCommand == MBX_READ_EVENT_LOG))) {
1215+ sysfs_mbox->state = SMBOX_READING_BUFF;
1216+ sysfs_mbox->extoff = 0;
1217+ spin_unlock_irq(&phba->hbalock);
1218+ return count;
1219+ }
1220+
1221+ if ((sysfs_mbox->offset == MAILBOX_CMD_SIZE) &&
1222+ sysfs_mbox->mbox_data.out_ext_wlen) {
1223+ sysfs_mbox->state = SMBOX_READING_MBEXT;
1224+ sysfs_mbox->extoff = 0;
1225+ spin_unlock_irq(&phba->hbalock);
1226+ return count;
1227+ }
1228
1229- if (phba->sysfs_mbox.offset == MAILBOX_CMD_SIZE)
1230- sysfs_mbox_idle(phba);
1231+ if (sysfs_mbox->offset == MAILBOX_CMD_SIZE)
1232+ sysfs_mbox_idle(phba,sysfs_mbox);
1233
1234 spin_unlock_irq(&phba->hbalock);
1235
1236@@ -3262,7 +4117,7 @@ static struct bin_attribute sysfs_mbox_a
1237 .name = "mbox",
1238 .mode = S_IRUSR | S_IWUSR,
1239 },
1240- .size = MAILBOX_CMD_SIZE,
1241+ .size = MAILBOX_MAX_XMIT_SIZE,
1242 .read = sysfs_mbox_read,
1243 .write = sysfs_mbox_write,
1244 };
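/*
 * Aside (not part of the patch): the "mbox" bin attribute keeps its
 * write-then-read protocol, but requests are now tracked per calling pid
 * via lpfc_get_sysfs_mbox(), and a struct lpfc_sysfs_mbox_data image
 * (MAILBOX_t plus the new extension word counts) is written instead of a
 * bare MAILBOX_t.  Reading back issues the command and returns the
 * completed mailbox; further reads return any receive buffer or mailbox
 * extension data.  A rough sketch of the sequencing only; the sysfs path
 * and buffer sizes below are placeholders, not a documented ABI:
 */
#include <fcntl.h>
#include <stdio.h>
#include <unistd.h>

int main(void)
{
	unsigned char cmd[256] = { 0 };	/* lpfc_sysfs_mbox_data image */
	unsigned char rsp[128];		/* completed MAILBOX_t */
	int fd = open("/sys/class/scsi_host/host0/mbox", O_RDWR);

	if (fd < 0)
		return 1;
	/* step 1: hand the command image to the driver at offset 0 */
	if (pwrite(fd, cmd, sizeof(cmd), 0) < 0)
		perror("mbox write");
	/* step 2: reading back runs the mailbox and fetches the result */
	if (pread(fd, rsp, sizeof(rsp), 0) < 0)
		perror("mbox read");
	return close(fd);
}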
1245@@ -3317,6 +4172,7 @@ lpfc_free_sysfs_attr(struct lpfc_vport *
1246 &sysfs_drvr_stat_data_attr);
1247 sysfs_remove_bin_file(&shost->shost_dev.kobj, &sysfs_mbox_attr);
1248 sysfs_remove_bin_file(&shost->shost_dev.kobj, &sysfs_ctlreg_attr);
1249+ sysfs_remove_bin_file(&shost->shost_dev.kobj, &sysfs_menlo_attr);
1250 }
1251
1252
1253@@ -3935,7 +4791,6 @@ lpfc_get_cfgparam(struct lpfc_hba *phba)
1254 phba->cfg_soft_wwpn = 0L;
1255 lpfc_sg_seg_cnt_init(phba, lpfc_sg_seg_cnt);
1256 /* Also reinitialize the host templates with new values. */
1257- lpfc_vport_template.sg_tablesize = phba->cfg_sg_seg_cnt;
1258 lpfc_template.sg_tablesize = phba->cfg_sg_seg_cnt;
1259 /*
1260 * Since the sg_tablesize is module parameter, the sg_dma_buf_size
1261@@ -3970,5 +4825,6 @@ lpfc_get_vport_cfgparam(struct lpfc_vpor
1262 lpfc_max_luns_init(vport, lpfc_max_luns);
1263 lpfc_scan_down_init(vport, lpfc_scan_down);
1264 lpfc_enable_da_id_init(vport, lpfc_enable_da_id);
1265+ lpfc_enable_auth_init(vport, lpfc_enable_auth);
1266 return;
1267 }
1268--- /dev/null
1269+++ b/drivers/scsi/lpfc/lpfc_auth_access.c
1270@@ -0,0 +1,598 @@
1271+/*******************************************************************
1272+ * This file is part of the Emulex Linux Device Driver for *
1273+ * Fibre Channel Host Bus Adapters. *
1274+ * Copyright (C) 2006-2007 Emulex. All rights reserved. *
1275+ * EMULEX and SLI are trademarks of Emulex. *
1276+ * www.emulex.com *
1277+ * *
1278+ * This program is free software; you can redistribute it and/or *
1279+ * modify it under the terms of version 2 of the GNU General *
1280+ * Public License as published by the Free Software Foundation. *
1281+ * This program is distributed in the hope that it will be useful. *
1282+ * ALL EXPRESS OR IMPLIED CONDITIONS, REPRESENTATIONS AND *
1283+ * WARRANTIES, INCLUDING ANY IMPLIED WARRANTY OF MERCHANTABILITY, *
1284+ * FITNESS FOR A PARTICULAR PURPOSE, OR NON-INFRINGEMENT, ARE *
1285+ * DISCLAIMED, EXCEPT TO THE EXTENT THAT SUCH DISCLAIMERS ARE HELD *
1286+ * TO BE LEGALLY INVALID. See the GNU General Public License for *
1287+ * more details, a copy of which can be found in the file COPYING *
1288+ * included with this package. *
1289+ *******************************************************************/
1290+#include <linux/blkdev.h>
1291+#include <linux/pci.h>
1292+#include <linux/kthread.h>
1293+#include <linux/interrupt.h>
1294+
1295+#include <linux/module.h>
1296+#include <linux/init.h>
1297+#include <linux/sched.h> /* workqueue stuff, HZ */
1298+#include <scsi/scsi_device.h>
1299+#include <scsi/scsi_host.h>
1300+#include <scsi/scsi_transport.h>
1301+#include <scsi/scsi_transport_fc.h>
1302+#include <scsi/scsi_cmnd.h>
1303+#include <linux/time.h>
1304+#include <linux/jiffies.h>
1305+#include <linux/security.h>
1306+#include <net/sock.h>
1307+#include <net/netlink.h>
1308+
1309+#include <scsi/scsi.h>
1310+
1311+#include "lpfc_hw.h"
1312+#include "lpfc_sli.h"
1313+#include "lpfc_nl.h"
1314+#include "lpfc_disc.h"
1315+#include "lpfc_scsi.h"
1316+#include "lpfc.h"
1317+#include "lpfc_logmsg.h"
1318+#include "lpfc_crtn.h"
1319+#include "lpfc_vport.h"
1320+#include "lpfc_auth_access.h"
1321+
1322+/* fc security */
1323+struct workqueue_struct *security_work_q = NULL;
1324+struct list_head fc_security_user_list;
1325+int fc_service_state = FC_SC_SERVICESTATE_UNKNOWN;
1326+static int fc_service_pid;
1327+DEFINE_SPINLOCK(fc_security_user_lock);
1328+
1329+static inline struct lpfc_vport *
1330+lpfc_fc_find_vport(unsigned long host_no)
1331+{
1332+ struct lpfc_vport *vport;
1333+ struct Scsi_Host *shost;
1334+
1335+ list_for_each_entry(vport, &fc_security_user_list, sc_users) {
1336+ shost = lpfc_shost_from_vport(vport);
1337+ if (shost && (shost->host_no == host_no))
1338+ return vport;
1339+ }
1340+
1341+ return NULL;
1342+}
1343+
1344+
1345+/**
1346+ * lpfc_fc_sc_add_timer
1347+ *
1348+ *
1349+ **/
1350+
1351+void
1352+lpfc_fc_sc_add_timer(struct fc_security_request *req, int timeout,
1353+ void (*complete)(struct fc_security_request *))
1354+{
1355+
1356+ init_timer(&req->timer);
1357+
1358+
1359+ req->timer.data = (unsigned long)req;
1360+ req->timer.expires = jiffies + timeout;
1361+ req->timer.function = (void (*)(unsigned long)) complete;
1362+
1363+ add_timer(&req->timer);
1364+}
1365+/**
1366+ * lpfc_fc_sc_req_times_out
1367+ *
1368+ *
1369+ **/
1370+
1371+void
1372+lpfc_fc_sc_req_times_out(struct fc_security_request *req)
1373+{
1374+
1375+ unsigned long flags;
1376+ int found = 0;
1377+ struct fc_security_request *fc_sc_req;
1378+ struct lpfc_vport *vport = req->vport;
1379+ struct Scsi_Host *shost = lpfc_shost_from_vport(vport);
1380+
1381+ if (!req)
1382+ return;
1383+
1384+ spin_lock_irqsave(shost->host_lock, flags);
1385+
1386+ /* To avoid a completion race check to see if request is on the list */
1387+
1388+ list_for_each_entry(fc_sc_req, &vport->sc_response_wait_queue, rlist)
1389+ if (fc_sc_req == req) {
1390+ found = 1;
1391+ break;
1392+ }
1393+
1394+ if (!found) {
1395+ spin_unlock_irqrestore(shost->host_lock, flags);
1396+ return;
1397+ }
1398+
1399+ list_del(&fc_sc_req->rlist);
1400+
1401+ spin_unlock_irqrestore(shost->host_lock, flags);
1402+
1403+ lpfc_printf_vlog(vport, KERN_WARNING, LOG_SECURITY,
1404+ "1019 Request tranid %d timed out\n",
1405+ fc_sc_req->tran_id);
1406+
1407+ switch (fc_sc_req->req_type) {
1408+
1409+ case FC_NL_SC_GET_CONFIG_REQ:
1410+ lpfc_security_config(shost, -ETIMEDOUT,
1411+ fc_sc_req->data);
1412+ break;
1413+
1414+ case FC_NL_SC_DHCHAP_MAKE_CHALLENGE_REQ:
1415+ lpfc_dhchap_make_challenge(shost, -ETIMEDOUT,
1416+ fc_sc_req->data, 0);
1417+ break;
1418+
1419+ case FC_NL_SC_DHCHAP_MAKE_RESPONSE_REQ:
1420+ lpfc_dhchap_make_response(shost, -ETIMEDOUT,
1421+ fc_sc_req->data, 0);
1422+ break;
1423+
1424+ case FC_NL_SC_DHCHAP_AUTHENTICATE_REQ:
1425+ lpfc_dhchap_authenticate(shost, -ETIMEDOUT, fc_sc_req->data, 0);
1426+ break;
1427+ }
1428+
1429+ kfree(fc_sc_req);
1430+
1431+}
1432+
1433+
1434+static inline struct fc_security_request *
1435+lpfc_fc_find_sc_request(u32 tran_id, u32 type, struct lpfc_vport *vport)
1436+{
1437+ struct fc_security_request *fc_sc_req;
1438+
1439+ list_for_each_entry(fc_sc_req, &vport->sc_response_wait_queue, rlist)
1440+ if (fc_sc_req->tran_id == tran_id &&
1441+ fc_sc_req->req_type == type)
1442+ return fc_sc_req;
1443+ return NULL;
1444+}
1445+
1446+
1447+
1448+/**
1449+ * lpfc_fc_sc_request
1450+ *
1451+ *
1452+ **/
1453+
1454+int
1455+lpfc_fc_sc_request(struct lpfc_vport *vport,
1456+ u32 msg_type,
1457+ struct fc_auth_req *auth_req,
1458+ u32 auth_req_len, /* includes length of struct fc_auth_req */
1459+ struct fc_auth_rsp *auth_rsp,
1460+ u32 auth_rsp_len) /* includes length of struct fc_auth_rsp */
1461+{
1462+ struct Scsi_Host *shost = lpfc_shost_from_vport(vport);
1463+ struct fc_security_request *fc_sc_req;
1464+ struct fc_nl_sc_message *fc_nl_sc_msg;
1465+ unsigned long flags;
1466+ u32 len;
1467+ u32 seq = ++vport->sc_tran_id;
1468+
1469+ if (fc_service_state != FC_SC_SERVICESTATE_ONLINE)
1470+ return -EINVAL;
1471+
1472+ if (vport->port_state == FC_PORTSTATE_DELETED)
1473+ return -EINVAL;
1474+
1475+ fc_sc_req = kzalloc(sizeof(struct fc_security_request), GFP_KERNEL);
1476+ if (!fc_sc_req)
1477+ return -ENOMEM;
1478+
1479+ fc_sc_req->req_type = msg_type;
1480+ fc_sc_req->data = auth_rsp;
1481+ fc_sc_req->data_len = auth_rsp_len;
1482+ fc_sc_req->vport = vport;
1483+ fc_sc_req->tran_id = seq;
1484+
1485+ len = sizeof(struct fc_nl_sc_message) + auth_req_len;
1486+ fc_nl_sc_msg = kzalloc(sizeof(struct fc_nl_sc_message) + auth_req_len,
1487+ GFP_KERNEL);
1488+ if (!fc_nl_sc_msg)
1489+ return -ENOMEM;
1490+ fc_nl_sc_msg->msgtype = msg_type;
1491+ fc_nl_sc_msg->data_len = auth_req_len;
1492+ memcpy(fc_nl_sc_msg->data, auth_req, auth_req_len);
1493+ fc_nl_sc_msg->tran_id = seq;
1494+
1495+ spin_lock_irqsave(shost->host_lock, flags);
1496+ list_add_tail(&fc_sc_req->rlist, &vport->sc_response_wait_queue);
1497+ spin_unlock_irqrestore(shost->host_lock, flags);
1498+ scsi_nl_send_vendor_msg(fc_service_pid, shost->host_no,
1499+ (SCSI_NL_VID_TYPE_PCI | PCI_VENDOR_ID_EMULEX),
1500+ (char *) fc_nl_sc_msg, len);
1501+ lpfc_fc_sc_add_timer(fc_sc_req, FC_SC_REQ_TIMEOUT,
1502+ lpfc_fc_sc_req_times_out);
1503+ return 0;
1504+}
1505+
1506+/**
1507+ * lpfc_fc_security_get_config
1508+ *
1509+ *
1510+ **/
1511+
1512+int
1513+lpfc_fc_security_get_config(struct Scsi_Host *shost,
1514+ struct fc_auth_req *auth_req,
1515+ u32 auth_req_len,
1516+ struct fc_auth_rsp *auth_rsp,
1517+ u32 auth_rsp_len)
1518+{
1519+
1520+ return(lpfc_fc_sc_request((struct lpfc_vport *) shost->hostdata,
1521+ FC_NL_SC_GET_CONFIG_REQ, auth_req,
1522+ auth_req_len, auth_rsp, auth_rsp_len));
1523+
1524+}
1525+EXPORT_SYMBOL(lpfc_fc_security_get_config);
1526+
1527+/**
1528+ * lpfc_fc_security_dhchap_make_challenge
1529+ *
1530+ *
1531+ **/
1532+
1533+int
1534+lpfc_fc_security_dhchap_make_challenge(struct Scsi_Host *shost,
1535+ struct fc_auth_req *auth_req,
1536+ u32 auth_req_len,
1537+ struct fc_auth_rsp *auth_rsp,
1538+ u32 auth_rsp_len)
1539+{
1540+
1541+ return(lpfc_fc_sc_request((struct lpfc_vport *) shost->hostdata,
1542+ FC_NL_SC_DHCHAP_MAKE_CHALLENGE_REQ,
1543+ auth_req, auth_req_len, auth_rsp, auth_rsp_len));
1544+
1545+}
1546+EXPORT_SYMBOL(lpfc_fc_security_dhchap_make_challenge);
1547+
1548+/**
1549+ * lpfc_fc_security_dhchap_make_response
1550+ *
1551+ *
1552+ **/
1553+
1554+int
1555+lpfc_fc_security_dhchap_make_response(struct Scsi_Host *shost,
1556+ struct fc_auth_req *auth_req,
1557+ u32 auth_req_len,
1558+ struct fc_auth_rsp *auth_rsp,
1559+ u32 auth_rsp_len)
1560+{
1561+
1562+ return(lpfc_fc_sc_request((struct lpfc_vport *) shost->hostdata,
1563+ FC_NL_SC_DHCHAP_MAKE_RESPONSE_REQ,
1564+ auth_req, auth_req_len, auth_rsp, auth_rsp_len));
1565+
1566+}
1567+EXPORT_SYMBOL(lpfc_fc_security_dhchap_make_response);
1568+
1569+
1570+/**
1571+ * lpfc_fc_security_dhchap_authenticate
1572+ *
1573+ *
1574+ **/
1575+
1576+int
1577+lpfc_fc_security_dhchap_authenticate(struct Scsi_Host *shost,
1578+ struct fc_auth_req *auth_req,
1579+ u32 auth_req_len,
1580+ struct fc_auth_rsp *auth_rsp,
1581+ u32 auth_rsp_len)
1582+{
1583+
1584+ return(lpfc_fc_sc_request((struct lpfc_vport *) shost->hostdata,
1585+ FC_NL_SC_DHCHAP_AUTHENTICATE_REQ,
1586+ auth_req, auth_req_len, auth_rsp, auth_rsp_len));
1587+
1588+}
1589+EXPORT_SYMBOL(lpfc_fc_security_dhchap_authenticate);
1590+
1591+/**
1592+ * lpfc_fc_queue_security_work - Queue work to the fc_host security workqueue.
1593+ * @shost: Pointer to Scsi_Host bound to fc_host.
1594+ * @work: Work to queue for execution.
1595+ *
1596+ * Return value:
1597+ * 1 - work queued for execution
1598+ * 0 - work is already queued
1599+ * -EINVAL - work queue doesn't exist
1600+ **/
1601+int
1602+lpfc_fc_queue_security_work(struct lpfc_vport *vport, struct work_struct *work)
1603+{
1604+ if (unlikely(!security_work_q)) {
1605+ lpfc_printf_vlog(vport, KERN_ERR, LOG_SECURITY,
1606+ "1021 ERROR: attempted to queue security work, "
1607+ "when no workqueue created.\n");
1608+ dump_stack();
1609+
1610+ return -EINVAL;
1611+ }
1612+
1613+ return queue_work(security_work_q, work);
1614+
1615+}
1616+
1617+
1618+
1619+ /**
1620+ * lpfc_fc_sc_schedule_notify_all
1621+ *
1622+ *
1623+ **/
1624+
1625+void
1626+lpfc_fc_sc_schedule_notify_all(int message)
1627+{
1628+ struct lpfc_vport *vport;
1629+ unsigned long flags;
1630+
1631+ spin_lock_irqsave(&fc_security_user_lock, flags);
1632+
1633+ list_for_each_entry(vport, &fc_security_user_list, sc_users) {
1634+
1635+ switch (message) {
1636+
1637+ case FC_NL_SC_REG:
1638+ lpfc_fc_queue_security_work(vport,
1639+ &vport->sc_online_work);
1640+ break;
1641+
1642+ case FC_NL_SC_DEREG:
1643+ lpfc_fc_queue_security_work(vport,
1644+ &vport->sc_offline_work);
1645+ break;
1646+ }
1647+ }
1648+
1649+ spin_unlock_irqrestore(&fc_security_user_lock, flags);
1650+}
1651+
1652+
1653+
1654+/**
1655+ * lpfc_fc_sc_security_online
1656+ *
1657+ *
1658+ **/
1659+
1660+void
1661+lpfc_fc_sc_security_online(struct work_struct *work)
1662+{
1663+ struct lpfc_vport *vport = container_of(work, struct lpfc_vport,
1664+ sc_online_work);
1665+ lpfc_security_service_online(lpfc_shost_from_vport(vport));
1666+ return;
1667+}
1668+
1669+/**
1670+ * lpfc_fc_sc_security_offline
1671+ *
1672+ *
1673+ **/
1674+void
1675+lpfc_fc_sc_security_offline(struct work_struct *work)
1676+{
1677+ struct lpfc_vport *vport = container_of(work, struct lpfc_vport,
1678+ sc_offline_work);
1679+ lpfc_security_service_offline(lpfc_shost_from_vport(vport));
1680+ return;
1681+}
1682+
1683+
1684+/**
1685+ * lpfc_fc_sc_process_msg
1686+ *
1687+ *
1688+ **/
1689+static void
1690+lpfc_fc_sc_process_msg(struct work_struct *work)
1691+{
1692+ struct fc_sc_msg_work_q_wrapper *wqw =
1693+ container_of(work, struct fc_sc_msg_work_q_wrapper, work);
1694+
1695+ switch (wqw->msgtype) {
1696+
1697+ case FC_NL_SC_GET_CONFIG_RSP:
1698+ lpfc_security_config(lpfc_shost_from_vport(wqw->fc_sc_req->
1699+ vport), wqw->status,
1700+ wqw->fc_sc_req->data);
1701+ break;
1702+
1703+ case FC_NL_SC_DHCHAP_MAKE_CHALLENGE_RSP:
1704+ lpfc_dhchap_make_challenge(lpfc_shost_from_vport(wqw->
1705+ fc_sc_req->vport), wqw->status,
1706+ wqw->fc_sc_req->data, wqw->data_len);
1707+ break;
1708+
1709+ case FC_NL_SC_DHCHAP_MAKE_RESPONSE_RSP:
1710+ lpfc_dhchap_make_response(lpfc_shost_from_vport(wqw->
1711+ fc_sc_req->vport), wqw->status,
1712+ wqw->fc_sc_req->data, wqw->data_len);
1713+ break;
1714+
1715+ case FC_NL_SC_DHCHAP_AUTHENTICATE_RSP:
1716+ lpfc_dhchap_authenticate(lpfc_shost_from_vport(wqw->fc_sc_req->
1717+ vport),
1718+ wqw->status,
1719+ wqw->fc_sc_req->data, wqw->data_len);
1720+ break;
1721+ }
1722+
1723+ kfree(wqw->fc_sc_req);
1724+ kfree(wqw);
1725+
1726+ return;
1727+}
1728+
1729+
1730+/**
1731+ * lpfc_fc_sc_schedule_msg
1732+ *
1733+ *
1734+ **/
1735+
1736+int
1737+lpfc_fc_sc_schedule_msg(struct Scsi_Host *shost,
1738+ struct fc_nl_sc_message *fc_nl_sc_msg, int rcvlen)
1739+{
1740+ struct fc_security_request *fc_sc_req;
1741+ u32 req_type;
1742+ struct lpfc_vport *vport = (struct lpfc_vport *) shost->hostdata;
1743+ int err = 0;
1744+ struct fc_sc_msg_work_q_wrapper *wqw;
1745+ unsigned long flags;
1746+
1747+ if (vport->port_state == FC_PORTSTATE_DELETED) {
1748+ printk(KERN_WARNING
1749+ "%s: Host being deleted.\n", __func__);
1750+ return -EBADR;
1751+ }
1752+
1753+ wqw = kzalloc(sizeof(struct fc_sc_msg_work_q_wrapper), GFP_KERNEL);
1754+
1755+ if (!wqw)
1756+ return -ENOMEM;
1757+
1758+ switch (fc_nl_sc_msg->msgtype) {
1759+ case FC_NL_SC_GET_CONFIG_RSP:
1760+ req_type = FC_NL_SC_GET_CONFIG_REQ;
1761+ break;
1762+
1763+ case FC_NL_SC_DHCHAP_MAKE_CHALLENGE_RSP:
1764+ req_type = FC_NL_SC_DHCHAP_MAKE_CHALLENGE_REQ;
1765+ break;
1766+
1767+ case FC_NL_SC_DHCHAP_MAKE_RESPONSE_RSP:
1768+ req_type = FC_NL_SC_DHCHAP_MAKE_RESPONSE_REQ;
1769+ break;
1770+
1771+ case FC_NL_SC_DHCHAP_AUTHENTICATE_RSP:
1772+ req_type = FC_NL_SC_DHCHAP_AUTHENTICATE_REQ;
1773+ break;
1774+
1775+ default:
1776+ kfree(wqw);
1777+ return -EINVAL;
1778+ }
1779+
1780+ spin_lock_irqsave(shost->host_lock, flags);
1781+
1782+ fc_sc_req = lpfc_fc_find_sc_request(fc_nl_sc_msg->tran_id,
1783+ req_type, vport);
1784+
1785+ if (!fc_sc_req) {
1786+ spin_unlock_irqrestore(shost->host_lock, flags);
1787+ lpfc_printf_vlog(vport, KERN_WARNING, LOG_SECURITY,
1788+ "1022 Security request does not exist.\n");
1789+ kfree(wqw);
1790+ return -EBADR;
1791+ }
1792+
1793+ list_del(&fc_sc_req->rlist);
1794+
1795+ spin_unlock_irqrestore(shost->host_lock, flags);
1796+
1797+ del_singleshot_timer_sync(&fc_sc_req->timer);
1798+
1799+ wqw->status = 0;
1800+ wqw->fc_sc_req = fc_sc_req;
1801+ wqw->data_len = rcvlen;
1802+ wqw->msgtype = fc_nl_sc_msg->msgtype;
1803+
1804+ if (!fc_sc_req->data ||
1805+ (fc_sc_req->data_len < fc_nl_sc_msg->data_len)) {
1806+ wqw->status = -ENOBUFS;
1807+ wqw->data_len = 0;
1808+ lpfc_printf_vlog(vport, KERN_WARNING, LOG_SECURITY,
1809+ "1023 Warning - data may have been truncated. "
1810+ "data:%p reqdl:%x mesdl:%x\n",
1811+ fc_sc_req->data,
1812+ fc_sc_req->data_len, fc_nl_sc_msg->data_len);
1813+ } else {
1814+ memcpy(fc_sc_req->data, fc_nl_sc_msg->data,
1815+ fc_nl_sc_msg->data_len);
1816+ }
1817+
1818+ INIT_WORK(&wqw->work, lpfc_fc_sc_process_msg);
1819+ lpfc_fc_queue_security_work(vport, &wqw->work);
1820+
1821+ return err;
1822+}
1823+
1824+int
1825+lpfc_rcv_nl_msg(struct Scsi_Host *shost, void *payload,
1826+ uint32_t len, uint32_t pid)
1827+{
1828+ struct fc_nl_sc_message *msg = (struct fc_nl_sc_message *)payload;
1829+ int err = 0;
1830+
1831+ switch (msg->msgtype) {
1832+ case FC_NL_SC_REG:
1833+ fc_service_pid = pid;
1834+ fc_service_state = FC_SC_SERVICESTATE_ONLINE;
1835+ lpfc_fc_sc_schedule_notify_all(FC_NL_SC_REG);
1836+ break;
1837+ case FC_NL_SC_DEREG:
1838+ fc_service_pid = pid;
1839+ fc_service_state = FC_SC_SERVICESTATE_OFFLINE;
1840+ lpfc_fc_sc_schedule_notify_all(FC_NL_SC_DEREG);
1841+ break;
1842+ case FC_NL_SC_GET_CONFIG_RSP:
1843+ case FC_NL_SC_DHCHAP_MAKE_CHALLENGE_RSP:
1844+ case FC_NL_SC_DHCHAP_MAKE_RESPONSE_RSP:
1845+ case FC_NL_SC_DHCHAP_AUTHENTICATE_RSP:
1846+ err = lpfc_fc_sc_schedule_msg(shost, msg, len);
1847+ break;
1848+ default:
1849+ printk(KERN_WARNING "%s: unknown msg type 0x%x len %d\n",
1850+ __func__, msg->msgtype, len);
1851+ break;
1852+ }
1853+ return err;
1854+}
1855+
1856+void
1857+lpfc_rcv_nl_event(struct notifier_block *this,
1858+ unsigned long event,
1859+ void *ptr)
1860+{
1861+ struct netlink_notify *n = ptr;
1862+ if ((event == NETLINK_URELEASE) &&
1863+ (n->protocol == NETLINK_SCSITRANSPORT) && (n->pid)) {
1864+ printk(KERN_WARNING "Warning - Security Service Offline\n");
1865+ fc_service_state = FC_SC_SERVICESTATE_OFFLINE;
1866+ lpfc_fc_sc_schedule_notify_all(FC_NL_SC_DEREG);
1867+ }
1868+}
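lpfc_rcv_nl_event() is the driver's reaction to NETLINK_URELEASE, i.e. the user-space security service closing its netlink socket. A hedged sketch of how such a handler is typically wired up: a struct notifier_block whose .notifier_call wraps the handler and returns NOTIFY_DONE, registered with netlink_register_notifier() at module load. The wrapper and registration below are illustrative and not lifted from this patch:

#include <linux/init.h>
#include <linux/netlink.h>
#include <linux/notifier.h>

static int example_nl_event(struct notifier_block *nb,
			    unsigned long event, void *ptr)
{
	lpfc_rcv_nl_event(nb, event, ptr);	/* handler shown above */
	return NOTIFY_DONE;
}

static struct notifier_block example_nl_notifier = {
	.notifier_call = example_nl_event,
};

static int __init example_register(void)
{
	/* deliver NETLINK_URELEASE (among other netlink events) here */
	return netlink_register_notifier(&example_nl_notifier);
}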
1869--- /dev/null
1870+++ b/drivers/scsi/lpfc/lpfc_auth_access.h
1871@@ -0,0 +1,245 @@
1872+/*******************************************************************
1873+ * This file is part of the Emulex Linux Device Driver for *
1874+ * Fibre Channel Host Bus Adapters. *
1875+ * Copyright (C) 2006-2007 Emulex. All rights reserved. *
1876+ * EMULEX and SLI are trademarks of Emulex. *
1877+ * www.emulex.com *
1878+ * *
1879+ * This program is free software; you can redistribute it and/or *
1880+ * modify it under the terms of version 2 of the GNU General *
1881+ * Public License as published by the Free Software Foundation. *
1882+ * This program is distributed in the hope that it will be useful. *
1883+ * ALL EXPRESS OR IMPLIED CONDITIONS, REPRESENTATIONS AND *
1884+ * WARRANTIES, INCLUDING ANY IMPLIED WARRANTY OF MERCHANTABILITY, *
1885+ * FITNESS FOR A PARTICULAR PURPOSE, OR NON-INFRINGEMENT, ARE *
1886+ * DISCLAIMED, EXCEPT TO THE EXTENT THAT SUCH DISCLAIMERS ARE HELD *
1887+ * TO BE LEGALLY INVALID. See the GNU General Public License for *
1888+ * more details, a copy of which can be found in the file COPYING *
1889+ * included with this package. *
1890+ *******************************************************************/
1891+
1892+#define to_fc_internal(tmpl) container_of(tmpl, struct fc_internal, t)
1893+
1894+/* scsi_nl_hdr->version value */
1895+#define SCSI_NL_VERSION 1
1896+
1897+/* scsi_nl_hdr->magic value */
1898+#define SCSI_NL_MAGIC 0xA1B2
1899+
1900+/* scsi_nl_hdr->transport value */
1901+#define SCSI_NL_TRANSPORT 0
1902+#define SCSI_NL_TRANSPORT_FC 1
1903+#define SCSI_NL_MAX_TRANSPORTS 2
1904+
1905+#define FC_NL_GROUP_CNT 0
1906+
1907+ /* Note: when specifying vendor_id to fc_host_post_vendor_event()
1908+ * be sure to read the Vendor Type and ID formatting requirements
1909+ * specified in scsi_netlink.h
1910+ */
1911+
1912+#define FC_SC_REQ_TIMEOUT (60*HZ)
1913+
1914+enum fc_sc_service_state {
1915+ FC_SC_SERVICESTATE_UNKNOWN,
1916+ FC_SC_SERVICESTATE_ONLINE,
1917+ FC_SC_SERVICESTATE_OFFLINE,
1918+ FC_SC_SERVICESTATE_ERROR,
1919+};
1920+
1921+struct fc_security_request {
1922+ struct list_head rlist;
1923+ int pid;
1924+ u32 tran_id;
1925+ u32 req_type;
1926+ struct timer_list timer;
1927+ struct lpfc_vport *vport;
1928+ u32 data_len;
1929+ void *data;
1930+};
1931+
1932+struct fc_sc_msg_work_q_wrapper {
1933+ struct work_struct work;
1934+ struct fc_security_request *fc_sc_req;
1935+ u32 data_len;
1936+ int status;
1937+ u32 msgtype;
1938+};
1939+struct fc_sc_notify_work_q_wrapper {
1940+ struct work_struct work;
1941+ struct Scsi_Host *shost;
1942+ int msg;
1943+};
1944+
1945+#define FC_DHCHAP 1
1946+#define FC_FCAP 2
1947+#define FC_FCPAP 3
1948+#define FC_KERBEROS 4
1949+
1950+#define FC_AUTHMODE_UNKNOWN 0
1951+#define FC_AUTHMODE_NONE 1
1952+#define FC_AUTHMODE_ACTIVE 2
1953+#define FC_AUTHMODE_PASSIVE 3
1954+
1955+#define FC_SP_HASH_MD5 0x5
1956+#define FC_SP_HASH_SHA1 0x6
1957+
1958+#define DH_GROUP_NULL 0x00
1959+#define DH_GROUP_1024 0x01
1960+#define DH_GROUP_1280 0x02
1961+#define DH_GROUP_1536 0x03
1962+#define DH_GROUP_2048 0x04
1963+
1964+#define MAX_AUTH_REQ_SIZE 1024
1965+#define MAX_AUTH_RSP_SIZE 1024
1966+
1967+#define AUTH_FABRIC_WWN 0xFFFFFFFFFFFFFFFFLL
1968+
1969+struct fc_auth_req {
1970+ uint64_t local_wwpn;
1971+ uint64_t remote_wwpn;
1972+ union {
1973+ struct dhchap_challenge_req {
1974+ uint32_t transaction_id;
1975+ uint32_t dh_group_id;
1976+ uint32_t hash_id;
1977+ } dhchap_challenge;
1978+ struct dhchap_reply_req {
1979+ uint32_t transaction_id;
1980+ uint32_t dh_group_id;
1981+ uint32_t hash_id;
1982+ uint32_t bidirectional;
1983+ uint32_t received_challenge_len;
1984+ uint32_t received_public_key_len;
1985+ uint8_t data[0];
1986+ } dhchap_reply;
1987+ struct dhchap_success_req {
1988+ uint32_t transaction_id;
1989+ uint32_t dh_group_id;
1990+ uint32_t hash_id;
1991+ uint32_t our_challenge_len;
1992+ uint32_t received_response_len;
1993+ uint32_t received_public_key_len;
1994+ uint32_t received_challenge_len;
1995+ uint8_t data[0];
1996+ } dhchap_success;
1997+ } u;
1998+} __attribute__ ((packed));
1999+
2000+struct fc_auth_rsp {
2001+ uint64_t local_wwpn;
2002+ uint64_t remote_wwpn;
2003+ union {
2004+ struct authinfo {
2005+ uint8_t auth_mode;
2006+ uint16_t auth_timeout;
2007+ uint8_t bidirectional;
2008+ uint8_t type_priority[4];
2009+ uint16_t type_len;
2010+ uint8_t hash_priority[4];
2011+ uint16_t hash_len;
2012+ uint8_t dh_group_priority[8];
2013+ uint16_t dh_group_len;
2014+ uint32_t reauth_interval;
2015+ } dhchap_security_config;
2016+ struct dhchap_challenge_rsp {
2017+ uint32_t transaction_id;
2018+ uint32_t our_challenge_len;
2019+ uint32_t our_public_key_len;
2020+ uint8_t data[0];
2021+ } dhchap_challenge;
2022+ struct dhchap_reply_rsp {
2023+ uint32_t transaction_id;
2024+ uint32_t our_challenge_rsp_len;
2025+ uint32_t our_public_key_len;
2026+ uint32_t our_challenge_len;
2027+ uint8_t data[0];
2028+ } dhchap_reply;
2029+ struct dhchap_success_rsp {
2030+ uint32_t transaction_id;
2031+ uint32_t authenticated;
2032+ uint32_t response_len;
2033+ uint8_t data[0];
2034+ } dhchap_success;
2035+ } u;
2036+} __attribute__ ((packed));
2037+
2038+int
2039+lpfc_fc_security_get_config(struct Scsi_Host *shost,
2040+ struct fc_auth_req *auth_req,
2041+ u32 req_len,
2042+ struct fc_auth_rsp *auth_rsp,
2043+ u32 rsp_len);
2044+int
2045+lpfc_fc_security_dhchap_make_challenge(struct Scsi_Host *shost,
2046+ struct fc_auth_req *auth_req,
2047+ u32 req_len,
2048+ struct fc_auth_rsp *auth_rsp,
2049+ u32 rsp_len);
2050+int
2051+lpfc_fc_security_dhchap_make_response(struct Scsi_Host *shost,
2052+ struct fc_auth_req *auth_req,
2053+ u32 req_len,
2054+ struct fc_auth_rsp *auth_rsp,
2055+ u32 rsp_len);
2056+int
2057+lpfc_fc_security_dhchap_authenticate(struct Scsi_Host *shost,
2058+ struct fc_auth_req *auth_req,
2059+ u32 req_len,
2060+ struct fc_auth_rsp *auth_rsp,
2061+ u32 rsp_len);
2062+
2063+int lpfc_fc_queue_security_work(struct lpfc_vport *,
2064+ struct work_struct *);
2065+
2066+/*
2067+ * FC Transport Message Types
2068+ */
2069+ /* user -> kernel */
2070+#define FC_NL_EVENTS_REG 0x0001
2071+#define FC_NL_EVENTS_DEREG 0x0002
2072+#define FC_NL_SC_REG 0x0003
2073+#define FC_NL_SC_DEREG 0x0004
2074+#define FC_NL_SC_GET_CONFIG_RSP 0x0005
2075+#define FC_NL_SC_SET_CONFIG_RSP 0x0006
2076+#define FC_NL_SC_DHCHAP_MAKE_CHALLENGE_RSP 0x0007
2077+#define FC_NL_SC_DHCHAP_MAKE_RESPONSE_RSP 0x0008
2078+#define FC_NL_SC_DHCHAP_AUTHENTICATE_RSP 0x0009
2079+ /* kernel -> user */
2080+//#define FC_NL_ASYNC_EVENT 0x0100
2081+#define FC_NL_SC_GET_CONFIG_REQ 0x0020
2082+#define FC_NL_SC_SET_CONFIG_REQ 0x0030
2083+#define FC_NL_SC_DHCHAP_MAKE_CHALLENGE_REQ 0x0040
2084+#define FC_NL_SC_DHCHAP_MAKE_RESPONSE_REQ 0x0050
2085+#define FC_NL_SC_DHCHAP_AUTHENTICATE_REQ 0x0060
2086+
2087+/*
2088+ * Message Structures :
2089+ */
2090+
2091+/* macro to round up message lengths to 8byte boundary */
2092+#define FC_NL_MSGALIGN(len) (((len) + 7) & ~7)
2093+
2094+#define FC_NETLINK_API_VERSION 1
2095+
2096+/* Single Netlink Message type to send all FC Transport messages */
2097+#define FC_TRANSPORT_MSG (NLMSG_MIN_TYPE + 1)
2098+
2099+/* SCSI_TRANSPORT_MSG event message header */
2100+/*
2101+struct scsi_nl_hdr {
2102+ uint8_t version;
2103+ uint8_t transport;
2104+ uint16_t magic;
2105+ uint16_t msgtype;
2106+ uint16_t msglen;
2107+} __attribute__((aligned(sizeof(uint64_t))));
2108+*/
2109+struct fc_nl_sc_message {
2110+ uint16_t msgtype;
2111+ uint16_t rsvd;
2112+ uint32_t tran_id;
2113+ uint32_t data_len;
2114+ uint8_t data[0];
2115+} __attribute__((aligned(sizeof(uint64_t))));
2116+
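The FC_NL_MSGALIGN() macro above rounds a payload length up to the 8-byte boundary that struct fc_nl_sc_message declares via its aligned attribute (which also pads the structure's own size to 16 bytes). A small worked example with an illustrative helper name:

#include <linux/types.h>

/* Illustration only: space needed for an fc_nl_sc_message carrying
 * data_len bytes of variable payload. */
static inline u32 example_sc_msg_space(u32 data_len)
{
	return FC_NL_MSGALIGN(sizeof(struct fc_nl_sc_message) + data_len);
}
/* example_sc_msg_space(13) == 32, since (16 + 13 + 7) & ~7 == 32 */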
2117--- /dev/null
2118+++ b/drivers/scsi/lpfc/lpfc_auth.c
2119@@ -0,0 +1,838 @@
2120+/*******************************************************************
2121+ * This file is part of the Emulex Linux Device Driver for *
2122+ * Fibre Channel Host Bus Adapters. *
2123+ * Copyright (C) 2006-2008 Emulex. All rights reserved. *
2124+ * EMULEX and SLI are trademarks of Emulex. *
2125+ * www.emulex.com *
2126+ * *
2127+ * This program is free software; you can redistribute it and/or *
2128+ * modify it under the terms of version 2 of the GNU General *
2129+ * Public License as published by the Free Software Foundation. *
2130+ * This program is distributed in the hope that it will be useful. *
2131+ * ALL EXPRESS OR IMPLIED CONDITIONS, REPRESENTATIONS AND *
2132+ * WARRANTIES, INCLUDING ANY IMPLIED WARRANTY OF MERCHANTABILITY, *
2133+ * FITNESS FOR A PARTICULAR PURPOSE, OR NON-INFRINGEMENT, ARE *
2134+ * DISCLAIMED, EXCEPT TO THE EXTENT THAT SUCH DISCLAIMERS ARE HELD *
2135+ * TO BE LEGALLY INVALID. See the GNU General Public License for *
2136+ * more details, a copy of which can be found in the file COPYING *
2137+ * included with this package. *
2138+ *******************************************************************/
2139+/* See Fibre Channel protocol T11 FC-SP for details */
2140+#include <linux/pci.h>
2141+#include <linux/interrupt.h>
2142+
2143+#include <scsi/scsi.h>
2144+#include <scsi/scsi_tcq.h>
2145+#include <scsi/scsi_transport_fc.h>
2146+
2147+#include "lpfc_hw.h"
2148+#include "lpfc_sli.h"
2149+#include "lpfc_nl.h"
2150+#include "lpfc_disc.h"
2151+#include "lpfc.h"
2152+#include "lpfc_crtn.h"
2153+#include "lpfc_logmsg.h"
2154+#include "lpfc_auth_access.h"
2155+#include "lpfc_auth.h"
2156+
2157+void
2158+lpfc_start_authentication(struct lpfc_vport *vport,
2159+ struct lpfc_nodelist *ndlp)
2160+{
2161+ uint32_t nego_payload_len;
2162+ uint8_t *nego_payload;
2163+
2164+ nego_payload = kmalloc(MAX_AUTH_REQ_SIZE, GFP_KERNEL);
2165+ if (!nego_payload)
2166+ return;
2167+ vport->auth.trans_id++;
2168+ vport->auth.auth_msg_state = LPFC_AUTH_NEGOTIATE;
2169+ nego_payload_len = lpfc_build_auth_neg(vport, nego_payload);
2170+ lpfc_issue_els_auth(vport, ndlp, AUTH_NEGOTIATE,
2171+ nego_payload, nego_payload_len);
2172+ kfree(nego_payload);
2173+}
2174+
2175+void
2176+lpfc_dhchap_make_challenge(struct Scsi_Host *shost, int status,
2177+ void *rsp, uint32_t rsp_len)
2178+{
2179+ struct lpfc_vport *vport = (struct lpfc_vport *)shost->hostdata;
2180+ struct lpfc_nodelist *ndlp;
2181+ uint32_t chal_payload_len;
2182+ uint8_t *chal_payload;
2183+ struct fc_auth_rsp *auth_rsp = rsp;
2184+
2185+ ndlp = lpfc_findnode_did(vport, Fabric_DID);
2186+ if (!ndlp || !NLP_CHK_NODE_ACT(ndlp)) {
2187+ kfree(rsp);
2188+ return;
2189+ }
2190+
2191+ lpfc_printf_vlog(vport, KERN_INFO, LOG_SECURITY,
2192+ "1003 Send dhchap challenge local_wwpn "
2193+ "%llX remote_wwpn %llX \n",
2194+ (unsigned long long)auth_rsp->local_wwpn,
2195+ (unsigned long long)auth_rsp->remote_wwpn);
2196+
2197+ chal_payload = kmalloc(MAX_AUTH_REQ_SIZE, GFP_KERNEL);
2198+ if (!chal_payload) {
2199+ kfree(rsp);
2200+ return;
2201+ }
2202+ vport->auth.auth_msg_state = LPFC_DHCHAP_CHALLENGE;
2203+ chal_payload_len = lpfc_build_dhchap_challenge(vport,
2204+ chal_payload, rsp);
2205+ lpfc_issue_els_auth(vport, ndlp, DHCHAP_CHALLENGE,
2206+ chal_payload, chal_payload_len);
2207+ kfree(chal_payload);
2208+ kfree(rsp);
2209+}
2210+
2211+
2212+void
2213+lpfc_dhchap_make_response(struct Scsi_Host *shost, int status,
2214+ void *rsp, uint32_t rsp_len)
2215+{
2216+ struct lpfc_vport *vport = (struct lpfc_vport *)shost->hostdata;
2217+ struct lpfc_nodelist *ndlp;
2218+ uint32_t reply_payload_len;
2219+ uint8_t *reply_payload;
2220+ struct fc_auth_rsp *auth_rsp = rsp;
2221+
2222+ ndlp = lpfc_findnode_did(vport, Fabric_DID);
2223+ if (!ndlp || !NLP_CHK_NODE_ACT(ndlp)) {
2224+ kfree(rsp);
2225+ return;
2226+ }
2227+
2228+ lpfc_printf_vlog(vport, KERN_INFO, LOG_SECURITY,
2229+ "1004 Send dhchap reply local_wwpn "
2230+ "%llX remote_wwpn %llX \n",
2231+ (unsigned long long)auth_rsp->local_wwpn,
2232+ (unsigned long long)auth_rsp->remote_wwpn);
2233+
2234+ reply_payload = kmalloc(MAX_AUTH_REQ_SIZE, GFP_KERNEL);
2235+ if (!reply_payload) {
2236+ kfree(rsp);
2237+ return;
2238+ }
2239+
2240+ vport->auth.auth_msg_state = LPFC_DHCHAP_REPLY;
2241+ reply_payload_len = lpfc_build_dhchap_reply(vport, reply_payload, rsp);
2242+ lpfc_issue_els_auth(vport, ndlp, DHCHAP_REPLY,
2243+ reply_payload, reply_payload_len);
2244+ kfree(reply_payload);
2245+ kfree(rsp);
2246+
2247+}
2248+
2249+
2250+void
2251+lpfc_dhchap_authenticate(struct Scsi_Host *shost,
2252+ int status, void *rsp,
2253+ uint32_t rsp_len)
2254+{
2255+ struct fc_auth_rsp *auth_rsp = (struct fc_auth_rsp *)rsp;
2256+ struct lpfc_vport *vport = (struct lpfc_vport *)shost->hostdata;
2257+ struct lpfc_nodelist *ndlp;
2258+
2259+ ndlp = lpfc_findnode_did(vport, Fabric_DID);
2260+ if (!ndlp || !NLP_CHK_NODE_ACT(ndlp)) {
2261+ kfree(rsp);
2262+ return;
2263+ }
2264+ if (status != 0) {
2265+ lpfc_issue_els_auth_reject(vport, ndlp,
2266+ AUTH_ERR, AUTHENTICATION_FAILED);
2267+ kfree(rsp);
2268+ return;
2269+ }
2270+
2271+ if (auth_rsp->u.dhchap_success.authenticated) {
2272+ uint32_t suc_payload_len;
2273+ uint8_t *suc_payload;
2274+
2275+ suc_payload = kmalloc(MAX_AUTH_REQ_SIZE, GFP_KERNEL);
2276+ if (!suc_payload) {
2277+ lpfc_issue_els_auth_reject(vport, ndlp,
2278+ AUTH_ERR, AUTHENTICATION_FAILED);
2279+ kfree(rsp);
2280+ return;
2281+ }
2282+ suc_payload_len = lpfc_build_dhchap_success(vport,
2283+ suc_payload, rsp);
2284+ if (suc_payload_len == sizeof(uint32_t)) {
2285+ /* Authentication is complete after sending this SUCCESS */
2286+ vport->auth.auth_msg_state = LPFC_DHCHAP_SUCCESS;
2287+ } else {
2288+ /* Need to wait for SUCCESS from Auth Initiator */
2289+ vport->auth.auth_msg_state = LPFC_DHCHAP_SUCCESS_REPLY;
2290+ }
2291+ lpfc_issue_els_auth(vport, ndlp, DHCHAP_SUCCESS,
2292+ suc_payload, suc_payload_len);
2293+ kfree(suc_payload);
2294+ vport->auth.direction |= AUTH_DIRECTION_LOCAL;
2295+ } else {
2296+ lpfc_printf_vlog(vport, KERN_ERR, LOG_SECURITY,
2297+ "1005 AUTHENTICATION_FAILURE Nport:x%x\n",
2298+ ndlp->nlp_DID);
2299+ lpfc_issue_els_auth_reject(vport, ndlp,
2300+ AUTH_ERR, AUTHENTICATION_FAILED);
2301+ if (vport->auth.auth_state == LPFC_AUTH_SUCCESS) {
2302+ lpfc_port_auth_failed(ndlp);
2303+ }
2304+ }
2305+
2306+ kfree(rsp);
2307+}
2308+
2309+int
2310+lpfc_unpack_auth_negotiate(struct lpfc_vport *vport, uint8_t *message,
2311+ uint8_t *reason, uint8_t *explanation)
2312+{
2313+ uint32_t prot_len;
2314+ uint32_t param_len;
2315+ int i, j = 0;
2316+
2317+ /* Following is the format of the message. Name Format.
2318+ * uint16_t nameTag;
2319+ * uint16_t nameLength;
2320+ * uint8_t name[8];
2321+ * AUTH_Negotiate Message
2322+ * uint32_t NumberOfAuthProtocols
2323+ * uint32_t AuthProtParameter#1Len
2324+ * uint32_t AuthProtID#1 (DH-CHAP = 0x1)
2325+ * AUTH_Negotiate DH-CHAP
2326+ * uint16_t DH-CHAPParameterTag (HashList = 0x1)
2327+ * uint16_t DH-CHAPParameterWordCount (number of uint32_t entries)
2328+ * uint8_t DH-CHAPParameter[]; (uint32_t entries)
2329+ * uint16_t DH-CHAPParameterTag (DHgIDList = 0x2)
2330+ * uint16_t DH-CHAPParameterWordCount (number of uint32_t entries)
2331+ * uint8_t DH-CHAPParameter[]; (uint32_t entries)
2332+ * DHCHAP_Challenge Message
2333+ * uint32_t hashIdentifier;
2334+ * uint32_t dhgroupIdentifier;
2335+ * uint32_t challengevalueLen;
2336+ * uint8_t challengeValue[];
2337+ * uint32_t dhvalueLen;
2338+ * uint8_t dhvalue[];
2339+ */
2340+
2341+ /* Name Tag */
2342+ if (be16_to_cpu(*(uint16_t *)message) != NAME_TAG) {
2343+ *reason = AUTH_ERR;
2344+ *explanation = BAD_PAYLOAD;
2345+ lpfc_printf_vlog(vport, KERN_ERR, LOG_SECURITY,
2346+ "1006 Bad Name tag in auth message 0x%x\n",
2347+ be16_to_cpu(*(uint16_t *)message));
2348+ return 1;
2349+ }
2350+ message += sizeof(uint16_t);
2351+
2352+ /* Name Length */
2353+ if (be16_to_cpu(*(uint16_t *)message) != NAME_LEN) {
2354+ *reason = AUTH_ERR;
2355+ *explanation = BAD_PAYLOAD;
2356+ lpfc_printf_vlog(vport, KERN_ERR, LOG_SECURITY,
2357+ "1007 Bad Name length in auth message 0x%x\n",
2358+ be16_to_cpu(*(uint16_t *)message));
2359+ return 1;
2360+ }
2361+ message += sizeof(uint16_t);
2362+
2363+ /* Skip over Remote Port Name */
2364+ message += NAME_LEN;
2365+
2366+ /* Number of Auth Protocols must be 1 DH-CHAP */
2367+ if (be32_to_cpu(*(uint32_t *)message) != 1) {
2368+ *reason = AUTH_ERR;
2369+ *explanation = BAD_PAYLOAD;
2370+ lpfc_printf_vlog(vport, KERN_ERR, LOG_SECURITY,
2371+ "1008 Bad Number of Protocols 0x%x\n",
2372+ be32_to_cpu(*(uint32_t *)message));
2373+ return 1;
2374+ }
2375+ message += sizeof(uint32_t);
2376+
2377+ /* Protocol Parameter Length */
2378+ prot_len = be32_to_cpu(*(uint32_t *)message);
2379+ message += sizeof(uint32_t);
2380+
2381+ /* Protocol Parameter type */
2382+ if (be32_to_cpu(*(uint32_t *)message) != FC_DHCHAP) {
2383+ *reason = AUTH_ERR;
2384+ *explanation = BAD_PAYLOAD;
2385+ lpfc_printf_vlog(vport, KERN_ERR, LOG_SECURITY,
2386+ "1009 Bad param type 0x%x\n",
2387+ be32_to_cpu(*(uint32_t *)message));
2388+ return 1;
2389+ }
2390+ message += sizeof(uint32_t);
2391+
2392+ /* Parameter #1 Tag */
2393+ if (be16_to_cpu(*(uint16_t *)message) != HASH_LIST_TAG) {
2394+ *reason = AUTH_ERR;
2395+ *explanation = BAD_PAYLOAD;
2396+ lpfc_printf_vlog(vport, KERN_ERR, LOG_SECURITY,
2397+ "1010 Bad Tag 1 0x%x\n",
2398+ be16_to_cpu(*(uint16_t *)message));
2399+ return 1;
2400+ }
2401+ message += sizeof(uint16_t);
2402+
2403+ /* Parameter #1 Length */
2404+ param_len = be16_to_cpu(*(uint16_t *)message);
2405+ message += sizeof(uint16_t);
2406+
2407+ /* Choose a hash function */
2408+ for (i = 0; i < vport->auth.hash_len; i++) {
2409+ for (j = 0; j < param_len; j++) {
2410+ if (vport->auth.hash_priority[i] ==
2411+ be32_to_cpu(((uint32_t *)message)[j]))
2412+ break;
2413+ }
2414+ if (j != param_len)
2415+ break;
2416+ }
2417+ if (i == vport->auth.hash_len && j == param_len) {
2418+ *reason = AUTH_ERR;
2419+ *explanation = BAD_PAYLOAD;
2420+ lpfc_printf_vlog(vport, KERN_ERR, LOG_SECURITY,
2421+ "1011 Auth_neg no hash function chosen.\n");
2422+ return 1;
2423+ }
2424+ vport->auth.hash_id = vport->auth.hash_priority[i];
2425+ message += sizeof(uint32_t) * param_len;
2426+
2427+ /* Parameter #2 Tag */
2428+ if (be16_to_cpu(*(uint16_t *)message) != DHGID_LIST_TAG) {
2429+ *reason = AUTH_ERR;
2430+ *explanation = BAD_PAYLOAD;
2431+ lpfc_printf_vlog(vport, KERN_ERR, LOG_SECURITY,
2432+ "1012 Auth_negotiate Bad Tag 2 0x%x\n",
2433+ be16_to_cpu(*(uint16_t *)message));
2434+ return 1;
2435+ }
2436+ message += sizeof(uint16_t);
2437+
2438+ /* Parameter #2 Length */
2439+ param_len = be16_to_cpu(*(uint16_t *)message);
2440+ message += sizeof(uint16_t);
2441+
2442+ /* Choose a DH Group */
2443+ for (i = 0; i < vport->auth.dh_group_len; i++) {
2444+ for (j = 0; j < param_len; j++) {
2445+ if (vport->auth.dh_group_priority[i] ==
2446+ be32_to_cpu(((uint32_t *)message)[j]))
2447+ break;
2448+ }
2449+ if (j != param_len)
2450+ break;
2451+ }
2452+ if (i == vport->auth.dh_group_len && j == param_len) {
2453+ *reason = AUTH_ERR;
2454+ *explanation = BAD_PAYLOAD;
2455+ lpfc_printf_vlog(vport, KERN_ERR, LOG_SECURITY,
2456+ "1013 Auth_negotiate no DH_group found. \n");
2457+ return 1;
2458+ }
2459+ vport->auth.group_id = vport->auth.dh_group_priority[i];
2460+ message += sizeof(uint32_t) * param_len;
2461+
2462+ return 0;
2463+}
2464+
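The parser above reads each big-endian field with casts of the form be16_to_cpu(*(uint16_t *)message). Since ELS payload offsets are not guaranteed to be naturally aligned, the same walk can be expressed with the kernel's unaligned accessors. This is a hedged sketch of an alternative technique, not what the patch does, and it assumes get_unaligned_be16() is available on the target kernel (present since 2.6.26):

#include <linux/errno.h>
#include <linux/types.h>
#include <asm/unaligned.h>

/* Sketch: read and validate the Name tag/length pair that opens every
 * AUTH_Negotiate payload, using alignment-safe big-endian loads. */
static int example_check_name_header(const u8 *msg)
{
	u16 tag = get_unaligned_be16(msg);
	u16 len = get_unaligned_be16(msg + sizeof(u16));

	return (tag == NAME_TAG && len == NAME_LEN) ? 0 : -EINVAL;
}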
2465+int
2466+lpfc_unpack_dhchap_challenge(struct lpfc_vport *vport, uint8_t *message,
2467+ uint8_t *reason, uint8_t *explanation)
2468+{
2469+ int i;
2470+
2471+ /* Following is the format of the message DHCHAP_Challenge.
2472+ * uint16_t nameTag;
2473+ * uint16_t nameLength;
2474+ * uint8_t name[8];
2475+ * uint32_t hashIdentifier;
2476+ * uint32_t dhgroupIdentifier;
2477+ * uint32_t challengevalueLen;
2478+ * uint8_t challengeValue[];
2479+ * uint32_t dhvalueLen;
2480+ * uint8_t dhvalue[];
2481+ */
2482+
2483+ /* Name Tag */
2484+ if (be16_to_cpu(*(uint16_t *)message) != NAME_TAG) {
2485+ *reason = AUTH_ERR;
2486+ *explanation = BAD_PAYLOAD;
2487+ lpfc_printf_vlog(vport, KERN_ERR, LOG_SECURITY,
2488+ "1014 dhchap challenge bad name tag 0x%x. \n",
2489+ be16_to_cpu(*(uint16_t *)message));
2490+ return 1;
2491+ }
2492+ message += sizeof(uint16_t);
2493+
2494+ /* Name Length */
2495+ if (be16_to_cpu(*(uint16_t *)message) != NAME_LEN) {
2496+ *reason = AUTH_ERR;
2497+ *explanation = BAD_PAYLOAD;
2498+ lpfc_printf_vlog(vport, KERN_ERR, LOG_SECURITY,
2499+ "1015 dhchap challenge bad name length "
2500+ "0x%x.\n", be16_to_cpu(*(uint16_t *)message));
2501+ return 1;
2502+ }
2503+ message += sizeof(uint16_t);
2504+
2505+ /* Remote Port Name */
2506+ message += NAME_LEN;
2507+
2508+ /* Hash ID */
2509+ vport->auth.hash_id = be32_to_cpu(*(uint32_t *)message); /* Hash id */
2510+ for (i = 0; i < vport->auth.hash_len; i++) {
2511+ if (vport->auth.hash_id == vport->auth.hash_priority[i])
2512+ break;
2513+ }
2514+ if (i == vport->auth.hash_len) {
2515+ *reason = LOGIC_ERR;
2516+ *explanation = BAD_ALGORITHM;
2517+ lpfc_printf_vlog(vport, KERN_ERR, LOG_SECURITY,
2518+ "1016 dhchap challenge Hash ID not Supported "
2519+ "0x%x. \n", vport->auth.hash_id);
2520+ return 1;
2521+ }
2522+ message += sizeof(uint32_t);
2523+
2524+ vport->auth.group_id =
2525+ be32_to_cpu(*(uint32_t *)message); /* DH group id */
2526+ for (i = 0; i < vport->auth.dh_group_len; i++) {
2527+ if (vport->auth.group_id == vport->auth.dh_group_priority[i])
2528+ break;
2529+ }
2530+ if (i == vport->auth.dh_group_len) {
2531+ *reason = LOGIC_ERR;
2532+ *explanation = BAD_DHGROUP;
2533+ lpfc_printf_vlog(vport, KERN_ERR, LOG_SECURITY,
2534+ "1017 dhchap challenge could not find DH "
2535+ "Group. \n");
2536+ return 1;
2537+ }
2538+ message += sizeof(uint32_t);
2539+
2540+ vport->auth.challenge_len =
2541+ be32_to_cpu(*(uint32_t *)message); /* Challenge Len */
2542+ message += sizeof(uint32_t);
2543+
2544+ /* copy challenge to vport */
2545+ if (vport->auth.challenge != NULL) {
2546+ kfree(vport->auth.challenge);
2547+ }
2548+ vport->auth.challenge = kmalloc(vport->auth.challenge_len, GFP_KERNEL);
2549+ if (!vport->auth.challenge) {
2550+ *reason = AUTH_ERR;
2551+ return 1;
2552+ }
2553+ memcpy (vport->auth.challenge, message, vport->auth.challenge_len);
2554+ message += vport->auth.challenge_len;
2555+
2556+ vport->auth.dh_pub_key_len =
2557+ be32_to_cpu(*(uint32_t *)message); /* DH Value Len */
2558+ message += sizeof(uint32_t);
2559+
2560+ if (vport->auth.dh_pub_key_len != 0) {
2561+ if (vport->auth.group_id == DH_GROUP_NULL) {
2562+ *reason = LOGIC_ERR;
2563+ *explanation = BAD_DHGROUP;
2564+ lpfc_printf_vlog(vport, KERN_ERR, LOG_SECURITY,
2565+ "1018 dhchap challenge No Public key "
2566+ "for non-NULL DH Group.\n");
2567+ return 1;
2568+ }
2569+
2570+ /* Copy to the vport to save for authentication */
2571+ if (vport->auth.dh_pub_key != NULL)
2572+ kfree(vport->auth.dh_pub_key);
2573+ vport->auth.dh_pub_key = kmalloc(vport->auth.dh_pub_key_len,
2574+ GFP_KERNEL);
2575+ if (!vport->auth.dh_pub_key) {
2576+ *reason = AUTH_ERR;
2577+ return 1;
2578+ }
2579+ memcpy(vport->auth.dh_pub_key, message,
2580+ vport->auth.dh_pub_key_len);
2581+ }
2582+ return 0;
2583+}
2584+
2585+int
2586+lpfc_unpack_dhchap_reply(struct lpfc_vport *vport, uint8_t *message,
2587+ struct fc_auth_req *fc_req)
2588+{
2589+ uint32_t rsp_len;
2590+ uint32_t dh_len;
2591+ uint32_t challenge_len;
2592+
2593+ /* Following is the format of the message DHCHAP_Reply.
2594+ * uint32_t Response Value Length;
2595+ * uint8_t Response Value[];
2596+ * uint32_t DH Value Length;
2597+ * uint8_t DH Value[];
2598+ * uint32_t Challenge Value Length;
2599+ * uint8_t Challenge Value[];
2600+ */
2601+
2602+ rsp_len = be32_to_cpu(*(uint32_t *)message); /* Response Len */
2603+ message += sizeof(uint32_t);
2604+ memcpy (fc_req->u.dhchap_success.data + vport->auth.challenge_len,
2605+ message, rsp_len);
2606+ fc_req->u.dhchap_success.received_response_len = rsp_len;
2607+ message += rsp_len;
2608+
2609+ dh_len = be32_to_cpu(*(uint32_t *)message); /* DH Len */
2610+ message += sizeof(uint32_t);
2611+ memcpy (fc_req->u.dhchap_success.data + vport->auth.challenge_len +
2612+ rsp_len, message, dh_len);
2613+ fc_req->u.dhchap_success.received_public_key_len = dh_len;
2614+ message += dh_len;
2615+
2616+ challenge_len = be32_to_cpu(*(uint32_t *)message); /* Challenge Len */
2617+ message += sizeof(uint32_t);
2618+ memcpy (fc_req->u.dhchap_success.data + vport->auth.challenge_len
2619+ + rsp_len + dh_len,
2620+ message, challenge_len);
2621+ fc_req->u.dhchap_success.received_challenge_len = challenge_len;
2622+ message += challenge_len;
2623+
2624+ return (rsp_len + dh_len + challenge_len);
2625+}
2626+
2627+int
2628+lpfc_unpack_dhchap_success(struct lpfc_vport *vport, uint8_t *message,
2629+ struct fc_auth_req *fc_req)
2630+{
2631+ uint32_t rsp_len = 0;
2632+
2633+ /* DHCHAP_Success.
2634+ * uint32_t responseValueLen;
2635+ * uint8_t response[];
2636+ */
2637+
2638+ rsp_len = be32_to_cpu(*(uint32_t *)message); /* Response Len */
2639+ message += sizeof(uint32_t);
2640+ memcpy(fc_req->u.dhchap_success.data + vport->auth.challenge_len,
2641+ message, rsp_len);
2642+ fc_req->u.dhchap_success.received_response_len = rsp_len;
2643+
2644+ memcpy(fc_req->u.dhchap_success.data +
2645+ vport->auth.challenge_len + rsp_len,
2646+ vport->auth.dh_pub_key, vport->auth.dh_pub_key_len);
2647+
2648+ fc_req->u.dhchap_success.received_public_key_len =
2649+ vport->auth.dh_pub_key_len;
2650+
2651+ fc_req->u.dhchap_success.received_challenge_len = 0;
2652+
2653+ return (vport->auth.challenge_len + rsp_len +
2654+ vport->auth.dh_pub_key_len);
2655+
2656+}
2657+
2658+int
2659+lpfc_build_auth_neg(struct lpfc_vport *vport, uint8_t *message)
2660+{
2661+ uint8_t *message_start = message;
2662+ uint8_t *params_start;
2663+ uint32_t *params_len;
2664+ uint32_t len;
2665+ int i;
2666+
2667+ /* Because some of the fields are not static in length
2668+ * and number we will pack on the fly. This will be expanded
2669+ * in the future to optionally offer DHCHAP or FCAP or both.
2670+ * The packing is done in Big Endian byte order AUTH_Negotiate.
2671+ *
2672+ * uint16_t nameTag;
2673+ * uint16_t nameLength;
2674+ * uint8_t name[8];
2675+ * uint32_t available; For now we will only offer one
2676+ * protocol (DHCHAP) for authentication.
2677+ * uint32_t protocolParamsLenId#1;
2678+ * uint32_t protocolId#1; 1 : DHCHAP. The protocol list is
2679+ * in order of preference.
2680+ * uint16_t parameter#1Tag 1 : HashList
2681+ * uint16_t parameter#1Len 2 : Count of how many parameter values
2682+ * follow in order of preference.
2683+ * uint32_t parameter#1value#1 5 : MD5 Hash Function
2684+ * uint32_t parameter#1value#2 6 : SHA-1 Hash Function
2685+ * uint16_t parameter#2Tag 2 : DHgIDList
2686+ * uint16_t parameter#2Len 1 : Only One is supported now
2687+ * uint32_t parameter#2value#1 0 : NULL DH-CHAP Algorithm
2688+ * uint32_t parameter#2value#2 ...
2689+ * uint32_t protocolParamsLenId#2;
2690+ * uint32_t protocolId#2; 2 = FCAP
2691+ * uint16_t parameter#1Tag
2692+ * uint16_t parameter#1Len
2693+ * uint16_t parameter#1value#1
2694+ * uint16_t parameter#1value#2 ...
2695+ * uint16_t parameter#2Tag
2696+ * uint16_t parameter#2Len
2697+ * uint16_t parameter#2value#1
2698+ * uint16_t parameter#2value#2 ...
2699+ */
2700+
2701+
2702+ /* Name Tag */
2703+ *((uint16_t *)message) = cpu_to_be16(NAME_TAG);
2704+ message += sizeof(uint16_t);
2705+
2706+ /* Name Len */
2707+ *((uint16_t *)message) = cpu_to_be16(NAME_LEN);
2708+ message += sizeof(uint16_t);
2709+
2710+ memcpy(message, vport->fc_portname.u.wwn, sizeof(uint64_t));
2711+
2712+ message += sizeof(uint64_t);
2713+
2714+ /* Protocols Available */
2715+ *((uint32_t *)message) = cpu_to_be32(PROTS_NUM);
2716+ message += sizeof(uint32_t);
2717+
2718+ /* First Protocol Params Len */
2719+ params_len = (uint32_t *)message;
2720+ message += sizeof(uint32_t);
2721+
2722+ /* Start of first Param */
2723+ params_start = message;
2724+
2725+ /* Protocol Id */
2726+ *((uint32_t *)message) = cpu_to_be32(FC_DHCHAP);
2727+ message += sizeof(uint32_t);
2728+
2729+ /* Hash List Tag */
2730+ *((uint16_t *)message) = cpu_to_be16(HASH_LIST_TAG);
2731+ message += sizeof(uint16_t);
2732+
2733+ /* Hash Value Len */
2734+ *((uint16_t *)message) = cpu_to_be16(vport->auth.hash_len);
2735+ message += sizeof(uint16_t);
2736+
2737+ /* Hash Value each 4 byte words */
2738+ for (i = 0; i < vport->auth.hash_len; i++) {
2739+ *((uint32_t *)message) =
2740+ cpu_to_be32(vport->auth.hash_priority[i]);
2741+ message += sizeof(uint32_t);
2742+ }
2743+
2744+ /* DHgIDList Tag */
2745+ *((uint16_t *)message) = cpu_to_be16(DHGID_LIST_TAG);
2746+ message += sizeof(uint16_t);
2747+
2748+ /* DHgIDListValue Len */
2749+ *((uint16_t *)message) = cpu_to_be16(vport->auth.dh_group_len);
2750+
2751+ message += sizeof(uint16_t);
2752+
2753+ /* DHgIDList each 4 byte words */
2754+
2755+ for (i = 0; i < vport->auth.dh_group_len; i++) {
2756+ *((uint32_t *)message) =
2757+ cpu_to_be32(vport->auth.dh_group_priority[i]);
2758+ message += sizeof(uint32_t);
2759+ }
2760+
2761+ *params_len = cpu_to_be32(message - params_start);
2762+
2763+ len = (uint32_t)(message - message_start);
2764+
2765+ return len;
2766+}
2767+
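As a worked example of the length lpfc_build_auth_neg() returns: with two hash identifiers and one DH group identifier offered (hash_len = 2, dh_group_len = 1), the payload packed above is

    name tag + length + WWPN         2 + 2 + 8 = 12 bytes
    number of protocols                          4
    protocol parameters length field             4
    protocol identifier (DH-CHAP)                4
    hash list tag + count            2 + 2     = 4
    hash identifiers                 2 * 4     = 8
    DH group list tag + count        2 + 2     = 4
    DH group identifiers             1 * 4     = 4

for a total of 44 bytes, and the params_len field written back at the end covers everything after itself: 44 - 20 = 24 bytes.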
2768+int
2769+lpfc_build_dhchap_challenge(struct lpfc_vport *vport, uint8_t *message,
2770+ struct fc_auth_rsp *fc_rsp)
2771+{
2772+ uint8_t *message_start = message;
2773+
2774+ /* Because some of the fields are not static in length and number
2775+ * we will pack on the fly. The packing is done in Big Endian byte
2776+ * order DHCHAP_Challenge.
2777+ *
2778+ * uint16_t nameTag;
2779+ * uint16_t nameLength;
2780+ * uint8_t name[8];
2781+ * uint32_t Hash_Identifier;
2782+ * uint32_t DH_Group_Identifier;
2783+ * uint32_t Challenge_Value_Length;
2784+ * uint8_t Challenge_Value[];
2785+ * uint32_t DH_Value_Length;
2786+ * uint8_t DH_Value[];
2787+ */
2788+
2789+ /* Name Tag */
2790+ *((uint16_t *)message) = cpu_to_be16(NAME_TAG);
2791+ message += sizeof(uint16_t);
2792+
2793+ /* Name Len */
2794+ *((uint16_t *)message) = cpu_to_be16(NAME_LEN);
2795+ message += sizeof(uint16_t);
2796+
2797+ memcpy(message, vport->fc_portname.u.wwn, NAME_LEN);
2798+ message += NAME_LEN;
2799+
2800+ /* Hash Value each 4 byte words */
2801+ *((uint32_t *)message) = cpu_to_be32(vport->auth.hash_id);
2802+ message += sizeof(uint32_t);
2803+
2804+ /* DH group id each 4 byte words */
2805+ *((uint32_t *)message) = cpu_to_be32(vport->auth.group_id);
2806+ message += sizeof(uint32_t);
2807+
2808+ /* Challenge Length */
2809+ *((uint32_t *)message) = cpu_to_be32(fc_rsp->u.
2810+ dhchap_challenge.our_challenge_len);
2811+ message += sizeof(uint32_t);
2812+
2813+ /* copy challenge to vport to save */
2814+ if (vport->auth.challenge)
2815+ kfree(vport->auth.challenge);
2816+ vport->auth.challenge_len = fc_rsp->u.
2817+ dhchap_challenge.our_challenge_len;
2818+ vport->auth.challenge = kmalloc(vport->auth.challenge_len, GFP_KERNEL);
2819+
2820+ if (!vport->auth.challenge)
2821+ return 0;
2822+
2823+ memcpy(vport->auth.challenge, fc_rsp->u.dhchap_challenge.data,
2824+ fc_rsp->u.dhchap_challenge.our_challenge_len);
2825+
2826+ /* Challenge */
2827+ memcpy(message, fc_rsp->u.dhchap_challenge.data,
2828+ fc_rsp->u.dhchap_challenge.our_challenge_len);
2829+ message += fc_rsp->u.dhchap_challenge.our_challenge_len;
2830+
2831+ /* Public Key length */
2832+ *((uint32_t *)message) = cpu_to_be32(fc_rsp->u.
2833+ dhchap_challenge.our_public_key_len);
2834+ message += sizeof(uint32_t);
2835+
2836+ /* Public Key */
2837+ memcpy(message, fc_rsp->u.dhchap_challenge.data +
2838+ fc_rsp->u.dhchap_challenge.our_challenge_len,
2839+ fc_rsp->u.dhchap_challenge.our_public_key_len);
2840+ message += fc_rsp->u.dhchap_challenge.our_public_key_len;
2841+
2842+ return ((uint32_t)(message - message_start));
2843+
2844+}
2845+
2846+int
2847+lpfc_build_dhchap_reply(struct lpfc_vport *vport, uint8_t *message,
2848+ struct fc_auth_rsp *fc_rsp)
2849+
2850+{
2851+ uint8_t *message_start = message;
2852+
2853+ /*
2854+ * Because some of the fields are not static in length and
2855+ * number we will pack on the fly. The packing is done in
2856+ * Big Endian byte order DHCHAP_Reply.
2857+ *
2858+ * uint32_t ResponseLength;
2859+ * uint8_t ResponseValue[];
2860+ * uint32_t DHLength;
2861+ * uint8_t DHValue[]; Our Public key
2862+ * uint32_t ChallengeLength; Used for bi-directional authentication
2863+ * uint8_t ChallengeValue[];
2864+ *
2865+ * The combined key ( g^x mod p )^y mod p is used as the last
2866+ * hash of the password.
2867+ *
2868+ * g is the base 2 or 5.
2869+ * y is our private key.
2870+ * ( g^y mod p ) is our public key which we send.
2871+ * ( g^x mod p ) is their public key which we received.
2872+ */
2873+ /* Response Value Length */
2874+ *((uint32_t *)message) = cpu_to_be32(fc_rsp->u.dhchap_reply.
2875+ our_challenge_rsp_len);
2876+
2877+ message += sizeof(uint32_t);
2878+ /* Response Value */
2879+ memcpy(message, fc_rsp->u.dhchap_reply.data,
2880+ fc_rsp->u.dhchap_reply.our_challenge_rsp_len);
2881+
2882+ message += fc_rsp->u.dhchap_reply.our_challenge_rsp_len;
2883+ /* DH Value Length */
2884+ *((uint32_t *)message) = cpu_to_be32(fc_rsp->u.dhchap_reply.
2885+ our_public_key_len);
2886+
2887+ message += sizeof(uint32_t);
2888+ /* DH Value */
2889+ memcpy(message, fc_rsp->u.dhchap_reply.data +
2890+ fc_rsp->u.dhchap_reply.our_challenge_rsp_len,
2891+ fc_rsp->u.dhchap_reply.our_public_key_len);
2892+
2893+ message += fc_rsp->u.dhchap_reply.our_public_key_len;
2894+
2895+ if (vport->auth.bidirectional) {
2896+
2897+ /* copy to vport to save */
2898+ if (vport->auth.challenge)
2899+ kfree(vport->auth.challenge);
2900+ vport->auth.challenge_len = fc_rsp->u.dhchap_reply.
2901+ our_challenge_len;
2902+ vport->auth.challenge = kmalloc(vport->auth.challenge_len,
2903+ GFP_KERNEL);
2904+ if (!vport->auth.challenge)
2905+ return 0;
2906+
2907+ memcpy(vport->auth.challenge, fc_rsp->u.dhchap_reply.data +
2908+ fc_rsp->u.dhchap_reply.our_challenge_rsp_len +
2909+ fc_rsp->u.dhchap_reply.our_public_key_len,
2910+ fc_rsp->u.dhchap_reply.our_challenge_len);
2911+ /* Challenge Value Length */
2912+ *((uint32_t *)message) = cpu_to_be32(fc_rsp->u.
2913+ dhchap_reply.our_challenge_len);
2914+ message += sizeof(uint32_t);
2915+ /* Challenge Value */
2916+ memcpy(message, fc_rsp->u.dhchap_reply.data +
2917+ fc_rsp->u.dhchap_reply.our_challenge_rsp_len +
2918+ fc_rsp->u.dhchap_reply.our_public_key_len,
2919+ fc_rsp->u.dhchap_reply.our_challenge_len);
2920+
2921+ message += fc_rsp->u.dhchap_reply.our_challenge_len;
2922+
2923+ } else {
2924+ *((uint32_t *)message) = 0; /* Challenge Len for No
2925+ bidirectional authentication */
2926+ message += sizeof(uint32_t); /* Challenge Value Not Present */
2927+ }
2928+
2929+ return ((uint32_t)(message - message_start));
2930+
2931+}
2932+
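The comment above summarizes the exchange: each side sends g^private mod p, raises the value it receives to its own private exponent, and both arrive at the same combined key, which then feeds the final hash of the password. A toy-sized illustration with numbers nothing like real FC-SP group sizes, written as standalone user-space C rather than driver code:

#include <stdint.h>
#include <stdio.h>

/* square-and-multiply modular exponentiation */
static uint64_t modexp(uint64_t base, uint64_t exp, uint64_t mod)
{
	uint64_t result = 1;

	base %= mod;
	while (exp) {
		if (exp & 1)
			result = result * base % mod;
		base = base * base % mod;
		exp >>= 1;
	}
	return result;
}

int main(void)
{
	uint64_t p = 23, g = 5;			/* toy modulus and base */
	uint64_t x = 6, y = 15;			/* peer's and our private values */
	uint64_t peer_pub = modexp(g, x, p);	/* 5^6  mod 23 = 8  */
	uint64_t our_pub  = modexp(g, y, p);	/* 5^15 mod 23 = 19 */

	/* both sides compute the same combined value, here 2 */
	printf("%llu %llu\n",
	       (unsigned long long)modexp(peer_pub, y, p),
	       (unsigned long long)modexp(our_pub, x, p));
	return 0;
}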
2933+int
2934+lpfc_build_dhchap_success(struct lpfc_vport *vport, uint8_t *message,
2935+ struct fc_auth_rsp *fc_rsp)
2936+{
2937+ uint8_t *message_start = message;
2938+
2939+ /*
2940+ * Because some of the fields are not static in length and number
2941+ * we will pack on the fly. The packing is done in Big Endian byte
2942+ * order DHCHAP_Success.
2943+ * uint32_t responseValueLen;
2944+ * uint8_t response[];.
2945+ */
2946+
2947+ *((uint32_t *)message) = cpu_to_be32(fc_rsp->u.
2948+ dhchap_success.response_len);
2949+ message += sizeof(uint32_t);
2950+
2951+ memcpy(message, fc_rsp->u.dhchap_success.data,
2952+ fc_rsp->u.dhchap_success.response_len);
2953+ message += fc_rsp->u.dhchap_success.response_len;
2954+
2955+ return ((uint32_t)(message - message_start));
2956+}
2957+
2958--- /dev/null
2959+++ b/drivers/scsi/lpfc/lpfc_auth.h
2960@@ -0,0 +1,92 @@
2961+/*******************************************************************
2962+ * This file is part of the Emulex Linux Device Driver for *
2963+ * Fibre Channel Host Bus Adapters. *
2964+ * Copyright (C) 2006-2007 Emulex. All rights reserved. *
2965+ * EMULEX and SLI are trademarks of Emulex. *
2966+ * www.emulex.com *
2967+ * *
2968+ * This program is free software; you can redistribute it and/or *
2969+ * modify it under the terms of version 2 of the GNU General *
2970+ * Public License as published by the Free Software Foundation. *
2971+ * This program is distributed in the hope that it will be useful. *
2972+ * ALL EXPRESS OR IMPLIED CONDITIONS, REPRESENTATIONS AND *
2973+ * WARRANTIES, INCLUDING ANY IMPLIED WARRANTY OF MERCHANTABILITY, *
2974+ * FITNESS FOR A PARTICULAR PURPOSE, OR NON-INFRINGEMENT, ARE *
2975+ * DISCLAIMED, EXCEPT TO THE EXTENT THAT SUCH DISCLAIMERS ARE HELD *
2976+ * TO BE LEGALLY INVALID. See the GNU General Public License for *
2977+ * more details, a copy of which can be found in the file COPYING *
2978+ * included with this package. *
2979+ *******************************************************************/
2980+
2981+#define N_DH_GROUP 4
2982+#define ELS_CMD_AUTH_BYTE 0x90
2983+
2984+#define AUTH_REJECT 0xA
2985+#define AUTH_NEGOTIATE 0xB
2986+#define AUTH_DONE 0xC
2987+
2988+#define DHCHAP_CHALLENGE 0x10
2989+#define DHCHAP_REPLY 0x11
2990+#define DHCHAP_SUCCESS 0x12
2991+
2992+#define FCAP_REQUEST 0x13
2993+#define FCAP_ACK 0x14
2994+#define FCAP_CONFIRM 0x15
2995+
2996+#define PROTS_NUM 0x01
2997+
2998+#define NAME_TAG 0x01
2999+#define NAME_LEN 0x08
3000+
3001+#define HASH_LIST_TAG 0x01
3002+
3003+#define DHGID_LIST_TAG 0x02
3004+
3005+#define HBA_SECURITY 0x20
3006+
3007+#define AUTH_ERR 0x1
3008+#define LOGIC_ERR 0x2
3009+
3010+#define BAD_DHGROUP 0x2
3011+#define BAD_ALGORITHM 0x3
3012+#define AUTHENTICATION_FAILED 0x5
3013+#define BAD_PAYLOAD 0x6
3014+#define BAD_PROTOCOL 0x7
3015+#define RESTART 0x8
3016+
3017+#define AUTH_VERSION 0x1
3018+
3019+#define MAX_AUTH_MESSAGE_SIZE 1024
3020+
3021+struct lpfc_auth_reject {
3022+ uint8_t reason;
3023+ uint8_t explanation;
3024+ uint8_t reserved[2];
3025+} __attribute__ ((packed));
3026+
3027+struct lpfc_auth_message { /* Structure is in Big Endian format */
3028+ uint8_t command_code;
3029+ uint8_t flags;
3030+ uint8_t message_code;
3031+ uint8_t protocol_ver;
3032+ uint32_t message_len;
3033+ uint32_t trans_id;
3034+ uint8_t data[0];
3035+} __attribute__ ((packed));
3036+
3037+int lpfc_build_auth_neg(struct lpfc_vport *vport, uint8_t *message);
3038+int lpfc_build_dhchap_challenge(struct lpfc_vport *vport, uint8_t *message,
3039+ struct fc_auth_rsp *fc_rsp);
3040+int lpfc_build_dhchap_reply(struct lpfc_vport *vport, uint8_t *message,
3041+ struct fc_auth_rsp *fc_rsp);
3042+int lpfc_build_dhchap_success(struct lpfc_vport *vport, uint8_t *message,
3043+ struct fc_auth_rsp *fc_rsp);
3044+
3045+int lpfc_unpack_auth_negotiate(struct lpfc_vport *vport, uint8_t *message,
3046+ uint8_t *reason, uint8_t *explanation);
3047+int lpfc_unpack_dhchap_challenge(struct lpfc_vport *vport, uint8_t *message,
3048+ uint8_t *reason, uint8_t *explanation);
3049+int lpfc_unpack_dhchap_reply(struct lpfc_vport *vport, uint8_t *message,
3050+ struct fc_auth_req *fc_req);
3051+int lpfc_unpack_dhchap_success(struct lpfc_vport *vport, uint8_t *message,
3052+ struct fc_auth_req *fc_req);
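For reference, a hedged sketch of how the fixed part of an AUTH ELS message carrying an AUTH_Reject body can be assembled from the two structures above. The real encoder is lpfc_issue_els_auth_reject(), which is only declared in lpfc_crtn.h below; the helper name and the assumption that message_len counts just the reject body are illustrative:

#include <linux/types.h>
#include <linux/string.h>
#include <asm/byteorder.h>

static void example_fill_auth_reject(struct lpfc_auth_message *hdr,
				     u32 trans_id, u8 reason, u8 explanation)
{
	struct lpfc_auth_reject *rjt = (struct lpfc_auth_reject *)hdr->data;

	hdr->command_code = ELS_CMD_AUTH_BYTE;		/* 0x90 ELS command */
	hdr->flags        = 0;
	hdr->message_code = AUTH_REJECT;		/* 0x0A */
	hdr->protocol_ver = AUTH_VERSION;
	hdr->message_len  = cpu_to_be32(sizeof(*rjt));	/* assumed scope */
	hdr->trans_id     = cpu_to_be32(trans_id);

	rjt->reason      = reason;		/* e.g. AUTH_ERR or LOGIC_ERR */
	rjt->explanation = explanation;		/* e.g. BAD_PAYLOAD, BAD_PROTOCOL */
	memset(rjt->reserved, 0, sizeof(rjt->reserved));
}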
3053--- a/drivers/scsi/lpfc/lpfc_crtn.h
3054+++ b/drivers/scsi/lpfc/lpfc_crtn.h
3055@@ -21,6 +21,12 @@
3056 typedef int (*node_filter)(struct lpfc_nodelist *, void *);
3057
3058 struct fc_rport;
3059+int lpfc_issue_els_auth(struct lpfc_vport *, struct lpfc_nodelist *,
3060+ uint8_t message_code, uint8_t *payload,
3061+ uint32_t payload_len);
3062+int lpfc_issue_els_auth_reject(struct lpfc_vport *vport,
3063+ struct lpfc_nodelist *ndlp,
3064+ uint8_t reason, uint8_t explanation);
3065 void lpfc_dump_mem(struct lpfc_hba *, LPFC_MBOXQ_t *, uint16_t);
3066 void lpfc_read_nv(struct lpfc_hba *, LPFC_MBOXQ_t *);
3067 void lpfc_config_async(struct lpfc_hba *, LPFC_MBOXQ_t *, uint32_t);
3068@@ -80,7 +86,10 @@ void lpfc_cleanup(struct lpfc_vport *);
3069 void lpfc_disc_timeout(unsigned long);
3070
3071 struct lpfc_nodelist *__lpfc_findnode_rpi(struct lpfc_vport *, uint16_t);
3072+struct lpfc_nodelist *lpfc_findnode_wwnn(struct lpfc_vport *,
3073+ struct lpfc_name *);
3074
3075+void lpfc_port_auth_failed(struct lpfc_nodelist *);
3076 void lpfc_worker_wake_up(struct lpfc_hba *);
3077 int lpfc_workq_post_event(struct lpfc_hba *, void *, void *, uint32_t);
3078 int lpfc_do_work(void *);
3079@@ -95,6 +104,9 @@ void lpfc_more_plogi(struct lpfc_vport *
3080 void lpfc_more_adisc(struct lpfc_vport *);
3081 void lpfc_end_rscn(struct lpfc_vport *);
3082 int lpfc_els_chk_latt(struct lpfc_vport *);
3083+struct lpfc_iocbq *lpfc_prep_els_iocb(struct lpfc_vport *, uint8_t, uint16_t,
3084+ uint8_t, struct lpfc_nodelist *, uint32_t,
3085+ uint32_t);
3086 int lpfc_els_abort_flogi(struct lpfc_hba *);
3087 int lpfc_initial_flogi(struct lpfc_vport *);
3088 int lpfc_initial_fdisc(struct lpfc_vport *);
3089@@ -117,6 +129,8 @@ int lpfc_els_rsp_prli_acc(struct lpfc_vp
3090 void lpfc_cancel_retry_delay_tmo(struct lpfc_vport *, struct lpfc_nodelist *);
3091 void lpfc_els_retry_delay(unsigned long);
3092 void lpfc_els_retry_delay_handler(struct lpfc_nodelist *);
3093+void lpfc_reauth_node(unsigned long);
3094+void lpfc_reauthentication_handler(struct lpfc_nodelist *);
3095 void lpfc_els_unsol_event(struct lpfc_hba *, struct lpfc_sli_ring *,
3096 struct lpfc_iocbq *);
3097 int lpfc_els_handle_rscn(struct lpfc_vport *);
3098@@ -258,7 +272,6 @@ void lpfc_free_sysfs_attr(struct lpfc_vp
3099 extern struct device_attribute *lpfc_hba_attrs[];
3100 extern struct device_attribute *lpfc_vport_attrs[];
3101 extern struct scsi_host_template lpfc_template;
3102-extern struct scsi_host_template lpfc_vport_template;
3103 extern struct fc_function_template lpfc_transport_functions;
3104 extern struct fc_function_template lpfc_vport_transport_functions;
3105 extern int lpfc_sli_mode;
3106@@ -276,6 +289,22 @@ void destroy_port(struct lpfc_vport *);
3107 int lpfc_get_instance(void);
3108 void lpfc_host_attrib_init(struct Scsi_Host *);
3109
3110+int lpfc_selective_reset(struct lpfc_hba *);
3111+int lpfc_security_wait(struct lpfc_hba *);
3112+int lpfc_get_security_enabled(struct Scsi_Host *);
3113+void lpfc_security_service_online(struct Scsi_Host *);
3114+void lpfc_security_service_offline(struct Scsi_Host *);
3115+void lpfc_security_config(struct Scsi_Host *, int status, void *);
3116+int lpfc_security_config_wait(struct lpfc_vport *vport);
3117+void lpfc_dhchap_make_challenge(struct Scsi_Host *, int , void *, uint32_t);
3118+void lpfc_dhchap_make_response(struct Scsi_Host *, int , void *, uint32_t);
3119+void lpfc_dhchap_authenticate(struct Scsi_Host *, int , void *, uint32_t);
3120+int lpfc_start_node_authentication(struct lpfc_nodelist *);
3121+int lpfc_get_auth_config(struct lpfc_nodelist *, struct lpfc_name *);
3122+void lpfc_start_discovery(struct lpfc_vport *vport);
3123+void lpfc_start_authentication(struct lpfc_vport *, struct lpfc_nodelist *);
3124+int lpfc_rcv_nl_msg(struct Scsi_Host *, void *, uint32_t, uint32_t);
3125+
3126 extern void lpfc_debugfs_initialize(struct lpfc_vport *);
3127 extern void lpfc_debugfs_terminate(struct lpfc_vport *);
3128 extern void lpfc_debugfs_disc_trc(struct lpfc_vport *, int, char *, uint32_t,
3129@@ -284,6 +313,11 @@ extern void lpfc_debugfs_slow_ring_trc(s
3130 uint32_t, uint32_t);
3131 extern struct lpfc_hbq_init *lpfc_hbq_defs[];
3132
3133+extern uint8_t lpfc_security_service_state;
3134+extern spinlock_t fc_security_user_lock;
3135+extern struct list_head fc_security_user_list;
3136+extern int fc_service_state;
3137+
3138 /* Interface exported by fabric iocb scheduler */
3139 void lpfc_fabric_abort_nport(struct lpfc_nodelist *);
3140 void lpfc_fabric_abort_hba(struct lpfc_hba *);
3141@@ -293,6 +327,7 @@ void lpfc_adjust_queue_depth(struct lpfc
3142 void lpfc_ramp_down_queue_handler(struct lpfc_hba *);
3143 void lpfc_ramp_up_queue_handler(struct lpfc_hba *);
3144 void lpfc_scsi_dev_block(struct lpfc_hba *);
3145+void lpfc_scsi_dev_rescan(struct lpfc_hba *);
3146
3147 void
3148 lpfc_send_els_failure_event(struct lpfc_hba *, struct lpfc_iocbq *,
3149--- a/drivers/scsi/lpfc/lpfc_disc.h
3150+++ b/drivers/scsi/lpfc/lpfc_disc.h
3151@@ -37,6 +37,7 @@ enum lpfc_work_type {
3152 LPFC_EVT_KILL,
3153 LPFC_EVT_ELS_RETRY,
3154 LPFC_EVT_DEV_LOSS,
3155+ LPFC_EVT_REAUTH,
3156 LPFC_EVT_FASTPATH_MGMT_EVT,
3157 };
3158
3159@@ -99,10 +100,12 @@ struct lpfc_nodelist {
3160 #define NLP_USG_FREE_ACK_BIT 0x8 /* Indicate ndlp memory free invoked */
3161
3162 struct timer_list nlp_delayfunc; /* Used for delayed ELS cmds */
3163+ struct timer_list nlp_reauth_tmr; /* Used for re-authentication */
3164 struct fc_rport *rport; /* Corresponding FC transport
3165 port structure */
3166 struct lpfc_vport *vport;
3167 struct lpfc_work_evt els_retry_evt;
3168+ struct lpfc_work_evt els_reauth_evt;
3169 struct lpfc_work_evt dev_loss_evt;
3170 unsigned long last_ramp_up_time; /* jiffy of last ramp up */
3171 unsigned long last_q_full_time; /* jiffy of last queue full */
3172--- a/drivers/scsi/lpfc/lpfc_els.c
3173+++ b/drivers/scsi/lpfc/lpfc_els.c
3174@@ -38,6 +38,9 @@
3175 #include "lpfc_crtn.h"
3176 #include "lpfc_vport.h"
3177 #include "lpfc_debugfs.h"
3178+#include "lpfc_auth_access.h"
3179+#include "lpfc_auth.h"
3180+#include "lpfc_security.h"
3181
3182 static int lpfc_els_retry(struct lpfc_hba *, struct lpfc_iocbq *,
3183 struct lpfc_iocbq *);
3184@@ -143,7 +146,7 @@ lpfc_els_chk_latt(struct lpfc_vport *vpo
3185 * Pointer to the newly allocated/prepared els iocb data structure
3186 * NULL - when els iocb data structure allocation/preparation failed
3187 **/
3188-static struct lpfc_iocbq *
3189+struct lpfc_iocbq *
3190 lpfc_prep_els_iocb(struct lpfc_vport *vport, uint8_t expectRsp,
3191 uint16_t cmdSize, uint8_t retry,
3192 struct lpfc_nodelist *ndlp, uint32_t did,
3193@@ -653,6 +656,7 @@ lpfc_cmpl_els_flogi(struct lpfc_hba *phb
3194 struct lpfc_nodelist *ndlp = cmdiocb->context1;
3195 struct lpfc_dmabuf *pcmd = cmdiocb->context2, *prsp;
3196 struct serv_parm *sp;
3197+ struct lpfc_name wwpn;
3198 int rc;
3199
3200 /* Check to see if link went down during discovery */
3201@@ -703,7 +707,10 @@ lpfc_cmpl_els_flogi(struct lpfc_hba *phb
3202 prsp = list_get_first(&pcmd->list, struct lpfc_dmabuf, list);
3203
3204 sp = prsp->virt + sizeof(uint32_t);
3205-
3206+ if (sp->cmn.security)
3207+ ndlp->nlp_flag |= NLP_SC_REQ;
3208+ else
3209+ ndlp->nlp_flag &= ~NLP_SC_REQ;
3210 /* FLOGI completes successfully */
3211 lpfc_printf_vlog(vport, KERN_INFO, LOG_ELS,
3212 "0101 FLOGI completes sucessfully "
3213@@ -711,6 +718,20 @@ lpfc_cmpl_els_flogi(struct lpfc_hba *phb
3214 irsp->un.ulpWord[4], sp->cmn.e_d_tov,
3215 sp->cmn.w2.r_a_tov, sp->cmn.edtovResolution);
3216
3217+ if (vport->cfg_enable_auth) {
3218+ u64_to_wwn(AUTH_FABRIC_WWN, wwpn.u.wwn);
3219+ if (lpfc_get_auth_config(ndlp, &wwpn))
3220+ goto flogifail;
3221+ } else {
3222+ vport->auth.security_active = 0;
3223+ if (sp->cmn.security) {
3224+ lpfc_printf_vlog(vport, KERN_ERR, LOG_SECURITY,
3225+ "1055 Authentication parameter is "
3226+ "disabled, but is required by "
3227+ "the fabric.\n");
3228+ goto flogifail;
3229+ }
3230+ }
3231 if (vport->port_state == LPFC_FLOGI) {
3232 /*
3233 * If Common Service Parameters indicate Nport
3234@@ -800,6 +821,10 @@ lpfc_issue_els_flogi(struct lpfc_vport *
3235 sp = (struct serv_parm *) pcmd;
3236
3237 /* Setup CSPs accordingly for Fabric */
3238+
3239+ if (vport->cfg_enable_auth)
3240+ sp->cmn.security = 1;
3241+
3242 sp->cmn.e_d_tov = 0;
3243 sp->cmn.w2.r_a_tov = 0;
3244 sp->cls1.classValid = 0;
3245@@ -965,6 +990,17 @@ lpfc_initial_fdisc(struct lpfc_vport *vp
3246 struct lpfc_hba *phba = vport->phba;
3247 struct lpfc_nodelist *ndlp;
3248
3249+ if (vport->cfg_enable_auth) {
3250+ if (lpfc_security_wait(phba)) {
3251+ lpfc_printf_vlog(vport, KERN_ERR, LOG_SECURITY,
3252+ "1049 Authentication is enabled but "
3253+ "authentication service is not "
3254+ "running\n");
3255+ vport->auth.auth_mode = FC_AUTHMODE_UNKNOWN;
3256+ return 0;
3257+ }
3258+ }
3259+
3260 /* First look for the Fabric ndlp */
3261 ndlp = lpfc_findnode_did(vport, Fabric_DID);
3262 if (!ndlp) {
3263@@ -2697,6 +2733,17 @@ lpfc_els_retry(struct lpfc_hba *phba, st
3264 lpfc_nlp_set_state(vport, ndlp, NLP_STE_NPR_NODE);
3265 lpfc_issue_els_logo(vport, ndlp, cmdiocb->retry);
3266 return 1;
3267+ case ELS_CMD_AUTH_NEG:
3268+ case ELS_CMD_DH_CHA:
3269+ case ELS_CMD_DH_REP:
3270+ case ELS_CMD_DH_SUC:
3271+ ndlp->nlp_prev_state = ndlp->nlp_state;
3272+ ndlp->nlp_state = NLP_STE_NPR_NODE;
3273+ lpfc_printf_log(phba, KERN_INFO, LOG_ELS,
3274+ "0143 Authentication LS_RJT Logical "
3275+ "busy\n");
3276+ lpfc_start_authentication(vport, ndlp);
3277+ return 1;
3278 }
3279 }
3280 /* No retry ELS command <elsCmd> to remote NPORT <did> */
3281@@ -5085,6 +5132,363 @@ lpfc_els_flush_all_cmd(struct lpfc_hba
3282 return;
3283 }
3284
3285+static void
3286+lpfc_els_rcv_auth_neg(struct lpfc_vport *vport, struct lpfc_iocbq *cmdiocb,
3287+ struct lpfc_nodelist *ndlp)
3288+{
3289+ struct Scsi_Host *shost = lpfc_shost_from_vport(vport);
3290+ struct lpfc_dmabuf *pcmd = cmdiocb->context2;
3291+ struct lpfc_auth_message *authcmd;
3292+ uint8_t reason, explanation;
3293+ uint32_t message_len;
3294+ uint32_t trans_id;
3295+ struct fc_auth_req *fc_req;
3296+ struct fc_auth_rsp *fc_rsp;
3297+
3298+ authcmd = pcmd->virt;
3299+ message_len = be32_to_cpu(authcmd->message_len);
3300+ trans_id = be32_to_cpu(authcmd->trans_id);
3301+
3302+ lpfc_els_rsp_acc(vport, ELS_CMD_ACC, cmdiocb, ndlp, NULL);
3303+
3304+ vport->auth.trans_id = trans_id;
3305+
3306+ if (lpfc_unpack_auth_negotiate(vport, authcmd->data,
3307+ &reason, &explanation)) {
3308+ lpfc_issue_els_auth_reject(vport, ndlp, reason, explanation);
3309+ return;
3310+ }
3311+ vport->auth.direction = AUTH_DIRECTION_NONE;
3312+ lpfc_printf_vlog(vport, KERN_WARNING, LOG_SECURITY,
3313+ "1033 Received auth_negotiate from Nport:x%x\n",
3314+ ndlp->nlp_DID);
3315+
3316+ fc_req = kzalloc(sizeof(struct fc_auth_req), GFP_KERNEL);
3317+
3318+ fc_req->local_wwpn = wwn_to_u64(vport->fc_portname.u.wwn);
3319+ if (ndlp->nlp_type & NLP_FABRIC)
3320+ fc_req->remote_wwpn = AUTH_FABRIC_WWN;
3321+ else
3322+ fc_req->remote_wwpn = wwn_to_u64(ndlp->nlp_portname.u.wwn);
3323+ fc_req->u.dhchap_challenge.transaction_id = vport->auth.trans_id;
3324+ fc_req->u.dhchap_challenge.dh_group_id = vport->auth.group_id;
3325+ fc_req->u.dhchap_challenge.hash_id = vport->auth.hash_id;
3326+
3327+ fc_rsp = kzalloc(MAX_AUTH_RSP_SIZE, GFP_KERNEL);
3328+
3329+ if (lpfc_fc_security_dhchap_make_challenge(shost,
3330+ fc_req, sizeof(struct fc_auth_req),
3331+ fc_rsp, MAX_AUTH_RSP_SIZE)) {
3332+ kfree(fc_rsp);
3333+ lpfc_issue_els_auth_reject(vport, ndlp, LOGIC_ERR, 0);
3334+ }
3335+
3336+ kfree(fc_req);
3337+
3338+}
3339+
3340+static void
3341+lpfc_els_rcv_chap_chal(struct lpfc_vport *vport, struct lpfc_iocbq *cmdiocb,
3342+ struct lpfc_nodelist *ndlp)
3343+{
3344+
3345+ struct Scsi_Host *shost = lpfc_shost_from_vport(vport);
3346+ struct lpfc_dmabuf *pcmd = cmdiocb->context2;
3347+ struct lpfc_auth_message *authcmd;
3348+ uint8_t reason, explanation;
3349+ uint32_t message_len;
3350+ uint32_t trans_id;
3351+ struct fc_auth_req *fc_req;
3352+ struct fc_auth_rsp *fc_rsp;
3353+ uint32_t fc_req_len;
3354+
3355+ authcmd = pcmd->virt;
3356+ message_len = be32_to_cpu(authcmd->message_len);
3357+ trans_id = be32_to_cpu(authcmd->trans_id);
3358+
3359+ lpfc_els_rsp_acc(vport, ELS_CMD_ACC, cmdiocb, ndlp, NULL);
3360+
3361+ if (vport->auth.auth_msg_state != LPFC_AUTH_NEGOTIATE) {
3362+ lpfc_printf_vlog(vport, KERN_ERR, LOG_SECURITY,
3363+ "1034 Not Expecting Challenge - Rejecting "
3364+ "Challenge.\n");
3365+ lpfc_issue_els_auth_reject(vport, ndlp, AUTH_ERR, BAD_PROTOCOL);
3366+ return;
3367+ }
3368+
3369+ if (trans_id != vport->auth.trans_id) {
3370+ lpfc_printf_vlog(vport, KERN_ERR, LOG_SECURITY,
3371+ "1035 Transport ID does not match - Rejecting "
3372+ "Challenge.\n");
3373+ lpfc_issue_els_auth_reject(vport, ndlp, AUTH_ERR, BAD_PAYLOAD);
3374+ return;
3375+ }
3376+
3377+ if (lpfc_unpack_dhchap_challenge(vport, authcmd->data,
3378+ &reason, &explanation)) {
3379+ lpfc_issue_els_auth_reject(vport, ndlp, reason, explanation);
3380+ return;
3381+ }
3382+ vport->auth.direction = AUTH_DIRECTION_NONE;
3383+
3384+ fc_req_len = (sizeof(struct fc_auth_req) +
3385+ vport->auth.challenge_len +
3386+ vport->auth.dh_pub_key_len);
3387+ fc_req = kzalloc(fc_req_len, GFP_KERNEL);
3388+ fc_req->local_wwpn = wwn_to_u64(vport->fc_portname.u.wwn);
3389+ if (ndlp->nlp_type & NLP_FABRIC)
3390+ fc_req->remote_wwpn = AUTH_FABRIC_WWN;
3391+ else
3392+ fc_req->remote_wwpn = wwn_to_u64(ndlp->nlp_portname.u.wwn);
3393+ fc_req->u.dhchap_reply.transaction_id = vport->auth.trans_id;
3394+ fc_req->u.dhchap_reply.dh_group_id = vport->auth.group_id;
3395+ fc_req->u.dhchap_reply.hash_id = vport->auth.hash_id;
3396+ fc_req->u.dhchap_reply.bidirectional = vport->auth.bidirectional;
3397+ fc_req->u.dhchap_reply.received_challenge_len =
3398+ vport->auth.challenge_len;
3399+ fc_req->u.dhchap_reply.received_public_key_len =
3400+ vport->auth.dh_pub_key_len;
3401+ memcpy(fc_req->u.dhchap_reply.data, vport->auth.challenge,
3402+ vport->auth.challenge_len);
3403+ if (vport->auth.group_id != DH_GROUP_NULL) {
3404+ memcpy(fc_req->u.dhchap_reply.data + vport->auth.challenge_len,
3405+ vport->auth.dh_pub_key, vport->auth.dh_pub_key_len);
3406+ }
3407+
3408+ fc_rsp = kzalloc(MAX_AUTH_RSP_SIZE, GFP_KERNEL);
3409+
3410+ if (lpfc_fc_security_dhchap_make_response(shost,
3411+ fc_req, fc_req_len,
3412+ fc_rsp, MAX_AUTH_RSP_SIZE)) {
3413+ kfree(fc_rsp);
3414+ lpfc_issue_els_auth_reject(vport, ndlp, LOGIC_ERR, 0);
3415+ }
3416+
3417+ kfree(fc_req);
3418+
3419+}
3420+
3421+static void
3422+lpfc_els_rcv_auth_rjt(struct lpfc_vport *vport, struct lpfc_iocbq *cmdiocb,
3423+ struct lpfc_nodelist *ndlp)
3424+{
3425+
3426+ struct lpfc_dmabuf *pcmd = cmdiocb->context2;
3427+ struct lpfc_auth_message *authcmd;
3428+ uint32_t message_len;
3429+ uint32_t trans_id;
3430+ struct lpfc_auth_reject *rjt;
3431+ struct lpfc_hba *phba = vport->phba;
3432+
3433+ authcmd = pcmd->virt;
3434+ rjt = (struct lpfc_auth_reject *)authcmd->data;
3435+
3436+ message_len = be32_to_cpu(authcmd->message_len);
3437+ trans_id = be32_to_cpu(authcmd->trans_id);
3438+
3439+ lpfc_els_rsp_acc(vport, ELS_CMD_ACC, cmdiocb, ndlp, NULL);
3440+
3441+ if (vport->auth.auth_state == LPFC_AUTH_SUCCESS) {
3442+ lpfc_printf_vlog(vport, KERN_ERR, LOG_SECURITY,
3443+ "1036 Authentication transaction reject - "
3444+ "re-auth request reason 0x%x exp 0x%x\n",
3445+ rjt->reason, rjt->explanation);
3446+ lpfc_port_auth_failed(ndlp);
3447+ if (vport->auth.auth_msg_state == LPFC_DHCHAP_SUCCESS) {
3448+ /* start authentication */
3449+ lpfc_start_authentication(vport, ndlp);
3450+ }
3451+ } else {
3452+ if (rjt->reason == LOGIC_ERR && rjt->explanation == RESTART) {
3453+ lpfc_printf_vlog(vport, KERN_ERR, LOG_SECURITY,
3454+ "1037 Authentication transaction "
3455+ "reject - restarting authentication. "
3456+ "reason 0x%x exp 0x%x\n",
3457+ rjt->reason, rjt->explanation);
3458+ /* restart auth */
3459+ lpfc_start_authentication(vport, ndlp);
3460+ } else {
3461+ lpfc_printf_vlog(vport, KERN_ERR, LOG_SECURITY,
3462+ "1057 Authentication transaction "
3463+ "reject. reason 0x%x exp 0x%x\n",
3464+ rjt->reason, rjt->explanation);
3465+ vport->auth.auth_msg_state = LPFC_AUTH_REJECT;
3466+ if (!(phba->sli3_options & LPFC_SLI3_NPIV_ENABLED) &&
3467+ (phba->link_state != LPFC_CLEAR_LA)) {
3468+ /* If Auth failed enable link interrupt. */
3469+ lpfc_issue_clear_la(phba, vport);
3470+ }
3471+ }
3472+ }
3473+}
3474+
3475+static void
3476+lpfc_els_rcv_chap_reply(struct lpfc_vport *vport, struct lpfc_iocbq *cmdiocb,
3477+ struct lpfc_nodelist *ndlp)
3478+{
3479+
3480+ struct Scsi_Host *shost = lpfc_shost_from_vport(vport);
3481+ struct lpfc_dmabuf *pcmd = cmdiocb->context2;
3482+ struct lpfc_auth_message *authcmd;
3483+ uint32_t message_len;
3484+ uint32_t trans_id;
3485+ struct fc_auth_req *fc_req;
3486+ struct fc_auth_rsp *fc_rsp;
3487+ uint32_t data_len;
3488+
3489+ authcmd = pcmd->virt;
3490+ message_len = be32_to_cpu(authcmd->message_len);
3491+ trans_id = be32_to_cpu(authcmd->trans_id);
3492+
3493+ lpfc_els_rsp_acc(vport, ELS_CMD_ACC, cmdiocb, ndlp, NULL);
3494+
3495+	fc_req = kzalloc(MAX_AUTH_REQ_SIZE, GFP_KERNEL);
+	if (!fc_req)
+		return;
3496+
3497+	fc_req->local_wwpn = wwn_to_u64(vport->fc_portname.u.wwn);
3498+ if (ndlp->nlp_type & NLP_FABRIC)
3499+ fc_req->remote_wwpn = AUTH_FABRIC_WWN;
3500+ else
3501+ fc_req->remote_wwpn = wwn_to_u64(ndlp->nlp_portname.u.wwn);
3502+
3503+ if (vport->auth.auth_msg_state != LPFC_DHCHAP_CHALLENGE) {
3504+ lpfc_printf_vlog(vport, KERN_ERR, LOG_SECURITY,
3505+ "1039 Not Expecting Reply - rejecting. State "
3506+ "0x%x\n", vport->auth.auth_state);
3507+
3508+ lpfc_issue_els_auth_reject(vport, ndlp, AUTH_ERR, BAD_PROTOCOL);
3509+ return;
3510+ }
3511+
3512+ if (trans_id != vport->auth.trans_id) {
3513+ lpfc_printf_vlog(vport, KERN_ERR, LOG_SECURITY,
3514+				 "1040 Bad Reply trans_id - rejecting. "
3515+				 "Trans_id: 0x%x Expecting: 0x%x\n",
3516+ trans_id, vport->auth.trans_id);
3517+ lpfc_issue_els_auth_reject(vport, ndlp, AUTH_ERR, BAD_PAYLOAD);
3518+ return;
3519+ }
3520+
3521+ /* Zero is a valid length to be returned */
3522+ data_len = lpfc_unpack_dhchap_reply(vport, authcmd->data, fc_req);
3523+ fc_req->u.dhchap_success.hash_id = vport->auth.hash_id;
3524+ fc_req->u.dhchap_success.dh_group_id = vport->auth.group_id;
3525+ fc_req->u.dhchap_success.transaction_id = vport->auth.trans_id;
3526+ fc_req->u.dhchap_success.our_challenge_len = vport->auth.challenge_len;
3527+ memcpy(fc_req->u.dhchap_success.data, vport->auth.challenge,
3528+ vport->auth.challenge_len);
3529+
3530+	fc_rsp = kzalloc(MAX_AUTH_RSP_SIZE, GFP_KERNEL);
+	if (!fc_rsp) {
+		kfree(fc_req);
+		return;
+	}
3531+
3532+ if (lpfc_fc_security_dhchap_authenticate(shost, fc_req,
3533+ (sizeof(struct fc_auth_req) +
3534+ data_len + vport->auth.challenge_len),
3535+ fc_rsp, MAX_AUTH_RSP_SIZE)) {
3536+ kfree(fc_rsp);
3537+ lpfc_issue_els_auth_reject(vport, ndlp, LOGIC_ERR, 0);
3538+ }
3539+
3540+ kfree(fc_req);
3541+
3542+}
3543+
3544+static void
3545+lpfc_els_rcv_chap_suc(struct lpfc_vport *vport, struct lpfc_iocbq *cmdiocb,
3546+ struct lpfc_nodelist *ndlp)
3547+{
3548+
3549+ struct Scsi_Host *shost = lpfc_shost_from_vport(vport);
3550+ struct lpfc_dmabuf *pcmd = cmdiocb->context2;
3551+ struct lpfc_auth_message *authcmd;
3552+ uint32_t message_len;
3553+ uint32_t trans_id;
3554+ struct fc_auth_req *fc_req;
3555+ struct fc_auth_rsp *fc_rsp;
3556+ uint32_t data_len;
3557+
3558+ authcmd = pcmd->virt;
3559+ message_len = be32_to_cpu(authcmd->message_len);
3560+ trans_id = be32_to_cpu(authcmd->trans_id);
3561+
3562+ lpfc_els_rsp_acc(vport, ELS_CMD_ACC, cmdiocb, ndlp, NULL);
3563+
3564+ if (vport->auth.auth_msg_state != LPFC_DHCHAP_REPLY &&
3565+ vport->auth.auth_msg_state != LPFC_DHCHAP_SUCCESS_REPLY) {
3566+ lpfc_issue_els_auth_reject(vport, ndlp, AUTH_ERR, BAD_PROTOCOL);
3567+ return;
3568+ }
3569+
3570+ if (trans_id != vport->auth.trans_id) {
3571+ lpfc_issue_els_auth_reject(vport, ndlp, AUTH_ERR, BAD_PAYLOAD);
3572+ return;
3573+ }
3574+
3575+ if (vport->auth.auth_msg_state == LPFC_DHCHAP_REPLY &&
3576+ vport->auth.bidirectional) {
3577+
3578+ fc_req = kzalloc(MAX_AUTH_REQ_SIZE, GFP_KERNEL);
3579+ if (!fc_req)
3580+ return;
3581+
3582+ fc_req->local_wwpn = wwn_to_u64(vport->fc_portname.u.wwn);
3583+ if (ndlp->nlp_type & NLP_FABRIC)
3584+ fc_req->remote_wwpn = AUTH_FABRIC_WWN;
3585+ else
3586+ fc_req->remote_wwpn =
3587+ wwn_to_u64(ndlp->nlp_portname.u.wwn);
3588+ fc_req->u.dhchap_success.hash_id = vport->auth.hash_id;
3589+ fc_req->u.dhchap_success.dh_group_id = vport->auth.group_id;
3590+ fc_req->u.dhchap_success.transaction_id = vport->auth.trans_id;
3591+ fc_req->u.dhchap_success.our_challenge_len =
3592+ vport->auth.challenge_len;
3593+
3594+ memcpy(fc_req->u.dhchap_success.data, vport->auth.challenge,
3595+ vport->auth.challenge_len);
3596+
3597+ /* Zero is a valid return length */
3598+ data_len = lpfc_unpack_dhchap_success(vport,
3599+ authcmd->data,
3600+ fc_req);
3601+
3602+		fc_rsp = kzalloc(MAX_AUTH_RSP_SIZE, GFP_KERNEL);
3603+		if (!fc_rsp) {
+			kfree(fc_req);
+			return;
+		}
3605+
3606+ if (lpfc_fc_security_dhchap_authenticate(shost,
3607+ fc_req, sizeof(struct fc_auth_req) + data_len,
3608+ fc_rsp, MAX_AUTH_RSP_SIZE)) {
3609+ kfree(fc_rsp);
3610+ lpfc_issue_els_auth_reject(vport, ndlp, LOGIC_ERR, 0);
3611+ }
3612+
3613+ kfree(fc_req);
3614+
3615+ } else {
3616+ vport->auth.auth_msg_state = LPFC_DHCHAP_SUCCESS;
3617+
3618+ kfree(vport->auth.challenge);
3619+ vport->auth.challenge = NULL;
3620+ vport->auth.challenge_len = 0;
3621+
3622+ if (vport->auth.auth_state != LPFC_AUTH_SUCCESS) {
3623+ vport->auth.auth_state = LPFC_AUTH_SUCCESS;
3624+ lpfc_printf_vlog(vport, KERN_INFO, LOG_SECURITY,
3625+ "1041 Authentication Successful\n");
3626+
3627+ lpfc_start_discovery(vport);
3628+
3629+ } else {
3630+ lpfc_printf_vlog(vport, KERN_INFO, LOG_SECURITY,
3631+ "1042 Re-Authentication Successful\n");
3632+ }
3633+ /* If config requires re-authentication start the timer */
3634+ vport->auth.last_auth = jiffies;
3635+ if (vport->auth.reauth_interval)
3636+ mod_timer(&ndlp->nlp_reauth_tmr, jiffies +
3637+ vport->auth.reauth_interval * 60 * HZ);
3638+ }
3639+ vport->auth.direction |= AUTH_DIRECTION_REMOTE;
3640+}
3641+
3642 /**
3643 * lpfc_send_els_failure_event: Posts an ELS command failure event.
3644 * @phba: Pointer to hba context object.
3645@@ -5462,6 +5866,48 @@ lpfc_els_unsol_buffer(struct lpfc_hba *p
3646 if (newnode)
3647 lpfc_nlp_put(ndlp);
3648 break;
3649+ case ELS_CMD_AUTH_RJT:
3650+ lpfc_debugfs_disc_trc(vport, LPFC_DISC_TRC_ELS_UNSOL,
3651+ "RCV AUTH_RJT: did:x%x/ste:x%x flg:x%x",
3652+ did, vport->port_state, ndlp->nlp_flag);
3653+
3654+ lpfc_els_rcv_auth_rjt(vport, elsiocb, ndlp);
3655+ break;
3656+ case ELS_CMD_AUTH_NEG:
3657+ lpfc_debugfs_disc_trc(vport, LPFC_DISC_TRC_ELS_UNSOL,
3658+ "RCV AUTH_NEG: did:x%x/ste:x%x flg:x%x",
3659+ did, vport->port_state, ndlp->nlp_flag);
3660+
3661+ lpfc_els_rcv_auth_neg(vport, elsiocb, ndlp);
3662+ break;
3663+ case ELS_CMD_DH_CHA:
3664+ lpfc_debugfs_disc_trc(vport, LPFC_DISC_TRC_ELS_UNSOL,
3665+ "RCV DH_CHA: did:x%x/ste:x%x flg:x%x",
3666+ did, vport->port_state, ndlp->nlp_flag);
3667+
3668+ lpfc_els_rcv_chap_chal(vport, elsiocb, ndlp);
3669+ break;
3670+ case ELS_CMD_DH_REP:
3671+ lpfc_debugfs_disc_trc(vport, LPFC_DISC_TRC_ELS_UNSOL,
3672+ "RCV DH_REP: did:x%x/ste:x%x flg:x%x",
3673+ did, vport->port_state, ndlp->nlp_flag);
3674+
3675+ lpfc_els_rcv_chap_reply(vport, elsiocb, ndlp);
3676+ break;
3677+ case ELS_CMD_DH_SUC:
3678+ lpfc_debugfs_disc_trc(vport, LPFC_DISC_TRC_ELS_UNSOL,
3679+ "RCV DH_SUC: did:x%x/ste:x%x flg:x%x",
3680+ did, vport->port_state, ndlp->nlp_flag);
3681+
3682+ lpfc_els_rcv_chap_suc(vport, elsiocb, ndlp);
3683+ break;
3684+
3685+ case ELS_CMD_AUTH_DONE:
3686+ lpfc_debugfs_disc_trc(vport, LPFC_DISC_TRC_ELS_UNSOL,
3687+ "RCV AUTH_DONE: did:x%x/ste:x%x flg:x%x",
3688+ did, vport->port_state, ndlp->nlp_flag);
3689+
3690+
3691 default:
3692 lpfc_debugfs_disc_trc(vport, LPFC_DISC_TRC_ELS_UNSOL,
3693 "RCV ELS cmd: cmd:x%x did:x%x/ste:x%x",
3694@@ -5747,7 +6193,7 @@ lpfc_cmpl_reg_new_vport(struct lpfc_hba
3695 } else {
3696 if (vport == phba->pport)
3697 lpfc_issue_fabric_reglogin(vport);
3698- else
3699+ else if (!vport->cfg_enable_auth)
3700 lpfc_do_scr_ns_plogi(phba, vport);
3701 }
3702
3703@@ -5840,6 +6286,9 @@ lpfc_cmpl_els_fdisc(struct lpfc_hba *phb
3704 struct lpfc_nodelist *next_np;
3705 IOCB_t *irsp = &rspiocb->iocb;
3706 struct lpfc_iocbq *piocb;
3707+ struct lpfc_dmabuf *pcmd = cmdiocb->context2, *prsp;
3708+ struct serv_parm *sp;
3709+ struct lpfc_name wwpn;
3710
3711 lpfc_printf_vlog(vport, KERN_INFO, LOG_ELS,
3712 "0123 FDISC completes. x%x/x%x prevDID: x%x\n",
3713@@ -5867,11 +6316,26 @@ lpfc_cmpl_els_fdisc(struct lpfc_hba *phb
3714 irsp->ulpStatus, irsp->un.ulpWord[4]);
3715 goto fdisc_failed;
3716 }
3717- if (vport->fc_vport->vport_state == FC_VPORT_INITIALIZING)
3718- lpfc_vport_set_state(vport, FC_VPORT_FAILED);
3719- lpfc_nlp_put(ndlp);
3720- /* giving up on FDISC. Cancel discovery timer */
3721- lpfc_can_disctmo(vport);
3722+ prsp = list_get_first(&pcmd->list, struct lpfc_dmabuf, list);
3723+ sp = prsp->virt + sizeof(uint32_t);
3724+ if (sp->cmn.security)
3725+ ndlp->nlp_flag |= NLP_SC_REQ;
3726+ else
3727+ ndlp->nlp_flag &= ~NLP_SC_REQ;
3728+ if (vport->cfg_enable_auth) {
3729+ u64_to_wwn(AUTH_FABRIC_WWN, wwpn.u.wwn);
3730+ if (lpfc_get_auth_config(ndlp, &wwpn))
3731+ goto fdisc_failed;
3732+ } else {
3733+ vport->auth.security_active = 0;
3734+ if (sp->cmn.security) {
3735+ lpfc_printf_vlog(vport, KERN_ERR, LOG_SECURITY,
3736+ "1056 Authentication mode is "
3737+ "disabled, but is required "
3738+ "by the fabric.\n");
3739+ goto fdisc_failed;
3740+ }
3741+ }
3742 spin_lock_irq(shost->host_lock);
3743 vport->fc_flag |= FC_FABRIC;
3744 if (vport->phba->fc_topology == TOPOLOGY_LOOP)
3745@@ -5905,7 +6369,7 @@ lpfc_cmpl_els_fdisc(struct lpfc_hba *phb
3746
3747 if (vport->fc_flag & FC_VPORT_NEEDS_REG_VPI)
3748 lpfc_register_new_vport(phba, vport, ndlp);
3749- else
3750+ else if (!vport->cfg_enable_auth)
3751 lpfc_do_scr_ns_plogi(phba, vport);
3752 goto out;
3753 fdisc_failed:
3754@@ -5980,6 +6444,10 @@ lpfc_issue_els_fdisc(struct lpfc_vport *
3755 sp->cls2.seqDelivery = 1;
3756 sp->cls3.seqDelivery = 1;
3757
3758+ /* Set the security service parameter */
3759+ if (vport->cfg_enable_auth)
3760+ sp->cmn.security = 1;
3761+
3762 pcmd += sizeof(uint32_t); /* CSP Word 2 */
3763 pcmd += sizeof(uint32_t); /* CSP Word 3 */
3764 pcmd += sizeof(uint32_t); /* CSP Word 4 */
3765@@ -6474,3 +6942,180 @@ void lpfc_fabric_abort_hba(struct lpfc_h
3766 (piocb->iocb_cmpl) (phba, piocb, piocb);
3767 }
3768 }
3769+static void
3770+lpfc_cmpl_els_auth(struct lpfc_hba *phba, struct lpfc_iocbq *cmdiocb,
3771+ struct lpfc_iocbq *rspiocb)
3772+{
3773+ IOCB_t *irsp = &rspiocb->iocb;
3774+ struct lpfc_vport *vport = cmdiocb->vport;
3775+ struct lpfc_nodelist *ndlp = (struct lpfc_nodelist *) cmdiocb->context1;
3776+
3777+ /* Check to see if link went down during discovery */
3778+ if (lpfc_els_chk_latt(vport)) {
3779+ vport->auth.auth_msg_state = LPFC_AUTH_NONE;
3780+ lpfc_els_free_iocb(phba, cmdiocb);
3781+ return;
3782+ }
3783+
3784+ if (irsp->ulpStatus) {
3785+ if (irsp->ulpStatus == IOSTAT_LS_RJT) {
3786+ lpfc_printf_vlog(vport, KERN_ERR, LOG_ELS,
3787+ "1043 Authentication LS_RJT\n");
3788+ }
3789+ /* Check for retry */
3790+ if (!lpfc_els_retry(phba, cmdiocb, rspiocb)) {
3791+ if (irsp->ulpStatus != IOSTAT_LS_RJT) {
3792+ lpfc_printf_vlog(vport, KERN_ERR, LOG_ELS,
3793+					 "1045 Issue AUTH_NEG failed. "
3794+ "Status:%x\n",
3795+ irsp->ulpStatus);
3796+ }
3797+ if (vport->auth.auth_mode == FC_AUTHMODE_ACTIVE) {
3798+ lpfc_can_disctmo(vport);
3799+ lpfc_port_auth_failed(ndlp);
3800+ }
3801+ }
3802+ if (!(phba->sli3_options & LPFC_SLI3_NPIV_ENABLED) &&
3803+ (phba->link_state != LPFC_CLEAR_LA))
3804+ lpfc_issue_clear_la(phba, vport);
3805+ lpfc_els_free_iocb(phba, cmdiocb);
3806+ return;
3807+ }
3808+
3809+ if (vport->auth.auth_msg_state == LPFC_DHCHAP_SUCCESS ||
3810+ vport->auth.auth_msg_state == LPFC_DHCHAP_SUCCESS_REPLY) {
3811+
3812+ kfree(vport->auth.challenge);
3813+ vport->auth.challenge = NULL;
3814+ vport->auth.challenge_len = 0;
3815+ kfree(vport->auth.dh_pub_key);
3816+ vport->auth.dh_pub_key = NULL;
3817+ vport->auth.dh_pub_key_len = 0;
3818+
3819+ if (vport->auth.auth_msg_state == LPFC_DHCHAP_SUCCESS) {
3820+ if (vport->auth.auth_state != LPFC_AUTH_SUCCESS) {
3821+ lpfc_printf_vlog(vport, KERN_WARNING,
3822+ LOG_SECURITY, "1046 "
3823+ "Authentication Successful\n");
3824+ vport->auth.auth_state = LPFC_AUTH_SUCCESS;
3825+ lpfc_start_discovery(vport);
3826+ } else {
3827+ lpfc_printf_vlog(vport, KERN_WARNING,
3828+ LOG_SECURITY,
3829+ "1047 Re-Authentication"
3830+ " Successful\n");
3831+ }
3832+ }
3833+ /* restart authentication timer */
3834+ vport->auth.last_auth = jiffies;
3835+ if (vport->auth.reauth_interval)
3836+ mod_timer(&ndlp->nlp_reauth_tmr,
3837+ jiffies +
3838+ vport->auth.reauth_interval * 60 * HZ);
3839+ }
3840+ lpfc_els_free_iocb(phba, cmdiocb);
3841+}
3842+
3843+int
3844+lpfc_issue_els_auth(struct lpfc_vport *vport,
3845+ struct lpfc_nodelist *ndlp,
3846+ uint8_t message_code,
3847+ uint8_t *payload,
3848+ uint32_t payload_len)
3849+{
3850+ struct lpfc_hba *phba = vport->phba;
3851+ struct lpfc_iocbq *elsiocb;
3852+ struct lpfc_auth_message *authreq;
3853+
3854+ elsiocb = lpfc_prep_els_iocb(vport, 1,
3855+ sizeof(struct lpfc_auth_message) + payload_len,
3856+ 0, ndlp, ndlp->nlp_DID, ELS_CMD_AUTH);
3857+
3858+ if (!elsiocb)
3859+ return 1;
3860+ authreq = (struct lpfc_auth_message *)
3861+ (((struct lpfc_dmabuf *) elsiocb->context2)->virt);
3862+ authreq->command_code = ELS_CMD_AUTH_BYTE;
3863+ authreq->flags = 0;
3864+ authreq->message_code = message_code;
3865+ authreq->protocol_ver = AUTH_VERSION;
3866+ authreq->message_len = cpu_to_be32(payload_len);
3867+ authreq->trans_id = cpu_to_be32(vport->auth.trans_id);
3868+ memcpy(authreq->data, payload, payload_len);
3869+
3870+ elsiocb->iocb_cmpl = lpfc_cmpl_els_auth;
3871+
3872+ if (lpfc_sli_issue_iocb(phba, &phba->sli.ring[LPFC_ELS_RING],
3873+ elsiocb, 0) == IOCB_ERROR) {
3874+ lpfc_els_free_iocb(phba, elsiocb);
3875+ return 1;
3876+ }
3877+
3878+ return 0;
3879+}
3880+
3881+static void
3882+lpfc_cmpl_els_auth_reject(struct lpfc_hba *phba, struct lpfc_iocbq *cmdiocb,
3883+ struct lpfc_iocbq *rspiocb)
3884+{
3885+ struct lpfc_vport *vport = cmdiocb->vport;
3886+ IOCB_t *irsp = &rspiocb->iocb;
3887+
3888+ if (irsp->ulpStatus) {
3889+ /* Check for retry */
3890+ if (!lpfc_els_retry(phba, cmdiocb, rspiocb)) {
3891+ lpfc_printf_log(phba, KERN_ERR, LOG_ELS,
3892+ "1048 Issue AUTH_REJECT failed.\n");
3893+ }
3894+ } else
3895+ vport->port_state = LPFC_VPORT_UNKNOWN;
3896+
3897+ lpfc_els_free_iocb(phba, cmdiocb);
3898+}
3899+
3900+int
3901+lpfc_issue_els_auth_reject(struct lpfc_vport *vport,
3902+ struct lpfc_nodelist *ndlp,
3903+ uint8_t reason, uint8_t explanation)
3904+{
3905+ struct lpfc_hba *phba = vport->phba;
3906+ struct lpfc_iocbq *elsiocb;
3907+ struct lpfc_sli_ring *pring;
3908+ struct lpfc_sli *psli;
3909+ struct lpfc_auth_message *authreq;
3910+ struct lpfc_auth_reject *reject;
3911+
3912+ psli = &phba->sli;
3913+ pring = &psli->ring[LPFC_ELS_RING];
3914+
3915+ vport->auth.auth_msg_state = LPFC_AUTH_REJECT;
3916+
3917+ elsiocb = lpfc_prep_els_iocb(vport, 1, sizeof(struct lpfc_auth_message)
3918+ + sizeof(struct lpfc_auth_reject), 0, ndlp,
3919+ ndlp->nlp_DID, ELS_CMD_AUTH);
3920+
3921+ if (!elsiocb)
3922+ return 1;
3923+
3924+ authreq = (struct lpfc_auth_message *)
3925+ (((struct lpfc_dmabuf *) elsiocb->context2)->virt);
3926+ authreq->command_code = ELS_CMD_AUTH_BYTE;
3927+ authreq->flags = 0;
3928+ authreq->message_code = AUTH_REJECT;
3929+ authreq->protocol_ver = AUTH_VERSION;
3930+ reject = (struct lpfc_auth_reject *)authreq->data;
3931+ memset(reject, 0, sizeof(struct lpfc_auth_reject));
3932+ reject->reason = reason;
3933+ reject->explanation = explanation;
3934+
3935+ authreq->message_len = cpu_to_be32(sizeof(struct lpfc_auth_reject));
3936+ authreq->trans_id = cpu_to_be32(vport->auth.trans_id);
3937+ elsiocb->iocb_cmpl = lpfc_cmpl_els_auth_reject;
3938+
3939+ if (lpfc_sli_issue_iocb(phba, pring, elsiocb, 0) == IOCB_ERROR) {
3940+ lpfc_els_free_iocb(phba, elsiocb);
3941+ return 1;
3942+ }
3943+
3944+ return 0;
3945+}
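
Both senders above use the same framing: an lpfc_auth_message header followed by the raw DH-CHAP payload, with message_len and trans_id stored big-endian. As a rough sketch of the transmit side (the payload packing and the AUTH_NEGOTIATE message code live in lpfc_auth.c/lpfc_auth.h, which this hunk does not show, so treat those names as assumptions):

	/* Illustrative sketch only, not part of the patch: send a packed
	 * DH-CHAP negotiate payload.  'payload'/'payload_len' are assumed
	 * to come from the DH-CHAP code in lpfc_auth.c, and AUTH_NEGOTIATE
	 * from lpfc_auth.h; vport->auth.trans_id must already be set since
	 * lpfc_issue_els_auth() copies it into the header.
	 */
	vport->auth.auth_msg_state = LPFC_AUTH_NEGOTIATE;
	if (lpfc_issue_els_auth(vport, ndlp, AUTH_NEGOTIATE,
				payload, payload_len))
		lpfc_printf_vlog(vport, KERN_ERR, LOG_SECURITY,
				 "xxxx Unable to issue AUTH_NEGOTIATE\n");
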
3946--- a/drivers/scsi/lpfc/lpfc.h
3947+++ b/drivers/scsi/lpfc/lpfc.h
3948@@ -217,18 +217,81 @@ struct lpfc_stats {
3949 uint32_t fcpLocalErr;
3950 };
3951
3952+struct lpfc_dmabufext {
3953+ struct lpfc_dmabuf dma;
3954+ uint32_t size;
3955+ uint32_t flag;
3956+};
3957+
3958 enum sysfs_mbox_state {
3959 SMBOX_IDLE,
3960 SMBOX_WRITING,
3961- SMBOX_READING
3962+ SMBOX_WRITING_MBEXT,
3963+ SMBOX_READING_MBEXT,
3964+ SMBOX_READING,
3965+ SMBOX_WRITING_BUFF,
3966+ SMBOX_READING_BUFF
3967+};
3968+
3969+struct lpfc_sysfs_mbox_data {
3970+ MAILBOX_t mbox;
3971+ uint32_t mboffset;
3972+ uint32_t in_ext_wlen;
3973+ uint32_t out_ext_wlen;
3974 };
3975
3976 struct lpfc_sysfs_mbox {
3977+ struct lpfc_sysfs_mbox_data mbox_data;
3978 enum sysfs_mbox_state state;
3979 size_t offset;
3980 struct lpfcMboxq * mbox;
3981+ /* process id of the mgmt application */
3982+ pid_t pid;
3983+ struct list_head list;
3984+ uint8_t * mbext;
3985+ uint32_t extoff;
3986+ struct lpfc_dmabuf * txmit_buff;
3987+ struct lpfc_dmabuf * rcv_buff;
3988+};
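
Each management process now gets its own sysfs mailbox context, keyed by pid and linked on phba->sysfs_mbox_list (initialised later in lpfc_pci_probe_one). A lookup would plausibly walk that list; the helper below is only a sketch of the data structure, not the real routine in lpfc_attr.c:

/* Sketch only: locate the sysfs mailbox context owned by the calling
 * management process ('current').  Assumes the caller serialises access
 * to phba->sysfs_mbox_list, e.g. under phba->hbalock.
 */
static struct lpfc_sysfs_mbox *
lpfc_sysfs_mbox_find_sketch(struct lpfc_hba *phba)
{
	struct lpfc_sysfs_mbox *sm;

	list_for_each_entry(sm, &phba->sysfs_mbox_list, list)
		if (sm->pid == current->pid)
			return sm;
	return NULL;
}
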
3989+#define MENLO_DID 0x0000FC0E
3990+
3991+enum sysfs_menlo_state {
3992+ SMENLO_IDLE,
3993+ SMENLO_WRITING,
3994+ SMENLO_WRITING_MBEXT,
3995+ SMENLO_READING
3996+};
3997+
3998+struct lpfc_sysfs_menlo_hdr {
3999+ uint32_t cmd;
4000+ uint32_t cmdsize;
4001+ uint32_t rspsize;
4002+};
4003+
4004+struct lpfc_menlo_genreq64 {
4005+ size_t offset;
4006+ struct lpfc_iocbq *cmdiocbq;
4007+ struct lpfc_iocbq *rspiocbq;
4008+ struct lpfc_dmabuf *bmp;
4009+ struct lpfc_dmabufext *indmp;
4010+ struct ulp_bde64 *cmdbpl;
4011+ struct lpfc_dmabufext *outdmp;
4012+ uint32_t timeout;
4013+ struct list_head inhead;
4014+ struct list_head outhead;
4015 };
4016
4017+struct lpfc_sysfs_menlo {
4018+ enum sysfs_menlo_state state;
4019+ /* process id of the mgmt application */
4020+ struct lpfc_sysfs_menlo_hdr cmdhdr;
4021+ struct lpfc_menlo_genreq64 cr;
4022+ struct lpfc_menlo_genreq64 cx;
4023+ pid_t pid;
4024+ struct list_head list;
4025+};
4026+
4027+
4028 struct lpfc_hba;
4029
4030
4031@@ -261,6 +324,52 @@ enum hba_state {
4032 LPFC_HBA_ERROR = -1
4033 };
4034
4035+enum auth_state {
4036+ LPFC_AUTH_UNKNOWN = 0,
4037+ LPFC_AUTH_SUCCESS = 1,
4038+ LPFC_AUTH_FAIL = 2,
4039+};
4040+enum auth_msg_state {
4041+ LPFC_AUTH_NONE = 0,
4042+ LPFC_AUTH_REJECT = 1, /* Sent a Reject */
4043+ LPFC_AUTH_NEGOTIATE = 2, /* Auth Negotiate */
4044+ LPFC_DHCHAP_CHALLENGE = 3, /* Challenge */
4045+ LPFC_DHCHAP_REPLY = 4, /* Reply */
4046+ LPFC_DHCHAP_SUCCESS_REPLY = 5, /* Success with Reply */
4047+ LPFC_DHCHAP_SUCCESS = 6, /* Success */
4048+ LPFC_AUTH_DONE = 7,
4049+};
4050+
4051+struct lpfc_auth {
4052+ uint8_t auth_mode;
4053+ uint8_t bidirectional;
4054+ uint8_t hash_priority[4];
4055+ uint32_t hash_len;
4056+ uint8_t dh_group_priority[8];
4057+ uint32_t dh_group_len;
4058+ uint32_t reauth_interval;
4059+
4060+ uint8_t security_active;
4061+ enum auth_state auth_state;
4062+ enum auth_msg_state auth_msg_state;
4063+	uint32_t trans_id;	/* current transaction id. Can be set
4064+				   by incoming transactions as well */
4065+ uint32_t group_id;
4066+ uint32_t hash_id;
4067+ uint32_t direction;
4068+#define AUTH_DIRECTION_NONE 0
4069+#define AUTH_DIRECTION_REMOTE 0x1
4070+#define AUTH_DIRECTION_LOCAL 0x2
4071+#define AUTH_DIRECTION_BIDI (AUTH_DIRECTION_LOCAL|AUTH_DIRECTION_REMOTE)
4072+
4073+ uint8_t *challenge;
4074+ uint32_t challenge_len;
4075+ uint8_t *dh_pub_key;
4076+ uint32_t dh_pub_key_len;
4077+
4078+ unsigned long last_auth;
4079+};
4080+
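
reauth_interval is configured in minutes and last_auth records the jiffies timestamp of the last successful exchange; the ELS completion paths earlier in this patch re-arm the per-node re-authentication timer from these two fields. Condensed here only to make the units explicit:

	/* Re-arm periodic re-authentication (sketch of the pattern used in
	 * lpfc_els.c above): reauth_interval is minutes, hence * 60 * HZ;
	 * an interval of 0 disables periodic re-authentication.
	 */
	vport->auth.last_auth = jiffies;
	if (vport->auth.reauth_interval)
		mod_timer(&ndlp->nlp_reauth_tmr,
			  jiffies + vport->auth.reauth_interval * 60 * HZ);
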
4081 struct lpfc_vport {
4082 struct list_head listentry;
4083 struct lpfc_hba *phba;
4084@@ -356,6 +465,14 @@ struct lpfc_vport {
4085 #define FC_UNLOADING 0x2 /* HBA in process of unloading drvr */
4086 char *vname; /* Application assigned name */
4087
4088+ /* Fields used for accessing auth service */
4089+ struct lpfc_auth auth;
4090+ uint32_t sc_tran_id;
4091+ struct list_head sc_response_wait_queue;
4092+ struct list_head sc_users;
4093+ struct work_struct sc_online_work;
4094+ struct work_struct sc_offline_work;
4095+
4096 /* Vport Config Parameters */
4097 uint32_t cfg_scan_down;
4098 uint32_t cfg_lun_queue_depth;
4099@@ -371,6 +488,7 @@ struct lpfc_vport {
4100 uint32_t cfg_max_luns;
4101 uint32_t cfg_enable_da_id;
4102 uint32_t cfg_max_scsicmpl_time;
4103+ uint32_t cfg_enable_auth;
4104
4105 uint32_t dev_loss_tmo_changed;
4106
4107@@ -445,6 +563,7 @@ struct lpfc_hba {
4108 struct lpfc_dmabuf slim2p;
4109
4110 MAILBOX_t *mbox;
4111+ uint32_t *mbox_ext;
4112 uint32_t *inb_ha_copy;
4113 uint32_t *inb_counter;
4114 uint32_t inb_last_counter;
4115@@ -573,7 +692,9 @@ struct lpfc_hba {
4116 uint64_t fc4OutputRequests;
4117 uint64_t fc4ControlRequests;
4118
4119- struct lpfc_sysfs_mbox sysfs_mbox;
4120+ /* List of mailbox commands issued through sysfs */
4121+ struct list_head sysfs_mbox_list;
4122+ struct list_head sysfs_menlo_list;
4123
4124 /* fastpath list. */
4125 spinlock_t scsi_buf_list_lock;
4126@@ -595,11 +716,13 @@ struct lpfc_hba {
4127 struct fc_host_statistics link_stats;
4128 enum intr_type_t intr_type;
4129 struct msix_entry msix_entries[LPFC_MSIX_VECTORS];
4130+ struct lpfcdfc_host *dfc_host;
4131
4132 struct list_head port_list;
4133 struct lpfc_vport *pport; /* physical lpfc_vport pointer */
4134 uint16_t max_vpi; /* Maximum virtual nports */
4135-#define LPFC_MAX_VPI 0xFFFF /* Max number of VPI supported */
4136+#define LPFC_MAX_VPI 0xFFFF /* Max number of VPI supported */
4137+#define LPFC_INTR_VPI 100 /* Intermediate VPI supported */
4138 unsigned long *vpi_bmask; /* vpi allocation table */
4139
4140 /* Data structure used by fabric iocb scheduler */
4141--- a/drivers/scsi/lpfc/lpfc_hbadisc.c
4142+++ b/drivers/scsi/lpfc/lpfc_hbadisc.c
4143@@ -59,6 +59,47 @@ static uint8_t lpfcAlpaArray[] = {
4144
4145 static void lpfc_disc_timeout_handler(struct lpfc_vport *);
4146 static void lpfc_disc_flush_list(struct lpfc_vport *vport);
4147+void
4148+lpfc_start_discovery(struct lpfc_vport *vport)
4149+{
4150+ struct lpfc_hba *phba = vport->phba;
4151+ struct lpfc_vport **vports;
4152+ int i;
4153+
4154+ if (vport->auth.security_active &&
4155+ vport->auth.auth_state != LPFC_AUTH_SUCCESS) {
4156+ lpfc_printf_vlog(vport, KERN_ERR, LOG_DISCOVERY,
4157+ "0285 Authentication not complete.\n");
4158+ return;
4159+ }
4160+ if (vport->port_type == LPFC_NPIV_PORT) {
4161+ lpfc_do_scr_ns_plogi(phba, vport);
4162+ return;
4163+ }
4164+
4165+ vports = lpfc_create_vport_work_array(phba);
4166+ if (vports != NULL)
4167+ for (i = 0; i <= phba->max_vpi && vports[i] != NULL; i++) {
4168+ if (vports[i]->port_type == LPFC_PHYSICAL_PORT)
4169+ continue;
4170+ if (phba->fc_topology == TOPOLOGY_LOOP) {
4171+ lpfc_vport_set_state(vports[i],
4172+ FC_VPORT_LINKDOWN);
4173+ continue;
4174+ }
4175+ if (phba->link_flag & LS_NPIV_FAB_SUPPORTED)
4176+ lpfc_initial_fdisc(vports[i]);
4177+ else {
4178+ lpfc_vport_set_state(vports[i],
4179+ FC_VPORT_NO_FABRIC_SUPP);
4180+ lpfc_printf_vlog(vports[i], KERN_ERR, LOG_ELS,
4181+ "0259 No NPIV Fabric "
4182+ "support\n");
4183+ }
4184+ }
4185+ lpfc_destroy_vport_work_array(phba, vports);
4186+ lpfc_do_scr_ns_plogi(phba, vport);
4187+}
4188
4189 void
4190 lpfc_terminate_rport_io(struct fc_rport *rport)
4191@@ -416,6 +457,15 @@ lpfc_work_list_done(struct lpfc_hba *phb
4192 */
4193 lpfc_nlp_put(ndlp);
4194 break;
4195+ case LPFC_EVT_REAUTH:
4196+ ndlp = (struct lpfc_nodelist *) (evtp->evt_arg1);
4197+ lpfc_reauthentication_handler(ndlp);
4198+ free_evt = 0; /* evt is part of ndlp */
4199+ /* decrement the node reference count held
4200+ * for this queued work
4201+ */
4202+ lpfc_nlp_put(ndlp);
4203+ break;
4204 case LPFC_EVT_DEV_LOSS:
4205 ndlp = (struct lpfc_nodelist *)(evtp->evt_arg1);
4206 lpfc_dev_loss_tmo_handler(ndlp);
4207@@ -648,6 +698,9 @@ lpfc_cleanup_rpis(struct lpfc_vport *vpo
4208 continue;
4209 if (ndlp->nlp_state == NLP_STE_UNUSED_NODE)
4210 continue;
4211+ /* Stop re-authentication timer of all nodes. */
4212+ del_timer_sync(&ndlp->nlp_reauth_tmr);
4213+
4214 if ((phba->sli3_options & LPFC_SLI3_VPORT_TEARDOWN) ||
4215 ((vport->port_type == LPFC_NPIV_PORT) &&
4216 (ndlp->nlp_DID == NameServer_DID)))
4217@@ -697,6 +750,23 @@ lpfc_linkdown_port(struct lpfc_vport *vp
4218
4219 lpfc_port_link_failure(vport);
4220
4221+ vport->auth.auth_state = LPFC_AUTH_UNKNOWN;
4222+ vport->auth.auth_msg_state = LPFC_AUTH_NONE;
4223+}
4224+
4225+void
4226+lpfc_port_auth_failed(struct lpfc_nodelist *ndlp)
4227+{
4228+ struct lpfc_vport *vport = ndlp->vport;
4229+
4230+ vport->auth.auth_state = LPFC_AUTH_FAIL;
4231+ vport->auth.auth_msg_state = LPFC_AUTH_NONE;
4232+ lpfc_nlp_set_state(vport, ndlp, NLP_STE_NPR_NODE);
4233+ if (ndlp->nlp_type & NLP_FABRIC) {
4234+ lpfc_port_link_failure(vport);
4235+ lpfc_vport_set_state(vport, FC_VPORT_FAILED);
4236+ lpfc_issue_els_logo(vport, ndlp, 0);
4237+ }
4238 }
4239
4240 int
4241@@ -801,7 +871,6 @@ lpfc_linkup_port(struct lpfc_vport *vpor
4242 return;
4243
4244 fc_host_post_event(shost, fc_get_event_number(), FCH_EVT_LINKUP, 0);
4245-
4246 spin_lock_irq(shost->host_lock);
4247 vport->fc_flag &= ~(FC_PT2PT | FC_PT2PT_PLOGI | FC_ABORT_DISCOVERY |
4248 FC_RSCN_MODE | FC_NLP_MORE | FC_RSCN_DISCOVERY);
4249@@ -1424,8 +1493,6 @@ lpfc_mbx_cmpl_fabric_reg_login(struct lp
4250 MAILBOX_t *mb = &pmb->mb;
4251 struct lpfc_dmabuf *mp = (struct lpfc_dmabuf *) (pmb->context1);
4252 struct lpfc_nodelist *ndlp;
4253- struct lpfc_vport **vports;
4254- int i;
4255
4256 ndlp = (struct lpfc_nodelist *) pmb->context2;
4257 pmb->context1 = NULL;
4258@@ -1463,33 +1530,9 @@ lpfc_mbx_cmpl_fabric_reg_login(struct lp
4259 ndlp->nlp_type |= NLP_FABRIC;
4260 lpfc_nlp_set_state(vport, ndlp, NLP_STE_UNMAPPED_NODE);
4261
4262- if (vport->port_state == LPFC_FABRIC_CFG_LINK) {
4263- vports = lpfc_create_vport_work_array(phba);
4264- if (vports != NULL)
4265- for(i = 0;
4266- i <= phba->max_vpi && vports[i] != NULL;
4267- i++) {
4268- if (vports[i]->port_type == LPFC_PHYSICAL_PORT)
4269- continue;
4270- if (phba->fc_topology == TOPOLOGY_LOOP) {
4271- lpfc_vport_set_state(vports[i],
4272- FC_VPORT_LINKDOWN);
4273- continue;
4274- }
4275- if (phba->link_flag & LS_NPIV_FAB_SUPPORTED)
4276- lpfc_initial_fdisc(vports[i]);
4277- else {
4278- lpfc_vport_set_state(vports[i],
4279- FC_VPORT_NO_FABRIC_SUPP);
4280- lpfc_printf_vlog(vport, KERN_ERR,
4281- LOG_ELS,
4282- "0259 No NPIV "
4283- "Fabric support\n");
4284- }
4285- }
4286- lpfc_destroy_vport_work_array(phba, vports);
4287- lpfc_do_scr_ns_plogi(phba, vport);
4288- }
4289+ if (vport->port_state == LPFC_FABRIC_CFG_LINK &&
4290+ !vport->cfg_enable_auth)
4291+ lpfc_start_discovery(vport);
4292
4293 lpfc_mbuf_free(phba, mp->virt, mp->phys);
4294 kfree(mp);
4295@@ -1894,9 +1937,13 @@ lpfc_enable_node(struct lpfc_vport *vpor
4296 sizeof (struct lpfc_nodelist) - sizeof (struct list_head));
4297 INIT_LIST_HEAD(&ndlp->els_retry_evt.evt_listp);
4298 INIT_LIST_HEAD(&ndlp->dev_loss_evt.evt_listp);
4299+ INIT_LIST_HEAD(&ndlp->els_reauth_evt.evt_listp);
4300 init_timer(&ndlp->nlp_delayfunc);
4301 ndlp->nlp_delayfunc.function = lpfc_els_retry_delay;
4302 ndlp->nlp_delayfunc.data = (unsigned long)ndlp;
4303+ init_timer(&ndlp->nlp_reauth_tmr);
4304+ ndlp->nlp_reauth_tmr.function = lpfc_reauth_node;
4305+ ndlp->nlp_reauth_tmr.data = (unsigned long)ndlp;
4306 ndlp->nlp_DID = did;
4307 ndlp->vport = vport;
4308 ndlp->nlp_sid = NLP_NO_SID;
4309@@ -2264,9 +2311,12 @@ lpfc_cleanup_node(struct lpfc_vport *vpo
4310
4311 ndlp->nlp_last_elscmd = 0;
4312 del_timer_sync(&ndlp->nlp_delayfunc);
4313+ del_timer_sync(&ndlp->nlp_reauth_tmr);
4314
4315 list_del_init(&ndlp->els_retry_evt.evt_listp);
4316 list_del_init(&ndlp->dev_loss_evt.evt_listp);
4317+ if (!list_empty(&ndlp->els_reauth_evt.evt_listp))
4318+ list_del_init(&ndlp->els_reauth_evt.evt_listp);
4319
4320 lpfc_unreg_rpi(vport, ndlp);
4321
4322@@ -3073,7 +3123,14 @@ lpfc_filter_by_wwpn(struct lpfc_nodelist
4323 sizeof(ndlp->nlp_portname)) == 0;
4324 }
4325
4326-static struct lpfc_nodelist *
4327+static int
4328+lpfc_filter_by_wwnn(struct lpfc_nodelist *ndlp, void *param)
4329+{
4330+ return memcmp(&ndlp->nlp_nodename, param,
4331+ sizeof(ndlp->nlp_nodename)) == 0;
4332+}
4333+
4334+struct lpfc_nodelist *
4335 __lpfc_find_node(struct lpfc_vport *vport, node_filter filter, void *param)
4336 {
4337 struct lpfc_nodelist *ndlp;
4338@@ -3086,6 +3143,22 @@ __lpfc_find_node(struct lpfc_vport *vpor
4339 }
4340
4341 /*
4342+ * Search node lists for a remote port matching filter criteria
4343+ * This routine acquires host_lock itself; the caller must not hold it.
4344+ */
4345+struct lpfc_nodelist *
4346+lpfc_find_node(struct lpfc_vport *vport, node_filter filter, void *param)
4347+{
4348+ struct Scsi_Host *shost = lpfc_shost_from_vport(vport);
4349+ struct lpfc_nodelist *ndlp;
4350+
4351+ spin_lock_irq(shost->host_lock);
4352+ ndlp = __lpfc_find_node(vport, filter, param);
4353+ spin_unlock_irq(shost->host_lock);
4354+ return ndlp;
4355+}
4356+
4357+/*
4358 * This routine looks up the ndlp lists for the given RPI. If rpi found it
4359 * returns the node list element pointer else return NULL.
4360 */
4361@@ -3111,6 +3184,21 @@ lpfc_findnode_wwpn(struct lpfc_vport *vp
4362 return ndlp;
4363 }
4364
4365+/*
4366+ * This routine looks up the ndlp lists for the given WWNN. If WWNN found it
4367+ * returns the node list element pointer else return NULL.
4368+ */
4369+struct lpfc_nodelist *
4370+lpfc_findnode_wwnn(struct lpfc_vport *vport, struct lpfc_name *wwnn)
4371+{
4372+ struct Scsi_Host *shost = lpfc_shost_from_vport(vport);
4373+ struct lpfc_nodelist *ndlp;
4374+
4375+ spin_lock_irq(shost->host_lock);
4376+ ndlp = __lpfc_find_node(vport, lpfc_filter_by_wwnn, wwnn);
4377+ spin_unlock_irq(shost->host_lock);
4378+ return ndlp;
4379+}
4380 void
4381 lpfc_nlp_init(struct lpfc_vport *vport, struct lpfc_nodelist *ndlp,
4382 uint32_t did)
4383@@ -3118,9 +3206,13 @@ lpfc_nlp_init(struct lpfc_vport *vport,
4384 memset(ndlp, 0, sizeof (struct lpfc_nodelist));
4385 INIT_LIST_HEAD(&ndlp->els_retry_evt.evt_listp);
4386 INIT_LIST_HEAD(&ndlp->dev_loss_evt.evt_listp);
4387+ INIT_LIST_HEAD(&ndlp->els_reauth_evt.evt_listp);
4388 init_timer(&ndlp->nlp_delayfunc);
4389 ndlp->nlp_delayfunc.function = lpfc_els_retry_delay;
4390 ndlp->nlp_delayfunc.data = (unsigned long)ndlp;
4391+ init_timer(&ndlp->nlp_reauth_tmr);
4392+ ndlp->nlp_reauth_tmr.function = lpfc_reauth_node;
4393+ ndlp->nlp_reauth_tmr.data = (unsigned long)ndlp;
4394 ndlp->nlp_DID = did;
4395 ndlp->vport = vport;
4396 ndlp->nlp_sid = NLP_NO_SID;
4397--- a/drivers/scsi/lpfc/lpfc_hw.h
4398+++ b/drivers/scsi/lpfc/lpfc_hw.h
4399@@ -64,6 +64,7 @@
4400 #define SLI3_IOCB_CMD_SIZE 128
4401 #define SLI3_IOCB_RSP_SIZE 64
4402
4403+#define BUF_SZ_4K 4096
4404
4405 /* Common Transport structures and definitions */
4406
4407@@ -350,7 +351,8 @@ struct csp {
4408
4409 uint16_t huntgroup:1; /* FC Word 1, bit 23 */
4410 uint16_t simplex:1; /* FC Word 1, bit 22 */
4411- uint16_t word1Reserved1:3; /* FC Word 1, bit 21:19 */
4412+ uint16_t security:1; /* FC Word 1, bit 21 */
4413+ uint16_t word1Reserved1:2; /* FC Word 1, bit 20:19 */
4414 uint16_t dhd:1; /* FC Word 1, bit 18 */
4415 uint16_t contIncSeqCnt:1; /* FC Word 1, bit 17 */
4416 uint16_t payloadlength:1; /* FC Word 1, bit 16 */
4417@@ -367,7 +369,8 @@ struct csp {
4418 uint16_t payloadlength:1; /* FC Word 1, bit 16 */
4419 uint16_t contIncSeqCnt:1; /* FC Word 1, bit 17 */
4420 uint16_t dhd:1; /* FC Word 1, bit 18 */
4421- uint16_t word1Reserved1:3; /* FC Word 1, bit 21:19 */
4422+ uint16_t word1Reserved1:2; /* FC Word 1, bit 20:19 */
4423+ uint16_t security:1; /* FC Word 1, bit 21 */
4424 uint16_t simplex:1; /* FC Word 1, bit 22 */
4425 uint16_t huntgroup:1; /* FC Word 1, bit 23 */
4426 #endif
4427@@ -506,6 +509,17 @@ struct serv_parm { /* Structure is in Bi
4428 #define ELS_CMD_SCR 0x62000000
4429 #define ELS_CMD_RNID 0x78000000
4430 #define ELS_CMD_LIRR 0x7A000000
4431+/*
4432+ * ELS commands for authentication
4433+ * ELS_CMD_AUTH<<24 | AUTH_NEGOTIATE<<8 | AUTH_VERSION
4434+ */
4435+#define ELS_CMD_AUTH 0x90000000
4436+#define ELS_CMD_AUTH_RJT 0x90000A01
4437+#define ELS_CMD_AUTH_NEG 0x90000B01
4438+#define ELS_CMD_AUTH_DONE 0x90000C01
4439+#define ELS_CMD_DH_CHA 0x90001001
4440+#define ELS_CMD_DH_REP 0x90001101
4441+#define ELS_CMD_DH_SUC 0x90001201
4442 #else /* __LITTLE_ENDIAN_BITFIELD */
4443 #define ELS_CMD_MASK 0xffff
4444 #define ELS_RSP_MASK 0xff
4445@@ -542,6 +556,17 @@ struct serv_parm { /* Structure is in Bi
4446 #define ELS_CMD_SCR 0x62
4447 #define ELS_CMD_RNID 0x78
4448 #define ELS_CMD_LIRR 0x7A
4449+/*
4450+ * ELS commands for authentication
4451+ * ELS_CMD_AUTH | AUTH_NEGOTIATE<<16 | AUTH_VERSION<<24
4452+ */
4453+#define ELS_CMD_AUTH 0x00000090
4454+#define ELS_CMD_AUTH_RJT 0x010A0090
4455+#define ELS_CMD_AUTH_NEG 0x010B0090
4456+#define ELS_CMD_AUTH_DONE 0x010C0090
4457+#define ELS_CMD_DH_CHA 0x01100090
4458+#define ELS_CMD_DH_REP 0x01110090
4459+#define ELS_CMD_DH_SUC 0x01120090
4460 #endif
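
Both encodings carry the same three fields; a worked example for the negotiate command (0x0B and 0x01 are read back from the constants above and are assumed to match AUTH_NEGOTIATE and AUTH_VERSION in lpfc_auth.h):

/*
 * Big-endian:    (0x90 << 24) | (0x0B << 8) | 0x01    == 0x90000B01
 * Little-endian:  0x90 | (0x0B << 16) | (0x01 << 24)  == 0x010B0090
 * i.e. ELS command byte 0x90, message code 0x0B, protocol version 0x01.
 */
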
4461
4462 /*
4463@@ -1319,6 +1344,9 @@ typedef struct { /* FireFly BIU registe
4464 #define MBX_HEARTBEAT 0x31
4465 #define MBX_WRITE_VPARMS 0x32
4466 #define MBX_ASYNCEVT_ENABLE 0x33
4467+#define MBX_READ_EVENT_LOG_STATUS 0x37
4468+#define MBX_READ_EVENT_LOG 0x38
4469+#define MBX_WRITE_EVENT_LOG 0x39
4470
4471 #define MBX_PORT_CAPABILITIES 0x3B
4472 #define MBX_PORT_IOV_CONTROL 0x3C
4473@@ -1457,6 +1485,7 @@ typedef struct { /* FireFly BIU registe
4474 #define MBXERR_BAD_RCV_LENGTH 14
4475 #define MBXERR_DMA_ERROR 15
4476 #define MBXERR_ERROR 16
4477+#define MBXERR_UNKNOWN_CMD 18
4478 #define MBX_NOT_FINISHED 255
4479
4480 #define MBX_BUSY 0xffffff /* Attempted cmd to busy Mailbox */
4481@@ -1624,6 +1653,13 @@ typedef struct {
4482 } un;
4483 } BIU_DIAG_VAR;
4484
4485+/* Structure for MB command READ_EVENT_LOG (0x38) */
4486+typedef struct {
4487+ uint32_t rsvd1;
4488+ uint32_t offset;
4489+ struct ulp_bde64 rcv_bde64;
4490+}READ_EVENT_LOG_VAR;
4491+
4492 /* Structure for MB Command INIT_LINK (05) */
4493
4494 typedef struct {
4495@@ -2744,6 +2780,10 @@ typedef struct {
4496 /* Union of all Mailbox Command types */
4497 #define MAILBOX_CMD_WSIZE 32
4498 #define MAILBOX_CMD_SIZE (MAILBOX_CMD_WSIZE * sizeof(uint32_t))
4499+#define MAILBOX_EXT_WSIZE 512
4500+#define MAILBOX_EXT_SIZE (MAILBOX_EXT_WSIZE * sizeof(uint32_t))
4501+#define MAILBOX_HBA_EXT_OFFSET 0x100
4502+#define MAILBOX_MAX_XMIT_SIZE 1024
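
The extension is sized in 32-bit words; the byte figure is worth spelling out since the SLIM layout below depends on it:

/* Worked size, for reference: MAILBOX_EXT_WSIZE words * sizeof(uint32_t)
 * = 512 * 4 = 2048 bytes, which is exactly what MAX_SLIM_IOCB_SIZE below
 * subtracts from the SLI-2 SLIM.
 */
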
4503
4504 typedef union {
4505 uint32_t varWords[MAILBOX_CMD_WSIZE - 1]; /* first word is type/
4506@@ -2783,6 +2823,7 @@ typedef union {
4507 UNREG_VPI_VAR varUnregVpi; /* cmd = 0x97 (UNREG_VPI) */
4508 ASYNCEVT_ENABLE_VAR varCfgAsyncEvent; /*cmd = x33 (CONFIG_ASYNC) */
4509 struct config_msi_var varCfgMSI;/* cmd = x30 (CONFIG_MSI) */
4510+ READ_EVENT_LOG_VAR varRdEventLog; /* cmd = 0x38 (READ_EVENT_LOG) */
4511 } MAILVARIANTS;
4512
4513 /*
4514@@ -3364,14 +3405,16 @@ typedef struct _IOCB { /* IOCB structure
4515 #define SLI1_SLIM_SIZE (4 * 1024)
4516
4517 /* The mailbox, its 512-word extension, the PCB and up to 498 IOCBs
4518- * 256 (MAILBOX_t) + 140 (PCB_t) + ( 32 (IOCB_t) * 498 ) = < 16384
4519+ * fit comfortably in the 64k SLI2 SLIM:
4520+ * 256 (MAILBOX_t) + 2048 (mbox ext) + 140 (PCB_t) + ( 32 * 498 ) = 18380
4521 */
4522 #define SLI2_SLIM_SIZE (64 * 1024)
4523
4524 /* Maximum IOCBs that will fit in SLI2 slim */
4525 #define MAX_SLI2_IOCB 498
4526 #define MAX_SLIM_IOCB_SIZE (SLI2_SLIM_SIZE - \
4527- (sizeof(MAILBOX_t) + sizeof(PCB_t)))
4528+ (sizeof(MAILBOX_t) + sizeof(PCB_t) + \
4529+ sizeof(uint32_t) * MAILBOX_EXT_WSIZE))
4530
4531 /* HBQ entries are 4 words each = 4k */
4532 #define LPFC_TOTAL_HBQ_SIZE (sizeof(struct lpfc_hbq_entry) * \
4533@@ -3379,6 +3422,7 @@ typedef struct _IOCB { /* IOCB structure
4534
4535 struct lpfc_sli2_slim {
4536 MAILBOX_t mbx;
4537+ uint32_t mbx_ext_words[MAILBOX_EXT_WSIZE];
4538 PCB_t pcb;
4539 IOCB_t IOCBs[MAX_SLIM_IOCB_SIZE];
4540 };
4541--- a/drivers/scsi/lpfc/lpfc_init.c
4542+++ b/drivers/scsi/lpfc/lpfc_init.c
4543@@ -44,7 +44,22 @@
4544 #include "lpfc_crtn.h"
4545 #include "lpfc_vport.h"
4546 #include "lpfc_version.h"
4547-
4548+#include "lpfc_auth_access.h"
4549+#include "lpfc_security.h"
4550+#include <net/sock.h>
4551+#include <linux/netlink.h>
4552+
4553+/* vendor ID used in SCSI netlink calls */
4554+#define LPFC_NL_VENDOR_ID (SCSI_NL_VID_TYPE_PCI | PCI_VENDOR_ID_EMULEX)
4555+const char *security_work_q_name = "fc_sc_wq";
4556+extern struct workqueue_struct *security_work_q;
4557+extern struct list_head fc_security_user_list;
4558+extern int fc_service_state;
4559+void lpfc_fc_sc_security_online(struct work_struct *work);
4560+void lpfc_fc_sc_security_offline(struct work_struct *work);
4561+int lpfc_fc_queue_security_work(struct lpfc_vport *, struct work_struct *);
4562+void lpfc_rcv_nl_event(struct notifier_block *, unsigned long , void *);
4563+#include "lpfc_ioctl.h"
4564 static int lpfc_parse_vpd(struct lpfc_hba *, uint8_t *, int);
4565 static void lpfc_get_hba_model_desc(struct lpfc_hba *, uint8_t *, uint8_t *);
4566 static int lpfc_post_rcv_buf(struct lpfc_hba *);
4567@@ -54,6 +69,26 @@ static struct scsi_transport_template *l
4568 static DEFINE_IDR(lpfc_hba_index);
4569
4570 /**
4571+ * lpfc_hba_max_vpi - Get the maximum supported VPI for an HBA
4572+ * @device: The PCI device ID for this HBA
4573+ *
4574+ * Description:
4575+ * This routine will return the maximum supported VPI limit for each HBA. In
4576+ * most cases the maximum VPI limit will be 0xFFFF, which indicates that the
4577+ * driver supports whatever the HBA can support. In some cases the driver
4578+ * supports fewer VPI that the HBA supports.
4579+ * supports fewer VPIs than the HBA supports.
4580+static inline uint16_t
4581+lpfc_hba_max_vpi(unsigned short device)
4582+{
4583+ if ((device == PCI_DEVICE_ID_HELIOS) ||
4584+ (device == PCI_DEVICE_ID_ZEPHYR))
4585+ return LPFC_INTR_VPI;
4586+ else
4587+ return LPFC_MAX_VPI;
4588+}
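
For reference, the probe path later in this patch consumes the helper in exactly one place when sizing the VPI table:

	/* From lpfc_pci_probe_one() further down in this patch */
	phba->max_vpi = lpfc_hba_max_vpi(phba->pcidev->device);
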
4589+
4590+/**
4591 * lpfc_config_port_prep: Perform lpfc initialization prior to config port.
4592 * @phba: pointer to lpfc hba data structure.
4593 *
4594@@ -444,9 +479,20 @@ lpfc_config_port_post(struct lpfc_hba *p
4595 /* Set up error attention (ERATT) polling timer */
4596 mod_timer(&phba->eratt_poll, jiffies + HZ * LPFC_ERATT_POLL_INTERVAL);
4597
4598+ if (vport->cfg_enable_auth) {
4599+ if (lpfc_security_service_state == SECURITY_OFFLINE) {
4600+ lpfc_printf_log(vport->phba, KERN_ERR, LOG_SECURITY,
4601+ "1000 Authentication is enabled but "
4602+ "authentication service is not running\n");
4603+ vport->auth.auth_mode = FC_AUTHMODE_UNKNOWN;
4604+ phba->link_state = LPFC_HBA_ERROR;
4605+ mempool_free(pmb, phba->mbox_mem_pool);
4606+ return 0;
4607+ }
4608+ }
4609+
4610 lpfc_init_link(phba, pmb, phba->cfg_topology, phba->cfg_link_speed);
4611 pmb->mbox_cmpl = lpfc_sli_def_mbox_cmpl;
4612- lpfc_set_loopback_flag(phba);
4613 rc = lpfc_sli_issue_mbox(phba, pmb, MBX_NOWAIT);
4614 if (rc != MBX_SUCCESS) {
4615 lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
4616@@ -886,8 +932,7 @@ lpfc_handle_eratt(struct lpfc_hba *phba)
4617 fc_host_post_vendor_event(shost, fc_get_event_number(),
4618 sizeof(temp_event_data),
4619 (char *) &temp_event_data,
4620- SCSI_NL_VID_TYPE_PCI
4621- | PCI_VENDOR_ID_EMULEX);
4622+ LPFC_NL_VENDOR_ID);
4623
4624 spin_lock_irq(&phba->hbalock);
4625 phba->over_temp_state = HBA_OVER_TEMP;
4626@@ -909,7 +954,7 @@ lpfc_handle_eratt(struct lpfc_hba *phba)
4627 shost = lpfc_shost_from_vport(vport);
4628 fc_host_post_vendor_event(shost, fc_get_event_number(),
4629 sizeof(event_data), (char *) &event_data,
4630- SCSI_NL_VID_TYPE_PCI | PCI_VENDOR_ID_EMULEX);
4631+ LPFC_NL_VENDOR_ID);
4632
4633 lpfc_offline_eratt(phba);
4634 }
4635@@ -1675,8 +1720,15 @@ lpfc_cleanup(struct lpfc_vport *vport)
4636 void
4637 lpfc_stop_vport_timers(struct lpfc_vport *vport)
4638 {
4639+ struct fc_security_request *fc_sc_req;
4640 del_timer_sync(&vport->els_tmofunc);
4641 del_timer_sync(&vport->fc_fdmitmo);
4642+ while (!list_empty(&vport->sc_response_wait_queue)) {
4643+ fc_sc_req = list_get_first(&vport->sc_response_wait_queue,
4644+ struct fc_security_request, rlist);
4645+ del_timer_sync(&fc_sc_req->timer);
4646+ kfree(fc_sc_req);
4647+ }
4648 lpfc_can_disctmo(vport);
4649 return;
4650 }
4651@@ -1963,12 +2015,7 @@ lpfc_create_port(struct lpfc_hba *phba,
4652 struct Scsi_Host *shost;
4653 int error = 0;
4654
4655- if (dev != &phba->pcidev->dev)
4656- shost = scsi_host_alloc(&lpfc_vport_template,
4657- sizeof(struct lpfc_vport));
4658- else
4659- shost = scsi_host_alloc(&lpfc_template,
4660- sizeof(struct lpfc_vport));
4661+ shost = scsi_host_alloc(&lpfc_template, sizeof(struct lpfc_vport));
4662 if (!shost)
4663 goto out;
4664
4665@@ -2017,6 +2064,15 @@ lpfc_create_port(struct lpfc_hba *phba,
4666 error = scsi_add_host(shost, dev);
4667 if (error)
4668 goto out_put_shost;
4669+ vport->auth.challenge = NULL;
4670+ vport->auth.challenge_len = 0;
4671+ vport->auth.dh_pub_key = NULL;
4672+ vport->auth.dh_pub_key_len = 0;
4673+
4674+ INIT_WORK(&vport->sc_online_work, lpfc_fc_sc_security_online);
4675+ INIT_WORK(&vport->sc_offline_work, lpfc_fc_sc_security_offline);
4676+ INIT_LIST_HEAD(&vport->sc_users);
4677+ INIT_LIST_HEAD(&vport->sc_response_wait_queue);
4678
4679 spin_lock_irq(&phba->hbalock);
4680 list_add_tail(&vport->listentry, &phba->port_list);
4681@@ -2387,7 +2443,7 @@ lpfc_pci_probe_one(struct pci_dev *pdev,
4682 * establish the host.
4683 */
4684 lpfc_get_cfgparam(phba);
4685- phba->max_vpi = LPFC_MAX_VPI;
4686+ phba->max_vpi = lpfc_hba_max_vpi(phba->pcidev->device);
4687
4688 /* Initialize timers used by driver */
4689 init_timer(&phba->hb_tmofunc);
4690@@ -2453,6 +2509,8 @@ lpfc_pci_probe_one(struct pci_dev *pdev,
4691
4692 memset(phba->slim2p.virt, 0, SLI2_SLIM_SIZE);
4693 phba->mbox = phba->slim2p.virt + offsetof(struct lpfc_sli2_slim, mbx);
4694+ phba->mbox_ext = (phba->slim2p.virt +
4695+ offsetof(struct lpfc_sli2_slim, mbx_ext_words));
4696 phba->pcb = (phba->slim2p.virt + offsetof(struct lpfc_sli2_slim, pcb));
4697 phba->IOCBs = (phba->slim2p.virt +
4698 offsetof(struct lpfc_sli2_slim, IOCBs));
4699@@ -2548,11 +2606,30 @@ lpfc_pci_probe_one(struct pci_dev *pdev,
4700 /* Initialize list to save ELS buffers */
4701 INIT_LIST_HEAD(&phba->elsbuf);
4702
4703+ /* Initialize list of sysfs mailbox commands */
4704+ INIT_LIST_HEAD(&phba->sysfs_mbox_list);
4705+ /* Initialize list of sysfs menlo commands */
4706+ INIT_LIST_HEAD(&phba->sysfs_menlo_list);
4707+
4708 vport = lpfc_create_port(phba, phba->brd_no, &phba->pcidev->dev);
4709 if (!vport)
4710 goto out_kthread_stop;
4711
4712 shost = lpfc_shost_from_vport(vport);
4713+
4714+ if ((lpfc_get_security_enabled)(shost)) {
4715+ unsigned long flags;
4716+ /* Triggers fcauthd to register if it is running */
4717+ fc_host_post_event(shost, fc_get_event_number(),
4718+ FCH_EVT_PORT_ONLINE, shost->host_no);
4719+ spin_lock_irqsave(&fc_security_user_lock, flags);
4720+ list_add_tail(&vport->sc_users, &fc_security_user_list);
4721+ spin_unlock_irqrestore(&fc_security_user_lock, flags);
4722+ if (fc_service_state == FC_SC_SERVICESTATE_ONLINE) {
4723+ lpfc_fc_queue_security_work(vport,
4724+ &vport->sc_online_work);
4725+ }
4726+ }
4727 phba->pport = vport;
4728 lpfc_debugfs_initialize(vport);
4729
4730@@ -2610,6 +2687,14 @@ lpfc_pci_probe_one(struct pci_dev *pdev,
4731 phba->intr_type = INTx;
4732 }
4733
4734+ phba->dfc_host = lpfcdfc_host_add(pdev, shost, phba);
4735+ if (!phba->dfc_host) {
4736+ lpfc_printf_log(phba, KERN_ERR, LOG_LIBDFC,
4737+				"1201 Failed to allocate dfc_host\n");
4738+ error = -ENOMEM;
4739+ goto out_free_irq;
4740+ }
4741+
4742 if (lpfc_alloc_sysfs_attr(vport)) {
4743 lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
4744 "1476 Failed to allocate sysfs attr\n");
4745@@ -2658,6 +2743,8 @@ out_remove_device:
4746 vport->load_flag |= FC_UNLOADING;
4747 spin_unlock_irq(shost->host_lock);
4748 out_free_irq:
4749+ if (phba->dfc_host)
4750+ lpfcdfc_host_del(phba->dfc_host);
4751 lpfc_stop_phba_timers(phba);
4752 phba->pport->work_port_events = 0;
4753
4754@@ -2720,6 +2807,12 @@ lpfc_pci_remove_one(struct pci_dev *pdev
4755 struct lpfc_hba *phba = vport->phba;
4756 int bars = pci_select_bars(pdev, IORESOURCE_MEM);
4757
4758+ /* In case PCI channel permanently disabled, rescan SCSI devices */
4759+ if (pdev->error_state == pci_channel_io_perm_failure)
4760+ lpfc_scsi_dev_rescan(phba);
4761+ lpfcdfc_host_del(phba->dfc_host);
4762+ phba->dfc_host = NULL;
4763+
4764 spin_lock_irq(&phba->hbalock);
4765 vport->load_flag |= FC_UNLOADING;
4766 spin_unlock_irq(&phba->hbalock);
4767@@ -3076,12 +3169,34 @@ lpfc_init(void)
4768 return -ENOMEM;
4769 }
4770 }
4771+ error = scsi_nl_add_driver(LPFC_NL_VENDOR_ID, &lpfc_template,
4772+ lpfc_rcv_nl_msg, lpfc_rcv_nl_event);
4773+ if (error)
4774+ goto out_release_transport;
4775+ security_work_q = create_singlethread_workqueue(security_work_q_name);
4776+ if (!security_work_q)
4777+ goto out_nl_remove_driver;
4778+ INIT_LIST_HEAD(&fc_security_user_list);
4779 error = pci_register_driver(&lpfc_driver);
4780- if (error) {
4781- fc_release_transport(lpfc_transport_template);
4782- if (lpfc_enable_npiv)
4783- fc_release_transport(lpfc_vport_transport_template);
4784- }
4785+ if (error)
4786+ goto out_destroy_workqueue;
4787+ error = lpfc_cdev_init();
4788+ if (error)
4789+ goto out_pci_unregister;
4790+
4791+ return error;
4792+
4793+out_pci_unregister:
4794+ pci_unregister_driver(&lpfc_driver);
4795+out_destroy_workqueue:
4796+ destroy_workqueue(security_work_q);
4797+ security_work_q = NULL;
4798+out_nl_remove_driver:
4799+ scsi_nl_remove_driver(LPFC_NL_VENDOR_ID);
4800+out_release_transport:
4801+ fc_release_transport(lpfc_transport_template);
4802+ if (lpfc_enable_npiv)
4803+ fc_release_transport(lpfc_vport_transport_template);
4804
4805 return error;
4806 }
4807@@ -3097,9 +3212,14 @@ static void __exit
4808 lpfc_exit(void)
4809 {
4810 pci_unregister_driver(&lpfc_driver);
4811+ if (security_work_q)
4812+ destroy_workqueue(security_work_q);
4813+ security_work_q = NULL;
4814+ scsi_nl_remove_driver(LPFC_NL_VENDOR_ID);
4815 fc_release_transport(lpfc_transport_template);
4816 if (lpfc_enable_npiv)
4817 fc_release_transport(lpfc_vport_transport_template);
4818+ lpfc_cdev_exit();
4819 }
4820
4821 module_init(lpfc_init);
4822--- /dev/null
4823+++ b/drivers/scsi/lpfc/lpfc_ioctl.c
4824@@ -0,0 +1,2519 @@
4825+/*******************************************************************
4826+ * This file is part of the Emulex Linux Device Driver for *
4827+ * Fibre Channel Host Bus Adapters. *
4828+ * Copyright (C) 2006-2008 Emulex. All rights reserved. *
4829+ * EMULEX and SLI are trademarks of Emulex. *
4830+ * www.emulex.com *
4831+ * *
4832+ * This program is free software; you can redistribute it and/or *
4833+ * modify it under the terms of version 2 of the GNU General *
4834+ * Public License as published by the Free Software Foundation. *
4835+ * This program is distributed in the hope that it will be useful. *
4836+ * ALL EXPRESS OR IMPLIED CONDITIONS, REPRESENTATIONS AND *
4837+ * WARRANTIES, INCLUDING ANY IMPLIED WARRANTY OF MERCHANTABILITY, *
4838+ * FITNESS FOR A PARTICULAR PURPOSE, OR NON-INFRINGEMENT, ARE *
4839+ * DISCLAIMED, EXCEPT TO THE EXTENT THAT SUCH DISCLAIMERS ARE HELD *
4840+ * TO BE LEGALLY INVALID. See the GNU General Public License for *
4841+ * more details, a copy of which can be found in the file COPYING *
4842+ * included with this package. *
4843+ *******************************************************************/
4844+
4845+#include <linux/delay.h>
4846+#include <linux/blkdev.h>
4847+#include <linux/interrupt.h>
4848+#include <linux/pci.h>
4849+
4850+#include <scsi/scsi_host.h>
4851+#include <scsi/scsi_transport_fc.h>
4852+
4853+#include "lpfc_hw.h"
4854+#include "lpfc_sli.h"
4855+#include "lpfc_nl.h"
4856+#include "lpfc_disc.h"
4857+#include "lpfc_scsi.h"
4858+#include "lpfc.h"
4859+#include "lpfc_crtn.h"
4860+#include "lpfc_ioctl.h"
4861+#include "lpfc_logmsg.h"
4862+#include "lpfc_vport.h"
4863+
4864+
4865+struct lpfcdfc_event {
4866+ struct list_head node;
4867+ int ref;
4868+ wait_queue_head_t wq;
4869+
4870+ /* Event type and waiter identifiers */
4871+ uint32_t type_mask;
4872+ uint32_t req_id;
4873+ uint32_t reg_id;
4874+
4875+ /* next two flags are here for the auto-delete logic */
4876+ unsigned long wait_time_stamp;
4877+ int waiting;
4878+
4879+ /* seen and not seen events */
4880+ struct list_head events_to_get;
4881+ struct list_head events_to_see;
4882+};
4883+
4884+struct event_data {
4885+ struct list_head node;
4886+ uint32_t type;
4887+ uint32_t immed_dat;
4888+ void * data;
4889+ uint32_t len;
4890+};
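
The waiter keeps two queues: events_to_see collects event_data entries as they arrive, and events_to_get holds the ones handed to the waiting thread. The producer side is in the unsolicited-event code later in this file (not shown in this excerpt); a hand-off would look roughly like the sketch below, with evt and evt_dat as hypothetical locals:

	/* Sketch only: queue a freshly built event_data on a matching
	 * waiter and wake it.  Assumes lpfcdfc_lock (or an equivalent
	 * lock) is held while touching the waiter's lists.
	 */
	list_add_tail(&evt_dat->node, &evt->events_to_see);
	evt->wait_time_stamp = jiffies;
	wake_up_interruptible(&evt->wq);
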
4891+
4892+
4893+/* values for a_topology */
4894+#define LNK_LOOP 0x1
4895+#define LNK_PUBLIC_LOOP 0x2
4896+#define LNK_FABRIC 0x3
4897+#define LNK_PT2PT 0x4
4898+
4899+/* values for a_linkState */
4900+#define LNK_DOWN 0x1
4901+#define LNK_UP 0x2
4902+#define LNK_FLOGI 0x3
4903+#define LNK_DISCOVERY 0x4
4904+#define LNK_REDISCOVERY 0x5
4905+#define LNK_READY 0x6
4906+
4907+struct lpfcdfc_host {
4908+ struct list_head node;
4909+ int inst;
4910+ struct lpfc_hba * phba;
4911+ struct lpfc_vport *vport;
4912+ struct Scsi_Host * host;
4913+ struct pci_dev * dev;
4914+ void (*base_ct_unsol_event)(struct lpfc_hba *,
4915+ struct lpfc_sli_ring *,
4916+ struct lpfc_iocbq *);
4917+ /* Threads waiting for async event */
4918+ struct list_head ev_waiters;
4919+ uint32_t blocked;
4920+ uint32_t ref_count;
4921+};
4922+
4923+
4924+
4925+
4926+static void lpfc_ioctl_timeout_iocb_cmpl(struct lpfc_hba *,
4927+ struct lpfc_iocbq *, struct lpfc_iocbq *);
4928+
4929+static struct lpfc_dmabufext *
4930+dfc_cmd_data_alloc(struct lpfc_hba *, char *,
4931+ struct ulp_bde64 *, uint32_t);
4932+static int dfc_cmd_data_free(struct lpfc_hba *, struct lpfc_dmabufext *);
4933+static int dfc_rsp_data_copy(struct lpfc_hba *, uint8_t *,
4934+ struct lpfc_dmabufext *,
4935+ uint32_t);
4936+static int lpfc_issue_ct_rsp(struct lpfc_hba *, uint32_t, struct lpfc_dmabuf *,
4937+ struct lpfc_dmabufext *);
4938+
4939+static struct lpfcdfc_host * lpfcdfc_host_from_hba(struct lpfc_hba *);
4940+
4941+static DEFINE_MUTEX(lpfcdfc_lock);
4942+
4943+static struct list_head lpfcdfc_hosts = LIST_HEAD_INIT(lpfcdfc_hosts);
4944+
4945+static int lpfcdfc_major = 0;
4946+
4947+static int
4948+lpfc_ioctl_hba_rnid(struct lpfc_hba * phba,
4949+ struct lpfcCmdInput * cip,
4950+ void *dataout)
4951+{
4952+ struct nport_id idn;
4953+ struct lpfc_sli *psli;
4954+ struct lpfc_iocbq *cmdiocbq = NULL;
4955+ struct lpfc_iocbq *rspiocbq = NULL;
4956+ RNID *prsp;
4957+ uint32_t *pcmd;
4958+ uint32_t *psta;
4959+ IOCB_t *rsp;
4960+ struct lpfc_sli_ring *pring;
4961+ void *context2;
4962+ int i0;
4963+ int rtnbfrsiz;
4964+ struct lpfc_nodelist *pndl;
4965+ int rc = 0;
4966+
4967+ psli = &phba->sli;
4968+ pring = &psli->ring[LPFC_ELS_RING];
4969+
4970+ if (copy_from_user((uint8_t *) &idn, (void __user *) cip->lpfc_arg1,
4971+ sizeof(struct nport_id))) {
4972+ rc = EIO;
4973+ return rc;
4974+ }
4975+
4976+ if (idn.idType == LPFC_WWNN_TYPE)
4977+ pndl = lpfc_findnode_wwnn(phba->pport,
4978+ (struct lpfc_name *) idn.wwpn);
4979+ else
4980+ pndl = lpfc_findnode_wwpn(phba->pport,
4981+ (struct lpfc_name *) idn.wwpn);
4982+
4983+ if (!pndl || !NLP_CHK_NODE_ACT(pndl))
4984+ return ENODEV;
4985+
4986+ for (i0 = 0;
4987+ i0 < 10 && (pndl->nlp_flag & NLP_ELS_SND_MASK) == NLP_RNID_SND;
4988+ i0++) {
4989+ mdelay(1000);
4990+ }
4991+
4992+ if (i0 == 10) {
4993+ pndl->nlp_flag &= ~NLP_RNID_SND;
4994+ return EBUSY;
4995+ }
4996+
4997+ cmdiocbq = lpfc_prep_els_iocb(phba->pport, 1, (2 * sizeof(uint32_t)), 0,
4998+ pndl, pndl->nlp_DID, ELS_CMD_RNID);
4999+ if (!cmdiocbq)
5000+ return ENOMEM;
5001+
5002+ /*
5003+ * Context2 is used by prep/free to locate cmd and rsp buffers,
5004+ * but context2 is also used by iocb_wait to hold a rspiocb ptr.
5005+ * The rsp iocbq can be returned from the completion routine for
5006+	 * iocb_wait, so save the prep/free value locally. It will be
5007+ * restored after returning from iocb_wait.
5008+ */
5009+ context2 = cmdiocbq->context2;
5010+
5011+ if ((rspiocbq = lpfc_sli_get_iocbq(phba)) == NULL) {
5012+ rc = ENOMEM;
5013+ goto sndrndqwt;
5014+ }
5015+ rsp = &(rspiocbq->iocb);
5016+
5017+ pcmd = (uint32_t *) (((struct lpfc_dmabuf *) cmdiocbq->context2)->virt);
5018+ *pcmd++ = ELS_CMD_RNID;
5019+
5020+ memset((void *) pcmd, 0, sizeof (RNID));
5021+ ((RNID *) pcmd)->Format = 0;
5022+ ((RNID *) pcmd)->Format = RNID_TOPOLOGY_DISC;
5023+ cmdiocbq->context1 = NULL;
5024+ cmdiocbq->context2 = NULL;
5025+ cmdiocbq->iocb_flag |= LPFC_IO_LIBDFC;
5026+
5027+ pndl->nlp_flag |= NLP_RNID_SND;
5028+ cmdiocbq->iocb.ulpTimeout = (phba->fc_ratov * 2) + 3 ;
5029+
5030+ rc = lpfc_sli_issue_iocb_wait(phba, pring, cmdiocbq, rspiocbq,
5031+ (phba->fc_ratov * 2) + LPFC_DRVR_TIMEOUT);
5032+ pndl->nlp_flag &= ~NLP_RNID_SND;
5033+ cmdiocbq->context2 = context2;
5034+
5035+ if (rc == IOCB_TIMEDOUT) {
5036+ lpfc_sli_release_iocbq(phba, rspiocbq);
5037+ cmdiocbq->context1 = NULL;
5038+ cmdiocbq->iocb_cmpl = lpfc_ioctl_timeout_iocb_cmpl;
5039+ return EIO;
5040+ }
5041+
5042+ if (rc != IOCB_SUCCESS) {
5043+ rc = EIO;
5044+ goto sndrndqwt;
5045+ }
5046+
5047+ if (rsp->ulpStatus == IOSTAT_SUCCESS) {
5048+ struct lpfc_dmabuf *buf_ptr1, *buf_ptr;
5049+ buf_ptr1 = (struct lpfc_dmabuf *)(cmdiocbq->context2);
5050+ buf_ptr = list_entry(buf_ptr1->list.next, struct lpfc_dmabuf,
5051+ list);
5052+ psta = (uint32_t*)buf_ptr->virt;
5053+ prsp = (RNID *) (psta + 1); /* then rnid response data */
+		/* Clamp to the caller's buffer before copying to avoid overrun */
+		rtnbfrsiz = prsp->CommonLen + prsp->SpecificLen +
+			sizeof (uint32_t);
+		if (rtnbfrsiz > cip->lpfc_outsz)
+			rtnbfrsiz = cip->lpfc_outsz;
+		memcpy((uint8_t *) dataout, (uint8_t *) psta, rtnbfrsiz);
+
5060+ if (copy_to_user
5061+ ((void __user *) cip->lpfc_arg2, (uint8_t *) & rtnbfrsiz,
5062+ sizeof (int)))
5063+ rc = EIO;
5064+ } else if (rsp->ulpStatus == IOSTAT_LS_RJT) {
5065+ uint8_t ls_rjt[8];
5066+ uint32_t *ls_rjtrsp;
5067+
5068+ ls_rjtrsp = (uint32_t*)(ls_rjt + 4);
5069+
5070+ /* construct the LS_RJT payload */
5071+ ls_rjt[0] = 0x01;
5072+ ls_rjt[1] = 0x00;
5073+ ls_rjt[2] = 0x00;
5074+ ls_rjt[3] = 0x00;
5075+
5076+ *ls_rjtrsp = be32_to_cpu(rspiocbq->iocb.un.ulpWord[4]);
5077+ rtnbfrsiz = 8;
5078+ memcpy((uint8_t *) dataout, (uint8_t *) ls_rjt, rtnbfrsiz);
5079+ if (copy_to_user
5080+ ((void __user *) cip->lpfc_arg2, (uint8_t *) & rtnbfrsiz,
5081+ sizeof (int)))
5082+ rc = EIO;
5083+ } else
5084+ rc = EACCES;
5085+
5086+sndrndqwt:
5087+ if (cmdiocbq)
5088+ lpfc_els_free_iocb(phba, cmdiocbq);
5089+
5090+ if (rspiocbq)
5091+ lpfc_sli_release_iocbq(phba, rspiocbq);
5092+
5093+ return rc;
5094+}
5095+
5096+static void
5097+lpfc_ioctl_timeout_iocb_cmpl(struct lpfc_hba * phba,
5098+ struct lpfc_iocbq * cmd_iocb_q,
5099+ struct lpfc_iocbq * rsp_iocb_q)
5100+{
5101+ struct lpfc_timedout_iocb_ctxt *iocb_ctxt = cmd_iocb_q->context1;
5102+
5103+ if (!iocb_ctxt) {
5104+ if (cmd_iocb_q->context2)
5105+ lpfc_els_free_iocb(phba, cmd_iocb_q);
5106+ else
5107+ lpfc_sli_release_iocbq(phba,cmd_iocb_q);
5108+ return;
5109+ }
5110+
5111+ if (iocb_ctxt->outdmp)
5112+ dfc_cmd_data_free(phba, iocb_ctxt->outdmp);
5113+
5114+ if (iocb_ctxt->indmp)
5115+ dfc_cmd_data_free(phba, iocb_ctxt->indmp);
5116+
5117+ if (iocb_ctxt->mp) {
5118+ lpfc_mbuf_free(phba,
5119+ iocb_ctxt->mp->virt,
5120+ iocb_ctxt->mp->phys);
5121+ kfree(iocb_ctxt->mp);
5122+ }
5123+
5124+ if (iocb_ctxt->bmp) {
5125+ lpfc_mbuf_free(phba,
5126+ iocb_ctxt->bmp->virt,
5127+ iocb_ctxt->bmp->phys);
5128+ kfree(iocb_ctxt->bmp);
5129+ }
5130+
5131+ lpfc_sli_release_iocbq(phba,cmd_iocb_q);
5132+
5133+ if (iocb_ctxt->rspiocbq)
5134+ lpfc_sli_release_iocbq(phba, iocb_ctxt->rspiocbq);
5135+
5136+ kfree(iocb_ctxt);
5137+}
5138+
5139+
5140+static int
5141+lpfc_ioctl_send_els(struct lpfc_hba * phba,
5142+ struct lpfcCmdInput * cip, void *dataout)
5143+{
5144+ struct lpfc_sli *psli = &phba->sli;
5145+ struct lpfc_sli_ring *pring = &psli->ring[LPFC_ELS_RING];
5146+ struct lpfc_iocbq *cmdiocbq, *rspiocbq;
5147+ struct lpfc_dmabufext *pcmdext = NULL, *prspext = NULL;
5148+ struct lpfc_nodelist *pndl;
5149+ struct ulp_bde64 *bpl;
5150+ IOCB_t *rsp;
5151+ struct lpfc_dmabuf *pcmd, *prsp, *pbuflist = NULL;
5152+ uint16_t rpi = 0;
5153+ struct nport_id destID;
5154+ int rc = 0;
5155+ uint32_t cmdsize;
5156+ uint32_t rspsize;
5157+ uint32_t elscmd;
5158+ int iocb_status;
5159+
5160+ elscmd = *(uint32_t *)cip->lpfc_arg2;
5161+ cmdsize = cip->lpfc_arg4;
5162+ rspsize = cip->lpfc_outsz;
5163+
5164+ if (copy_from_user((uint8_t *)&destID, (void __user *)cip->lpfc_arg1,
5165+ sizeof(struct nport_id)))
5166+ return EIO;
5167+
5168+ if ((rspiocbq = lpfc_sli_get_iocbq(phba)) == NULL)
5169+ return ENOMEM;
5170+
5171+ rsp = &rspiocbq->iocb;
5172+
5173+ if (destID.idType == 0)
5174+ pndl = lpfc_findnode_wwpn(phba->pport,
5175+ (struct lpfc_name *)&destID.wwpn);
5176+ else {
5177+ destID.d_id = (destID.d_id & Mask_DID);
5178+ pndl = lpfc_findnode_did(phba->pport, destID.d_id);
5179+ }
5180+
5181+ if (!pndl || !NLP_CHK_NODE_ACT(pndl)) {
5182+ if (destID.idType == 0) {
5183+ lpfc_sli_release_iocbq(phba, rspiocbq);
5184+ return ENODEV;
5185+ }
5186+ if (!pndl) {
5187+ pndl = kmalloc(sizeof (struct lpfc_nodelist),
5188+ GFP_KERNEL);
5189+ if (!pndl) {
5190+ lpfc_sli_release_iocbq(phba, rspiocbq);
5191+ return ENODEV;
5192+ }
5193+ lpfc_nlp_init(phba->pport, pndl, destID.d_id);
5194+ lpfc_nlp_set_state(phba->pport, pndl, NLP_STE_NPR_NODE);
5195+ } else {
5196+ pndl = lpfc_enable_node(phba->pport, pndl,
5197+ NLP_STE_NPR_NODE);
5198+ if (!pndl) {
5199+ lpfc_sli_release_iocbq(phba, rspiocbq);
5200+ return ENODEV;
5201+ }
5202+ }
5203+ } else {
5204+ lpfc_nlp_get(pndl);
5205+ rpi = pndl->nlp_rpi;
5206+ }
5207+
5208+ cmdiocbq = lpfc_prep_els_iocb(phba->pport, 1, cmdsize, 0, pndl,
5209+ pndl->nlp_DID, elscmd);
5210+
5211+	/* release the new pndl once the iocb completes */
5212+ lpfc_nlp_put(pndl);
5213+
5214+ if (cmdiocbq == NULL) {
5215+ lpfc_sli_release_iocbq(phba, rspiocbq);
5216+ return EIO;
5217+ }
5218+
5219+ pcmd = (struct lpfc_dmabuf *) cmdiocbq->context2;
5220+ prsp = (struct lpfc_dmabuf *) pcmd->list.next;
5221+
5222+ /*
5223+ * If we exceed the size of the allocated mbufs we need to
5224+ * free them and allocate our own.
5225+ */
5226+ if ((cmdsize > LPFC_BPL_SIZE) || (rspsize > LPFC_BPL_SIZE)) {
5227+ lpfc_mbuf_free(phba, pcmd->virt, pcmd->phys);
5228+ kfree(pcmd);
5229+ lpfc_mbuf_free(phba, prsp->virt, prsp->phys);
5230+ kfree(prsp);
5231+ cmdiocbq->context2 = NULL;
5232+
5233+ pbuflist = (struct lpfc_dmabuf *) cmdiocbq->context3;
5234+ bpl = (struct ulp_bde64 *) pbuflist->virt;
5235+ pcmdext = dfc_cmd_data_alloc(phba, cip->lpfc_arg2,
5236+ bpl, cmdsize);
5237+ if (!pcmdext) {
5238+ lpfc_els_free_iocb(phba, cmdiocbq);
5239+ lpfc_sli_release_iocbq(phba, rspiocbq);
5240+ return ENOMEM;
5241+ }
5242+ bpl += pcmdext->flag;
5243+ prspext = dfc_cmd_data_alloc(phba, NULL, bpl, rspsize);
5244+ if (!prspext) {
5245+ dfc_cmd_data_free(phba, pcmdext);
5246+ lpfc_els_free_iocb(phba, cmdiocbq);
5247+ lpfc_sli_release_iocbq(phba, rspiocbq);
5248+ return ENOMEM;
5249+ }
5250+ } else {
5251+ /* Copy the command from user space */
5252+ if (copy_from_user((uint8_t *) pcmd->virt,
5253+ (void __user *) cip->lpfc_arg2,
5254+ cmdsize)) {
5255+ lpfc_els_free_iocb(phba, cmdiocbq);
5256+ lpfc_sli_release_iocbq(phba, rspiocbq);
5257+ return EIO;
5258+ }
5259+ }
5260+
5261+ cmdiocbq->iocb.ulpContext = rpi;
5262+ cmdiocbq->iocb_flag |= LPFC_IO_LIBDFC;
5263+ cmdiocbq->context1 = NULL;
5264+ cmdiocbq->context2 = NULL;
5265+
5266+ iocb_status = lpfc_sli_issue_iocb_wait(phba, pring, cmdiocbq, rspiocbq,
5267+ (phba->fc_ratov*2) + LPFC_DRVR_TIMEOUT);
5268+ rc = iocb_status;
5269+
5270+ if (rc == IOCB_SUCCESS) {
5271+ if (rsp->ulpStatus == IOSTAT_SUCCESS) {
5272+ if (rspsize < (rsp->un.ulpWord[0] & 0xffffff)) {
5273+ rc = ERANGE;
5274+ } else {
5275+ rspsize = rsp->un.ulpWord[0] & 0xffffff;
5276+ if (pbuflist) {
5277+ if (dfc_rsp_data_copy(
5278+ phba,
5279+ (uint8_t *) cip->lpfc_dataout,
5280+ prspext,
5281+ rspsize)) {
5282+ rc = EIO;
5283+ } else {
5284+ cip->lpfc_outsz = 0;
5285+ }
5286+ } else {
5287+ if (copy_to_user( (void __user *)
5288+ cip->lpfc_dataout,
5289+ (uint8_t *) prsp->virt,
5290+ rspsize)) {
5291+ rc = EIO;
5292+ } else {
5293+ cip->lpfc_outsz = 0;
5294+ }
5295+ }
5296+ }
5297+ } else if (rsp->ulpStatus == IOSTAT_LS_RJT) {
5298+ uint8_t ls_rjt[8];
5299+
5300+ /* construct the LS_RJT payload */
5301+ ls_rjt[0] = 0x01;
5302+ ls_rjt[1] = 0x00;
5303+ ls_rjt[2] = 0x00;
5304+ ls_rjt[3] = 0x00;
5305+ memcpy(&ls_rjt[4], (uint8_t *) &rsp->un.ulpWord[4],
5306+ sizeof(uint32_t));
5307+
5308+ if (rspsize < 8)
5309+ rc = ERANGE;
5310+ else
5311+ rspsize = 8;
5312+
5313+ memcpy(dataout, ls_rjt, rspsize);
5314+ } else
5315+ rc = EIO;
5316+
5317+ if (copy_to_user((void __user *)cip->lpfc_arg3,
5318+ (uint8_t *)&rspsize, sizeof(uint32_t)))
5319+ rc = EIO;
5320+ } else {
5321+ rc = EIO;
5322+ }
5323+
5324+ if (pbuflist) {
5325+ dfc_cmd_data_free(phba, pcmdext);
5326+ dfc_cmd_data_free(phba, prspext);
5327+ } else
5328+ cmdiocbq->context2 = (uint8_t *) pcmd;
5329+
5330+ if (iocb_status != IOCB_TIMEDOUT)
5331+ lpfc_els_free_iocb(phba, cmdiocbq);
5332+
5333+ lpfc_sli_release_iocbq(phba, rspiocbq);
5334+ return rc;
5335+}
5336+
5337+static int
5338+lpfc_ioctl_send_mgmt_rsp(struct lpfc_hba * phba,
5339+ struct lpfcCmdInput * cip)
5340+{
5341+ struct Scsi_Host *shost = lpfc_shost_from_vport(phba->pport);
5342+ struct ulp_bde64 *bpl;
5343+ struct lpfc_dmabuf *bmp = NULL;
5344+ struct lpfc_dmabufext *indmp = NULL;
5345+ uint32_t tag = (uint32_t)cip->lpfc_flag; /* XRI for XMIT_SEQUENCE */
5346+ unsigned long reqbfrcnt = (unsigned long)cip->lpfc_arg2;
5347+ int rc = 0;
5348+ unsigned long iflag;
5349+
5350+ if (!reqbfrcnt || (reqbfrcnt > (80 * BUF_SZ_4K))) {
5351+ rc = ERANGE;
5352+ return rc;
5353+ }
5354+
5355+ bmp = kmalloc(sizeof (struct lpfc_dmabuf), GFP_KERNEL);
5356+ if (!bmp) {
5357+ rc = ENOMEM;
5358+ goto send_mgmt_rsp_exit;
5359+ }
5360+ spin_lock_irqsave(shost->host_lock, iflag);
5361+ bmp->virt = lpfc_mbuf_alloc(phba, 0, &bmp->phys);
5362+ spin_unlock_irqrestore(shost->host_lock, iflag); /* remove */
5363+ if (!bmp->virt) {
5364+ rc = ENOMEM;
5365+ goto send_mgmt_rsp_free_bmp;
5366+ }
5367+
5368+ INIT_LIST_HEAD(&bmp->list);
5369+ bpl = (struct ulp_bde64 *) bmp->virt;
5370+
5371+ indmp = dfc_cmd_data_alloc(phba, cip->lpfc_arg1, bpl, reqbfrcnt);
5372+ if (!indmp) {
5373+ rc = ENOMEM;
5374+ goto send_mgmt_rsp_free_bmpvirt;
5375+ }
5376+ rc = lpfc_issue_ct_rsp(phba, tag, bmp, indmp);
5377+ if (rc) {
5378+ if (rc == IOCB_TIMEDOUT)
5379+ rc = ETIMEDOUT;
5380+ else if (rc == IOCB_ERROR)
5381+ rc = EACCES;
5382+ }
5383+
5384+ dfc_cmd_data_free(phba, indmp);
5385+send_mgmt_rsp_free_bmpvirt:
5386+ lpfc_mbuf_free(phba, bmp->virt, bmp->phys);
5387+send_mgmt_rsp_free_bmp:
5388+ kfree(bmp);
5389+send_mgmt_rsp_exit:
5390+ return rc;
5391+}
5392+
5393+static int
5394+lpfc_ioctl_send_mgmt_cmd(struct lpfc_hba * phba,
5395+ struct lpfcCmdInput * cip, void *dataout)
5396+{
5397+ struct Scsi_Host *shost = lpfc_shost_from_vport(phba->pport);
5398+ struct lpfc_nodelist *pndl = NULL;
5399+ struct ulp_bde64 *bpl = NULL;
5400+ struct lpfc_name findwwn;
5401+ uint32_t finddid, timeout;
5402+ struct lpfc_iocbq *cmdiocbq = NULL, *rspiocbq = NULL;
5403+ struct lpfc_dmabufext *indmp = NULL, *outdmp = NULL;
5404+ IOCB_t *cmd = NULL, *rsp = NULL;
5405+ struct lpfc_dmabuf *bmp = NULL;
5406+ struct lpfc_sli *psli = NULL;
5407+ struct lpfc_sli_ring *pring = NULL;
5408+ int i0 = 0, rc = 0, reqbfrcnt, snsbfrcnt;
5409+ struct lpfc_timedout_iocb_ctxt *iocb_ctxt;
5410+
5411+ psli = &phba->sli;
5412+ pring = &psli->ring[LPFC_ELS_RING];
5413+
5414+ if (!(psli->sli_flag & LPFC_SLI2_ACTIVE)) {
5415+ rc = EACCES;
5416+ goto send_mgmt_cmd_exit;
5417+ }
5418+
5419+ reqbfrcnt = cip->lpfc_arg4;
5420+ snsbfrcnt = cip->lpfc_arg5;
5421+
5422+ if (!reqbfrcnt || !snsbfrcnt
5423+ || (reqbfrcnt + snsbfrcnt > 80 * BUF_SZ_4K)) {
5424+ rc = ERANGE;
5425+ goto send_mgmt_cmd_exit;
5426+ }
5427+
5428+ if (phba->pport->port_state != LPFC_VPORT_READY) {
5429+ rc = ENODEV;
5430+ goto send_mgmt_cmd_exit;
5431+ }
5432+
5433+ if (cip->lpfc_cmd == LPFC_HBA_SEND_MGMT_CMD) {
5434+ rc = copy_from_user(&findwwn, (void __user *)cip->lpfc_arg3,
5435+ sizeof(struct lpfc_name));
5436+ if (rc) {
5437+ rc = EIO;
5438+ goto send_mgmt_cmd_exit;
5439+ }
5440+ pndl = lpfc_findnode_wwpn(phba->pport, &findwwn);
5441+ /* Do additional get to pndl found so that at the end of the
5442+	 * function we can do unconditional lpfc_nlp_put on it.
5443+ */
5444+ if (pndl && NLP_CHK_NODE_ACT(pndl))
5445+ lpfc_nlp_get(pndl);
5446+ } else {
5447+ finddid = (uint32_t)(unsigned long)cip->lpfc_arg3;
5448+ pndl = lpfc_findnode_did(phba->pport, finddid);
5449+ if (!pndl || !NLP_CHK_NODE_ACT(pndl)) {
5450+ if (phba->pport->fc_flag & FC_FABRIC) {
5451+ if (!pndl) {
5452+ pndl = kmalloc(sizeof
5453+ (struct lpfc_nodelist),
5454+ GFP_KERNEL);
5455+ if (!pndl) {
5456+ rc = ENODEV;
5457+ goto send_mgmt_cmd_exit;
5458+ }
5459+ lpfc_nlp_init(phba->pport, pndl,
5460+ finddid);
5461+ lpfc_nlp_set_state(phba->pport,
5462+ pndl, NLP_STE_PLOGI_ISSUE);
5463+				/* Indicate that the ioctl-allocated ndlp
5464+				 * memory should be freed when it's done
5465+ */
5466+ NLP_SET_FREE_REQ(pndl);
5467+ } else
5468+ lpfc_enable_node(phba->pport,
5469+ pndl, NLP_STE_PLOGI_ISSUE);
5470+
5471+ if (lpfc_issue_els_plogi(phba->pport,
5472+ pndl->nlp_DID, 0)) {
5473+ rc = ENODEV;
5474+ goto send_mgmt_cmd_free_pndl_exit;
5475+ }
5476+
5477+ /* Allow the node to complete discovery */
5478+ while (i0++ < 4) {
5479+ if (pndl->nlp_state ==
5480+ NLP_STE_UNMAPPED_NODE)
5481+ break;
5482+ msleep(500);
5483+ }
5484+
5485+ if (i0 == 4) {
5486+ rc = ENODEV;
5487+ goto send_mgmt_cmd_free_pndl_exit;
5488+ }
5489+ } else {
5490+ rc = ENODEV;
5491+ goto send_mgmt_cmd_exit;
5492+ }
5493+ } else
5494+			/* Do additional get to pndl found so that at the end of
5495+ * the function we can do unconditional lpfc_nlp_put.
5496+ */
5497+ lpfc_nlp_get(pndl);
5498+ }
5499+
5500+ if (!pndl || !NLP_CHK_NODE_ACT(pndl)) {
5501+ rc = ENODEV;
5502+ goto send_mgmt_cmd_exit;
5503+ }
5504+
5505+ if (pndl->nlp_flag & NLP_ELS_SND_MASK) {
5506+ rc = ENODEV;
5507+ goto send_mgmt_cmd_free_pndl_exit;
5508+ }
5509+
5510+ spin_lock_irq(shost->host_lock);
5511+ cmdiocbq = lpfc_sli_get_iocbq(phba);
5512+ if (!cmdiocbq) {
5513+ rc = ENOMEM;
5514+ spin_unlock_irq(shost->host_lock);
5515+ goto send_mgmt_cmd_free_pndl_exit;
5516+ }
5517+ cmd = &cmdiocbq->iocb;
5518+
5519+ rspiocbq = lpfc_sli_get_iocbq(phba);
5520+ if (!rspiocbq) {
5521+ rc = ENOMEM;
5522+ goto send_mgmt_cmd_free_cmdiocbq;
5523+ }
5524+ spin_unlock_irq(shost->host_lock);
5525+
5526+ rsp = &rspiocbq->iocb;
5527+
5528+ bmp = kmalloc(sizeof (struct lpfc_dmabuf), GFP_KERNEL);
5529+ if (!bmp) {
5530+ rc = ENOMEM;
5531+ spin_lock_irq(shost->host_lock);
5532+ goto send_mgmt_cmd_free_rspiocbq;
5533+ }
5534+
5535+ spin_lock_irq(shost->host_lock);
5536+ bmp->virt = lpfc_mbuf_alloc(phba, 0, &bmp->phys);
5537+ if (!bmp->virt) {
5538+ rc = ENOMEM;
5539+ goto send_mgmt_cmd_free_bmp;
5540+ }
5541+ spin_unlock_irq(shost->host_lock);
5542+
5543+ INIT_LIST_HEAD(&bmp->list);
5544+ bpl = (struct ulp_bde64 *) bmp->virt;
5545+ indmp = dfc_cmd_data_alloc(phba, cip->lpfc_arg1, bpl, reqbfrcnt);
5546+ if (!indmp) {
5547+ rc = ENOMEM;
5548+ spin_lock_irq(shost->host_lock);
5549+ goto send_mgmt_cmd_free_bmpvirt;
5550+ }
5551+
5552+ /* flag contains total number of BPLs for xmit */
5553+ bpl += indmp->flag;
5554+
5555+ outdmp = dfc_cmd_data_alloc(phba, NULL, bpl, snsbfrcnt);
5556+ if (!outdmp) {
5557+ rc = ENOMEM;
5558+ spin_lock_irq(shost->host_lock);
5559+ goto send_mgmt_cmd_free_indmp;
5560+ }
5561+
5562+ cmd->un.genreq64.bdl.ulpIoTag32 = 0;
5563+ cmd->un.genreq64.bdl.addrHigh = putPaddrHigh(bmp->phys);
5564+ cmd->un.genreq64.bdl.addrLow = putPaddrLow(bmp->phys);
5565+ cmd->un.genreq64.bdl.bdeFlags = BUFF_TYPE_BLP_64;
5566+ cmd->un.genreq64.bdl.bdeSize =
5567+ (outdmp->flag + indmp->flag) * sizeof (struct ulp_bde64);
5568+ cmd->ulpCommand = CMD_GEN_REQUEST64_CR;
5569+ cmd->un.genreq64.w5.hcsw.Fctl = (SI | LA);
5570+ cmd->un.genreq64.w5.hcsw.Dfctl = 0;
5571+ cmd->un.genreq64.w5.hcsw.Rctl = FC_UNSOL_CTL;
5572+ cmd->un.genreq64.w5.hcsw.Type = FC_COMMON_TRANSPORT_ULP;
5573+ cmd->ulpBdeCount = 1;
5574+ cmd->ulpLe = 1;
5575+ cmd->ulpClass = CLASS3;
5576+ cmd->ulpContext = pndl->nlp_rpi;
5577+ cmd->ulpOwner = OWN_CHIP;
5578+ cmdiocbq->vport = phba->pport;
5579+ cmdiocbq->context1 = NULL;
5580+ cmdiocbq->context2 = NULL;
5581+ cmdiocbq->iocb_flag |= LPFC_IO_LIBDFC;
5582+
5583+ if (cip->lpfc_flag == 0 )
5584+ timeout = phba->fc_ratov * 2 ;
5585+ else
5586+ timeout = cip->lpfc_flag;
5587+
5588+ cmd->ulpTimeout = timeout;
5589+
5590+ rc = lpfc_sli_issue_iocb_wait(phba, pring, cmdiocbq, rspiocbq,
5591+ timeout + LPFC_DRVR_TIMEOUT);
5592+
5593+ if (rc == IOCB_TIMEDOUT) {
5594+ lpfc_sli_release_iocbq(phba, rspiocbq);
5595+ iocb_ctxt = kmalloc(sizeof(struct lpfc_timedout_iocb_ctxt),
5596+ GFP_KERNEL);
5597+ if (!iocb_ctxt) {
5598+ rc = EACCES;
5599+ goto send_mgmt_cmd_free_pndl_exit;
5600+ }
5601+
5602+ cmdiocbq->context1 = iocb_ctxt;
5603+ cmdiocbq->context2 = NULL;
5604+ iocb_ctxt->rspiocbq = NULL;
5605+ iocb_ctxt->mp = NULL;
5606+ iocb_ctxt->bmp = bmp;
5607+ iocb_ctxt->outdmp = outdmp;
5608+ iocb_ctxt->lpfc_cmd = NULL;
5609+ iocb_ctxt->indmp = indmp;
5610+
5611+ cmdiocbq->iocb_cmpl = lpfc_ioctl_timeout_iocb_cmpl;
5612+ rc = EACCES;
5613+ goto send_mgmt_cmd_free_pndl_exit;
5614+ }
5615+
5616+ if (rc != IOCB_SUCCESS) {
5617+ rc = EACCES;
5618+ goto send_mgmt_cmd_free_outdmp;
5619+ }
5620+
5621+ if (rsp->ulpStatus) {
5622+ if (rsp->ulpStatus == IOSTAT_LOCAL_REJECT) {
5623+ switch (rsp->un.ulpWord[4] & 0xff) {
5624+ case IOERR_SEQUENCE_TIMEOUT:
5625+ rc = ETIMEDOUT;
5626+ break;
5627+ case IOERR_INVALID_RPI:
5628+ rc = EFAULT;
5629+ break;
5630+ default:
5631+ rc = EACCES;
5632+ break;
5633+ }
5634+ goto send_mgmt_cmd_free_outdmp;
5635+ }
5636+ } else
5637+ outdmp->flag = rsp->un.genreq64.bdl.bdeSize;
5638+
5639+ /* Copy back response data */
5640+ if (outdmp->flag > snsbfrcnt) {
5641+ rc = ERANGE;
5642+ lpfc_printf_log(phba, KERN_INFO, LOG_LIBDFC,
5643+ "1209 C_CT Request error Data: x%x x%x\n",
5644+ outdmp->flag, BUF_SZ_4K);
5645+ goto send_mgmt_cmd_free_outdmp;
5646+ }
5647+
5648+ /* copy back size of response, and response itself */
5649+ memcpy(dataout, &outdmp->flag, sizeof (int));
5650+ rc = dfc_rsp_data_copy (phba, cip->lpfc_arg2, outdmp, outdmp->flag);
5651+ if (rc)
5652+ rc = EIO;
5653+
5654+send_mgmt_cmd_free_outdmp:
5655+ spin_lock_irq(shost->host_lock);
5656+ dfc_cmd_data_free(phba, outdmp);
5657+send_mgmt_cmd_free_indmp:
5658+ dfc_cmd_data_free(phba, indmp);
5659+send_mgmt_cmd_free_bmpvirt:
5660+ lpfc_mbuf_free(phba, bmp->virt, bmp->phys);
5661+send_mgmt_cmd_free_bmp:
5662+ kfree(bmp);
5663+send_mgmt_cmd_free_rspiocbq:
5664+ lpfc_sli_release_iocbq(phba, rspiocbq);
5665+send_mgmt_cmd_free_cmdiocbq:
5666+ lpfc_sli_release_iocbq(phba, cmdiocbq);
5667+ spin_unlock_irq(shost->host_lock);
5668+send_mgmt_cmd_free_pndl_exit:
5669+ lpfc_nlp_put(pndl);
5670+send_mgmt_cmd_exit:
5671+ return rc;
5672+}
5673+
5674+static inline struct lpfcdfc_event *
5675+lpfcdfc_event_new(uint32_t ev_mask,
5676+ int ev_reg_id,
5677+ uint32_t ev_req_id)
5678+{
5679+ struct lpfcdfc_event * evt = kzalloc(sizeof(*evt), GFP_KERNEL);
5680+ if (evt == NULL)
5681+ return NULL;
5682+
5683+ INIT_LIST_HEAD(&evt->events_to_get);
5684+ INIT_LIST_HEAD(&evt->events_to_see);
5685+ evt->type_mask = ev_mask;
5686+ evt->req_id = ev_req_id;
5687+ evt->reg_id = ev_reg_id;
5688+ evt->wait_time_stamp = jiffies;
5689+ init_waitqueue_head(&evt->wq);
5690+
5691+ return evt;
5692+}
5693+
5694+static inline void lpfcdfc_event_free(struct lpfcdfc_event * evt)
5695+{
5696+ struct event_data * ed;
5697+
5698+ list_del(&evt->node);
5699+
5700+ while(!list_empty(&evt->events_to_get)) {
5701+ ed = list_entry(evt->events_to_get.next, typeof(*ed), node);
5702+ list_del(&ed->node);
5703+ kfree(ed->data);
5704+ kfree(ed);
5705+ }
5706+
5707+ while(!list_empty(&evt->events_to_see)) {
5708+ ed = list_entry(evt->events_to_see.next, typeof(*ed), node);
5709+ list_del(&ed->node);
5710+ kfree(ed->data);
5711+ kfree(ed);
5712+ }
5713+
5714+ kfree(evt);
5715+}
5716+
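+/*
+ * Event reference counting: callers hold lpfcdfc_lock around these macros;
+ * the event is freed by lpfcdfc_event_free() once the count drops below zero.
+ */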
5717+#define lpfcdfc_event_ref(evt) evt->ref++
5718+
5719+#define lpfcdfc_event_unref(evt) \
5720+ if (--evt->ref < 0) \
5721+ lpfcdfc_event_free(evt);
5722+
5723+static int
5724+lpfc_ioctl_hba_get_event(struct lpfc_hba * phba,
5725+ struct lpfcCmdInput * cip,
5726+ void **dataout, int *data_size)
5727+{
5728+ uint32_t ev_mask = ((uint32_t)(unsigned long)cip->lpfc_arg3 &
5729+ FC_REG_EVENT_MASK);
5730+ int ev_reg_id = (uint32_t) cip->lpfc_flag;
5731+ uint32_t ev_req_id = 0;
5732+ struct lpfcdfc_host * dfchba;
5733+ struct lpfcdfc_event * evt;
5734+ struct event_data * evt_dat = NULL;
5735+ int ret_val = 0;
5736+
5737+	/* All other events are supported through NET_LINK_EVENTs */
5738+ if (ev_mask != FC_REG_CT_EVENT)
5739+ return ENOENT;
5740+
5741+ mutex_lock(&lpfcdfc_lock);
5742+ list_for_each_entry(dfchba, &lpfcdfc_hosts, node)
5743+ if (dfchba->phba == phba)
5744+ break;
5745+ mutex_unlock(&lpfcdfc_lock);
5746+
5747+ BUG_ON(&dfchba->node == &lpfcdfc_hosts);
5748+
5749+ if ((ev_mask == FC_REG_CT_EVENT) &&
5750+ copy_from_user(&ev_req_id, (void __user *)cip->lpfc_arg2,
5751+ sizeof (uint32_t)))
5752+ return EIO;
5753+
5754+ mutex_lock(&lpfcdfc_lock);
5755+ list_for_each_entry(evt, &dfchba->ev_waiters, node)
5756+ if (evt->reg_id == ev_reg_id) {
5757+ if(list_empty(&evt->events_to_get))
5758+ break;
5759+ lpfcdfc_event_ref(evt);
5760+ evt->wait_time_stamp = jiffies;
5761+ evt_dat = list_entry(evt->events_to_get.prev,
5762+ struct event_data, node);
5763+ list_del(&evt_dat->node);
5764+ break;
5765+ }
5766+ mutex_unlock(&lpfcdfc_lock);
5767+
5768+ if (evt_dat == NULL)
5769+ return ENOENT;
5770+
5771+ BUG_ON((ev_mask & evt_dat->type) == 0);
5772+
5773+ if (evt_dat->len > cip->lpfc_outsz)
5774+ evt_dat->len = cip->lpfc_outsz;
5775+
5776+ if (copy_to_user((void __user *)cip->lpfc_arg2, &evt_dat->immed_dat,
5777+ sizeof (uint32_t)) ||
5778+ copy_to_user((void __user *)cip->lpfc_arg1, &evt_dat->len,
5779+ sizeof (uint32_t))) {
5780+ ret_val = EIO;
5781+ goto error_get_event_exit;
5782+ }
5783+
5784+ if (evt_dat->len > 0) {
5785+ *data_size = evt_dat->len;
5786+ *dataout = kmalloc(*data_size, GFP_KERNEL);
5787+ if (*dataout)
5788+ memcpy(*dataout, evt_dat->data, *data_size);
5789+ else
5790+ *data_size = 0;
5791+
5792+ } else
5793+ *data_size = 0;
5794+ ret_val = 0;
5795+
5796+error_get_event_exit:
5797+
5798+ kfree(evt_dat->data);
5799+ kfree(evt_dat);
5800+ mutex_lock(&lpfcdfc_lock);
5801+ lpfcdfc_event_unref(evt);
5802+ mutex_unlock(&lpfcdfc_lock);
5803+
5804+ return ret_val;
5805+}
5806+
5807+static int
5808+lpfc_ioctl_hba_set_event(struct lpfc_hba * phba,
5809+ struct lpfcCmdInput * cip)
5810+{
5811+ uint32_t ev_mask = ((uint32_t)(unsigned long)cip->lpfc_arg3 &
5812+ FC_REG_EVENT_MASK);
5813+ int ev_reg_id = cip->lpfc_flag;
5814+ uint32_t ev_req_id = 0;
5815+
5816+ struct lpfcdfc_host * dfchba;
5817+ struct lpfcdfc_event * evt;
5818+
5819+ int ret_val = 0;
5820+
5821+	/* All other events are supported through NET_LINK_EVENTs */
5822+ if (ev_mask != FC_REG_CT_EVENT)
5823+ return ENOENT;
5824+
5825+ mutex_lock(&lpfcdfc_lock);
5826+ list_for_each_entry(dfchba, &lpfcdfc_hosts, node) {
5827+ if (dfchba->phba == phba)
5828+ break;
5829+ }
5830+ mutex_unlock(&lpfcdfc_lock);
5831+ BUG_ON(&dfchba->node == &lpfcdfc_hosts);
5832+
5833+ if (ev_mask == FC_REG_CT_EVENT)
5834+ ev_req_id = ((uint32_t)(unsigned long)cip->lpfc_arg2);
5835+
5836+ mutex_lock(&lpfcdfc_lock);
5837+ list_for_each_entry(evt, &dfchba->ev_waiters, node) {
5838+ if (evt->reg_id == ev_reg_id) {
5839+ lpfcdfc_event_ref(evt);
5840+ evt->wait_time_stamp = jiffies;
5841+ break;
5842+ }
5843+ }
5844+ mutex_unlock(&lpfcdfc_lock);
5845+
5846+ if (&evt->node == &dfchba->ev_waiters) {
5847+ /* no event waiting struct yet - first call */
5848+ evt = lpfcdfc_event_new(ev_mask, ev_reg_id, ev_req_id);
5849+ if (evt == NULL)
5850+ return ENOMEM;
5851+
5852+ mutex_lock(&lpfcdfc_lock);
5853+ list_add(&evt->node, &dfchba->ev_waiters);
5854+ lpfcdfc_event_ref(evt);
5855+ mutex_unlock(&lpfcdfc_lock);
5856+ }
5857+
5858+ evt->waiting = 1;
5859+ if (wait_event_interruptible(evt->wq,
5860+ (!list_empty(&evt->events_to_see) ||
5861+ dfchba->blocked))) {
5862+ mutex_lock(&lpfcdfc_lock);
5863+ lpfcdfc_event_unref(evt); /* release ref */
5864+ lpfcdfc_event_unref(evt); /* delete */
5865+ mutex_unlock(&lpfcdfc_lock);
5866+ return EINTR;
5867+ }
5868+
5869+ mutex_lock(&lpfcdfc_lock);
5870+ if (dfchba->blocked) {
5871+ lpfcdfc_event_unref(evt);
5872+ lpfcdfc_event_unref(evt);
5873+ mutex_unlock(&lpfcdfc_lock);
5874+ return ENODEV;
5875+ }
5876+ mutex_unlock(&lpfcdfc_lock);
5877+
5878+ evt->wait_time_stamp = jiffies;
5879+ evt->waiting = 0;
5880+
5881+ BUG_ON(list_empty(&evt->events_to_see));
5882+
5883+ mutex_lock(&lpfcdfc_lock);
5884+ list_move(evt->events_to_see.prev, &evt->events_to_get);
5885+ lpfcdfc_event_unref(evt); /* release ref */
5886+ mutex_unlock(&lpfcdfc_lock);
5887+
5888+ return ret_val;
5889+}
5890+
5891+static int
5892+lpfc_ioctl_loopback_mode(struct lpfc_hba *phba,
5893+ struct lpfcCmdInput *cip, void *dataout)
5894+{
5895+ struct Scsi_Host *shost;
5896+ struct lpfc_sli *psli = &phba->sli;
5897+ struct lpfc_sli_ring *pring = &psli->ring[LPFC_FCP_RING];
5898+ uint32_t link_flags = cip->lpfc_arg4;
5899+ uint32_t timeout = cip->lpfc_arg5 * 100;
5900+ struct lpfc_vport **vports;
5901+ LPFC_MBOXQ_t *pmboxq;
5902+ int mbxstatus;
5903+ int i = 0;
5904+ int rc = 0;
5905+
5906+ if ((phba->link_state == LPFC_HBA_ERROR) ||
5907+ (psli->sli_flag & LPFC_BLOCK_MGMT_IO) ||
5908+ (!(psli->sli_flag & LPFC_SLI2_ACTIVE)))
5909+ return EACCES;
5910+
5911+ if ((pmboxq = mempool_alloc(phba->mbox_mem_pool,GFP_KERNEL)) == 0)
5912+ return ENOMEM;
5913+
5914+ vports = lpfc_create_vport_work_array(phba);
5915+ if (vports != NULL) {
5916+ for(i = 0; i <= phba->max_vpi && vports[i] != NULL; i++){
5917+ shost = lpfc_shost_from_vport(vports[i]);
5918+ scsi_block_requests(shost);
5919+ }
5920+ lpfc_destroy_vport_work_array(phba, vports);
5921+ }
5922+ else {
5923+ shost = lpfc_shost_from_vport(phba->pport);
5924+ scsi_block_requests(shost);
5925+ }
5926+
5927+ while (pring->txcmplq_cnt) {
5928+ if (i++ > 500) /* wait up to 5 seconds */
5929+ break;
5930+
5931+ mdelay(10);
5932+ }
5933+
5934+ memset((void *)pmboxq, 0, sizeof (LPFC_MBOXQ_t));
5935+ pmboxq->mb.mbxCommand = MBX_DOWN_LINK;
5936+ pmboxq->mb.mbxOwner = OWN_HOST;
5937+
5938+ mbxstatus = lpfc_sli_issue_mbox_wait(phba, pmboxq, LPFC_MBOX_TMO);
5939+
5940+ if ((mbxstatus == MBX_SUCCESS) && (pmboxq->mb.mbxStatus == 0)) {
5941+
5942+ /* wait for link down before proceeding */
5943+ i = 0;
5944+ while (phba->link_state != LPFC_LINK_DOWN) {
5945+ if (i++ > timeout) {
5946+ rc = ETIMEDOUT;
5947+ goto loopback_mode_exit;
5948+ }
5949+ msleep(10);
5950+ }
5951+
5952+ memset((void *)pmboxq, 0, sizeof (LPFC_MBOXQ_t));
5953+ if (link_flags == INTERNAL_LOOP_BACK)
5954+ pmboxq->mb.un.varInitLnk.link_flags = FLAGS_LOCAL_LB;
5955+ else
5956+ pmboxq->mb.un.varInitLnk.link_flags =
5957+ FLAGS_TOPOLOGY_MODE_LOOP;
5958+
5959+ pmboxq->mb.mbxCommand = MBX_INIT_LINK;
5960+ pmboxq->mb.mbxOwner = OWN_HOST;
5961+
5962+ mbxstatus = lpfc_sli_issue_mbox_wait(phba, pmboxq,
5963+ LPFC_MBOX_TMO);
5964+
5965+ if ((mbxstatus != MBX_SUCCESS) || (pmboxq->mb.mbxStatus))
5966+ rc = ENODEV;
5967+ else {
5968+ phba->link_flag |= LS_LOOPBACK_MODE;
5969+ /* wait for the link attention interrupt */
5970+ msleep(100);
5971+
5972+ i = 0;
5973+ while (phba->link_state != LPFC_HBA_READY) {
5974+ if (i++ > timeout) {
5975+ rc = ETIMEDOUT;
5976+ break;
5977+ }
5978+ msleep(10);
5979+ }
5980+ }
5981+ } else
5982+ rc = ENODEV;
5983+
5984+loopback_mode_exit:
5985+ vports = lpfc_create_vport_work_array(phba);
5986+ if (vports != NULL) {
5987+ for(i = 0; i <= phba->max_vpi && vports[i] != NULL; i++){
5988+ shost = lpfc_shost_from_vport(vports[i]);
5989+ scsi_unblock_requests(shost);
5990+ }
5991+ lpfc_destroy_vport_work_array(phba, vports);
5992+ }
5993+ else {
5994+ shost = lpfc_shost_from_vport(phba->pport);
5995+ scsi_unblock_requests(shost);
5996+ }
5997+
5998+ /*
5999+	 * Let the SLI layer release the mboxq if the mbox command completed after timeout.
6000+ */
6001+ if (mbxstatus != MBX_TIMEOUT)
6002+ mempool_free( pmboxq, phba->mbox_mem_pool);
6003+
6004+ return rc;
6005+}
6006+
6007+static int lpfcdfc_loop_self_reg(struct lpfc_hba *phba, uint16_t * rpi)
6008+{
6009+ LPFC_MBOXQ_t *mbox;
6010+ struct lpfc_dmabuf *dmabuff;
6011+ int status;
6012+
6013+ mbox = mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL);
6014+ if (mbox == NULL)
6015+ return ENOMEM;
6016+
6017+ status = lpfc_reg_login(phba, 0, phba->pport->fc_myDID,
6018+ (uint8_t *)&phba->pport->fc_sparam, mbox, 0);
6019+ if (status) {
6020+ mempool_free(mbox, phba->mbox_mem_pool);
6021+ return ENOMEM;
6022+ }
6023+
6024+ dmabuff = (struct lpfc_dmabuf *) mbox->context1;
6025+ mbox->context1 = NULL;
6026+ status = lpfc_sli_issue_mbox_wait(phba, mbox, LPFC_MBOX_TMO);
6027+
6028+ if ((status != MBX_SUCCESS) || (mbox->mb.mbxStatus)) {
6029+ lpfc_mbuf_free(phba, dmabuff->virt, dmabuff->phys);
6030+ kfree(dmabuff);
6031+ if (status != MBX_TIMEOUT)
6032+ mempool_free(mbox, phba->mbox_mem_pool);
6033+ return ENODEV;
6034+ }
6035+
6036+ *rpi = mbox->mb.un.varWords[0];
6037+
6038+ lpfc_mbuf_free(phba, dmabuff->virt, dmabuff->phys);
6039+ kfree(dmabuff);
6040+ mempool_free(mbox, phba->mbox_mem_pool);
6041+
6042+ return 0;
6043+}
6044+
6045+static int lpfcdfc_loop_self_unreg(struct lpfc_hba *phba, uint16_t rpi)
6046+{
6047+ LPFC_MBOXQ_t * mbox;
6048+ int status;
6049+
6050+ /* Allocate mboxq structure */
6051+ mbox = mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL);
6052+ if (mbox == NULL)
6053+ return ENOMEM;
6054+
6055+ lpfc_unreg_login(phba, 0, rpi, mbox);
6056+ status = lpfc_sli_issue_mbox_wait(phba, mbox, LPFC_MBOX_TMO);
6057+
6058+ if ((status != MBX_SUCCESS) || (mbox->mb.mbxStatus)) {
6059+ if (status != MBX_TIMEOUT)
6060+ mempool_free(mbox, phba->mbox_mem_pool);
6061+ return EIO;
6062+ }
6063+
6064+ mempool_free(mbox, phba->mbox_mem_pool);
6065+ return 0;
6066+}
6067+
6068+
6069+static int lpfcdfc_loop_get_xri(struct lpfc_hba *phba, uint16_t rpi,
6070+ uint16_t *txxri, uint16_t * rxxri)
6071+{
6072+ struct lpfc_sli *psli = &phba->sli;
6073+ struct lpfc_sli_ring *pring = &psli->ring[LPFC_ELS_RING];
6074+
6075+ struct lpfcdfc_host * dfchba;
6076+ struct lpfcdfc_event * evt;
6077+
6078+ struct lpfc_iocbq *cmdiocbq, *rspiocbq;
6079+ IOCB_t *cmd, *rsp;
6080+
6081+ struct lpfc_dmabuf * dmabuf;
6082+ struct ulp_bde64 *bpl = NULL;
6083+ struct lpfc_sli_ct_request *ctreq = NULL;
6084+
6085+ int ret_val = 0;
6086+
6087+ *txxri = 0;
6088+ *rxxri = 0;
6089+
6090+ mutex_lock(&lpfcdfc_lock);
6091+ list_for_each_entry(dfchba, &lpfcdfc_hosts, node) {
6092+ if (dfchba->phba == phba)
6093+ break;
6094+ }
6095+ mutex_unlock(&lpfcdfc_lock);
6096+ BUG_ON(&dfchba->node == &lpfcdfc_hosts);
6097+
6098+ evt = lpfcdfc_event_new(FC_REG_CT_EVENT, current->pid,
6099+ SLI_CT_ELX_LOOPBACK);
6100+ if (evt == NULL)
6101+ return ENOMEM;
6102+
6103+ mutex_lock(&lpfcdfc_lock);
6104+ list_add(&evt->node, &dfchba->ev_waiters);
6105+ lpfcdfc_event_ref(evt);
6106+ mutex_unlock(&lpfcdfc_lock);
6107+
6108+ cmdiocbq = lpfc_sli_get_iocbq(phba);
6109+ rspiocbq = lpfc_sli_get_iocbq(phba);
6110+
6111+ dmabuf = kmalloc(sizeof (struct lpfc_dmabuf), GFP_KERNEL);
6112+ if (dmabuf) {
6113+ dmabuf->virt = lpfc_mbuf_alloc(phba, 0, &dmabuf->phys);
6114+ INIT_LIST_HEAD(&dmabuf->list);
6115+ bpl = (struct ulp_bde64 *) dmabuf->virt;
6116+ memset(bpl, 0, sizeof(*bpl));
6117+ ctreq = (struct lpfc_sli_ct_request *)(bpl + 1);
6118+ bpl->addrHigh =
6119+ le32_to_cpu(putPaddrHigh(dmabuf->phys + sizeof(*bpl)));
6120+ bpl->addrLow =
6121+ le32_to_cpu(putPaddrLow(dmabuf->phys + sizeof(*bpl)));
6122+ bpl->tus.f.bdeFlags = BUFF_TYPE_BDE_64;
6123+ bpl->tus.f.bdeSize = ELX_LOOPBACK_HEADER_SZ;
6124+ bpl->tus.w = le32_to_cpu(bpl->tus.w);
6125+ }
6126+
6127+ if (cmdiocbq == NULL || rspiocbq == NULL ||
6128+ dmabuf == NULL || bpl == NULL || ctreq == NULL) {
6129+ ret_val = ENOMEM;
6130+ goto err_get_xri_exit;
6131+ }
6132+
6133+ cmd = &cmdiocbq->iocb;
6134+ rsp = &rspiocbq->iocb;
6135+
6136+ memset(ctreq, 0, ELX_LOOPBACK_HEADER_SZ);
6137+
6138+ ctreq->RevisionId.bits.Revision = SLI_CT_REVISION;
6139+ ctreq->RevisionId.bits.InId = 0;
6140+ ctreq->FsType = SLI_CT_ELX_LOOPBACK;
6141+ ctreq->FsSubType = 0;
6142+ ctreq->CommandResponse.bits.CmdRsp = ELX_LOOPBACK_XRI_SETUP;
6143+ ctreq->CommandResponse.bits.Size = 0;
6144+
6145+
6146+ cmd->un.xseq64.bdl.addrHigh = putPaddrHigh(dmabuf->phys);
6147+ cmd->un.xseq64.bdl.addrLow = putPaddrLow(dmabuf->phys);
6148+ cmd->un.xseq64.bdl.bdeFlags = BUFF_TYPE_BLP_64;
6149+ cmd->un.xseq64.bdl.bdeSize = sizeof(*bpl);
6150+
6151+ cmd->un.xseq64.w5.hcsw.Fctl = LA;
6152+ cmd->un.xseq64.w5.hcsw.Dfctl = 0;
6153+ cmd->un.xseq64.w5.hcsw.Rctl = FC_UNSOL_CTL;
6154+ cmd->un.xseq64.w5.hcsw.Type = FC_COMMON_TRANSPORT_ULP;
6155+
6156+ cmd->ulpCommand = CMD_XMIT_SEQUENCE64_CR;
6157+ cmd->ulpBdeCount = 1;
6158+ cmd->ulpLe = 1;
6159+ cmd->ulpClass = CLASS3;
6160+ cmd->ulpContext = rpi;
6161+
6162+ cmdiocbq->iocb_flag |= LPFC_IO_LIBDFC;
6163+ cmdiocbq->vport = phba->pport;
6164+
6165+ ret_val = lpfc_sli_issue_iocb_wait(phba, pring, cmdiocbq, rspiocbq,
6166+ (phba->fc_ratov * 2)
6167+ + LPFC_DRVR_TIMEOUT);
6168+ if (ret_val)
6169+ goto err_get_xri_exit;
6170+
6171+ *txxri = rsp->ulpContext;
6172+
6173+ evt->waiting = 1;
6174+ evt->wait_time_stamp = jiffies;
6175+ ret_val = wait_event_interruptible_timeout(
6176+ evt->wq, !list_empty(&evt->events_to_see),
6177+ ((phba->fc_ratov * 2) + LPFC_DRVR_TIMEOUT) * HZ);
6178+ if (list_empty(&evt->events_to_see))
6179+ ret_val = (ret_val) ? EINTR : ETIMEDOUT;
6180+ else {
6181+ ret_val = IOCB_SUCCESS;
6182+ mutex_lock(&lpfcdfc_lock);
6183+ list_move(evt->events_to_see.prev, &evt->events_to_get);
6184+ mutex_unlock(&lpfcdfc_lock);
6185+ *rxxri = (list_entry(evt->events_to_get.prev,
6186+ typeof(struct event_data),
6187+ node))->immed_dat;
6188+ }
6189+ evt->waiting = 0;
6190+
6191+err_get_xri_exit:
6192+ mutex_lock(&lpfcdfc_lock);
6193+ lpfcdfc_event_unref(evt); /* release ref */
6194+ lpfcdfc_event_unref(evt); /* delete */
6195+ mutex_unlock(&lpfcdfc_lock);
6196+
6197+ if(dmabuf) {
6198+ if(dmabuf->virt)
6199+ lpfc_mbuf_free(phba, dmabuf->virt, dmabuf->phys);
6200+ kfree(dmabuf);
6201+ }
6202+
6203+ if (cmdiocbq && (ret_val != IOCB_TIMEDOUT))
6204+ lpfc_sli_release_iocbq(phba, cmdiocbq);
6205+ if (rspiocbq)
6206+ lpfc_sli_release_iocbq(phba, rspiocbq);
6207+
6208+ return ret_val;
6209+}
6210+
6211+static int lpfcdfc_loop_post_rxbufs(struct lpfc_hba *phba, uint16_t rxxri,
6212+ size_t len)
6213+{
6214+ struct lpfc_sli *psli = &phba->sli;
6215+ struct lpfc_sli_ring *pring = &psli->ring[LPFC_ELS_RING];
6216+ struct lpfc_iocbq *cmdiocbq;
6217+ IOCB_t *cmd = NULL;
6218+ struct list_head head, *curr, *next;
6219+ struct lpfc_dmabuf *rxbmp;
6220+ struct lpfc_dmabuf *dmp;
6221+ struct lpfc_dmabuf *mp[2] = {NULL, NULL};
6222+ struct ulp_bde64 *rxbpl = NULL;
6223+ uint32_t num_bde;
6224+ struct lpfc_dmabufext *rxbuffer = NULL;
6225+ int ret_val = 0;
6226+ int i = 0;
6227+
6228+ cmdiocbq = lpfc_sli_get_iocbq(phba);
6229+ rxbmp = kmalloc(sizeof (struct lpfc_dmabuf), GFP_KERNEL);
6230+ if (rxbmp != NULL) {
6231+ rxbmp->virt = lpfc_mbuf_alloc(phba, 0, &rxbmp->phys);
6232+ INIT_LIST_HEAD(&rxbmp->list);
6233+ rxbpl = (struct ulp_bde64 *) rxbmp->virt;
6234+ rxbuffer = dfc_cmd_data_alloc(phba, NULL, rxbpl, len);
6235+ }
6236+
6237+ if(cmdiocbq == NULL || rxbmp == NULL ||
6238+ rxbpl == NULL || rxbuffer == NULL) {
6239+ ret_val = ENOMEM;
6240+ goto err_post_rxbufs_exit;
6241+ }
6242+
6243+ /* Queue buffers for the receive exchange */
6244+ num_bde = (uint32_t)rxbuffer->flag;
6245+ dmp = &rxbuffer->dma;
6246+
6247+ cmd = &cmdiocbq->iocb;
6248+ i = 0;
6249+
6250+ INIT_LIST_HEAD(&head);
6251+ list_add_tail(&head, &dmp->list);
6252+ list_for_each_safe(curr, next, &head) {
6253+ mp[i] = list_entry(curr, struct lpfc_dmabuf, list);
6254+ list_del(curr);
6255+
6256+ if (phba->sli3_options & LPFC_SLI3_HBQ_ENABLED) {
6257+ mp[i]->buffer_tag = lpfc_sli_get_buffer_tag(phba);
6258+ cmd->un.quexri64cx.buff.bde.addrHigh =
6259+ putPaddrHigh(mp[i]->phys);
6260+ cmd->un.quexri64cx.buff.bde.addrLow =
6261+ putPaddrLow(mp[i]->phys);
6262+ cmd->un.quexri64cx.buff.bde.tus.f.bdeSize =
6263+ ((struct lpfc_dmabufext *)mp[i])->size;
6264+ cmd->un.quexri64cx.buff.buffer_tag = mp[i]->buffer_tag;
6265+ cmd->ulpCommand = CMD_QUE_XRI64_CX;
6266+ cmd->ulpPU = 0;
6267+ cmd->ulpLe = 1;
6268+ cmd->ulpBdeCount = 1;
6269+ cmd->unsli3.que_xri64cx_ext_words.ebde_count = 0;
6270+
6271+ } else {
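+			/* Non-HBQ path: post up to two buffers per QUE_XRI_BUF64_CX iocb */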
6272+ cmd->un.cont64[i].addrHigh = putPaddrHigh(mp[i]->phys);
6273+ cmd->un.cont64[i].addrLow = putPaddrLow(mp[i]->phys);
6274+ cmd->un.cont64[i].tus.f.bdeSize =
6275+ ((struct lpfc_dmabufext *)mp[i])->size;
6276+ cmd->ulpBdeCount = ++i;
6277+
6278+ if ((--num_bde > 0) && (i < 2))
6279+ continue;
6280+
6281+ cmd->ulpCommand = CMD_QUE_XRI_BUF64_CX;
6282+ cmd->ulpLe = 1;
6283+ }
6284+
6285+ cmd->ulpClass = CLASS3;
6286+ cmd->ulpContext = rxxri;
6287+
6288+ ret_val = lpfc_sli_issue_iocb(phba, pring, cmdiocbq, 0);
6289+
6290+ if (ret_val == IOCB_ERROR) {
6291+ dfc_cmd_data_free(phba, (struct lpfc_dmabufext *)mp[0]);
6292+ if (mp[1])
6293+ dfc_cmd_data_free(phba,
6294+ (struct lpfc_dmabufext *)mp[1]);
6295+ dmp = list_entry(next, struct lpfc_dmabuf, list);
6296+ ret_val = EIO;
6297+ goto err_post_rxbufs_exit;
6298+ }
6299+
6300+ lpfc_sli_ringpostbuf_put(phba, pring, mp[0]);
6301+ if (mp[1]) {
6302+ lpfc_sli_ringpostbuf_put(phba, pring, mp[1]);
6303+ mp[1] = NULL;
6304+ }
6305+
6306+ /* The iocb was freed by lpfc_sli_issue_iocb */
6307+ if ((cmdiocbq = lpfc_sli_get_iocbq(phba)) == NULL) {
6308+ dmp = list_entry(next, struct lpfc_dmabuf, list);
6309+ ret_val = EIO;
6310+ goto err_post_rxbufs_exit;
6311+ }
6312+ cmd = &cmdiocbq->iocb;
6313+ i = 0;
6314+ }
6315+ list_del(&head);
6316+
6317+err_post_rxbufs_exit:
6318+
6319+ if(rxbmp) {
6320+ if(rxbmp->virt)
6321+ lpfc_mbuf_free(phba, rxbmp->virt, rxbmp->phys);
6322+ kfree(rxbmp);
6323+ }
6324+
6325+ if (cmdiocbq)
6326+ lpfc_sli_release_iocbq(phba, cmdiocbq);
6327+
6328+ return ret_val;
6329+}
6330+static int
6331+lpfc_ioctl_loopback_test(struct lpfc_hba *phba,
6332+ struct lpfcCmdInput *cip, void *dataout)
6333+{
6334+ struct lpfcdfc_host * dfchba;
6335+ struct lpfcdfc_event * evt;
6336+ struct event_data * evdat;
6337+
6338+ struct lpfc_sli *psli = &phba->sli;
6339+ struct lpfc_sli_ring *pring = &psli->ring[LPFC_ELS_RING];
6340+ uint32_t size = cip->lpfc_outsz;
6341+ uint32_t full_size = size + ELX_LOOPBACK_HEADER_SZ;
6342+ size_t segment_len = 0, segment_offset = 0, current_offset = 0;
6343+ uint16_t rpi;
6344+ struct lpfc_iocbq *cmdiocbq, *rspiocbq;
6345+ IOCB_t *cmd, *rsp;
6346+ struct lpfc_sli_ct_request *ctreq;
6347+ struct lpfc_dmabuf *txbmp;
6348+ struct ulp_bde64 *txbpl = NULL;
6349+ struct lpfc_dmabufext *txbuffer = NULL;
6350+ struct list_head head;
6351+ struct lpfc_dmabuf *curr;
6352+ uint16_t txxri, rxxri;
6353+ uint32_t num_bde;
6354+ uint8_t *ptr = NULL, *rx_databuf = NULL;
6355+ int rc;
6356+
6357+ if ((phba->link_state == LPFC_HBA_ERROR) ||
6358+ (psli->sli_flag & LPFC_BLOCK_MGMT_IO) ||
6359+ (!(psli->sli_flag & LPFC_SLI2_ACTIVE)))
6360+ return EACCES;
6361+
6362+ if (!lpfc_is_link_up(phba) || !(phba->link_flag & LS_LOOPBACK_MODE))
6363+ return EACCES;
6364+
6365+ if ((size == 0) || (size > 80 * BUF_SZ_4K))
6366+ return ERANGE;
6367+
6368+ mutex_lock(&lpfcdfc_lock);
6369+ list_for_each_entry(dfchba, &lpfcdfc_hosts, node) {
6370+ if (dfchba->phba == phba)
6371+ break;
6372+ }
6373+ mutex_unlock(&lpfcdfc_lock);
6374+ BUG_ON(&dfchba->node == &lpfcdfc_hosts);
6375+
6376+ rc = lpfcdfc_loop_self_reg(phba, &rpi);
6377+ if (rc)
6378+ return rc;
6379+
6380+ rc = lpfcdfc_loop_get_xri(phba, rpi, &txxri, &rxxri);
6381+ if (rc) {
6382+ lpfcdfc_loop_self_unreg(phba, rpi);
6383+ return rc;
6384+ }
6385+
6386+ rc = lpfcdfc_loop_post_rxbufs(phba, rxxri, full_size);
6387+ if (rc) {
6388+ lpfcdfc_loop_self_unreg(phba, rpi);
6389+ return rc;
6390+ }
6391+
6392+ evt = lpfcdfc_event_new(FC_REG_CT_EVENT, current->pid,
6393+ SLI_CT_ELX_LOOPBACK);
6394+ if (evt == NULL) {
6395+ lpfcdfc_loop_self_unreg(phba, rpi);
6396+ return ENOMEM;
6397+ }
6398+
6399+ mutex_lock(&lpfcdfc_lock);
6400+ list_add(&evt->node, &dfchba->ev_waiters);
6401+ lpfcdfc_event_ref(evt);
6402+ mutex_unlock(&lpfcdfc_lock);
6403+
6404+ cmdiocbq = lpfc_sli_get_iocbq(phba);
6405+ rspiocbq = lpfc_sli_get_iocbq(phba);
6406+ txbmp = kmalloc(sizeof (struct lpfc_dmabuf), GFP_KERNEL);
6407+
6408+ if (txbmp) {
6409+ txbmp->virt = lpfc_mbuf_alloc(phba, 0, &txbmp->phys);
6410+ INIT_LIST_HEAD(&txbmp->list);
6411+ txbpl = (struct ulp_bde64 *) txbmp->virt;
6412+ if (txbpl)
6413+ txbuffer = dfc_cmd_data_alloc(phba, NULL,
6414+ txbpl, full_size);
6415+ }
6416+
6417+ if (cmdiocbq == NULL || rspiocbq == NULL
6418+ || txbmp == NULL || txbpl == NULL || txbuffer == NULL) {
6419+ rc = ENOMEM;
6420+ goto err_loopback_test_exit;
6421+ }
6422+
6423+ cmd = &cmdiocbq->iocb;
6424+ rsp = &rspiocbq->iocb;
6425+
6426+ INIT_LIST_HEAD(&head);
6427+ list_add_tail(&head, &txbuffer->dma.list);
6428+ list_for_each_entry(curr, &head, list) {
6429+ segment_len = ((struct lpfc_dmabufext *)curr)->size;
6430+ if (current_offset == 0) {
6431+ ctreq = curr->virt;
6432+ memset(ctreq, 0, ELX_LOOPBACK_HEADER_SZ);
6433+ ctreq->RevisionId.bits.Revision = SLI_CT_REVISION;
6434+ ctreq->RevisionId.bits.InId = 0;
6435+ ctreq->FsType = SLI_CT_ELX_LOOPBACK;
6436+ ctreq->FsSubType = 0;
6437+ ctreq->CommandResponse.bits.CmdRsp = ELX_LOOPBACK_DATA ;
6438+ ctreq->CommandResponse.bits.Size = size;
6439+ segment_offset = ELX_LOOPBACK_HEADER_SZ;
6440+ } else
6441+ segment_offset = 0;
6442+
6443+ BUG_ON(segment_offset >= segment_len);
6444+ if (copy_from_user (curr->virt + segment_offset,
6445+ (void __user *)cip->lpfc_arg1
6446+ + current_offset,
6447+ segment_len - segment_offset)) {
6448+ rc = EIO;
6449+ list_del(&head);
6450+ goto err_loopback_test_exit;
6451+ }
6452+
6453+ current_offset += segment_len - segment_offset;
6454+ BUG_ON(current_offset > size);
6455+ }
6456+ list_del(&head);
6457+
6458+ /* Build the XMIT_SEQUENCE iocb */
6459+
6460+ num_bde = (uint32_t)txbuffer->flag;
6461+
6462+ cmd->un.xseq64.bdl.addrHigh = putPaddrHigh(txbmp->phys);
6463+ cmd->un.xseq64.bdl.addrLow = putPaddrLow(txbmp->phys);
6464+ cmd->un.xseq64.bdl.bdeFlags = BUFF_TYPE_BLP_64;
6465+ cmd->un.xseq64.bdl.bdeSize = (num_bde * sizeof(struct ulp_bde64));
6466+
6467+ cmd->un.xseq64.w5.hcsw.Fctl = (LS | LA);
6468+ cmd->un.xseq64.w5.hcsw.Dfctl = 0;
6469+ cmd->un.xseq64.w5.hcsw.Rctl = FC_UNSOL_CTL;
6470+ cmd->un.xseq64.w5.hcsw.Type = FC_COMMON_TRANSPORT_ULP;
6471+
6472+ cmd->ulpCommand = CMD_XMIT_SEQUENCE64_CX;
6473+ cmd->ulpBdeCount = 1;
6474+ cmd->ulpLe = 1;
6475+ cmd->ulpClass = CLASS3;
6476+ cmd->ulpContext = txxri;
6477+
6478+ cmdiocbq->iocb_flag |= LPFC_IO_LIBDFC;
6479+ cmdiocbq->vport = phba->pport;
6480+
6481+ rc = lpfc_sli_issue_iocb_wait(phba, pring, cmdiocbq, rspiocbq,
6482+ (phba->fc_ratov * 2) + LPFC_DRVR_TIMEOUT);
6483+
6484+ if ((rc != IOCB_SUCCESS) || (rsp->ulpStatus != IOCB_SUCCESS)) {
6485+ rc = EIO;
6486+ goto err_loopback_test_exit;
6487+ }
6488+
6489+ evt->waiting = 1;
6490+ rc = wait_event_interruptible_timeout(
6491+ evt->wq, !list_empty(&evt->events_to_see),
6492+ ((phba->fc_ratov * 2) + LPFC_DRVR_TIMEOUT) * HZ);
6493+ evt->waiting = 0;
6494+ if (list_empty(&evt->events_to_see))
6495+ rc = (rc) ? EINTR : ETIMEDOUT;
6496+ else {
6497+ ptr = dataout;
6498+ mutex_lock(&lpfcdfc_lock);
6499+ list_move(evt->events_to_see.prev, &evt->events_to_get);
6500+ evdat = list_entry(evt->events_to_get.prev,
6501+ typeof(*evdat), node);
6502+ mutex_unlock(&lpfcdfc_lock);
6503+ rx_databuf = evdat->data;
6504+ if (evdat->len != full_size) {
6505+ lpfc_printf_log(phba, KERN_ERR, LOG_LIBDFC,
6506+ "1603 Loopback test did not receive expected "
6507+ "data length. actual length 0x%x expected "
6508+ "length 0x%x\n",
6509+ evdat->len, full_size);
6510+ rc = EIO;
6511+ }
6512+ else if (rx_databuf == NULL)
6513+ rc = EIO;
6514+ else {
6515+ rx_databuf += ELX_LOOPBACK_HEADER_SZ;
6516+ memcpy(ptr, rx_databuf, size);
6517+ rc = IOCB_SUCCESS;
6518+ }
6519+ }
6520+
6521+err_loopback_test_exit:
6522+ lpfcdfc_loop_self_unreg(phba, rpi);
6523+
6524+ mutex_lock(&lpfcdfc_lock);
6525+ lpfcdfc_event_unref(evt); /* release ref */
6526+ lpfcdfc_event_unref(evt); /* delete */
6527+ mutex_unlock(&lpfcdfc_lock);
6528+
6529+ if (cmdiocbq != NULL)
6530+ lpfc_sli_release_iocbq(phba, cmdiocbq);
6531+
6532+ if (rspiocbq != NULL)
6533+ lpfc_sli_release_iocbq(phba, rspiocbq);
6534+
6535+ if (txbmp != NULL) {
6536+ if (txbpl != NULL) {
6537+ if (txbuffer != NULL)
6538+ dfc_cmd_data_free(phba, txbuffer);
6539+ lpfc_mbuf_free(phba, txbmp->virt, txbmp->phys);
6540+ }
6541+ kfree(txbmp);
6542+ }
6543+ return rc;
6544+}
6545+
6546+static int
6547+dfc_rsp_data_copy(struct lpfc_hba * phba,
6548+ uint8_t * outdataptr, struct lpfc_dmabufext * mlist,
6549+ uint32_t size)
6550+{
6551+ struct lpfc_dmabufext *mlast = NULL;
6552+ int cnt, offset = 0;
6553+ struct list_head head, *curr, *next;
6554+
6555+ if (!mlist)
6556+ return 0;
6557+
6558+ list_add_tail(&head, &mlist->dma.list);
6559+
6560+ list_for_each_safe(curr, next, &head) {
6561+ mlast = list_entry(curr, struct lpfc_dmabufext , dma.list);
6562+ if (!size)
6563+ break;
6564+
6565+		/* We copy chunks of 4K */
6566+ if (size > BUF_SZ_4K)
6567+ cnt = BUF_SZ_4K;
6568+ else
6569+ cnt = size;
6570+
6571+ if (outdataptr) {
6572+ pci_dma_sync_single_for_device(phba->pcidev,
6573+ mlast->dma.phys, LPFC_BPL_SIZE, PCI_DMA_TODEVICE);
6574+
6575+ /* Copy data to user space */
6576+ if (copy_to_user
6577+ ((void __user *) (outdataptr + offset),
6578+ (uint8_t *) mlast->dma.virt, cnt))
6579+ return 1;
6580+ }
6581+ offset += cnt;
6582+ size -= cnt;
6583+ }
6584+ list_del(&head);
6585+ return 0;
6586+}
6587+
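+/*
+ * Transmit a CT response on the exchange identified by tag, using an
+ * XMIT_SEQUENCE64_CX iocb built over the caller's buffer list, and wait
+ * for it to complete.
+ */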
6588+static int
6589+lpfc_issue_ct_rsp(struct lpfc_hba * phba, uint32_t tag,
6590+ struct lpfc_dmabuf * bmp,
6591+ struct lpfc_dmabufext * inp)
6592+{
6593+ struct lpfc_sli *psli;
6594+ IOCB_t *icmd;
6595+ struct lpfc_iocbq *ctiocb;
6596+ struct lpfc_sli_ring *pring;
6597+ uint32_t num_entry;
6598+ int rc = 0;
6599+
6600+ psli = &phba->sli;
6601+ pring = &psli->ring[LPFC_ELS_RING];
6602+ num_entry = inp->flag;
6603+ inp->flag = 0;
6604+
6605+ /* Allocate buffer for command iocb */
6606+ ctiocb = lpfc_sli_get_iocbq(phba);
6607+ if (!ctiocb) {
6608+ rc = ENOMEM;
6609+ goto issue_ct_rsp_exit;
6610+ }
6611+ icmd = &ctiocb->iocb;
6612+
6613+ icmd->un.xseq64.bdl.ulpIoTag32 = 0;
6614+ icmd->un.xseq64.bdl.addrHigh = putPaddrHigh(bmp->phys);
6615+ icmd->un.xseq64.bdl.addrLow = putPaddrLow(bmp->phys);
6616+ icmd->un.xseq64.bdl.bdeFlags = BUFF_TYPE_BLP_64;
6617+ icmd->un.xseq64.bdl.bdeSize = (num_entry * sizeof (struct ulp_bde64));
6618+ icmd->un.xseq64.w5.hcsw.Fctl = (LS | LA);
6619+ icmd->un.xseq64.w5.hcsw.Dfctl = 0;
6620+ icmd->un.xseq64.w5.hcsw.Rctl = FC_SOL_CTL;
6621+ icmd->un.xseq64.w5.hcsw.Type = FC_COMMON_TRANSPORT_ULP;
6622+
6623+ pci_dma_sync_single_for_device(phba->pcidev, bmp->phys, LPFC_BPL_SIZE,
6624+ PCI_DMA_TODEVICE);
6625+
6626+ /* Fill in rest of iocb */
6627+ icmd->ulpCommand = CMD_XMIT_SEQUENCE64_CX;
6628+ icmd->ulpBdeCount = 1;
6629+ icmd->ulpLe = 1;
6630+ icmd->ulpClass = CLASS3;
6631+ icmd->ulpContext = (ushort) tag;
6632+ icmd->ulpTimeout = phba->fc_ratov * 2;
6633+
6634+ /* Xmit CT response on exchange <xid> */
6635+ lpfc_printf_log(phba, KERN_INFO, LOG_ELS,
6636+ "1200 Xmit CT response on exchange x%x Data: x%x x%x\n",
6637+ icmd->ulpContext, icmd->ulpIoTag, phba->link_state);
6638+
6639+ ctiocb->iocb_cmpl = NULL;
6640+ ctiocb->iocb_flag |= LPFC_IO_LIBDFC;
6641+ ctiocb->vport = phba->pport;
6642+ rc = lpfc_sli_issue_iocb_wait(phba, pring, ctiocb, NULL,
6643+ phba->fc_ratov * 2 + LPFC_DRVR_TIMEOUT);
6644+
6645+ if (rc == IOCB_TIMEDOUT) {
6646+ ctiocb->context1 = NULL;
6647+ ctiocb->context2 = NULL;
6648+ ctiocb->iocb_cmpl = lpfc_ioctl_timeout_iocb_cmpl;
6649+ return rc;
6650+ }
6651+
6652+ /* Calling routine takes care of IOCB_ERROR => EIO translation */
6653+ if (rc != IOCB_SUCCESS)
6654+ rc = IOCB_ERROR;
6655+
6656+ lpfc_sli_release_iocbq(phba, ctiocb);
6657+issue_ct_rsp_exit:
6658+ return rc;
6659+}
6660+
6661+
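+/*
+ * Unsolicited CT event handler hooked into the ELS ring. Received frames
+ * are copied into an event_data entry and queued for any waiter registered
+ * for the request's FsType; non-loopback events are then passed on to the
+ * driver's original unsolicited event handler.
+ */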
6662+static void
6663+lpfcdfc_ct_unsol_event(struct lpfc_hba * phba,
6664+ struct lpfc_sli_ring * pring,
6665+ struct lpfc_iocbq * piocbq)
6666+{
6667+ struct lpfcdfc_host * dfchba = lpfcdfc_host_from_hba(phba);
6668+ uint32_t evt_req_id = 0;
6669+ uint32_t cmd;
6670+ uint32_t len;
6671+ struct lpfc_dmabuf *dmabuf = NULL;
6672+ struct lpfcdfc_event * evt;
6673+ struct event_data * evt_dat = NULL;
6674+ struct lpfc_iocbq * iocbq;
6675+ size_t offset = 0;
6676+ struct list_head head;
6677+ struct ulp_bde64 * bde;
6678+ dma_addr_t dma_addr;
6679+ int i;
6680+ struct lpfc_dmabuf *bdeBuf1 = piocbq->context2;
6681+ struct lpfc_dmabuf *bdeBuf2 = piocbq->context3;
6682+ struct lpfc_hbq_entry *hbqe;
6683+
6684+ BUG_ON(&dfchba->node == &lpfcdfc_hosts);
6685+ INIT_LIST_HEAD(&head);
6686+ if (piocbq->iocb.ulpBdeCount == 0 ||
6687+ piocbq->iocb.un.cont64[0].tus.f.bdeSize == 0)
6688+ goto error_unsol_ct_exit;
6689+
6690+ if (phba->sli3_options & LPFC_SLI3_HBQ_ENABLED)
6691+ dmabuf = bdeBuf1;
6692+ else {
6693+ dma_addr = getPaddr(piocbq->iocb.un.cont64[0].addrHigh,
6694+ piocbq->iocb.un.cont64[0].addrLow);
6695+ dmabuf = lpfc_sli_ringpostbuf_get(phba, pring, dma_addr);
6696+ }
6697+ BUG_ON(dmabuf == NULL);
6698+ evt_req_id = ((struct lpfc_sli_ct_request *)(dmabuf->virt))->FsType;
6699+ cmd = ((struct lpfc_sli_ct_request *)
6700+ (dmabuf->virt))->CommandResponse.bits.CmdRsp;
6701+ len = ((struct lpfc_sli_ct_request *)
6702+ (dmabuf->virt))->CommandResponse.bits.Size;
6703+ if (!(phba->sli3_options & LPFC_SLI3_HBQ_ENABLED))
6704+ lpfc_sli_ringpostbuf_put(phba, pring, dmabuf);
6705+
6706+ mutex_lock(&lpfcdfc_lock);
6707+ list_for_each_entry(evt, &dfchba->ev_waiters, node) {
6708+ if (!(evt->type_mask & FC_REG_CT_EVENT) ||
6709+ evt->req_id != evt_req_id)
6710+ continue;
6711+
6712+ lpfcdfc_event_ref(evt);
6713+
6714+ if ((evt_dat = kzalloc(sizeof(*evt_dat), GFP_KERNEL)) == NULL) {
6715+ lpfcdfc_event_unref(evt);
6716+ break;
6717+ }
6718+
6719+ mutex_unlock(&lpfcdfc_lock);
6720+
6721+ INIT_LIST_HEAD(&head);
6722+ list_add_tail(&head, &piocbq->list);
6723+ if (phba->sli3_options & LPFC_SLI3_HBQ_ENABLED) {
6724+ /* take accumulated byte count from the last iocbq */
6725+ iocbq = list_entry(head.prev, typeof(*iocbq), list);
6726+ evt_dat->len = iocbq->iocb.unsli3.rcvsli3.acc_len;
6727+ } else {
6728+ list_for_each_entry(iocbq, &head, list) {
6729+ for (i = 0; i < iocbq->iocb.ulpBdeCount; i++)
6730+ evt_dat->len +=
6731+ iocbq->iocb.un.cont64[i].tus.f.bdeSize;
6732+ }
6733+ }
6734+
6735+
6736+ evt_dat->data = kzalloc(evt_dat->len, GFP_KERNEL);
6737+ if (evt_dat->data == NULL) {
6738+ kfree (evt_dat);
6739+ mutex_lock(&lpfcdfc_lock);
6740+ lpfcdfc_event_unref(evt);
6741+ mutex_unlock(&lpfcdfc_lock);
6742+ goto error_unsol_ct_exit;
6743+ }
6744+
6745+ list_for_each_entry(iocbq, &head, list) {
6746+ if (phba->sli3_options & LPFC_SLI3_HBQ_ENABLED) {
6747+ bdeBuf1 = iocbq->context2;
6748+ bdeBuf2 = iocbq->context3;
6749+ }
6750+ for (i = 0; i < iocbq->iocb.ulpBdeCount; i++) {
6751+ int size = 0;
6752+ if (phba->sli3_options &
6753+ LPFC_SLI3_HBQ_ENABLED) {
6754+ BUG_ON(i>1);
6755+ if (i == 0) {
6756+ hbqe = (struct lpfc_hbq_entry *)
6757+ &iocbq->iocb.un.ulpWord[0];
6758+ size = hbqe->bde.tus.f.bdeSize;
6759+ dmabuf = bdeBuf1;
6760+ } else if (i == 1) {
6761+ hbqe = (struct lpfc_hbq_entry *)
6762+ &iocbq->iocb.unsli3.
6763+ sli3Words[4];
6764+ size = hbqe->bde.tus.f.bdeSize;
6765+ dmabuf = bdeBuf2;
6766+ }
6767+ if ((offset + size) > evt_dat->len)
6768+ size = evt_dat->len - offset;
6769+ } else {
6770+ size = iocbq->iocb.un.cont64[i].
6771+ tus.f.bdeSize;
6772+ bde = &iocbq->iocb.un.cont64[i];
6773+ dma_addr = getPaddr(bde->addrHigh,
6774+ bde->addrLow);
6775+ dmabuf = lpfc_sli_ringpostbuf_get(phba,
6776+ pring, dma_addr);
6777+ }
6778+ if (dmabuf == NULL) {
6779+ kfree (evt_dat->data);
6780+ kfree (evt_dat);
6781+ mutex_lock(&lpfcdfc_lock);
6782+ lpfcdfc_event_unref(evt);
6783+ mutex_unlock(&lpfcdfc_lock);
6784+ goto error_unsol_ct_exit;
6785+ }
6786+ memcpy ((char *)(evt_dat->data) + offset,
6787+ dmabuf->virt, size);
6788+ offset += size;
6789+ if (evt_req_id != SLI_CT_ELX_LOOPBACK &&
6790+ !(phba->sli3_options &
6791+ LPFC_SLI3_HBQ_ENABLED))
6792+ lpfc_sli_ringpostbuf_put(phba, pring,
6793+ dmabuf);
6794+ else {
6795+ switch (cmd) {
6796+ case ELX_LOOPBACK_DATA:
6797+ dfc_cmd_data_free(phba,
6798+ (struct lpfc_dmabufext *)
6799+ dmabuf);
6800+ break;
6801+ case ELX_LOOPBACK_XRI_SETUP:
6802+ if (!(phba->sli3_options &
6803+ LPFC_SLI3_HBQ_ENABLED))
6804+ lpfc_post_buffer(phba,
6805+ pring,
6806+ 1);
6807+ else
6808+ lpfc_in_buf_free(phba,
6809+ dmabuf);
6810+ break;
6811+ default:
6812+ if (!(phba->sli3_options &
6813+ LPFC_SLI3_HBQ_ENABLED))
6814+ lpfc_post_buffer(phba,
6815+ pring,
6816+ 1);
6817+ break;
6818+ }
6819+ }
6820+ }
6821+ }
6822+
6823+ mutex_lock(&lpfcdfc_lock);
6824+ evt_dat->immed_dat = piocbq->iocb.ulpContext;
6825+ evt_dat->type = FC_REG_CT_EVENT;
6826+ list_add(&evt_dat->node, &evt->events_to_see);
6827+ wake_up_interruptible(&evt->wq);
6828+ lpfcdfc_event_unref(evt);
6829+ if (evt_req_id == SLI_CT_ELX_LOOPBACK)
6830+ break;
6831+ }
6832+ mutex_unlock(&lpfcdfc_lock);
6833+
6834+error_unsol_ct_exit:
6835+ if(!list_empty(&head))
6836+ list_del(&head);
6837+ if (evt_req_id != SLI_CT_ELX_LOOPBACK &&
6838+ dfchba->base_ct_unsol_event != NULL)
6839+ (dfchba->base_ct_unsol_event)(phba, pring, piocbq);
6840+
6841+ return;
6842+}
6843+
6844+
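+/*
+ * Allocate a chain of DMA buffers in 4K chunks, build the BPL entries that
+ * describe them and, if indataptr is set, copy the user data in. Returns
+ * the head of the chain with ->flag set to the number of BDEs, or NULL.
+ */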
6845+struct lpfc_dmabufext *
6846+__dfc_cmd_data_alloc(struct lpfc_hba * phba,
6847+ char *indataptr, struct ulp_bde64 * bpl, uint32_t size,
6848+ int nocopydata)
6849+{
6850+ struct lpfc_dmabufext *mlist = NULL;
6851+ struct lpfc_dmabufext *dmp;
6852+ int cnt, offset = 0, i = 0;
6853+ struct pci_dev *pcidev;
6854+
6855+ pcidev = phba->pcidev;
6856+
6857+ while (size) {
6858+ /* We get chunks of 4K */
6859+ if (size > BUF_SZ_4K)
6860+ cnt = BUF_SZ_4K;
6861+ else
6862+ cnt = size;
6863+
6864+ /* allocate struct lpfc_dmabufext buffer header */
6865+ dmp = kmalloc(sizeof (struct lpfc_dmabufext), GFP_KERNEL);
6866+ if (dmp == 0)
6867+ goto out;
6868+
6869+ INIT_LIST_HEAD(&dmp->dma.list);
6870+
6871+ /* Queue it to a linked list */
6872+ if (mlist)
6873+ list_add_tail(&dmp->dma.list, &mlist->dma.list);
6874+ else
6875+ mlist = dmp;
6876+
6877+ /* allocate buffer */
6878+ dmp->dma.virt = dma_alloc_coherent(&pcidev->dev,
6879+ cnt,
6880+ &(dmp->dma.phys),
6881+ GFP_KERNEL);
6882+
6883+ if (dmp->dma.virt == NULL)
6884+ goto out;
6885+
6886+ dmp->size = cnt;
6887+
6888+ if (indataptr || nocopydata) {
6889+ if (indataptr)
6890+ /* Copy data from user space in */
6891+ if (copy_from_user ((uint8_t *) dmp->dma.virt,
6892+ (void __user *) (indataptr + offset),
6893+ cnt)) {
6894+ goto out;
6895+ }
6896+
6897+ pci_dma_sync_single_for_device(phba->pcidev,
6898+ dmp->dma.phys, LPFC_BPL_SIZE, PCI_DMA_TODEVICE);
6899+
6900+ } else
6901+ memset((uint8_t *)dmp->dma.virt, 0, cnt);
6902+ bpl->tus.f.bdeFlags = BUFF_TYPE_BDE_64;
6903+
6904+ /* build buffer ptr list for IOCB */
6905+ bpl->addrLow = le32_to_cpu(putPaddrLow(dmp->dma.phys));
6906+ bpl->addrHigh = le32_to_cpu(putPaddrHigh(dmp->dma.phys));
6907+ bpl->tus.f.bdeSize = (ushort) cnt;
6908+ bpl->tus.w = le32_to_cpu(bpl->tus.w);
6909+ bpl++;
6910+
6911+ i++;
6912+ offset += cnt;
6913+ size -= cnt;
6914+ }
6915+
6916+ mlist->flag = i;
6917+ return mlist;
6918+out:
6919+ dfc_cmd_data_free(phba, mlist);
6920+ return NULL;
6921+}
6922+
6923+static struct lpfc_dmabufext *
6924+dfc_cmd_data_alloc(struct lpfc_hba * phba,
6925+ char *indataptr, struct ulp_bde64 * bpl, uint32_t size)
6926+{
6927+	/* if indataptr is NULL it is a response buffer. */
6928+ return __dfc_cmd_data_alloc(phba, indataptr, bpl, size,
6929+ 0 /* don't copy user data */);
6930+}
6931+
6932+int
6933+__dfc_cmd_data_free(struct lpfc_hba * phba, struct lpfc_dmabufext * mlist)
6934+{
6935+ return dfc_cmd_data_free(phba, mlist);
6936+}
6937+static int
6938+dfc_cmd_data_free(struct lpfc_hba * phba, struct lpfc_dmabufext * mlist)
6939+{
6940+ struct lpfc_dmabufext *mlast;
6941+ struct pci_dev *pcidev;
6942+ struct list_head head, *curr, *next;
6943+
6944+ if ((!mlist) || (!lpfc_is_link_up(phba) &&
6945+ (phba->link_flag & LS_LOOPBACK_MODE))) {
6946+ return 0;
6947+ }
6948+
6949+ pcidev = phba->pcidev;
6950+ list_add_tail(&head, &mlist->dma.list);
6951+
6952+ list_for_each_safe(curr, next, &head) {
6953+ mlast = list_entry(curr, struct lpfc_dmabufext , dma.list);
6954+ if (mlast->dma.virt)
6955+ dma_free_coherent(&pcidev->dev,
6956+ mlast->size,
6957+ mlast->dma.virt,
6958+ mlast->dma.phys);
6959+ kfree(mlast);
6960+ }
6961+ return 0;
6962+}
6963+
6964+
6965+/* The only reason we need this reverse lookup is that we
6966+ * are bent on keeping the original calling conventions.
6967+ */
6968+static struct lpfcdfc_host *
6969+lpfcdfc_host_from_hba(struct lpfc_hba * phba)
6970+{
6971+ struct lpfcdfc_host * dfchba;
6972+
6973+ mutex_lock(&lpfcdfc_lock);
6974+ list_for_each_entry(dfchba, &lpfcdfc_hosts, node) {
6975+ if (dfchba->phba == phba)
6976+ break;
6977+ }
6978+ mutex_unlock(&lpfcdfc_lock);
6979+
6980+ return dfchba;
6981+}
6982+
6983+struct lpfcdfc_host *
6984+lpfcdfc_host_add (struct pci_dev * dev,
6985+ struct Scsi_Host * host,
6986+ struct lpfc_hba * phba)
6987+{
6988+ struct lpfcdfc_host * dfchba = NULL;
6989+ struct lpfc_sli_ring_mask * prt = NULL;
6990+
6991+ dfchba = kzalloc(sizeof(*dfchba), GFP_KERNEL);
6992+ if (dfchba == NULL)
6993+ return NULL;
6994+
6995+ dfchba->inst = phba->brd_no;
6996+ dfchba->phba = phba;
6997+ dfchba->vport = phba->pport;
6998+ dfchba->host = host;
6999+ dfchba->dev = dev;
7000+ dfchba->blocked = 0;
7001+
7002+ spin_lock_irq(&phba->hbalock);
7003+ prt = phba->sli.ring[LPFC_ELS_RING].prt;
7004+ dfchba->base_ct_unsol_event = prt[2].lpfc_sli_rcv_unsol_event;
7005+ prt[2].lpfc_sli_rcv_unsol_event = lpfcdfc_ct_unsol_event;
7006+ prt[3].lpfc_sli_rcv_unsol_event = lpfcdfc_ct_unsol_event;
7007+ spin_unlock_irq(&phba->hbalock);
7008+ mutex_lock(&lpfcdfc_lock);
7009+ list_add_tail(&dfchba->node, &lpfcdfc_hosts);
7010+ INIT_LIST_HEAD(&dfchba->ev_waiters);
7011+ mutex_unlock(&lpfcdfc_lock);
7012+
7013+ return dfchba;
7014+}
7015+
7016+
7017+void
7018+lpfcdfc_host_del (struct lpfcdfc_host * dfchba)
7019+{
7020+ struct Scsi_Host * host;
7021+ struct lpfc_hba * phba = NULL;
7022+ struct lpfc_sli_ring_mask * prt = NULL;
7023+ struct lpfcdfc_event * evt;
7024+
7025+ mutex_lock(&lpfcdfc_lock);
7026+ dfchba->blocked = 1;
7027+
7028+ list_for_each_entry(evt, &dfchba->ev_waiters, node) {
7029+ wake_up_interruptible(&evt->wq);
7030+ }
7031+
7032+ while (dfchba->ref_count) {
7033+ mutex_unlock(&lpfcdfc_lock);
7034+ msleep(2000);
7035+ mutex_lock(&lpfcdfc_lock);
7036+ }
7037+
7038+ if (dfchba->dev->driver) {
7039+ host = pci_get_drvdata(dfchba->dev);
7040+ if ((host != NULL) &&
7041+ (struct lpfc_vport *)host->hostdata == dfchba->vport) {
7042+ phba = dfchba->phba;
7043+ mutex_unlock(&lpfcdfc_lock);
7044+ spin_lock_irq(&phba->hbalock);
7045+ prt = phba->sli.ring[LPFC_ELS_RING].prt;
7046+ prt[2].lpfc_sli_rcv_unsol_event =
7047+ dfchba->base_ct_unsol_event;
7048+ prt[3].lpfc_sli_rcv_unsol_event =
7049+ dfchba->base_ct_unsol_event;
7050+ spin_unlock_irq(&phba->hbalock);
7051+ mutex_lock(&lpfcdfc_lock);
7052+ }
7053+ }
7054+ list_del_init(&dfchba->node);
7055+ mutex_unlock(&lpfcdfc_lock);
7056+ kfree (dfchba);
7057+}
7058+
7059+/*
7060+ * Retrieve lpfc_hba * matching instance (board no)
7061+ * If found return lpfc_hba *
7062+ * If not found return NULL
7063+ */
7064+static struct lpfcdfc_host *
7065+lpfcdfc_get_phba_by_inst(int inst)
7066+{
7067+ struct Scsi_Host * host = NULL;
7068+ struct lpfcdfc_host * dfchba;
7069+
7070+ mutex_lock(&lpfcdfc_lock);
7071+ list_for_each_entry(dfchba, &lpfcdfc_hosts, node) {
7072+ if (dfchba->inst == inst) {
7073+ if (dfchba->dev->driver) {
7074+ host = pci_get_drvdata(dfchba->dev);
7075+ if ((host != NULL) &&
7076+ (struct lpfc_vport *)host->hostdata ==
7077+ dfchba->vport) {
7078+ mutex_unlock(&lpfcdfc_lock);
7079+ BUG_ON(dfchba->phba->brd_no != inst);
7080+ return dfchba;
7081+ }
7082+ }
7083+ mutex_unlock(&lpfcdfc_lock);
7084+ return NULL;
7085+ }
7086+ }
7087+ mutex_unlock(&lpfcdfc_lock);
7088+
7089+ return NULL;
7090+}
7091+
7092+static int
7093+lpfcdfc_do_ioctl(struct lpfcCmdInput *cip)
7094+{
7095+ struct lpfcdfc_host * dfchba = NULL;
7096+ struct lpfc_hba *phba = NULL;
7097+ int rc;
7098+ uint32_t total_mem;
7099+ void *dataout;
7100+
7101+
7102+ /* Some ioctls are per module and do not need phba */
7103+ switch (cip->lpfc_cmd) {
7104+ case LPFC_GET_DFC_REV:
7105+ break;
7106+ default:
7107+ dfchba = lpfcdfc_get_phba_by_inst(cip->lpfc_brd);
7108+ if (dfchba == NULL)
7109+ return EINVAL;
7110+ phba = dfchba->phba;
7111+ break;
7112+ };
7113+
7114+ if (phba)
7115+ lpfc_printf_log(phba, KERN_INFO, LOG_LIBDFC,
7116+ "1601 libdfc ioctl entry Data: x%x x%lx x%lx x%x\n",
7117+ cip->lpfc_cmd, (unsigned long) cip->lpfc_arg1,
7118+ (unsigned long) cip->lpfc_arg2, cip->lpfc_outsz);
7119+ mutex_lock(&lpfcdfc_lock);
7120+ if (dfchba && dfchba->blocked) {
7121+ mutex_unlock(&lpfcdfc_lock);
7122+ return EINVAL;
7123+ }
7124+ if (dfchba)
7125+ dfchba->ref_count++;
7126+ mutex_unlock(&lpfcdfc_lock);
7127+ if (cip->lpfc_outsz >= BUF_SZ_4K) {
7128+
7129+ /*
7130+		 * Allocate memory for ioctl data. If the buffer is bigger than
7131+		 * 64k, allocate 64k and re-use that buffer over and over to
7132+		 * transfer the whole block. This is because the Linux kernel has
7133+		 * trouble allocating more than 120k of kernel-space memory, as
7134+		 * seen with GET_FCPTARGETMAPPING...
7135+ */
7136+ if (cip->lpfc_outsz <= (64 * 1024))
7137+ total_mem = cip->lpfc_outsz;
7138+ else
7139+ total_mem = 64 * 1024;
7140+ } else {
7141+ /* Allocate memory for ioctl data */
7142+ total_mem = BUF_SZ_4K;
7143+ }
7144+
7145+ /*
7146+	 * For LPFC_HBA_GET_EVENT the handler allocates only the memory needed
7147+	 * to store the event info. Allocating the maximum possible buffer size
7148+	 * (64KB) can sometimes fail under heavy IO.
7149+ */
7150+ if (cip->lpfc_cmd == LPFC_HBA_GET_EVENT) {
7151+ dataout = NULL;
7152+ } else {
7153+ dataout = kmalloc(total_mem, GFP_KERNEL);
7154+
7155+		if (!dataout) {
7156+ mutex_lock(&lpfcdfc_lock);
7157+ if (dfchba)
7158+ dfchba->ref_count--;
7159+ mutex_unlock(&lpfcdfc_lock);
7160+ return ENOMEM;
7161+ }
7162+ }
7163+
7164+ switch (cip->lpfc_cmd) {
7165+
7166+ case LPFC_GET_DFC_REV:
7167+ ((struct DfcRevInfo *) dataout)->a_Major = DFC_MAJOR_REV;
7168+ ((struct DfcRevInfo *) dataout)->a_Minor = DFC_MINOR_REV;
7169+ cip->lpfc_outsz = sizeof (struct DfcRevInfo);
7170+ rc = 0;
7171+ break;
7172+
7173+ case LPFC_SEND_ELS:
7174+ rc = lpfc_ioctl_send_els(phba, cip, dataout);
7175+ break;
7176+
7177+ case LPFC_HBA_SEND_MGMT_RSP:
7178+ rc = lpfc_ioctl_send_mgmt_rsp(phba, cip);
7179+ break;
7180+
7181+ case LPFC_HBA_SEND_MGMT_CMD:
7182+ case LPFC_CT:
7183+ rc = lpfc_ioctl_send_mgmt_cmd(phba, cip, dataout);
7184+ break;
7185+
7186+ case LPFC_HBA_GET_EVENT:
7187+ rc = lpfc_ioctl_hba_get_event(phba, cip, &dataout, &total_mem);
7188+ if ((total_mem) && (copy_to_user ((void __user *)
7189+ cip->lpfc_dataout, (uint8_t *) dataout, total_mem)))
7190+ rc = EIO;
7191+ /* This is to prevent copy_to_user at end of the function. */
7192+ cip->lpfc_outsz = 0;
7193+ break;
7194+
7195+ case LPFC_HBA_SET_EVENT:
7196+ rc = lpfc_ioctl_hba_set_event(phba, cip);
7197+ break;
7198+
7199+ case LPFC_LOOPBACK_MODE:
7200+ rc = lpfc_ioctl_loopback_mode(phba, cip, dataout);
7201+ break;
7202+
7203+ case LPFC_LOOPBACK_TEST:
7204+ rc = lpfc_ioctl_loopback_test(phba, cip, dataout);
7205+ break;
7206+
7207+ case LPFC_HBA_RNID:
7208+ rc = lpfc_ioctl_hba_rnid(phba, cip, dataout);
7209+ break;
7210+
7211+ default:
7212+ rc = EINVAL;
7213+ break;
7214+ }
7215+
7216+ if (phba)
7217+ lpfc_printf_log(phba, KERN_INFO, LOG_LIBDFC,
7218+ "1602 libdfc ioctl exit Data: x%x x%x x%lx\n",
7219+ rc, cip->lpfc_outsz, (unsigned long) cip->lpfc_dataout);
7220+ /* Copy data to user space config method */
7221+ if (rc == 0) {
7222+ if (cip->lpfc_outsz) {
7223+ if (copy_to_user
7224+ ((void __user *) cip->lpfc_dataout,
7225+ (uint8_t *) dataout, cip->lpfc_outsz)) {
7226+ rc = EIO;
7227+ }
7228+ }
7229+ }
7230+
7231+ kfree(dataout);
7232+ mutex_lock(&lpfcdfc_lock);
7233+ if (dfchba)
7234+ dfchba->ref_count--;
7235+ mutex_unlock(&lpfcdfc_lock);
7236+
7237+ return rc;
7238+}
7239+
7240+static int
7241+lpfcdfc_ioctl(struct inode *inode,
7242+ struct file *file, unsigned int cmd, unsigned long arg)
7243+{
7244+ int rc;
7245+ struct lpfcCmdInput *ci;
7246+
7247+ if (!arg)
7248+ return -EINVAL;
7249+
7250+ ci = (struct lpfcCmdInput *) kmalloc(sizeof (struct lpfcCmdInput),
7251+ GFP_KERNEL);
7252+
7253+ if (!ci)
7254+ return -ENOMEM;
7255+
7256+ if ((rc = copy_from_user
7257+ ((uint8_t *) ci, (void __user *) arg,
7258+ sizeof (struct lpfcCmdInput)))) {
7259+ kfree(ci);
7260+ return -EIO;
7261+ }
7262+
7263+ rc = lpfcdfc_do_ioctl(ci);
7264+
7265+ kfree(ci);
7266+ return -rc;
7267+}
7268+
7269+#ifdef CONFIG_COMPAT
7270+static long
7271+lpfcdfc_compat_ioctl(struct file * file, unsigned int cmd, unsigned long arg)
7272+{
7273+ struct lpfcCmdInput32 arg32;
7274+ struct lpfcCmdInput arg64;
7275+ int ret;
7276+
7277+ if(copy_from_user(&arg32, (void __user *)arg,
7278+ sizeof(struct lpfcCmdInput32)))
7279+ return -EFAULT;
7280+
7281+ arg64.lpfc_brd = arg32.lpfc_brd;
7282+ arg64.lpfc_ring = arg32.lpfc_ring;
7283+ arg64.lpfc_iocb = arg32.lpfc_iocb;
7284+ arg64.lpfc_flag = arg32.lpfc_flag;
7285+ arg64.lpfc_arg1 = (void *)(unsigned long) arg32.lpfc_arg1;
7286+ arg64.lpfc_arg2 = (void *)(unsigned long) arg32.lpfc_arg2;
7287+ arg64.lpfc_arg3 = (void *)(unsigned long) arg32.lpfc_arg3;
7288+ arg64.lpfc_dataout = (void *)(unsigned long) arg32.lpfc_dataout;
7289+ arg64.lpfc_cmd = arg32.lpfc_cmd;
7290+ arg64.lpfc_outsz = arg32.lpfc_outsz;
7291+ arg64.lpfc_arg4 = arg32.lpfc_arg4;
7292+ arg64.lpfc_arg5 = arg32.lpfc_arg5;
7293+
7294+ ret = lpfcdfc_do_ioctl(&arg64);
7295+
7296+ arg32.lpfc_brd = arg64.lpfc_brd;
7297+ arg32.lpfc_ring = arg64.lpfc_ring;
7298+ arg32.lpfc_iocb = arg64.lpfc_iocb;
7299+ arg32.lpfc_flag = arg64.lpfc_flag;
7300+ arg32.lpfc_arg1 = (u32)(unsigned long) arg64.lpfc_arg1;
7301+ arg32.lpfc_arg2 = (u32)(unsigned long) arg64.lpfc_arg2;
7302+ arg32.lpfc_arg3 = (u32)(unsigned long) arg64.lpfc_arg3;
7303+ arg32.lpfc_dataout = (u32)(unsigned long) arg64.lpfc_dataout;
7304+ arg32.lpfc_cmd = arg64.lpfc_cmd;
7305+ arg32.lpfc_outsz = arg64.lpfc_outsz;
7306+ arg32.lpfc_arg4 = arg64.lpfc_arg4;
7307+ arg32.lpfc_arg5 = arg64.lpfc_arg5;
7308+
7309+ if(copy_to_user((void __user *)arg, &arg32,
7310+ sizeof(struct lpfcCmdInput32)))
7311+ return -EFAULT;
7312+
7313+ return -ret;
7314+}
7315+#endif
7316+
7317+static struct file_operations lpfc_fops = {
7318+ .owner = THIS_MODULE,
7319+ .ioctl = lpfcdfc_ioctl,
7320+#ifdef CONFIG_COMPAT
7321+ .compat_ioctl = lpfcdfc_compat_ioctl,
7322+#endif
7323+};
7324+
7325+int
7326+lpfc_cdev_init(void)
7327+{
7328+
7329+ lpfcdfc_major = register_chrdev(0, LPFC_CHAR_DEV_NAME, &lpfc_fops);
7330+ if (lpfcdfc_major < 0) {
7331+ printk(KERN_ERR "%s:%d Unable to register \"%s\" device.\n",
7332+ __func__, __LINE__, LPFC_CHAR_DEV_NAME);
7333+ return lpfcdfc_major;
7334+ }
7335+
7336+ return 0;
7337+}
7338+
7339+void
7340+lpfc_cdev_exit(void)
7341+{
7342+ unregister_chrdev(lpfcdfc_major, LPFC_CHAR_DEV_NAME);
7343+}
7344--- /dev/null
7345+++ b/drivers/scsi/lpfc/lpfc_ioctl.h
7346@@ -0,0 +1,184 @@
7347+/*******************************************************************
7348+ * This file is part of the Emulex Linux Device Driver for *
7349+ * Fibre Channel Host Bus Adapters. *
7350+ * Copyright (C) 2006 Emulex. All rights reserved. *
7351+ * EMULEX and SLI are trademarks of Emulex. *
7352+ * www.emulex.com *
7353+ * *
7354+ * This program is free software; you can redistribute it and/or *
7355+ * modify it under the terms of version 2 of the GNU General *
7356+ * Public License as published by the Free Software Foundation. *
7357+ * This program is distributed in the hope that it will be useful. *
7358+ * ALL EXPRESS OR IMPLIED CONDITIONS, REPRESENTATIONS AND *
7359+ * WARRANTIES, INCLUDING ANY IMPLIED WARRANTY OF MERCHANTABILITY, *
7360+ * FITNESS FOR A PARTICULAR PURPOSE, OR NON-INFRINGEMENT, ARE *
7361+ * DISCLAIMED, EXCEPT TO THE EXTENT THAT SUCH DISCLAIMERS ARE HELD *
7362+ * TO BE LEGALLY INVALID. See the GNU General Public License for *
7363+ * more details, a copy of which can be found in the file COPYING *
7364+ * included with this package. *
7365+ *******************************************************************/
7366+
7367+#define DFC_MAJOR_REV 81
7368+#define DFC_MINOR_REV 0
7369+
7370+#define LPFC_MAX_EVENT 128
7371+
7372+#define LPFC_CT 0x42 /* Send CT passthru command */
7373+#define LPFC_HBA_RNID 0x52 /* Send an RNID request */
7374+#define LPFC_HBA_REFRESHINFO 0x56 /* Do a refresh of the stats */
7375+#define LPFC_SEND_ELS 0x57 /* Send out an ELS command */
7376+#define LPFC_HBA_SET_EVENT 0x59 /* Set FCP event(s) */
7377+#define LPFC_HBA_GET_EVENT 0x5a /* Get FCP event(s) */
7378+#define LPFC_HBA_SEND_MGMT_CMD 0x5b /* Send a management command */
7379+#define LPFC_HBA_SEND_MGMT_RSP 0x5c /* Send a management response */
7380+
7381+#define LPFC_GET_DFC_REV 0x68 /* Get the rev of the ioctl
7382+ driver */
7383+#define LPFC_LOOPBACK_TEST 0x72 /* Run Loopback test */
7384+#define LPFC_LOOPBACK_MODE 0x73 /* Enter Loopback mode */
7385+/* LPFC_LAST_IOCTL_USED 0x73 Last LPFC Ioctl used */
7386+
7387+#define INTERNAL_LOOP_BACK 0x1
7388+#define EXTERNAL_LOOP_BACK 0x2
7389+
7390+/* the DfcRevInfo structure */
7391+struct DfcRevInfo {
7392+ uint32_t a_Major;
7393+ uint32_t a_Minor;
7394+} ;
7395+
7396+#define LPFC_WWPN_TYPE 0
7397+#define LPFC_PORTID_TYPE 1
7398+#define LPFC_WWNN_TYPE 2
7399+
7400+struct nport_id {
7401+ uint32_t idType; /* 0 - wwpn, 1 - d_id, 2 - wwnn */
7402+ uint32_t d_id;
7403+ uint8_t wwpn[8];
7404+};
7405+
7406+#define LPFC_EVENT_LIP_OCCURRED 1
7407+#define LPFC_EVENT_LINK_UP 2
7408+#define LPFC_EVENT_LINK_DOWN 3
7409+#define LPFC_EVENT_LIP_RESET_OCCURRED 4
7410+#define LPFC_EVENT_RSCN 5
7411+#define LPFC_EVENT_PROPRIETARY 0xFFFF
7412+
7413+struct lpfc_hba_event_info {
7414+ uint32_t event_code;
7415+ uint32_t port_id;
7416+ union {
7417+ uint32_t rscn_event_info;
7418+ uint32_t pty_event_info;
7419+ } event;
7420+};
7421+
7422+
7423+#define LPFC_CHAR_DEV_NAME "lpfcdfc"
7424+
7425+/*
7426+ * Diagnostic (DFC) Command & Input structures: (LPFC)
7427+ */
7428+struct lpfcCmdInput {
7429+ short lpfc_brd;
7430+ short lpfc_ring;
7431+ short lpfc_iocb;
7432+ short lpfc_flag;
7433+ void *lpfc_arg1;
7434+ void *lpfc_arg2;
7435+ void *lpfc_arg3;
7436+ char *lpfc_dataout;
7437+ uint32_t lpfc_cmd;
7438+ uint32_t lpfc_outsz;
7439+ uint32_t lpfc_arg4;
7440+ uint32_t lpfc_arg5;
7441+};
7442+/* Used for ioctl command */
7443+#define LPFC_DFC_CMD_IOCTL_MAGIC 0xFC
7444+#define LPFC_DFC_CMD_IOCTL _IOWR(LPFC_DFC_CMD_IOCTL_MAGIC, 0x1,\
7445+ struct lpfcCmdInput)
7446+
7447+#ifdef CONFIG_COMPAT
7448+/* 32 bit version */
7449+struct lpfcCmdInput32 {
7450+ short lpfc_brd;
7451+ short lpfc_ring;
7452+ short lpfc_iocb;
7453+ short lpfc_flag;
7454+ u32 lpfc_arg1;
7455+ u32 lpfc_arg2;
7456+ u32 lpfc_arg3;
7457+ u32 lpfc_dataout;
7458+ uint32_t lpfc_cmd;
7459+ uint32_t lpfc_outsz;
7460+ uint32_t lpfc_arg4;
7461+ uint32_t lpfc_arg5;
7462+};
7463+#endif
7464+
7465+#define SLI_CT_ELX_LOOPBACK 0x10
7466+
7467+enum ELX_LOOPBACK_CMD {
7468+ ELX_LOOPBACK_XRI_SETUP,
7469+ ELX_LOOPBACK_DATA,
7470+};
7471+
7472+
7473+struct lpfc_link_info {
7474+ uint32_t a_linkEventTag;
7475+ uint32_t a_linkUp;
7476+ uint32_t a_linkDown;
7477+ uint32_t a_linkMulti;
7478+ uint32_t a_DID;
7479+ uint8_t a_topology;
7480+ uint8_t a_linkState;
7481+ uint8_t a_alpa;
7482+ uint8_t a_alpaCnt;
7483+ uint8_t a_alpaMap[128];
7484+ uint8_t a_wwpName[8];
7485+ uint8_t a_wwnName[8];
7486+};
7487+
7488+enum lpfc_host_event_code {
7489+ LPFCH_EVT_LIP = 0x1,
7490+ LPFCH_EVT_LINKUP = 0x2,
7491+ LPFCH_EVT_LINKDOWN = 0x3,
7492+ LPFCH_EVT_LIPRESET = 0x4,
7493+ LPFCH_EVT_RSCN = 0x5,
7494+ LPFCH_EVT_ADAPTER_CHANGE = 0x103,
7495+ LPFCH_EVT_PORT_UNKNOWN = 0x200,
7496+ LPFCH_EVT_PORT_OFFLINE = 0x201,
7497+ LPFCH_EVT_PORT_ONLINE = 0x202,
7498+ LPFCH_EVT_PORT_FABRIC = 0x204,
7499+ LPFCH_EVT_LINK_UNKNOWN = 0x500,
7500+ LPFCH_EVT_VENDOR_UNIQUE = 0xffff,
7501+};
7502+
7503+#define ELX_LOOPBACK_HEADER_SZ \
7504+ (size_t)(&((struct lpfc_sli_ct_request *)NULL)->un)
7505+
7506+struct lpfc_host_event {
7507+ uint32_t seq_num;
7508+ enum lpfc_host_event_code event_code;
7509+ uint32_t data;
7510+};
7511+
7512+struct lpfc_timedout_iocb_ctxt {
7513+ struct lpfc_iocbq *rspiocbq;
7514+ struct lpfc_dmabuf *mp;
7515+ struct lpfc_dmabuf *bmp;
7516+ struct lpfc_scsi_buf *lpfc_cmd;
7517+ struct lpfc_dmabufext *outdmp;
7518+ struct lpfc_dmabufext *indmp;
7519+};
7520+
7521+#ifdef __KERNEL__
7522+struct lpfcdfc_host;
7523+
7524+/* Initialize/Un-initialize char device */
7525+int lpfc_cdev_init(void);
7526+void lpfc_cdev_exit(void);
7527+void lpfcdfc_host_del(struct lpfcdfc_host *);
7528+struct lpfcdfc_host *lpfcdfc_host_add(struct pci_dev *, struct Scsi_Host *,
7529+ struct lpfc_hba *);
7530+#endif /* __KERNEL__ */
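
For orientation only, a minimal user-space sketch of driving this ioctl interface; it is not part of the patch. It assumes the definitions above are visible to user space, that a /dev/lpfcdfc node has been created against the dynamic major lpfc_cdev_init() obtains from register_chrdev(), and that board 0 exists; the node path is an assumption, not something this patch creates.

#include <stdio.h>
#include <stdint.h>
#include <string.h>
#include <fcntl.h>
#include <unistd.h>
#include <sys/ioctl.h>
#include "lpfc_ioctl.h"		/* struct lpfcCmdInput, LPFC_DFC_CMD_IOCTL, ... */

int main(void)
{
	struct lpfcCmdInput ci;
	struct DfcRevInfo rev;
	int fd = open("/dev/lpfcdfc", O_RDWR);	/* assumed node path */

	if (fd < 0)
		return 1;
	memset(&ci, 0, sizeof(ci));
	memset(&rev, 0, sizeof(rev));
	ci.lpfc_cmd = LPFC_GET_DFC_REV;		/* per-module cmd, lpfc_brd ignored */
	ci.lpfc_dataout = (char *)&rev;		/* kernel copies DfcRevInfo back here */
	ci.lpfc_outsz = sizeof(rev);
	if (ioctl(fd, LPFC_DFC_CMD_IOCTL, &ci) == 0)
		printf("DFC rev %u.%u\n", rev.a_Major, rev.a_Minor);
	close(fd);
	return 0;
}

Note that lpfcdfc_do_ioctl() works with positive errno values internally and lpfcdfc_ioctl() negates its result, so failures surface to the caller through errno in the usual way.
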
7531--- a/drivers/scsi/lpfc/lpfc_logmsg.h
7532+++ b/drivers/scsi/lpfc/lpfc_logmsg.h
7533@@ -32,6 +32,7 @@
7534 #define LOG_FCP_ERROR 0x1000 /* log errors, not underruns */
7535 #define LOG_LIBDFC 0x2000 /* Libdfc events */
7536 #define LOG_VPORT 0x4000 /* NPIV events */
7537+#define LOG_SECURITY 0x8000 /* FC Security */
7538 #define LOG_ALL_MSG 0xffff /* LOG all messages */
7539
7540 #define lpfc_printf_vlog(vport, level, mask, fmt, arg...) \
7541--- a/drivers/scsi/lpfc/lpfc_mbox.c
7542+++ b/drivers/scsi/lpfc/lpfc_mbox.c
7543@@ -1083,7 +1083,7 @@ lpfc_config_port(struct lpfc_hba *phba,
7544 phba->pcb->feature = FEATURE_INITIAL_SLI2;
7545
7546 /* Setup Mailbox pointers */
7547- phba->pcb->mailBoxSize = sizeof(MAILBOX_t);
7548+ phba->pcb->mailBoxSize = sizeof(MAILBOX_t) + MAILBOX_EXT_SIZE;
7549 offset = (uint8_t *)phba->mbox - (uint8_t *)phba->slim2p.virt;
7550 pdma_addr = phba->slim2p.phys + offset;
7551 phba->pcb->mbAddrHigh = putPaddrHigh(pdma_addr);
7552--- /dev/null
7553+++ b/drivers/scsi/lpfc/lpfc_menlo.c
7554@@ -0,0 +1,1174 @@
7555+/*******************************************************************
7556+ * This file is part of the Emulex Linux Device Driver for *
7557+ * Fibre Channel Host Bus Adapters. *
7558+ * Copyright (C) 2007-2008 Emulex. All rights reserved. *
7559+ * EMULEX and SLI are trademarks of Emulex. *
7560+ * www.emulex.com *
7561+ * *
7562+ * This program is free software; you can redistribute it and/or *
7563+ * modify it under the terms of version 2 of the GNU General *
7564+ * Public License as published by the Free Software Foundation. *
7565+ * This program is distributed in the hope that it will be useful. *
7566+ * ALL EXPRESS OR IMPLIED CONDITIONS, REPRESENTATIONS AND *
7567+ * WARRANTIES, INCLUDING ANY IMPLIED WARRANTY OF MERCHANTABILITY, *
7568+ * FITNESS FOR A PARTICULAR PURPOSE, OR NON-INFRINGEMENT, ARE *
7569+ * DISCLAIMED, EXCEPT TO THE EXTENT THAT SUCH DISCLAIMERS ARE HELD *
7570+ * TO BE LEGALLY INVALID. See the GNU General Public License for *
7571+ * more details, a copy of which can be found in the file COPYING *
7572+ * included with this package. *
7573+ *******************************************************************/
7574+
7575+#include <linux/ctype.h>
7576+#include <linux/delay.h>
7577+#include <linux/pci.h>
7578+#include <linux/interrupt.h>
7579+
7580+#include <scsi/scsi.h>
7581+#include <scsi/scsi_device.h>
7582+#include <scsi/scsi_host.h>
7583+#include <scsi/scsi_tcq.h>
7584+#include <scsi/scsi_transport_fc.h>
7585+
7586+#include "lpfc_hw.h"
7587+#include "lpfc_sli.h"
7588+#include "lpfc_nl.h"
7589+#include "lpfc_disc.h"
7590+#include "lpfc_scsi.h"
7591+#include "lpfc.h"
7592+#include "lpfc_logmsg.h"
7593+#include "lpfc_version.h"
7594+#include "lpfc_compat.h"
7595+#include "lpfc_crtn.h"
7596+#include "lpfc_vport.h"
7597+
7598+#define MENLO_CMD_FW_DOWNLOAD 0x00000002
7599+
7600+static void lpfc_menlo_iocb_timeout_cmpl(struct lpfc_hba *,
7601+ struct lpfc_iocbq *, struct lpfc_iocbq *);
7602+
7603+extern int
7604+__dfc_cmd_data_free(struct lpfc_hba * phba, struct lpfc_dmabufext * mlist);
7605+
7606+extern struct lpfc_dmabufext *
7607+__dfc_cmd_data_alloc(struct lpfc_hba * phba,
7608+ char *indataptr, struct ulp_bde64 * bpl, uint32_t size,
7609+ int nocopydata);
7610+/*
7611+ * The size for the menlo interface is set at 336k because it only uses
7612+ * one bpl. A bpl can contain 85 BDE descriptors. Each BDE can represent
7613+ * up to 4k. I used 84 BDE entries to do this calculation because the
7614+ * 1st sysfs_menlo_write is for just the cmd header which is 12 bytes.
7615+ * size = PAGE_SZ * ((sizeof(bpl) / sizeof(BDE)) - 1);
7616+ */
7617+#define SYSFS_MENLO_ATTR_SIZE 344064
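
One way to read the sizing comment above (an interpretation, not text from the original patch): with 85 BDEs per BPL and the first BDE reserved for the 12-byte command header written by the first sysfs_menlo_write, 84 data BDEs of one 4 KiB page each remain, so

	84 * 4096 = 344064 bytes (336 KiB)

which matches the SYSFS_MENLO_ATTR_SIZE value.
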
7618+typedef struct menlo_get_cmd
7619+{
7620+ uint32_t code; /* Command code */
7621+ uint32_t context; /* Context */
7622+ uint32_t length; /* Max response length */
7623+} menlo_get_cmd_t;
7624+
7625+typedef struct menlo_init_rsp
7626+{
7627+ uint32_t code;
7628+ uint32_t bb_credit; /* Menlo FC BB Credit */
7629+ uint32_t frame_size; /* Menlo FC receive frame size */
7630+ uint32_t fw_version; /* Menlo firmware version */
7631+ uint32_t reset_status; /* Reason for previous reset */
7632+
7633+#define MENLO_RESET_STATUS_NORMAL 0
7634+#define MENLO_RESET_STATUS_PANIC 1
7635+
7636+ uint32_t maint_status; /* Menlo Maintenance Mode status at link up */
7637+
7638+
7639+#define MENLO_MAINTENANCE_MODE_DISABLE 0
7640+#define MENLO_MAINTENANCE_MODE_ENABLE 1
7641+ uint32_t fw_type;
7642+ uint32_t fru_data_valid; /* 0=invalid, 1=valid */
7643+} menlo_init_rsp_t;
7644+
7645+#define MENLO_CMD_GET_INIT 0x00000007
7646+#define MENLO_FW_TYPE_OPERATIONAL 0xABCD0001
7647+#define MENLO_FW_TYPE_GOLDEN 0xABCD0002
7648+#define MENLO_FW_TYPE_DIAG 0xABCD0003
7649+
7650+void
7651+BE_swap32_buffer(void *srcp, uint32_t cnt)
7652+{
7653+ uint32_t *src = srcp;
7654+ uint32_t *dest = srcp;
7655+ uint32_t ldata;
7656+ int i;
7657+
7658+ for (i = 0; i < (int)cnt; i += sizeof (uint32_t)) {
7659+ ldata = *src;
7660+ ldata = cpu_to_le32(ldata);
7661+ *dest = ldata;
7662+ src++;
7663+ dest++;
7664+ }
7665+}
7666+
7667+
7668+static int
7669+lpfc_alloc_menlo_genrequest64(struct lpfc_hba * phba,
7670+ struct lpfc_menlo_genreq64 *sysfs_menlo,
7671+ struct lpfc_sysfs_menlo_hdr *cmdhdr)
7672+{
7673+ struct Scsi_Host *shost = lpfc_shost_from_vport(phba->pport);
7674+ struct ulp_bde64 *bpl = NULL;
7675+ IOCB_t *cmd = NULL, *rsp = NULL;
7676+ struct lpfc_sli *psli = NULL;
7677+ struct lpfc_sli_ring *pring = NULL;
7678+ int rc = 0;
7679+ uint32_t cmdsize;
7680+ uint32_t rspsize;
7681+
7682+ psli = &phba->sli;
7683+ pring = &psli->ring[LPFC_ELS_RING];
7684+
7685+ if (!(psli->sli_flag & LPFC_SLI2_ACTIVE)) {
7686+ rc = EACCES;
7687+ goto send_menlomgmt_cmd_exit;
7688+ }
7689+
7690+ if (!sysfs_menlo) {
7691+ rc = EINVAL;
7692+ goto send_menlomgmt_cmd_exit;
7693+ }
7694+
7695+ cmdsize = cmdhdr->cmdsize;
7696+ rspsize = cmdhdr->rspsize;
7697+
7698+ if (!cmdsize || !rspsize || (cmdsize + rspsize > 80 * BUF_SZ_4K)) {
7699+ rc = ERANGE;
7700+ goto send_menlomgmt_cmd_exit;
7701+ }
7702+
7703+ spin_lock_irq(shost->host_lock);
7704+ sysfs_menlo->cmdiocbq = lpfc_sli_get_iocbq(phba);
7705+ if (!sysfs_menlo->cmdiocbq) {
7706+ rc = ENOMEM;
7707+ spin_unlock_irq(shost->host_lock);
7708+ lpfc_printf_log(phba, KERN_INFO, LOG_LIBDFC,
7709+ "1202 alloc_menlo_genreq64: couldn't alloc cmdiocbq\n");
7710+ goto send_menlomgmt_cmd_exit;
7711+ }
7712+ cmd = &sysfs_menlo->cmdiocbq->iocb;
7713+
7714+ sysfs_menlo->rspiocbq = lpfc_sli_get_iocbq(phba);
7715+ if (!sysfs_menlo->rspiocbq) {
7716+ rc = ENOMEM;
7717+ spin_unlock_irq(shost->host_lock);
7718+ lpfc_printf_log(phba, KERN_INFO, LOG_LIBDFC,
7719+ "1203 alloc_menlo_genreq64: couldn't alloc rspiocbq\n");
7720+ goto send_menlomgmt_cmd_exit;
7721+ }
7722+ spin_unlock_irq(shost->host_lock);
7723+
7724+ rsp = &sysfs_menlo->rspiocbq->iocb;
7725+
7726+
7727+ sysfs_menlo->bmp = kmalloc(sizeof (struct lpfc_dmabuf), GFP_KERNEL);
7728+ if (!sysfs_menlo->bmp) {
7729+ rc = ENOMEM;
7730+ lpfc_printf_log(phba, KERN_INFO, LOG_LIBDFC,
7731+ "1204 alloc_menlo_genreq64: couldn't alloc bmp\n");
7732+ goto send_menlomgmt_cmd_exit;
7733+ }
7734+
7735+ spin_lock_irq(shost->host_lock);
7736+ sysfs_menlo->bmp->virt = lpfc_mbuf_alloc(phba, 0,
7737+ &sysfs_menlo->bmp->phys);
7738+ if (!sysfs_menlo->bmp->virt) {
7739+ rc = ENOMEM;
7740+ spin_unlock_irq(shost->host_lock);
7741+ lpfc_printf_log(phba, KERN_INFO, LOG_LIBDFC,
7742+ "1205 alloc_menlo_genreq64: couldn't alloc bpl\n");
7743+ goto send_menlomgmt_cmd_exit;
7744+ }
7745+ spin_unlock_irq(shost->host_lock);
7746+
7747+ INIT_LIST_HEAD(&sysfs_menlo->bmp->list);
7748+ bpl = (struct ulp_bde64 *) sysfs_menlo->bmp->virt;
7749+ memset((uint8_t*)bpl, 0 , 1024);
7750+ sysfs_menlo->indmp = __dfc_cmd_data_alloc(phba, NULL, bpl, cmdsize, 1);
7751+ if (!sysfs_menlo->indmp) {
7752+ rc = ENOMEM;
7753+ lpfc_printf_log(phba, KERN_INFO, LOG_LIBDFC,
7754+ "1206 alloc_menlo_genreq64: couldn't alloc cmdbuf\n");
7755+ goto send_menlomgmt_cmd_exit;
7756+ }
7757+ sysfs_menlo->cmdbpl = bpl;
7758+ INIT_LIST_HEAD(&sysfs_menlo->inhead);
7759+ list_add_tail(&sysfs_menlo->inhead, &sysfs_menlo->indmp->dma.list);
7760+
7761+ /* flag contains total number of BPLs for xmit */
7762+
7763+ bpl += sysfs_menlo->indmp->flag;
7764+
7765+ sysfs_menlo->outdmp = __dfc_cmd_data_alloc(phba, NULL, bpl, rspsize, 0);
7766+ if (!sysfs_menlo->outdmp) {
7767+ rc = ENOMEM;
7768+ lpfc_printf_log(phba, KERN_INFO, LOG_LIBDFC,
7769+ "1207 alloc_menlo_genreq64: couldn't alloc rspbuf\n");
7770+ goto send_menlomgmt_cmd_exit;
7771+ }
7772+ INIT_LIST_HEAD(&sysfs_menlo->outhead);
7773+ list_add_tail(&sysfs_menlo->outhead, &sysfs_menlo->outdmp->dma.list);
7774+
7775+ cmd->un.genreq64.bdl.ulpIoTag32 = 0;
7776+ cmd->un.genreq64.bdl.addrHigh = putPaddrHigh(sysfs_menlo->bmp->phys);
7777+ cmd->un.genreq64.bdl.addrLow = putPaddrLow(sysfs_menlo->bmp->phys);
7778+ cmd->un.genreq64.bdl.bdeFlags = BUFF_TYPE_BLP_64;
7779+ cmd->un.genreq64.bdl.bdeSize =
7780+ (sysfs_menlo->outdmp->flag + sysfs_menlo->indmp->flag)
7781+ * sizeof(struct ulp_bde64);
7782+ cmd->ulpCommand = CMD_GEN_REQUEST64_CR;
7783+ cmd->un.genreq64.w5.hcsw.Fctl = (SI | LA);
7784+ cmd->un.genreq64.w5.hcsw.Dfctl = 0;
7785+ cmd->un.genreq64.w5.hcsw.Rctl = FC_FCP_CMND;
7786+ cmd->un.genreq64.w5.hcsw.Type = MENLO_TRANSPORT_TYPE; /* 0xfe */
7787+ cmd->un.ulpWord[4] = MENLO_DID; /* 0x0000FC0E */
7788+ cmd->ulpBdeCount = 1;
7789+ cmd->ulpClass = CLASS3;
7790+ cmd->ulpContext = MENLO_CONTEXT; /* 0 */
7791+ cmd->ulpOwner = OWN_CHIP;
7792+ cmd->ulpPU = MENLO_PU; /* 3 */
7793+	cmd->ulpLe = 1;			/* LE bit: last element in the BDL */
7794+ sysfs_menlo->cmdiocbq->vport = phba->pport;
7795+ sysfs_menlo->cmdiocbq->context1 = NULL;
7796+ sysfs_menlo->cmdiocbq->iocb_flag |= LPFC_IO_LIBDFC;
7797+ /* We want the firmware to timeout before we do */
7798+ cmd->ulpTimeout = MENLO_TIMEOUT - 5;
7799+
7800+ sysfs_menlo->timeout = cmd->ulpTimeout;
7801+
7802+send_menlomgmt_cmd_exit:
7803+ return rc;
7804+}
7805+
7806+void
7807+sysfs_menlo_genreq_free(struct lpfc_hba *phba,
7808+ struct lpfc_menlo_genreq64 *sysfs_menlo)
7809+{
7810+ if ( !list_empty(&sysfs_menlo->outhead))
7811+ list_del_init( &sysfs_menlo->outhead);
7812+
7813+ if (!list_empty(&sysfs_menlo->inhead))
7814+ list_del_init( &sysfs_menlo->inhead);
7815+
7816+ if (sysfs_menlo->outdmp) {
7817+ __dfc_cmd_data_free(phba, sysfs_menlo->outdmp);
7818+ sysfs_menlo->outdmp = NULL;
7819+ }
7820+ if (sysfs_menlo->indmp) {
7821+ __dfc_cmd_data_free(phba, sysfs_menlo->indmp);
7822+ sysfs_menlo->indmp = NULL;
7823+ }
7824+ if (sysfs_menlo->bmp) {
7825+ lpfc_mbuf_free(phba, sysfs_menlo->bmp->virt,
7826+ sysfs_menlo->bmp->phys);
7827+ kfree(sysfs_menlo->bmp);
7828+ sysfs_menlo->bmp = NULL;
7829+ }
7830+ if (sysfs_menlo->rspiocbq) {
7831+ lpfc_sli_release_iocbq(phba, sysfs_menlo->rspiocbq);
7832+ sysfs_menlo->rspiocbq = NULL;
7833+ }
7834+
7835+ if (sysfs_menlo->cmdiocbq) {
7836+ lpfc_sli_release_iocbq(phba, sysfs_menlo->cmdiocbq);
7837+ sysfs_menlo->cmdiocbq = NULL;
7838+ }
7839+}
7840+
7841+static void
7842+sysfs_menlo_idle(struct lpfc_hba *phba,
7843+ struct lpfc_sysfs_menlo *sysfs_menlo)
7844+{
7845+ struct Scsi_Host *shost = lpfc_shost_from_vport(phba->pport);
7846+
7847+ spin_lock_irq(&phba->hbalock);
7848+ list_del_init(&sysfs_menlo->list);
7849+ spin_unlock_irq(&phba->hbalock);
7850+ spin_lock_irq(shost->host_lock);
7851+
7852+ if (sysfs_menlo->cr.cmdiocbq)
7853+ sysfs_menlo_genreq_free(phba, &sysfs_menlo->cr);
7854+ if (sysfs_menlo->cx.cmdiocbq)
7855+ sysfs_menlo_genreq_free(phba, &sysfs_menlo->cx);
7856+
7857+ spin_unlock_irq(shost->host_lock);
7858+ kfree(sysfs_menlo);
7859+}
7860+
7861+static void
7862+lpfc_menlo_iocb_timeout_cmpl(struct lpfc_hba *phba,
7863+ struct lpfc_iocbq *cmdq,
7864+ struct lpfc_iocbq *rspq)
7865+{
7866+ lpfc_printf_log(phba, KERN_INFO, LOG_LIBDFC,
7867+ "1241 Menlo IOCB timeout: deleting %p\n",
7868+ cmdq->context3);
7869+ sysfs_menlo_idle(phba, (struct lpfc_sysfs_menlo *)cmdq->context3);
7870+}
7871+
7872+static void
7873+lpfc_menlo_iocb_cmpl(struct lpfc_hba *phba,
7874+ struct lpfc_iocbq *cmdq,
7875+ struct lpfc_iocbq *rspq)
7876+{
7877+ struct lpfc_sysfs_menlo * sysfs_menlo =
7878+ (struct lpfc_sysfs_menlo *)cmdq->context2;
7879+ struct lpfc_dmabufext *mlast = NULL;
7880+ IOCB_t *rsp = NULL;
7881+ IOCB_t *cmd = NULL;
7882+ uint32_t * tmpptr = NULL;
7883+ menlo_init_rsp_t *mlorsp = NULL;
7884+
7885+ lpfc_printf_log(phba, KERN_INFO, LOG_LIBDFC,
7886+ "1254 Menlo IOCB complete: %p\n",
7887+ cmdq->context2);
7888+ rsp = &rspq->iocb;
7889+ cmd = &cmdq->iocb;
7890+ if ( !sysfs_menlo ) {
7891+ lpfc_printf_log(phba, KERN_INFO, LOG_LIBDFC,
7892+ "1255 Menlo IOCB complete:NULL CTX \n");
7893+ return;
7894+ }
7895+ if ( rsp->ulpStatus ) {
7896+ lpfc_printf_log(phba, KERN_INFO, LOG_LIBDFC,
7897+ "1242 iocb async cmpl: ulpStatus 0x%x "
7898+ "ulpWord[4] 0x%x\n",
7899+ rsp->ulpStatus, rsp->un.ulpWord[4]);
7900+ lpfc_printf_log(phba, KERN_INFO, LOG_LIBDFC,
7901+ "1260 cr:%.08x %.08x %.08x %.08x "
7902+ "%.08x %.08x %.08x %.08x\n",
7903+ cmd->un.ulpWord[0], cmd->un.ulpWord[1],
7904+ cmd->un.ulpWord[2], cmd->un.ulpWord[3],
7905+ cmd->un.ulpWord[4], cmd->un.ulpWord[5],
7906+ *(uint32_t *)&cmd->un1, *((uint32_t *)&cmd->un1 + 1));
7907+ mlast = list_get_first(&sysfs_menlo->cr.inhead,
7908+ struct lpfc_dmabufext,
7909+ dma.list);
7910+ if (!mlast) {
7911+ lpfc_printf_log(phba, KERN_ERR, LOG_LIBDFC,
7912+ "1231 bad bpl:\n");
7913+ goto lpfc_menlo_iocb_cmpl_ext;
7914+ }
7915+ tmpptr = ( uint32_t *) mlast->dma.virt;
7916+ BE_swap32_buffer ((uint8_t *) tmpptr,
7917+ sizeof( menlo_get_cmd_t));
7918+ lpfc_printf_log(phba, KERN_INFO, LOG_LIBDFC,
7919+ "1261 cmd:%.08x %.08x %.08x\n",
7920+ *tmpptr, *(tmpptr+1), *(tmpptr+2));
7921+ goto lpfc_menlo_iocb_cmpl_ext;
7922+ }
7923+
7924+ mlast = list_get_first(&sysfs_menlo->cr.outhead,
7925+ struct lpfc_dmabufext,
7926+ dma.list);
7927+ if (!mlast) {
7928+ lpfc_printf_log(phba, KERN_INFO, LOG_LIBDFC,
7929+ "1256 bad bpl:\n");
7930+ goto lpfc_menlo_iocb_cmpl_ext;
7931+ }
7932+ mlorsp = ( menlo_init_rsp_t *) mlast->dma.virt;
7933+ BE_swap32_buffer ((uint8_t *) mlorsp,
7934+ sizeof( menlo_init_rsp_t));
7935+
7936+ if (mlorsp->code != 0) {
7937+ lpfc_printf_log (phba, KERN_ERR, LOG_LINK_EVENT,
7938+ "1243 Menlo command error. code=%d.\n", mlorsp->code);
7939+ goto lpfc_menlo_iocb_cmpl_ext;
7940+
7941+ }
7942+
7943+ switch (mlorsp->fw_type)
7944+ {
7945+ case MENLO_FW_TYPE_OPERATIONAL: /* Menlo Operational */
7946+ break;
7947+ case MENLO_FW_TYPE_GOLDEN: /* Menlo Golden */
7948+ lpfc_printf_log (phba, KERN_ERR, LOG_LINK_EVENT,
7949+ "1246 FCoE chip is running golden firmware. "
7950+ "Update FCoE chip firmware immediately %x\n",
7951+ mlorsp->fw_type);
7952+ break;
7953+ case MENLO_FW_TYPE_DIAG: /* Menlo Diag */
7954+ lpfc_printf_log (phba, KERN_ERR, LOG_LINK_EVENT,
7955+ "1247 FCoE chip is running diagnostic "
7956+ "firmware. Operational use suspended. %x\n",
7957+ mlorsp->fw_type);
7958+ break;
7959+ default:
7960+ lpfc_printf_log (phba, KERN_ERR, LOG_LINK_EVENT,
7961+ "1248 FCoE chip is running unknown "
7962+ "firmware x%x.\n", mlorsp->fw_type);
7963+ break;
7964+ }
7965+ if (!mlorsp->fru_data_valid
7966+ && (mlorsp->fw_type == MENLO_FW_TYPE_OPERATIONAL)
7967+ && (!mlorsp->maint_status))
7968+ lpfc_printf_log (phba, KERN_ERR, LOG_LINK_EVENT,
7969+			"1249 Invalid FRU data found on adapter. "
7970+ "Return adapter to Emulex for repair\n");
7971+
7972+lpfc_menlo_iocb_cmpl_ext:
7973+ sysfs_menlo_idle(phba, (struct lpfc_sysfs_menlo *)cmdq->context2);
7974+}
7975+
7976+static struct lpfc_sysfs_menlo *
7977+lpfc_get_sysfs_menlo(struct lpfc_hba *phba, uint8_t create)
7978+{
7979+ struct lpfc_sysfs_menlo *sysfs_menlo;
7980+ pid_t pid;
7981+
7982+ pid = current->pid;
7983+
7984+ spin_lock_irq(&phba->hbalock);
7985+ list_for_each_entry(sysfs_menlo, &phba->sysfs_menlo_list, list) {
7986+ if (sysfs_menlo->pid == pid) {
7987+ spin_unlock_irq(&phba->hbalock);
7988+ return sysfs_menlo;
7989+ }
7990+ }
7991+ if (!create) {
7992+ spin_unlock_irq(&phba->hbalock);
7993+ return NULL;
7994+ }
7995+ spin_unlock_irq(&phba->hbalock);
7996+ sysfs_menlo = kzalloc(sizeof(struct lpfc_sysfs_menlo),
7997+ GFP_KERNEL);
7998+ if (!sysfs_menlo)
7999+ return NULL;
8000+ sysfs_menlo->state = SMENLO_IDLE;
8001+ sysfs_menlo->pid = pid;
8002+ spin_lock_irq(&phba->hbalock);
8003+ list_add_tail(&sysfs_menlo->list, &phba->sysfs_menlo_list);
8004+
8005+ spin_unlock_irq(&phba->hbalock);
8006+ return sysfs_menlo;
8007+
8008+}
8009+
8010+static ssize_t
8011+lpfc_menlo_write(struct lpfc_hba *phba,
8012+ char *buf, loff_t off, size_t count)
8013+{
8014+ struct lpfc_sysfs_menlo *sysfs_menlo;
8015+ struct lpfc_dmabufext *mlast = NULL;
8016+ struct lpfc_sysfs_menlo_hdr cmdhdrCR;
8017+ struct lpfc_menlo_genreq64 *genreq = NULL;
8018+ loff_t temp_off = 0;
8019+ struct ulp_bde64 *bpl = NULL;
8020+ int mlastcnt = 0;
8021+ uint32_t * tmpptr = NULL;
8022+ uint32_t addr_high = 0;
8023+ uint32_t addr_low = 0;
8024+ int hdr_offset = sizeof(struct lpfc_sysfs_menlo_hdr);
8025+
8026+ if (off % 4 || count % 4 || (unsigned long)buf % 4)
8027+ return -EINVAL;
8028+
8029+ if (count == 0)
8030+ return 0;
8031+
8032+ if (off == 0) {
8033+ ssize_t rc;
8034+ struct lpfc_sysfs_menlo_hdr *cmdhdr =
8035+ (struct lpfc_sysfs_menlo_hdr *)buf;
8036+ lpfc_printf_log(phba, KERN_INFO, LOG_LIBDFC,
8037+ "1208 menlo_write: cmd %x cmdsz %d rspsz %d\n",
8038+ cmdhdr->cmd, cmdhdr->cmdsize,
8039+ cmdhdr->rspsize);
8040+ if (count != sizeof(struct lpfc_sysfs_menlo_hdr)) {
8041+ lpfc_printf_log(phba, KERN_ERR, LOG_LIBDFC,
8042+ "1210 Invalid cmd size: cmd %x "
8043+ "cmdsz %d rspsz %d\n",
8044+ cmdhdr->cmd, cmdhdr->cmdsize,
8045+ cmdhdr->rspsize);
8046+ return -EINVAL;
8047+ }
8048+
8049+ sysfs_menlo = lpfc_get_sysfs_menlo(phba, 1);
8050+ if (!sysfs_menlo)
8051+ return -ENOMEM;
8052+ sysfs_menlo->cmdhdr = *cmdhdr;
8053+ if (cmdhdr->cmd == MENLO_CMD_FW_DOWNLOAD) {
8054+ sysfs_menlo->cmdhdr.cmdsize
8055+ -= sizeof(struct lpfc_sysfs_menlo_hdr);
8056+
8057+ rc = lpfc_alloc_menlo_genrequest64(phba,
8058+ &sysfs_menlo->cx,
8059+ &sysfs_menlo->cmdhdr);
8060+ if (rc != 0) {
8061+ lpfc_printf_log(phba, KERN_ERR, LOG_LIBDFC,
8062+ "1211 genreq alloc failed: %d\n",
8063+ (int) rc);
8064+ sysfs_menlo_idle(phba,sysfs_menlo);
8065+ return -ENOMEM;
8066+ }
8067+ cmdhdrCR.cmd = cmdhdr->cmd;
8068+ cmdhdrCR.cmdsize = sizeof(struct lpfc_sysfs_menlo_hdr);
8069+ cmdhdrCR.rspsize = 4;
8070+ } else
8071+ cmdhdrCR = *cmdhdr;
8072+
8073+ rc = lpfc_alloc_menlo_genrequest64(phba,
8074+ &sysfs_menlo->cr,&cmdhdrCR);
8075+ if (rc != 0) {
8076+ lpfc_printf_log(phba, KERN_ERR, LOG_LIBDFC,
8077+ "1223 menlo_write: couldn't alloc genreq %d\n",
8078+ (int) rc);
8079+ sysfs_menlo_idle(phba,sysfs_menlo);
8080+ return -ENOMEM;
8081+ }
8082+ } else {
8083+ sysfs_menlo = lpfc_get_sysfs_menlo(phba, 0);
8084+ if (!sysfs_menlo)
8085+ return -EAGAIN;
8086+ lpfc_printf_log(phba, KERN_INFO, LOG_LIBDFC,
8087+ "1212 menlo_write: sysfs_menlo %p cmd %x cmdsz %d"
8088+ " rspsz %d cr-off %d cx-off %d count %d\n",
8089+ sysfs_menlo,
8090+ sysfs_menlo->cmdhdr.cmd,
8091+ sysfs_menlo->cmdhdr.cmdsize,
8092+ sysfs_menlo->cmdhdr.rspsize,
8093+ (int)sysfs_menlo->cr.offset,
8094+ (int)sysfs_menlo->cx.offset,
8095+ (int)count);
8096+ }
8097+
8098+ if ((count + sysfs_menlo->cr.offset) > sysfs_menlo->cmdhdr.cmdsize) {
8099+ if ( sysfs_menlo->cmdhdr.cmdsize != 4) {
8100+ lpfc_printf_log(phba, KERN_ERR, LOG_LIBDFC,
8101+ "1213 FCoE cmd overflow: off %d + cnt %d > cmdsz %d\n",
8102+ (int)sysfs_menlo->cr.offset,
8103+ (int)count,
8104+ (int)sysfs_menlo->cmdhdr.cmdsize);
8105+ sysfs_menlo_idle(phba, sysfs_menlo);
8106+ return -ERANGE;
8107+ }
8108+ }
8109+
8110+ spin_lock_irq(&phba->hbalock);
8111+ if (sysfs_menlo->cmdhdr.cmd == MENLO_CMD_FW_DOWNLOAD)
8112+ genreq = &sysfs_menlo->cx;
8113+ else
8114+ genreq = &sysfs_menlo->cr;
8115+
8116+ if (off == 0) {
8117+ if (sysfs_menlo->cmdhdr.cmd == MENLO_CMD_FW_DOWNLOAD) {
8118+ tmpptr = NULL;
8119+ genreq = &sysfs_menlo->cr;
8120+
8121+ if (!mlast) {
8122+ mlast = list_get_first(&genreq->inhead,
8123+ struct lpfc_dmabufext,
8124+ dma.list);
8125+ }
8126+ if (mlast) {
8127+ bpl = genreq->cmdbpl;
8128+ memcpy((uint8_t *) mlast->dma.virt, buf, count);
8129+ genreq->offset += count;
8130+ tmpptr = (uint32_t *)mlast->dma.virt;
8131+ lpfc_printf_log(phba, KERN_INFO, LOG_LIBDFC,
8132+ "1258 cmd %x cmdsz %d rspsz %d "
8133+ "copied %d addrL:%x addrH:%x\n",
8134+ *tmpptr,
8135+ *(tmpptr+1),
8136+ *(tmpptr+2),
8137+ (int)count,
8138+ bpl->addrLow,bpl->addrHigh);
8139+ } else {
8140+ lpfc_printf_log(phba, KERN_ERR, LOG_LIBDFC,
8141+ "1230 Could not find buffer for FCoE"
8142+ " cmd:off %d indmp %p %d\n", (int)off,
8143+ genreq->indmp,(int)count);
8144+ }
8145+ }
8146+
8147+ sysfs_menlo->state = SMENLO_WRITING;
8148+ spin_unlock_irq(&phba->hbalock);
8149+ return count;
8150+ } else {
8151+ ssize_t adj_off = off - sizeof(struct lpfc_sysfs_menlo_hdr);
8152+ int found = 0;
8153+ if (sysfs_menlo->state != SMENLO_WRITING ||
8154+ genreq->offset != adj_off) {
8155+ spin_unlock_irq(&phba->hbalock);
8156+ sysfs_menlo_idle(phba, sysfs_menlo);
8157+ return -EAGAIN;
8158+ }
8159+ mlast = NULL;
8160+ temp_off = sizeof(struct lpfc_sysfs_menlo_hdr);
8161+ if (genreq->indmp) {
8162+ list_for_each_entry(mlast,
8163+ &genreq->inhead, dma.list) {
8164+ if (temp_off == off)
8165+ break;
8166+ else
8167+ temp_off += BUF_SZ_4K;
8168+ mlastcnt++;
8169+ }
8170+ }
8171+ addr_low = le32_to_cpu( putPaddrLow(mlast->dma.phys) );
8172+ addr_high = le32_to_cpu( putPaddrHigh(mlast->dma.phys) );
8173+ bpl = genreq->cmdbpl;
8174+ bpl += mlastcnt;
8175+ if (bpl->addrLow != addr_low || bpl->addrHigh != addr_high) {
8176+ mlast = NULL;
8177+ list_for_each_entry(mlast,
8178+ &genreq->inhead, dma.list) {
8179+
8180+ addr_low = le32_to_cpu(
8181+ putPaddrLow(mlast->dma.phys) );
8182+ addr_high = le32_to_cpu(
8183+ putPaddrHigh(mlast->dma.phys) );
8184+ if (bpl->addrLow == addr_low
8185+ && bpl->addrHigh == addr_high) {
8186+ found = 1;
8187+ break;
8188+ }
8189+ if ( mlastcnt < 3 )
8190+ lpfc_printf_log(phba, KERN_INFO, LOG_LIBDFC,
8191+ "1234 menlo_write: off:%d "
8192+ " mlastcnt:%d addl:%x addl:%x "
8193+ " addrh:%x addrh:%x mlast:%p\n",
8194+ (int)genreq->offset,
8195+ mlastcnt,
8196+ bpl->addrLow,
8197+ addr_low,
8198+ bpl->addrHigh,
8199+ addr_high,mlast);
8200+ }
8201+ } else
8202+ found = 1;
8203+
8204+ if (!found) {
8205+ lpfc_printf_log(phba, KERN_ERR, LOG_LIBDFC,
8206+ "1235 Could not find buffer for FCoE"
8207+ " cmd: off:%d poff:%d cnt:%d"
8208+ " mlastcnt:%d addl:%x addh:%x mdsz:%d \n",
8209+ (int)genreq->offset,
8210+ (int)off,
8211+ (int)count,
8212+ mlastcnt,
8213+ bpl->addrLow,
8214+ bpl->addrHigh,
8215+ (int)sysfs_menlo->cmdhdr.cmdsize);
8216+ mlast = NULL;
8217+ }
8218+
8219+ }
8220+
8221+ if (mlast) {
8222+ if (sysfs_menlo->cmdhdr.cmd == MENLO_CMD_FW_DOWNLOAD ) {
8223+ bpl = genreq->cmdbpl;
8224+ bpl += mlastcnt;
8225+ tmpptr = (uint32_t *)mlast->dma.virt;
8226+ if ( genreq->offset < hdr_offset ) {
8227+ memcpy((uint8_t *) mlast->dma.virt,
8228+ buf+hdr_offset,
8229+ count-hdr_offset);
8230+ bpl->tus.f.bdeSize = (ushort)count-hdr_offset;
8231+ mlast->size = (ushort)count-hdr_offset;
8232+ } else {
8233+ memcpy((uint8_t *) mlast->dma.virt, buf, count);
8234+ bpl->tus.f.bdeSize = (ushort)count;
8235+ mlast->size = (ushort)count;
8236+ }
8237+ bpl->tus.f.bdeFlags = BUFF_TYPE_BDE_64;
8238+ bpl->tus.w = le32_to_cpu(bpl->tus.w);
8239+
8240+ } else
8241+ memcpy((uint8_t *) mlast->dma.virt, buf, count);
8242+
8243+ if (sysfs_menlo->cmdhdr.cmd == MENLO_CMD_FW_DOWNLOAD
8244+ && genreq->offset < hdr_offset) {
8245+ if (sysfs_menlo->cr.indmp
8246+ && sysfs_menlo->cr.indmp->dma.virt) {
8247+ mlast = sysfs_menlo->cr.indmp;
8248+ memcpy((uint8_t *) mlast->dma.virt,
8249+ buf, hdr_offset);
8250+ tmpptr = (uint32_t *)mlast->dma.virt;
8251+ lpfc_printf_log(phba, KERN_INFO, LOG_LIBDFC,
8252+ "1237 cmd %x cmd1 %x cmd2 %x "
8253+ "copied %d\n",
8254+ *tmpptr,
8255+ *(tmpptr+1),
8256+ *(tmpptr+2),
8257+ hdr_offset);
8258+ }
8259+ }
8260+ genreq->offset += count;
8261+ } else {
8262+ spin_unlock_irq(&phba->hbalock);
8263+ sysfs_menlo_idle(phba,sysfs_menlo);
8264+ return -ERANGE;
8265+ }
8266+
8267+ spin_unlock_irq(&phba->hbalock);
8268+ return count;
8269+
8270+}
8271+
8272+
8273+static ssize_t
8274+sysfs_menlo_write(struct kobject *kobj, struct bin_attribute *bin_attr,
8275+ char *buf, loff_t off, size_t count)
8276+{
8277+ struct device *dev = container_of(kobj, struct device, kobj);
8278+ struct Scsi_Host *shost = class_to_shost(dev);
8279+ struct lpfc_vport *vport = (struct lpfc_vport *) shost->hostdata;
8280+ struct lpfc_hba *phba = vport->phba;
8281+
8282+ return lpfc_menlo_write(phba, buf, off, count);
8283+}
8284+
8285+
8286+static ssize_t
8287+sysfs_menlo_issue_iocb_wait(struct lpfc_hba *phba,
8288+ struct lpfc_menlo_genreq64 *req,
8289+ struct lpfc_sysfs_menlo *sysfs_menlo)
8290+{
8291+ struct lpfc_sli *psli = NULL;
8292+ struct lpfc_sli_ring *pring = NULL;
8293+ int rc = 0;
8294+ IOCB_t *rsp = NULL;
8295+ struct lpfc_iocbq *cmdiocbq = NULL;
8296+
8297+ psli = &phba->sli;
8298+ pring = &psli->ring[LPFC_ELS_RING];
8299+ rsp = &req->rspiocbq->iocb;
8300+ cmdiocbq = req->cmdiocbq;
8301+
8302+ rc = lpfc_sli_issue_iocb_wait(phba, pring, req->cmdiocbq, req->rspiocbq,
8303+ req->timeout);
8304+
8305+ if (rc == IOCB_TIMEDOUT) {
8306+
8307+ cmdiocbq->context2 = NULL;
8308+ cmdiocbq->context3 = sysfs_menlo;
8309+ cmdiocbq->iocb_cmpl = lpfc_menlo_iocb_timeout_cmpl;
8310+ lpfc_printf_log(phba, KERN_WARNING, LOG_LIBDFC,
8311+ "1227 FCoE IOCB TMO: handler set for %p\n",
8312+ cmdiocbq->context3);
8313+ return -EACCES;
8314+ }
8315+
8316+ if (rc != IOCB_SUCCESS) {
8317+ rc = -EFAULT;
8318+ lpfc_printf_log(phba, KERN_ERR, LOG_LIBDFC,
8319+ "1216 FCoE IOCB failed: off %d rc=%d \n",
8320+ (int)req->offset, rc);
8321+ goto sysfs_menlo_issue_iocb_wait_exit;
8322+ }
8323+
8324+ if (rsp->ulpStatus) {
8325+ if (rsp->ulpStatus == IOSTAT_LOCAL_REJECT) {
8326+ switch (rsp->un.ulpWord[4] & 0xff) {
8327+ case IOERR_SEQUENCE_TIMEOUT:
8328+ rc = -ETIMEDOUT;
8329+ break;
8330+ case IOERR_INVALID_RPI:
8331+ rc = -EFAULT;
8332+ break;
8333+ default:
8334+ rc = -EFAULT;
8335+ break;
8336+ }
8337+ lpfc_printf_log(phba, KERN_INFO, LOG_LIBDFC,
8338+ "1217 mlo_issueIocb:2 off %d rc=%d "
8339+ "ulpWord[4] 0x%x\n",
8340+ (int)req->offset, rc, rsp->un.ulpWord[4]);
8341+ }
8342+ }
8343+sysfs_menlo_issue_iocb_wait_exit:
8344+ return rc;
8345+}
8346+
8347+
8348+static ssize_t
8349+sysfs_menlo_issue_iocb(struct lpfc_hba *phba, struct lpfc_menlo_genreq64 *req,
8350+ struct lpfc_sysfs_menlo *sysfs_menlo)
8351+{
8352+ struct lpfc_sli *psli = NULL;
8353+ struct lpfc_sli_ring *pring = NULL;
8354+ int rc = 0;
8355+ IOCB_t *rsp = NULL;
8356+ struct lpfc_iocbq *cmdiocbq = NULL;
8357+
8358+ psli = &phba->sli;
8359+ pring = &psli->ring[LPFC_ELS_RING];
8360+ rsp = &req->rspiocbq->iocb;
8361+ cmdiocbq = req->cmdiocbq;
8362+ cmdiocbq->context2 = sysfs_menlo;
8363+ cmdiocbq->iocb_cmpl = lpfc_menlo_iocb_cmpl;
8364+ lpfc_printf_log(phba, KERN_WARNING, LOG_LIBDFC,
8365+ "1257 lpfc_menlo_issue_iocb: handler set for %p\n",
8366+ cmdiocbq->context3);
8367+
8368+ rc = lpfc_sli_issue_iocb(phba, pring, req->cmdiocbq, 0);
8369+
8370+ if (rc == IOCB_TIMEDOUT) {
8371+
8372+ cmdiocbq->context2 = NULL;
8373+ cmdiocbq->context3 = sysfs_menlo;
8374+ cmdiocbq->iocb_cmpl = lpfc_menlo_iocb_timeout_cmpl;
8375+ lpfc_printf_log(phba, KERN_WARNING, LOG_LIBDFC,
8376+ "1228 FCoE IOCB TMO: handler set for %p\n",
8377+ cmdiocbq->context3);
8378+ return -EACCES;
8379+ }
8380+
8381+ if (rc != IOCB_SUCCESS) {
8382+ rc = -EFAULT;
8383+ lpfc_printf_log(phba, KERN_ERR, LOG_LIBDFC,
8384+ "1238 FCoE IOCB failed: off %d rc=%d \n",
8385+ (int)req->offset, rc);
8386+ goto sysfs_menlo_issue_iocb_exit;
8387+ }
8388+
8389+ if (rsp->ulpStatus) {
8390+ if (rsp->ulpStatus == IOSTAT_LOCAL_REJECT) {
8391+ switch (rsp->un.ulpWord[4] & 0xff) {
8392+ case IOERR_SEQUENCE_TIMEOUT:
8393+ rc = -ETIMEDOUT;
8394+ break;
8395+ case IOERR_INVALID_RPI:
8396+ rc = -EFAULT;
8397+ break;
8398+ default:
8399+ rc = -EFAULT;
8400+ break;
8401+ }
8402+ lpfc_printf_log(phba, KERN_INFO, LOG_LIBDFC,
8403+ "1239 mlo_issueIocb:2 off %d rc=%d "
8404+ "ulpWord[4] 0x%x\n",
8405+ (int)req->offset, rc, rsp->un.ulpWord[4]);
8406+ }
8407+ }
8408+sysfs_menlo_issue_iocb_exit:
8409+ return rc;
8410+}
8411+
8412+static ssize_t
8413+lpfc_menlo_read(struct lpfc_hba *phba, char *buf, loff_t off, size_t count,
8414+ int wait)
8415+{
8416+ struct lpfc_sli *psli = NULL;
8417+ struct lpfc_sli_ring *pring = NULL;
8418+ int rc = 0;
8419+ struct lpfc_sysfs_menlo *sysfs_menlo;
8420+ struct lpfc_dmabufext *mlast = NULL;
8421+ loff_t temp_off = 0;
8422+ struct lpfc_menlo_genreq64 *genreq = NULL;
8423+ IOCB_t *cmd = NULL, *rsp = NULL;
8424+ uint32_t * uptr = NULL;
8425+
8426+
8427+ psli = &phba->sli;
8428+ pring = &psli->ring[LPFC_ELS_RING];
8429+
8430+ if (off > SYSFS_MENLO_ATTR_SIZE)
8431+ return -ERANGE;
8432+
8433+ if ((count + off) > SYSFS_MENLO_ATTR_SIZE)
8434+ count = SYSFS_MENLO_ATTR_SIZE - off;
8435+
8436+ if (off % 4 || count % 4 || (unsigned long)buf % 4)
8437+ return -EINVAL;
8438+
8439+ if (off && count == 0)
8440+ return 0;
8441+
8442+ sysfs_menlo = lpfc_get_sysfs_menlo(phba, 0);
8443+
8444+ if (!sysfs_menlo)
8445+ return -EPERM;
8446+
8447+ if (!(psli->sli_flag & LPFC_SLI2_ACTIVE)) {
8448+ sysfs_menlo_idle(phba, sysfs_menlo);
8449+ lpfc_printf_log(phba, KERN_ERR, LOG_LIBDFC,
8450+ "1214 Can not issue FCoE cmd,"
8451+			" SLI not active: off %d rc= -EACCES\n",
8452+ (int)off);
8453+ return -EACCES;
8454+ }
8455+
8456+
8457+ if ((phba->link_state < LPFC_LINK_UP)
8458+ && !(psli->sli_flag & LPFC_MENLO_MAINT)
8459+ && wait) {
8460+ rc = -EPERM;
8461+ lpfc_printf_log(phba, KERN_ERR, LOG_LIBDFC,
8462+ "1215 Can not issue FCoE cmd:"
8463+ " not ready or not in maint mode"
8464+ " off %d rc=%d \n",
8465+ (int)off, rc);
8466+ spin_lock_irq(&phba->hbalock);
8467+ goto lpfc_menlo_read_err_exit;
8468+ }
8469+
8470+ if (off == 0 && sysfs_menlo->state == SMENLO_WRITING) {
8471+ if (sysfs_menlo->cmdhdr.cmd == MENLO_CMD_FW_DOWNLOAD) {
8472+ spin_lock_irq(&phba->hbalock);
8473+ genreq = &sysfs_menlo->cr;
8474+ spin_unlock_irq(&phba->hbalock);
8475+ }
8476+ if ( wait )
8477+ rc = sysfs_menlo_issue_iocb_wait(phba,
8478+ &sysfs_menlo->cr,
8479+ sysfs_menlo);
8480+ else {
8481+ rc = sysfs_menlo_issue_iocb(phba,
8482+ &sysfs_menlo->cr,
8483+ sysfs_menlo);
8484+ return rc;
8485+ }
8486+
8487+ spin_lock_irq(&phba->hbalock);
8488+ if (rc < 0) {
8489+ lpfc_printf_log(phba, KERN_WARNING, LOG_LIBDFC,
8490+ "1224 FCoE iocb failed: off %d rc=%d \n",
8491+ (int)off, rc);
8492+ if (rc != -EACCES)
8493+ goto lpfc_menlo_read_err_exit;
8494+ else {
8495+ spin_unlock_irq(&phba->hbalock);
8496+ return rc;
8497+ }
8498+ }
8499+
8500+ if (sysfs_menlo->cmdhdr.cmd == MENLO_CMD_FW_DOWNLOAD) {
8501+ cmd = &sysfs_menlo->cx.cmdiocbq->iocb;
8502+ rsp = &sysfs_menlo->cr.rspiocbq->iocb;
8503+ mlast = list_get_first(&sysfs_menlo->cr.outhead,
8504+ struct lpfc_dmabufext,
8505+ dma.list);
8506+ if ( *((uint32_t *) mlast->dma.virt) != 0 ) {
8507+ memcpy(buf,(uint8_t *) mlast->dma.virt, count);
8508+ goto lpfc_menlo_read_err_exit;
8509+ }
8510+ mlast = NULL;
8511+
8512+ cmd->ulpCommand = CMD_GEN_REQUEST64_CX;
8513+ cmd->ulpContext = rsp->ulpContext;
8514+ cmd->ulpPU = 1; /* RelOffset */
8515+ cmd->un.ulpWord[4] = 0; /* offset 0 */
8516+
8517+ spin_unlock_irq(&phba->hbalock);
8518+ rc = sysfs_menlo_issue_iocb_wait(phba, &sysfs_menlo->cx,
8519+ sysfs_menlo);
8520+ spin_lock_irq(&phba->hbalock);
8521+ if (rc < 0) {
8522+ uptr = (uint32_t *) rsp;
8523+
8524+ lpfc_printf_log(phba, KERN_INFO, LOG_LIBDFC,
8525+ "1225 menlo_read: off %d rc=%d "
8526+ "rspxri %d cmdxri %d \n",
8527+ (int)off, rc, rsp->ulpContext,
8528+ cmd->ulpContext);
8529+ uptr = (uint32_t *)
8530+ &sysfs_menlo->cr.cmdiocbq->iocb;
8531+ lpfc_printf_log(phba, KERN_INFO, LOG_LIBDFC,
8532+ "1236 cr:%.08x %.08x %.08x %.08x "
8533+ "%.08x %.08x %.08x %.08x %.08x\n",
8534+ *uptr, *(uptr+1), *(uptr+2),
8535+ *(uptr+3), *(uptr+4), *(uptr+5),
8536+ *(uptr+6), *(uptr+7), *(uptr+8));
8537+ uptr = (uint32_t *)rsp;
8538+ lpfc_printf_log(phba, KERN_INFO, LOG_LIBDFC,
8539+ "1232 cr-rsp:%.08x %.08x %.08x %.08x "
8540+ "%.08x %.08x %.08x %.08x %.08x\n",
8541+ *uptr, *(uptr+1), *(uptr+2),
8542+ *(uptr+3), *(uptr+4), *(uptr+5),
8543+ *(uptr+6), *(uptr+7), *(uptr+8));
8544+ uptr = (uint32_t *)cmd;
8545+ lpfc_printf_log(phba, KERN_INFO, LOG_LIBDFC,
8546+ "1233 cx:%.08x %.08x %.08x %.08x "
8547+ "%.08x %.08x %.08x %.08x %.08x\n",
8548+ *uptr, *(uptr+1), *(uptr+2),
8549+ *(uptr+3), *(uptr+4), *(uptr+5),
8550+ *(uptr+6), *(uptr+7), *(uptr+8));
8551+ if (rc != -EACCES)
8552+ goto lpfc_menlo_read_err_exit;
8553+ else {
8554+ spin_unlock_irq(&phba->hbalock);
8555+ return rc;
8556+ }
8557+ }
8558+ }
8559+ sysfs_menlo->state = SMENLO_READING;
8560+ sysfs_menlo->cr.offset = 0;
8561+
8562+ } else
8563+ spin_lock_irq(&phba->hbalock);
8564+
8565+ if (sysfs_menlo->cmdhdr.cmd == MENLO_CMD_FW_DOWNLOAD)
8566+ genreq = &sysfs_menlo->cx;
8567+ else
8568+ genreq = &sysfs_menlo->cr;
8569+
8570+ /* Copy back response data */
8571+ if (sysfs_menlo->cmdhdr.rspsize > count) {
8572+ lpfc_printf_log(phba, KERN_INFO, LOG_LIBDFC,
8573+ "1218 MloMgnt Rqst err Data: x%x %d %d %d %d\n",
8574+ genreq->outdmp->flag,
8575+ sysfs_menlo->cmdhdr.rspsize,
8576+ (int)count, (int)off, (int)genreq->offset);
8577+ }
8578+
8579+ if (phba->sli.sli_flag & LPFC_BLOCK_MGMT_IO) {
8580+ rc = -EAGAIN;
8581+ lpfc_printf_log(phba, KERN_INFO, LOG_LIBDFC,
8582+ "1219 menlo_read:4 off %d rc=%d \n",
8583+ (int)off, rc);
8584+ goto lpfc_menlo_read_err_exit;
8585+ }
8586+ else if ( sysfs_menlo->state != SMENLO_READING) {
8587+ rc = -EAGAIN;
8588+ lpfc_printf_log(phba, KERN_INFO, LOG_LIBDFC,
8589+ "1220 menlo_read:5 off %d reg off %d rc=%d state %x\n",
8590+			(int)off, (int)genreq->offset, rc, sysfs_menlo->state);
8591+ goto lpfc_menlo_read_err_exit;
8592+ }
8593+ temp_off = 0;
8594+ mlast = NULL;
8595+ list_for_each_entry(mlast, &genreq->outhead, dma.list) {
8596+ if (temp_off == off)
8597+ break;
8598+ else
8599+ temp_off += BUF_SZ_4K;
8600+ }
8601+ if (mlast)
8602+ memcpy(buf,(uint8_t *) mlast->dma.virt, count);
8603+ else {
8604+ rc = -ERANGE;
8605+ lpfc_printf_log(phba, KERN_INFO, LOG_LIBDFC,
8606+ "1221 menlo_read:6 off %d rc=%d \n",
8607+ (int)off, rc);
8608+ goto lpfc_menlo_read_err_exit;
8609+ }
8610+ genreq->offset += count;
8611+
8612+
8613+ if (genreq->offset >= sysfs_menlo->cmdhdr.rspsize) {
8614+ lpfc_printf_log(phba, KERN_INFO, LOG_LIBDFC,
8615+ "1222 menlo_read: done off %d rc=%d"
8616+ " cnt %d rsp_code %x\n",
8617+ (int)off, rc, (int)count,*((uint32_t *)buf));
8618+ rc = count;
8619+ goto lpfc_menlo_read_err_exit;
8620+ }
8621+
8622+ if (count >= sysfs_menlo->cmdhdr.rspsize)
8623+ rc = sysfs_menlo->cmdhdr.rspsize;
8624+ else /* Can there be a > 4k response */
8625+ rc = count;
8626+ if (genreq->offset < sysfs_menlo->cmdhdr.rspsize) {
8627+ spin_unlock_irq(&phba->hbalock);
8628+ return rc;
8629+ }
8630+
8631+lpfc_menlo_read_err_exit:
8632+ spin_unlock_irq(&phba->hbalock);
8633+ sysfs_menlo_idle(phba,sysfs_menlo);
8634+ return rc;
8635+}
8636+
8637+
8638+static ssize_t
8639+sysfs_menlo_read(struct kobject *kobj, struct bin_attribute *bin_attr,
8640+ char *buf, loff_t off, size_t count)
8641+{
8642+ struct device *dev = container_of(kobj, struct device, kobj);
8643+ struct Scsi_Host *shost = class_to_shost(dev);
8644+ struct lpfc_vport *vport = (struct lpfc_vport *) shost->hostdata;
8645+ struct lpfc_hba *phba = vport->phba;
8646+
8647+ return lpfc_menlo_read(phba, buf, off, count, 1);
8648+}
8649+int need_non_blocking = 0;
8650+void lpfc_check_menlo_cfg(struct lpfc_hba *phba)
8651+{
8652+ uint32_t cmd_size;
8653+ uint32_t rsp_size;
8654+ menlo_get_cmd_t *cmd = NULL;
8655+ menlo_init_rsp_t *rsp = NULL;
8656+ int rc = 0;
8657+
8658+ lpfc_printf_log (phba, KERN_INFO, LOG_LINK_EVENT,
8659+ "1253 Checking FCoE chip firmware.\n");
8660+ if ( need_non_blocking ) /* Need non blocking issue_iocb */
8661+ return;
8662+
8663+ cmd_size = sizeof (menlo_get_cmd_t);
8664+ cmd = kmalloc(cmd_size, GFP_KERNEL);
8665+ if (!cmd ) {
8666+ lpfc_printf_log (phba, KERN_ERR, LOG_LINK_EVENT,
8667+ "1240 Unable to allocate command buffer memory.\n");
8668+ return;
8669+ }
8670+
8671+ rsp_size = sizeof (menlo_init_rsp_t);
8672+ rsp = kmalloc(rsp_size, GFP_KERNEL);
8673+ if (!rsp ) {
8674+ lpfc_printf_log (phba, KERN_ERR, LOG_LINK_EVENT,
8675+ "1244 Unable to allocate response buffer memory.\n");
8676+		kfree(cmd);
8677+ return;
8678+ }
8679+
8680+ memset(cmd,0, cmd_size);
8681+ memset(rsp,0, rsp_size);
8682+
8683+ cmd->code = MENLO_CMD_GET_INIT;
8684+ cmd->context = cmd_size;
8685+ cmd->length = rsp_size;
8686+ rc = lpfc_menlo_write (phba, (char *) cmd, 0, cmd_size);
8687+ if ( rc != cmd_size ) {
8688+ lpfc_printf_log (phba, KERN_ERR, LOG_LINK_EVENT,
8689+ "1250 Menlo command error. code=%d.\n", rc);
8690+
8691+ kfree (cmd);
8692+ kfree (rsp);
8693+ return;
8694+ }
8695+ cmd->code = MENLO_CMD_GET_INIT;
8696+ cmd->context = 0;
8697+ cmd->length = rsp_size;
8698+ BE_swap32_buffer ((uint8_t *) cmd, cmd_size);
8699+ rc = lpfc_menlo_write (phba, (char *) cmd, cmd_size, cmd_size);
8700+ if ( rc != cmd_size ) {
8701+ lpfc_printf_log (phba, KERN_ERR, LOG_LINK_EVENT,
8702+ "1251 Menlo command error. code=%d.\n", rc);
8703+
8704+ kfree (cmd);
8705+ kfree (rsp);
8706+ return;
8707+ }
8708+ rc = lpfc_menlo_read (phba, (char *) rsp, 0, rsp_size,0);
8709+ if ( rc && rc != rsp_size ) {
8710+ lpfc_printf_log (phba, KERN_ERR, LOG_LINK_EVENT,
8711+ "1252 Menlo command error. code=%d.\n", rc);
8712+
8713+ }
8714+ kfree (cmd);
8715+ kfree (rsp);
8716+ return;
8717+}
8718+
8719+struct bin_attribute sysfs_menlo_attr = {
8720+ .attr = {
8721+ .name = "menlo",
8722+ .mode = S_IRUSR | S_IWUSR,
8723+ .owner = THIS_MODULE,
8724+ },
8725+ .size = SYSFS_MENLO_ATTR_SIZE,
8726+ .read = sysfs_menlo_read,
8727+ .write = sysfs_menlo_write,
8728+};
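
To make the write/read protocol above concrete, here is a hedged user-space sketch of issuing MENLO_CMD_GET_INIT through the "menlo" binary attribute; it is not part of the patch. The sysfs path, the host number, and the 12-byte header layout (mirroring struct lpfc_sysfs_menlo_hdr, which is defined outside this file) are assumptions; the flow itself follows lpfc_check_menlo_cfg(): a header write at offset 0, the command words at offset 12, then a read at offset 0 that issues the GEN_REQUEST64 IOCB and returns the menlo_init_rsp_t data.

#include <stdio.h>
#include <stdint.h>
#include <string.h>
#include <fcntl.h>
#include <unistd.h>

struct menlo_hdr {			/* assumed layout of lpfc_sysfs_menlo_hdr */
	uint32_t cmd;
	uint32_t cmdsize;
	uint32_t rspsize;
};

int main(void)
{
	struct menlo_hdr hdr = { 0x7, 12, 32 };	/* MENLO_CMD_GET_INIT, sizeof(menlo_get_cmd_t), sizeof(menlo_init_rsp_t) */
	uint32_t cmd[3] = { 0x7, 0, 32 };	/* code, context, max response length */
	uint32_t rsp[8];			/* menlo_init_rsp_t image */
	/* assumed path; the attribute is registered on the Scsi_Host device */
	int fd = open("/sys/class/scsi_host/host0/menlo", O_RDWR);

	if (fd < 0)
		return 1;
	/* 1. Header write at offset 0 selects the command and buffer sizes. */
	pwrite(fd, &hdr, sizeof(hdr), 0);
	/* 2. Command words at offset sizeof(hdr); the in-kernel caller runs
	 *    them through BE_swap32_buffer() (per-word cpu_to_le32) first,
	 *    which is an identity transform on a little-endian host. */
	pwrite(fd, cmd, sizeof(cmd), sizeof(hdr));
	/* 3. A read at offset 0 issues the IOCB and copies the response back;
	 *    offsets, counts and buffers must stay 4-byte aligned. */
	if (pread(fd, rsp, sizeof(rsp), 0) == sizeof(rsp))
		printf("fw_type 0x%x\n", rsp[6]);	/* 7th word is fw_type */
	close(fd);
	return 0;
}

All three accesses must come from the same process, since lpfc_get_sysfs_menlo() tracks the in-progress command by pid.
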
8729--- a/drivers/scsi/lpfc/lpfc_scsi.c
8730+++ b/drivers/scsi/lpfc/lpfc_scsi.c
8731@@ -42,7 +42,6 @@
8732
8733 #define LPFC_RESET_WAIT 2
8734 #define LPFC_ABORT_WAIT 2
8735-
8736 /**
8737 * lpfc_update_stats: Update statistical data for the command completion.
8738 * @phba: Pointer to HBA object.
8739@@ -336,6 +335,22 @@ lpfc_scsi_dev_block(struct lpfc_hba *phb
8740 lpfc_destroy_vport_work_array(phba, vports);
8741 }
8742
8743+void
8744+lpfc_scsi_dev_rescan(struct lpfc_hba *phba)
8745+{
8746+ struct lpfc_vport **vports;
8747+ struct Scsi_Host *shost;
8748+ int i;
8749+
8750+ vports = lpfc_create_vport_work_array(phba);
8751+ if (vports != NULL)
8752+ for (i = 0; i <= phba->max_vpi && vports[i] != NULL; i++) {
8753+ shost = lpfc_shost_from_vport(vports[i]);
8754+ scsi_scan_host(shost);
8755+ }
8756+ lpfc_destroy_vport_work_array(phba, vports);
8757+}
8758+
8759 /*
8760 * This routine allocates a scsi buffer, which contains all the necessary
8761 * information needed to initiate a SCSI I/O. The non-DMAable buffer region
8762@@ -1841,22 +1856,3 @@ struct scsi_host_template lpfc_template
8763 .max_sectors = 0xFFFF,
8764 };
8765
8766-struct scsi_host_template lpfc_vport_template = {
8767- .module = THIS_MODULE,
8768- .name = LPFC_DRIVER_NAME,
8769- .info = lpfc_info,
8770- .queuecommand = lpfc_queuecommand,
8771- .eh_abort_handler = lpfc_abort_handler,
8772- .eh_device_reset_handler= lpfc_device_reset_handler,
8773- .eh_bus_reset_handler = lpfc_bus_reset_handler,
8774- .slave_alloc = lpfc_slave_alloc,
8775- .slave_configure = lpfc_slave_configure,
8776- .slave_destroy = lpfc_slave_destroy,
8777- .scan_finished = lpfc_scan_finished,
8778- .this_id = -1,
8779- .sg_tablesize = LPFC_DEFAULT_SG_SEG_CNT,
8780- .cmd_per_lun = LPFC_CMD_PER_LUN,
8781- .use_clustering = ENABLE_CLUSTERING,
8782- .shost_attrs = lpfc_vport_attrs,
8783- .max_sectors = 0xFFFF,
8784-};
8785--- /dev/null
8786+++ b/drivers/scsi/lpfc/lpfc_security.c
8787@@ -0,0 +1,339 @@
8788+/*******************************************************************
8789+ * This file is part of the Emulex Linux Device Driver for *
8790+ * Fibre Channel Host Bus Adapters. *
8791+ * Copyright (C) 2006-2008 Emulex. All rights reserved. *
8792+ * EMULEX and SLI are trademarks of Emulex. *
8793+ * www.emulex.com *
8794+ * *
8795+ * This program is free software; you can redistribute it and/or *
8796+ * modify it under the terms of version 2 of the GNU General *
8797+ * Public License as published by the Free Software Foundation. *
8798+ * This program is distributed in the hope that it will be useful. *
8799+ * ALL EXPRESS OR IMPLIED CONDITIONS, REPRESENTATIONS AND *
8800+ * WARRANTIES, INCLUDING ANY IMPLIED WARRANTY OF MERCHANTABILITY, *
8801+ * FITNESS FOR A PARTICULAR PURPOSE, OR NON-INFRINGEMENT, ARE *
8802+ * DISCLAIMED, EXCEPT TO THE EXTENT THAT SUCH DISCLAIMERS ARE HELD *
8803+ * TO BE LEGALLY INVALID. See the GNU General Public License for *
8804+ * more details, a copy of which can be found in the file COPYING *
8805+ * included with this package. *
8806+ *******************************************************************/
8807+
8808+#include <linux/delay.h>
8809+#include <linux/pci.h>
8810+#include <linux/interrupt.h>
8811+
8812+#include <scsi/scsi_tcq.h>
8813+#include <scsi/scsi_transport_fc.h>
8814+
8815+#include "lpfc_hw.h"
8816+#include "lpfc_sli.h"
8817+#include "lpfc_nl.h"
8818+#include "lpfc_disc.h"
8819+#include "lpfc.h"
8820+#include "lpfc_crtn.h"
8821+#include "lpfc_logmsg.h"
8822+#include "lpfc_security.h"
8823+#include "lpfc_auth_access.h"
8824+#include "lpfc_vport.h"
8825+
8826+uint8_t lpfc_security_service_state = SECURITY_OFFLINE;
8827+
8828+void
8829+lpfc_security_service_online(struct Scsi_Host *shost)
8830+{
8831+ struct lpfc_vport *vport = (struct lpfc_vport *)shost->hostdata;
8832+
8833+ lpfc_security_service_state = SECURITY_ONLINE;
8834+ if (vport->cfg_enable_auth &&
8835+ vport->auth.auth_mode == FC_AUTHMODE_UNKNOWN &&
8836+ vport->phba->link_state == LPFC_HBA_ERROR)
8837+ lpfc_selective_reset(vport->phba);
8838+}
8839+
8840+void
8841+lpfc_security_service_offline(struct Scsi_Host *shost)
8842+{
8843+ lpfc_security_service_state = SECURITY_OFFLINE;
8844+}
8845+
8846+void
8847+lpfc_security_config(struct Scsi_Host *shost, int status, void *rsp)
8848+{
8849+ struct fc_auth_rsp *auth_rsp = (struct fc_auth_rsp *)rsp;
8850+ struct lpfc_vport *vport = (struct lpfc_vport *)shost->hostdata;
8851+ struct lpfc_nodelist *ndlp;
8852+ uint32_t old_interval, new_interval;
8853+ unsigned long new_jiffies, temp_jiffies;
8854+ uint8_t last_auth_mode;
8855+
8856+ if (status)
8857+ return;
8858+ ndlp = lpfc_findnode_did(vport, Fabric_DID);
8859+ if (!ndlp || !NLP_CHK_NODE_ACT(ndlp))
8860+ return;
8861+
8862+ vport->auth.bidirectional =
8863+ auth_rsp->u.dhchap_security_config.bidirectional;
8864+ memcpy(&vport->auth.hash_priority[0],
8865+ &auth_rsp->u.dhchap_security_config.hash_priority[0],
8866+ sizeof(vport->auth.hash_priority));
8867+ vport->auth.hash_len = auth_rsp->u.dhchap_security_config.hash_len;
8868+ memcpy(&vport->auth.dh_group_priority[0],
8869+ &auth_rsp->u.dhchap_security_config.
8870+ dh_group_priority[0],
8871+ sizeof(vport->auth.dh_group_priority));
8872+ vport->auth.dh_group_len =
8873+ auth_rsp->u.dhchap_security_config.dh_group_len;
8874+ old_interval = vport->auth.reauth_interval;
8875+ vport->auth.reauth_interval =
8876+ auth_rsp->u.dhchap_security_config.reauth_interval;
8877+ new_interval = vport->auth.reauth_interval;
8878+ /*
8879+ * If interval changed we need to adjust the running timer
8880+ * If enabled then start timer now.
8881+ * If disabled then stop the timer.
8882+	 * If changed to shorter than the elapsed time, then set to fire now
8883+ * If changed to longer than elapsed time, extend the timer.
8884+ */
8885+ if (old_interval != new_interval &&
8886+ vport->auth.auth_state == LPFC_AUTH_SUCCESS) {
8887+ new_jiffies = msecs_to_jiffies(new_interval * 60000);
8888+ del_timer_sync(&ndlp->nlp_reauth_tmr);
8889+ if (old_interval == 0)
8890+ temp_jiffies = jiffies + new_jiffies;
8891+ if (new_interval == 0)
8892+ temp_jiffies = 0;
8893+ else if (new_jiffies < (jiffies - vport->auth.last_auth))
8894+ temp_jiffies = jiffies + msecs_to_jiffies(1);
8895+ else
8896+ temp_jiffies = jiffies + (new_jiffies -
8897+ (jiffies - vport->auth.last_auth));
8898+ if (temp_jiffies)
8899+ mod_timer(&ndlp->nlp_reauth_tmr, temp_jiffies);
8900+ }
8901+ last_auth_mode = vport->auth.auth_mode;
8902+ vport->auth.auth_mode =
8903+ auth_rsp->u.dhchap_security_config.auth_mode;
8904+ lpfc_printf_vlog(vport, KERN_INFO, LOG_SECURITY,
8905+ "1025 Received security config local_wwpn:"
8906+ "%llX remote_wwpn:%llX \nmode:0x%x "
8907+ "hash(%d):%x:%x:%x:%x bidir:0x%x "
8908+ "dh_group(%d):%x:%x:%x:%x:%x:%x:%x:%x "
8909+ "reauth_interval:0x%x\n",
8910+ (unsigned long long)auth_rsp->local_wwpn,
8911+ (unsigned long long)auth_rsp->remote_wwpn,
8912+ auth_rsp->u.dhchap_security_config.auth_mode,
8913+ auth_rsp->u.dhchap_security_config.hash_len,
8914+ auth_rsp->u.dhchap_security_config.hash_priority[0],
8915+ auth_rsp->u.dhchap_security_config.hash_priority[1],
8916+ auth_rsp->u.dhchap_security_config.hash_priority[2],
8917+ auth_rsp->u.dhchap_security_config.hash_priority[3],
8918+ auth_rsp->u.dhchap_security_config.bidirectional,
8919+ auth_rsp->u.dhchap_security_config.dh_group_len,
8920+ auth_rsp->u.dhchap_security_config.dh_group_priority[0],
8921+ auth_rsp->u.dhchap_security_config.dh_group_priority[1],
8922+ auth_rsp->u.dhchap_security_config.dh_group_priority[2],
8923+ auth_rsp->u.dhchap_security_config.dh_group_priority[3],
8924+ auth_rsp->u.dhchap_security_config.dh_group_priority[4],
8925+ auth_rsp->u.dhchap_security_config.dh_group_priority[5],
8926+ auth_rsp->u.dhchap_security_config.dh_group_priority[6],
8927+ auth_rsp->u.dhchap_security_config.dh_group_priority[7],
8928+ auth_rsp->u.dhchap_security_config.reauth_interval);
8929+ kfree(auth_rsp);
8930+ if (vport->auth.auth_mode == FC_AUTHMODE_ACTIVE)
8931+ vport->auth.security_active = 1;
8932+ else if (vport->auth.auth_mode == FC_AUTHMODE_PASSIVE) {
8933+ if (ndlp->nlp_flag & NLP_SC_REQ)
8934+ vport->auth.security_active = 1;
8935+ else {
8936+ lpfc_printf_vlog(vport, KERN_INFO, LOG_SECURITY,
8937+ "1038 Authentication not "
8938+ "required by the fabric. "
8939+ "Disabled.\n");
8940+ vport->auth.security_active = 0;
8941+ }
8942+ } else {
8943+ vport->auth.security_active = 0;
8944+ /*
8945+		 * If the switch requires authentication and authentication
8946+ * is disabled for this HBA/Fabric port, fail the
8947+ * discovery.
8948+ */
8949+ if (ndlp->nlp_flag & NLP_SC_REQ) {
8950+ lpfc_printf_vlog(vport, KERN_ERR, LOG_SECURITY,
8951+ "1050 Authentication mode is "
8952+ "disabled, but is required by "
8953+ "the fabric.\n");
8954+ lpfc_vport_set_state(vport, FC_VPORT_FAILED);
8955+ /* Cancel discovery timer */
8956+ lpfc_can_disctmo(vport);
8957+ }
8958+ }
8959+ if (last_auth_mode == FC_AUTHMODE_UNKNOWN) {
8960+ if (vport->auth.security_active)
8961+ lpfc_start_authentication(vport, ndlp);
8962+ else
8963+ lpfc_start_discovery(vport);
8964+ }
8965+}
8966+
8967+int
8968+lpfc_get_security_enabled(struct Scsi_Host *shost)
8969+{
8970+ struct lpfc_vport *vport = (struct lpfc_vport *) shost->hostdata;
8971+
8972+ return(vport->cfg_enable_auth);
8973+}
8974+
8975+int
8976+lpfc_security_wait(struct lpfc_hba *phba)
8977+{
8978+ int i = 0;
8979+ if (lpfc_security_service_state == SECURITY_ONLINE)
8980+ return 0;
8981+ lpfc_printf_log(phba, KERN_WARNING, LOG_SECURITY,
8982+ "1058 Waiting for authentication service...\n");
8983+ while (lpfc_security_service_state == SECURITY_OFFLINE) {
8984+ i++;
8985+ if (i > SECURITY_WAIT_TMO * 2)
8986+ return -ETIMEDOUT;
8987+ /* Delay for half of a second */
8988+ msleep(500);
8989+ }
8990+ lpfc_printf_log(phba, KERN_WARNING, LOG_SECURITY,
8991+ "1059 Authentication service online.\n");
8992+ return 0;
8993+}
8994+
8995+int
8996+lpfc_security_config_wait(struct lpfc_vport *vport)
8997+{
8998+ int i = 0;
8999+
9000+ while (vport->auth.auth_mode == FC_AUTHMODE_UNKNOWN) {
9001+ i++;
9002+ if (i > 120) {
9003+ return -ETIMEDOUT;
9004+ }
9005+ /* Delay for half of a second */
9006+ msleep(500);
9007+ }
9008+ return 0;
9009+}
9010+
9011+void
9012+lpfc_reauth_node(unsigned long ptr)
9013+{
9014+ struct lpfc_nodelist *ndlp = (struct lpfc_nodelist *) ptr;
9015+ struct lpfc_vport *vport = ndlp->vport;
9016+ struct lpfc_hba *phba = vport->phba;
9017+ unsigned long flags;
9018+ struct lpfc_work_evt *evtp = &ndlp->els_reauth_evt;
9019+
9020+ ndlp = (struct lpfc_nodelist *) ptr;
9021+ phba = ndlp->vport->phba;
9022+
9023+ spin_lock_irqsave(&phba->hbalock, flags);
9024+ if (!list_empty(&evtp->evt_listp)) {
9025+ spin_unlock_irqrestore(&phba->hbalock, flags);
9026+ return;
9027+ }
9028+
9029+ /* We need to hold the node resource by incrementing the reference
9030+ * count until this queued work is done
9031+ */
9032+ evtp->evt_arg1 = lpfc_nlp_get(ndlp);
9033+ if (evtp->evt_arg1) {
9034+ evtp->evt = LPFC_EVT_REAUTH;
9035+ list_add_tail(&evtp->evt_listp, &phba->work_list);
9036+ lpfc_worker_wake_up(phba);
9037+ }
9038+ spin_unlock_irqrestore(&phba->hbalock, flags);
9039+ return;
9040+}
9041+
9042+void
9043+lpfc_reauthentication_handler(struct lpfc_nodelist *ndlp)
9044+{
9045+ struct lpfc_vport *vport = ndlp->vport;
9046+ if (vport->auth.auth_msg_state != LPFC_DHCHAP_SUCCESS)
9047+ return;
9048+
9049+ if (lpfc_start_node_authentication(ndlp)) {
9050+ lpfc_printf_vlog(vport, KERN_ERR, LOG_SECURITY,
9051+ "1029 Reauthentication Failure\n");
9052+ if (vport->auth.auth_state == LPFC_AUTH_SUCCESS)
9053+ lpfc_port_auth_failed(ndlp);
9054+ }
9055+}
9056+
9057+/*
9058+ * This function kick-starts authentication for a node.  It is used
9059+ * for re-authentication of a node or for user-initiated node
9060+ * authentication.
9061+ */
9062+int
9063+lpfc_start_node_authentication(struct lpfc_nodelist *ndlp)
9064+{
9065+ struct lpfc_vport *vport;
9066+ int ret;
9067+
9068+ vport = ndlp->vport;
9069+	/* If a re-authentication timer is running, cancel it */
9070+ del_timer_sync(&ndlp->nlp_reauth_tmr);
9071+ ret = lpfc_get_auth_config(ndlp, &ndlp->nlp_portname);
9072+ if (ret)
9073+ return ret;
9074+ ret = lpfc_security_config_wait(vport);
9075+ if (ret) {
9076+ lpfc_printf_vlog(vport, KERN_ERR, LOG_SECURITY,
9077+ "1032 Start Authentication: get config "
9078+ "timed out.\n");
9079+ return ret;
9080+ }
9081+ return 0;
9082+}
9083+
9084+int
9085+lpfc_get_auth_config(struct lpfc_nodelist *ndlp, struct lpfc_name *rwwn)
9086+{
9087+ struct lpfc_vport *vport;
9088+ struct fc_auth_req auth_req;
9089+ struct fc_auth_rsp *auth_rsp;
9090+ struct Scsi_Host *shost;
9091+ int ret;
9092+
9093+ vport = ndlp->vport;
9094+ shost = lpfc_shost_from_vport(vport);
9095+
9096+ auth_req.local_wwpn = wwn_to_u64(vport->fc_portname.u.wwn);
9097+ if (ndlp->nlp_type & NLP_FABRIC)
9098+ auth_req.remote_wwpn = AUTH_FABRIC_WWN;
9099+ else
9100+ auth_req.remote_wwpn = wwn_to_u64(rwwn->u.wwn);
9101+ if (lpfc_security_service_state == SECURITY_OFFLINE) {
9102+ lpfc_printf_vlog(vport, KERN_ERR, LOG_SECURITY,
9103+ "1053 Start Authentication: "
9104+ "Security service offline.\n");
9105+ return -EINVAL;
9106+ }
9107+ auth_rsp = kmalloc(sizeof(struct fc_auth_rsp), GFP_KERNEL);
9108+ if (!auth_rsp) {
9109+ lpfc_printf_vlog(vport, KERN_ERR, LOG_SECURITY,
9110+ "1028 Start Authentication: No buffers\n");
9111+ return -ENOMEM;
9112+ }
9113+ vport->auth.auth_mode = FC_AUTHMODE_UNKNOWN;
9114+ ret = lpfc_fc_security_get_config(shost, &auth_req,
9115+ sizeof(struct fc_auth_req),
9116+ auth_rsp,
9117+ sizeof(struct fc_auth_rsp));
9118+ if (ret) {
9119+ lpfc_printf_vlog(vport, KERN_ERR, LOG_SECURITY,
9120+ "1031 Start Authentication: Get config "
9121+ "failed.\n");
9122+ kfree(auth_rsp);
9123+ return ret;
9124+ }
9125+ return 0;
9126+}
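
For readers of this patch, the standalone C sketch below models the re-arm
arithmetic that lpfc_security_config() above applies to the re-authentication
timer when reauth_interval changes. It is illustrative only: HZ, the rounding
in the local msecs_to_jiffies() stand-in, and the example values are
assumptions, and the branches are written as an exclusive if/else chain for
readability rather than copied from the patch.

/*
 * Illustrative userspace model (NOT part of this patch) of the re-auth
 * timer re-arm arithmetic in lpfc_security_config() above.  "jiffies",
 * HZ and msecs_to_jiffies() are simplified stand-ins for the kernel's.
 */
#include <stdio.h>

#define HZ 250UL                          /* assumed tick rate */

static unsigned long msecs_to_jiffies(unsigned long ms)
{
	return (ms * HZ + 999UL) / 1000UL;    /* round up, at least 1 tick */
}

/*
 * Absolute expiry (in jiffies) for the re-auth timer, or 0 when the
 * timer should stay stopped.  The cases follow the comment in the
 * patch; the ordering here is made exclusive for readability.
 */
static unsigned long
reauth_expiry(unsigned long jiffies_now, unsigned long last_auth,
	      unsigned int old_interval_min, unsigned int new_interval_min)
{
	unsigned long new_jiffies = msecs_to_jiffies(new_interval_min * 60000UL);
	unsigned long elapsed = jiffies_now - last_auth;

	if (new_interval_min == 0)            /* re-auth now disabled: stop  */
		return 0;
	if (old_interval_min == 0)            /* was disabled: full interval */
		return jiffies_now + new_jiffies;
	if (new_jiffies < elapsed)            /* already overdue: fire asap  */
		return jiffies_now + msecs_to_jiffies(1);
	return jiffies_now + (new_jiffies - elapsed); /* remaining time */
}

int main(void)
{
	/* Example: last auth 10 minutes ago, interval shrunk from 30 to 5. */
	unsigned long now = 1000000UL;
	unsigned long last = now - msecs_to_jiffies(10UL * 60000UL);

	printf("re-arm in %lu jiffies\n", reauth_expiry(now, last, 30, 5) - now);
	return 0;
}
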
9127--- /dev/null
9128+++ b/drivers/scsi/lpfc/lpfc_security.h
9129@@ -0,0 +1,24 @@
9130+/*******************************************************************
9131+ * This file is part of the Emulex Linux Device Driver for *
9132+ * Fibre Channel Host Bus Adapters. *
9133+ * Copyright (C) 2006-2007 Emulex. All rights reserved. *
9134+ * EMULEX and SLI are trademarks of Emulex. *
9135+ * www.emulex.com *
9136+ * *
9137+ * This program is free software; you can redistribute it and/or *
9138+ * modify it under the terms of version 2 of the GNU General *
9139+ * Public License as published by the Free Software Foundation. *
9140+ * This program is distributed in the hope that it will be useful. *
9141+ * ALL EXPRESS OR IMPLIED CONDITIONS, REPRESENTATIONS AND *
9142+ * WARRANTIES, INCLUDING ANY IMPLIED WARRANTY OF MERCHANTABILITY, *
9143+ * FITNESS FOR A PARTICULAR PURPOSE, OR NON-INFRINGEMENT, ARE *
9144+ * DISCLAIMED, EXCEPT TO THE EXTENT THAT SUCH DISCLAIMERS ARE HELD *
9145+ * TO BE LEGALLY INVALID. See the GNU General Public License for *
9146+ * more details, a copy of which can be found in the file COPYING *
9147+ * included with this package. *
9148+ *******************************************************************/
9149+
9150+#define SECURITY_OFFLINE 0x0
9151+#define SECURITY_ONLINE 0x1
9152+
9153+#define SECURITY_WAIT_TMO 30 /* seconds to wait for the auth service */
9154--- a/drivers/scsi/lpfc/lpfc_sli.c
9155+++ b/drivers/scsi/lpfc/lpfc_sli.c
9156@@ -796,7 +796,7 @@ lpfc_sli_hbq_to_firmware(struct lpfc_hba
9157 hbqe->bde.addrHigh = le32_to_cpu(putPaddrHigh(physaddr));
9158 hbqe->bde.addrLow = le32_to_cpu(putPaddrLow(physaddr));
9159 hbqe->bde.tus.f.bdeSize = hbq_buf->size;
9160- hbqe->bde.tus.f.bdeFlags = 0;
9161+ hbqe->bde.tus.f.bdeFlags = BUFF_TYPE_BDE_64;
9162 hbqe->bde.tus.w = le32_to_cpu(hbqe->bde.tus.w);
9163 hbqe->buffer_tag = le32_to_cpu(hbq_buf->tag);
9164 /* Sync SLIM */
9165@@ -1051,6 +1051,9 @@ lpfc_sli_chk_mbx_command(uint8_t mbxComm
9166 case MBX_REG_VPI:
9167 case MBX_UNREG_VPI:
9168 case MBX_HEARTBEAT:
9169+ case MBX_READ_EVENT_LOG_STATUS:
9170+ case MBX_READ_EVENT_LOG:
9171+ case MBX_WRITE_EVENT_LOG:
9172 case MBX_PORT_CAPABILITIES:
9173 case MBX_PORT_IOV_CONTROL:
9174 ret = mbxCommand;
9175@@ -3546,9 +3549,35 @@ lpfc_sli_issue_mbox(struct lpfc_hba *phb
9176 mb->mbxOwner = OWN_CHIP;
9177
9178 if (psli->sli_flag & LPFC_SLI2_ACTIVE) {
9179- /* First copy command data to host SLIM area */
9180+ /* Populate mbox extension offset word. */
9181+ if (pmbox->in_ext_byte_len || pmbox->out_ext_byte_len) {
9182+ *(((uint32_t *)mb) + pmbox->mbox_offset_word)
9183+ = (uint8_t *)phba->mbox_ext
9184+ - (uint8_t *)phba->mbox;
9185+ }
9186+
9187+ /* Copy the mailbox extension data */
9188+ if (pmbox->in_ext_byte_len && pmbox->context2) {
9189+ lpfc_sli_pcimem_bcopy(pmbox->context2,
9190+ (uint8_t*)phba->mbox_ext,
9191+ pmbox->in_ext_byte_len);
9192+ }
9193+ /* Copy command data to host SLIM area */
9194 lpfc_sli_pcimem_bcopy(mb, phba->mbox, MAILBOX_CMD_SIZE);
9195+
9196 } else {
9197+ /* Populate mbox extension offset word. */
9198+ if (pmbox->in_ext_byte_len || pmbox->out_ext_byte_len)
9199+ *(((uint32_t *)mb) + pmbox->mbox_offset_word)
9200+ = MAILBOX_HBA_EXT_OFFSET;
9201+
9202+ /* Copy the mailbox extension data */
9203+ if (pmbox->in_ext_byte_len && pmbox->context2) {
9204+ lpfc_memcpy_to_slim(phba->MBslimaddr +
9205+ MAILBOX_HBA_EXT_OFFSET,
9206+ pmbox->context2, pmbox->in_ext_byte_len);
9207+
9208+ }
9209 if (mb->mbxCommand == MBX_CONFIG_PORT) {
9210 /* copy command data into host mbox for cmpl */
9211 lpfc_sli_pcimem_bcopy(mb, phba->mbox, MAILBOX_CMD_SIZE);
9212@@ -3658,15 +3687,22 @@ lpfc_sli_issue_mbox(struct lpfc_hba *phb
9213 if (psli->sli_flag & LPFC_SLI2_ACTIVE) {
9214 /* copy results back to user */
9215 lpfc_sli_pcimem_bcopy(phba->mbox, mb, MAILBOX_CMD_SIZE);
9216+ /* Copy the mailbox extension data */
9217+ if (pmbox->out_ext_byte_len && pmbox->context2) {
9218+ lpfc_sli_pcimem_bcopy(phba->mbox_ext,
9219+ pmbox->context2,
9220+ pmbox->out_ext_byte_len);
9221+ }
9222 } else {
9223 /* First copy command data */
9224 lpfc_memcpy_from_slim(mb, phba->MBslimaddr,
9225 MAILBOX_CMD_SIZE);
9226- if ((mb->mbxCommand == MBX_DUMP_MEMORY) &&
9227- pmbox->context2) {
9228- lpfc_memcpy_from_slim((void *)pmbox->context2,
9229- phba->MBslimaddr + DMP_RSP_OFFSET,
9230- mb->un.varDmp.word_cnt);
9231+ /* Copy the mailbox extension data */
9232+ if (pmbox->out_ext_byte_len && pmbox->context2) {
9233+ lpfc_memcpy_from_slim(pmbox->context2,
9234+ phba->MBslimaddr +
9235+ MAILBOX_HBA_EXT_OFFSET,
9236+ pmbox->out_ext_byte_len);
9237 }
9238 }
9239
9240@@ -5395,6 +5431,15 @@ lpfc_sp_intr_handler(int irq, void *dev_
9241 if (pmb->mbox_cmpl) {
9242 lpfc_sli_pcimem_bcopy(mbox, pmbox,
9243 MAILBOX_CMD_SIZE);
9244+ /* Copy the mailbox extension data */
9245+ if (pmb->out_ext_byte_len &&
9246+ pmb->context2) {
9247+ lpfc_sli_pcimem_bcopy(
9248+ phba->mbox_ext,
9249+ pmb->context2,
9250+ pmb->out_ext_byte_len);
9251+ }
9252+
9253 }
9254 if (pmb->mbox_flag & LPFC_MBX_IMED_UNREG) {
9255 pmb->mbox_flag &= ~LPFC_MBX_IMED_UNREG;
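
The SLI-2 branch of lpfc_sli_issue_mbox() above stores, in the word selected
by pmbox->mbox_offset_word, the byte distance from the host mailbox to its
extension buffer. The self-contained sketch below shows that pointer
arithmetic in isolation; the structure layout, buffer sizes and word index
are illustrative assumptions, not the driver's real definitions.

/*
 * Self-contained sketch (illustrative only, NOT part of this patch) of
 * the extension-offset bookkeeping done in lpfc_sli_issue_mbox() above.
 */
#include <stdint.h>
#include <stdio.h>
#include <string.h>

#define MAILBOX_CMD_SIZE  256U           /* assumed mailbox size (bytes)   */
#define MBOX_EXT_SIZE     1024U          /* assumed extension size (bytes) */

struct fake_slim2 {                      /* stand-in for the host copy     */
	uint32_t mbox[MAILBOX_CMD_SIZE / 4];
	uint8_t  mbox_ext[MBOX_EXT_SIZE];
};

int main(void)
{
	struct fake_slim2 slim;
	unsigned int mbox_offset_word = 10;  /* illustrative word index */

	memset(&slim, 0, sizeof(slim));

	/* Same idea as the patch: stash "where the extension lives" as a
	 * byte offset from the start of the mailbox itself. */
	slim.mbox[mbox_offset_word] =
		(uint32_t)((uint8_t *)slim.mbox_ext - (uint8_t *)slim.mbox);

	printf("extension offset = %u bytes\n", slim.mbox[mbox_offset_word]);
	return 0;
}
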
9256--- a/drivers/scsi/lpfc/lpfc_sli.h
9257+++ b/drivers/scsi/lpfc/lpfc_sli.h
9258@@ -88,6 +88,9 @@ typedef struct lpfcMboxq {
9259
9260 void (*mbox_cmpl) (struct lpfc_hba *, struct lpfcMboxq *);
9261 uint8_t mbox_flag;
9262+ uint16_t in_ext_byte_len;
9263+ uint16_t out_ext_byte_len;
9264+ uint8_t mbox_offset_word;
9265
9266 } LPFC_MBOXQ_t;
9267
9268--- a/drivers/scsi/lpfc/lpfc_version.h
9269+++ b/drivers/scsi/lpfc/lpfc_version.h
9270@@ -18,7 +18,7 @@
9271 * included with this package. *
9272 *******************************************************************/
9273
9274-#define LPFC_DRIVER_VERSION "8.2.8"
9275+#define LPFC_DRIVER_VERSION "8.2.8.1"
9276
9277 #define LPFC_DRIVER_NAME "lpfc"
9278 #define LPFC_SP_DRIVER_HANDLER_NAME "lpfc:sp"
9279--- a/drivers/scsi/lpfc/lpfc_vport.c
9280+++ b/drivers/scsi/lpfc/lpfc_vport.c
9281@@ -42,6 +42,7 @@
9282 #include "lpfc_crtn.h"
9283 #include "lpfc_version.h"
9284 #include "lpfc_vport.h"
9285+#include "lpfc_auth_access.h"
9286
9287 inline void lpfc_vport_set_state(struct lpfc_vport *vport,
9288 enum fc_vport_state new_state)
9289@@ -394,6 +395,21 @@ lpfc_vport_create(struct fc_vport *fc_vp
9290 goto error_out;
9291 }
9292
9293+ shost = lpfc_shost_from_vport(vport);
9294+
9295+ if ((lpfc_get_security_enabled)(shost)) {
9296+ spin_lock_irq(&fc_security_user_lock);
9297+
9298+ list_add_tail(&vport->sc_users, &fc_security_user_list);
9299+
9300+ spin_unlock_irq(&fc_security_user_lock);
9301+
9302+ if (fc_service_state == FC_SC_SERVICESTATE_ONLINE) {
9303+ lpfc_fc_queue_security_work(vport,
9304+ &vport->sc_online_work);
9305+ }
9306+ }
9307+
9308 *(struct lpfc_vport **)fc_vport->dd_data = vport;
9309 vport->fc_vport = fc_vport;
9310
9311--- a/drivers/scsi/lpfc/Makefile
9312+++ b/drivers/scsi/lpfc/Makefile
9313@@ -1,7 +1,7 @@
9314 #/*******************************************************************
9315 # * This file is part of the Emulex Linux Device Driver for *
9316 # * Fibre Channel Host Bus Adapters. *
9317-# * Copyright (C) 2004-2006 Emulex. All rights reserved. *
9318+# * Copyright (C) 2004-2008 Emulex. All rights reserved. *
9319 # * EMULEX and SLI are trademarks of Emulex. *
9320 # * www.emulex.com *
9321 # * *
9322@@ -28,4 +28,5 @@ obj-$(CONFIG_SCSI_LPFC) := lpfc.o
9323
9324 lpfc-objs := lpfc_mem.o lpfc_sli.o lpfc_ct.o lpfc_els.o lpfc_hbadisc.o \
9325 lpfc_init.o lpfc_mbox.o lpfc_nportdisc.o lpfc_scsi.o lpfc_attr.o \
9326- lpfc_vport.o lpfc_debugfs.o
9327+ lpfc_vport.o lpfc_debugfs.o lpfc_security.o lpfc_auth_access.o \
9328+ lpfc_auth.o lpfc_ioctl.o lpfc_menlo.o