]>
Commit | Line | Data |
---|---|---|
2cb7cef9 BS |
1 | From: Jamie Wellnitz <jamie.wellnitz@emulex.com> |
2 | Subject: Update lpfc to 8.2.8.3 | |
3 | References: bnc#420767 | |
4 | ||
5 | This patch updates the SLES 11 inbox lpfc driver to 8.2.8.3, which has several | |
6 | changes, mainly bugfixes: | |
7 | ||
8 | * Changed version number to 8.2.8.3 | |
9 | * Resolved uninitialized node access (CR 83287) | |
10 | * Fixed failing ioctl commands (CR 83850) | |
11 | * Cosmetic coding style clean up | |
12 | * Fix echotest failure when NPIV is enabled (CR 75009) | |
13 | * Fixed Port busy events | |
14 | * Back out slow vports fix (CR 83103) | |
15 | * Added a vendor unique RSCN event to send entire payload to mgmt application | |
16 | * Fixed internal loopback on Hornet hardware (CR 83323) | |
17 | * Fixed sysfs write handler for mailbox interface (CR 83674) | |
18 | * Implement driver support for Power Management Suspend/Resume operations (CR | |
19 | 74378) | |
20 | * Changed version number to 8.2.8.2 | |
21 | * Added data structures required for new events. | |
22 | * Streamlined interrupt enable/disable logic into helper routines | |
23 | * Fixed incorrect decrement of cmd_pending count. (CR 83286) | |
24 | * Fixed internal and external loopback on Hornet. (CR 83323) | |
25 | * Removed unnecessary sleeps during HBA initialization. (CR 82846) | |
26 | * Fixed RSCN address format not handled properly. (CR 82252) | |
27 | * Fixed unload driver with vports locks up driver (CR 83334) | |
28 | * Avoid polling HBA Error Attention when HBA's PCI channel is offline | |
29 | ||
30 | Signed-off-by: Jamie Wellnitz <jamie.wellnitz@emulex.com> | |
31 | Signed-off-by: Hannes Reinecke <hare@suse.de> | |
32 | ||
33 | diff --git a/drivers/scsi/lpfc/lpfc_attr.c b/drivers/scsi/lpfc/lpfc_attr.c | |
34 | index 273aa4f..8e94902 100644 | |
35 | --- a/drivers/scsi/lpfc/lpfc_attr.c | |
36 | +++ b/drivers/scsi/lpfc/lpfc_attr.c | |
37 | @@ -3531,9 +3531,6 @@ sysfs_mbox_write(struct kobject *kobj, struct bin_attribute *bin_attr, | |
38 | uint8_t *ext; | |
39 | uint32_t size; | |
40 | ||
41 | - if ((count + off) > MAILBOX_CMD_SIZE) | |
42 | - return -ERANGE; | |
43 | - | |
44 | if (off % 4 || count % 4 || (unsigned long)buf % 4) | |
45 | return -EINVAL; | |
46 | ||
47 | diff --git a/drivers/scsi/lpfc/lpfc_crtn.h b/drivers/scsi/lpfc/lpfc_crtn.h | |
48 | index 0c90479..a93c555 100644 | |
49 | --- a/drivers/scsi/lpfc/lpfc_crtn.h | |
50 | +++ b/drivers/scsi/lpfc/lpfc_crtn.h | |
51 | @@ -323,11 +323,10 @@ void lpfc_fabric_abort_nport(struct lpfc_nodelist *); | |
52 | void lpfc_fabric_abort_hba(struct lpfc_hba *); | |
53 | void lpfc_fabric_block_timeout(unsigned long); | |
54 | void lpfc_unblock_fabric_iocbs(struct lpfc_hba *); | |
55 | -void lpfc_adjust_queue_depth(struct lpfc_hba *); | |
56 | +void lpfc_rampdown_queue_depth(struct lpfc_hba *); | |
57 | void lpfc_ramp_down_queue_handler(struct lpfc_hba *); | |
58 | void lpfc_ramp_up_queue_handler(struct lpfc_hba *); | |
59 | void lpfc_scsi_dev_block(struct lpfc_hba *); | |
60 | -void lpfc_scsi_dev_rescan(struct lpfc_hba *); | |
61 | ||
62 | void | |
63 | lpfc_send_els_failure_event(struct lpfc_hba *, struct lpfc_iocbq *, | |
64 | diff --git a/drivers/scsi/lpfc/lpfc_els.c b/drivers/scsi/lpfc/lpfc_els.c | |
65 | index bce59ec..a95815e 100644 | |
66 | --- a/drivers/scsi/lpfc/lpfc_els.c | |
67 | +++ b/drivers/scsi/lpfc/lpfc_els.c | |
68 | @@ -224,7 +224,11 @@ lpfc_prep_els_iocb(struct lpfc_vport *vport, uint8_t expectRsp, | |
69 | /* For ELS_REQUEST64_CR, use the VPI by default */ | |
70 | icmd->ulpContext = vport->vpi; | |
71 | icmd->ulpCt_h = 0; | |
72 | - icmd->ulpCt_l = 1; | |
73 | + /* The CT field must be 0=INVALID_RPI for the ECHO cmd */ | |
74 | + if (elscmd == ELS_CMD_ECHO) | |
75 | + icmd->ulpCt_l = 0; /* context = invalid RPI */ | |
76 | + else | |
77 | + icmd->ulpCt_l = 1; /* context = VPI */ | |
78 | } | |
79 | ||
80 | bpl = (struct ulp_bde64 *) pbuflist->virt; | |
81 | @@ -2504,6 +2508,15 @@ lpfc_els_retry(struct lpfc_hba *phba, struct lpfc_iocbq *cmdiocb, | |
82 | case IOSTAT_LOCAL_REJECT: | |
83 | switch ((irsp->un.ulpWord[4] & 0xff)) { | |
84 | case IOERR_LOOP_OPEN_FAILURE: | |
85 | + if (cmd == ELS_CMD_FLOGI) { | |
86 | + if (PCI_DEVICE_ID_HORNET == | |
87 | + phba->pcidev->device) { | |
88 | + phba->fc_topology = TOPOLOGY_LOOP; | |
89 | + phba->pport->fc_myDID = 0; | |
90 | + phba->alpa_map[0] = 0; | |
91 | + phba->alpa_map[1] = 0; | |
92 | + } | |
93 | + } | |
94 | if (cmd == ELS_CMD_PLOGI && cmdiocb->retry == 0) | |
95 | delay = 1000; | |
96 | retry = 1; | |
97 | @@ -3870,27 +3883,21 @@ lpfc_rscn_payload_check(struct lpfc_vport *vport, uint32_t did) | |
98 | while (payload_len) { | |
99 | rscn_did.un.word = be32_to_cpu(*lp++); | |
100 | payload_len -= sizeof(uint32_t); | |
101 | - switch (rscn_did.un.b.resv) { | |
102 | - case 0: /* Single N_Port ID effected */ | |
103 | + switch (rscn_did.un.b.resv & RSCN_ADDRESS_FORMAT_MASK) { | |
104 | + case RSCN_ADDRESS_FORMAT_PORT: | |
105 | if (ns_did.un.word == rscn_did.un.word) | |
106 | goto return_did_out; | |
107 | break; | |
108 | - case 1: /* Whole N_Port Area effected */ | |
109 | + case RSCN_ADDRESS_FORMAT_AREA: | |
110 | if ((ns_did.un.b.domain == rscn_did.un.b.domain) | |
111 | && (ns_did.un.b.area == rscn_did.un.b.area)) | |
112 | goto return_did_out; | |
113 | break; | |
114 | - case 2: /* Whole N_Port Domain effected */ | |
115 | + case RSCN_ADDRESS_FORMAT_DOMAIN: | |
116 | if (ns_did.un.b.domain == rscn_did.un.b.domain) | |
117 | goto return_did_out; | |
118 | break; | |
119 | - default: | |
120 | - /* Unknown Identifier in RSCN node */ | |
121 | - lpfc_printf_vlog(vport, KERN_ERR, LOG_DISCOVERY, | |
122 | - "0217 Unknown Identifier in " | |
123 | - "RSCN payload Data: x%x\n", | |
124 | - rscn_did.un.word); | |
125 | - case 3: /* Whole Fabric effected */ | |
126 | + case RSCN_ADDRESS_FORMAT_FABRIC: | |
127 | goto return_did_out; | |
128 | } | |
129 | } | |
130 | @@ -3934,6 +3941,49 @@ lpfc_rscn_recovery_check(struct lpfc_vport *vport) | |
131 | } | |
132 | ||
133 | /** | |
134 | + * lpfc_send_rscn_event: Send an RSCN event to management application. | |
135 | + * @vport: pointer to a host virtual N_Port data structure. | |
136 | + * @cmdiocb: pointer to lpfc command iocb data structure. | |
137 | + * | |
138 | + * lpfc_send_rscn_event sends an RSCN netlink event to management | |
139 | + * applications. | |
140 | + */ | |
141 | +static void | |
142 | +lpfc_send_rscn_event(struct lpfc_vport *vport, | |
143 | + struct lpfc_iocbq *cmdiocb) | |
144 | +{ | |
145 | + struct lpfc_dmabuf *pcmd; | |
146 | + struct Scsi_Host *shost = lpfc_shost_from_vport(vport); | |
147 | + uint32_t *payload_ptr; | |
148 | + uint32_t payload_len; | |
149 | + struct lpfc_rscn_event_header *rscn_event_data; | |
150 | + | |
151 | + pcmd = (struct lpfc_dmabuf *) cmdiocb->context2; | |
152 | + payload_ptr = (uint32_t *) pcmd->virt; | |
153 | + payload_len = be32_to_cpu(*payload_ptr & ~ELS_CMD_MASK); | |
154 | + | |
155 | + rscn_event_data = kmalloc(sizeof(struct lpfc_rscn_event_header) + | |
156 | + payload_len, GFP_KERNEL); | |
157 | + if (!rscn_event_data) { | |
158 | + lpfc_printf_vlog(vport, KERN_ERR, LOG_ELS, | |
159 | + "0147 Failed to allocate memory for RSCN event\n"); | |
160 | + return; | |
161 | + } | |
162 | + rscn_event_data->event_type = FC_REG_RSCN_EVENT; | |
163 | + rscn_event_data->payload_length = payload_len; | |
164 | + memcpy(rscn_event_data->rscn_payload, payload_ptr, | |
165 | + payload_len); | |
166 | + | |
167 | + fc_host_post_vendor_event(shost, | |
168 | + fc_get_event_number(), | |
169 | + sizeof(struct lpfc_els_event_header) + payload_len, | |
170 | + (char *)rscn_event_data, | |
171 | + LPFC_NL_VENDOR_ID); | |
172 | + | |
173 | + kfree(rscn_event_data); | |
174 | +} | |
175 | + | |
176 | +/** | |
177 | * lpfc_els_rcv_rscn: Process an unsolicited rscn iocb. | |
178 | * @vport: pointer to a host virtual N_Port data structure. | |
179 | * @cmdiocb: pointer to lpfc command iocb data structure. | |
180 | @@ -3980,6 +4030,10 @@ lpfc_els_rcv_rscn(struct lpfc_vport *vport, struct lpfc_iocbq *cmdiocb, | |
181 | "0214 RSCN received Data: x%x x%x x%x x%x\n", | |
182 | vport->fc_flag, payload_len, *lp, | |
183 | vport->fc_rscn_id_cnt); | |
184 | + | |
185 | + /* Send an RSCN event to the management application */ | |
186 | + lpfc_send_rscn_event(vport, cmdiocb); | |
187 | + | |
188 | for (i = 0; i < payload_len/sizeof(uint32_t); i++) | |
189 | fc_host_post_event(shost, fc_get_event_number(), | |
190 | FCH_EVT_RSCN, lp[i]); | |
191 | @@ -5532,7 +5586,7 @@ lpfc_send_els_failure_event(struct lpfc_hba *phba, | |
192 | fc_get_event_number(), | |
193 | sizeof(lsrjt_event), | |
194 | (char *)&lsrjt_event, | |
195 | - SCSI_NL_VID_TYPE_PCI | PCI_VENDOR_ID_EMULEX); | |
196 | + LPFC_NL_VENDOR_ID); | |
197 | return; | |
198 | } | |
199 | if ((rspiocbp->iocb.ulpStatus == IOSTAT_NPORT_BSY) || | |
200 | @@ -5550,7 +5604,7 @@ lpfc_send_els_failure_event(struct lpfc_hba *phba, | |
201 | fc_get_event_number(), | |
202 | sizeof(fabric_event), | |
203 | (char *)&fabric_event, | |
204 | - SCSI_NL_VID_TYPE_PCI | PCI_VENDOR_ID_EMULEX); | |
205 | + LPFC_NL_VENDOR_ID); | |
206 | return; | |
207 | } | |
208 | ||
209 | @@ -5568,32 +5622,68 @@ lpfc_send_els_failure_event(struct lpfc_hba *phba, | |
210 | static void | |
211 | lpfc_send_els_event(struct lpfc_vport *vport, | |
212 | struct lpfc_nodelist *ndlp, | |
213 | - uint32_t cmd) | |
214 | + uint32_t *payload) | |
215 | { | |
216 | - struct lpfc_els_event_header els_data; | |
217 | + struct lpfc_els_event_header *els_data = NULL; | |
218 | + struct lpfc_logo_event *logo_data = NULL; | |
219 | struct Scsi_Host *shost = lpfc_shost_from_vport(vport); | |
220 | ||
221 | - els_data.event_type = FC_REG_ELS_EVENT; | |
222 | - switch (cmd) { | |
223 | + if (*payload == ELS_CMD_LOGO) { | |
224 | + logo_data = kmalloc(sizeof(struct lpfc_logo_event), GFP_KERNEL); | |
225 | + if (!logo_data) { | |
226 | + lpfc_printf_vlog(vport, KERN_ERR, LOG_ELS, | |
227 | + "0148 Failed to allocate memory " | |
228 | + "for LOGO event\n"); | |
229 | + return; | |
230 | + } | |
231 | + els_data = &logo_data->header; | |
232 | + } else { | |
233 | + els_data = kmalloc(sizeof(struct lpfc_els_event_header), | |
234 | + GFP_KERNEL); | |
235 | + if (!els_data) { | |
236 | + lpfc_printf_vlog(vport, KERN_ERR, LOG_ELS, | |
237 | + "0149 Failed to allocate memory " | |
238 | + "for ELS event\n"); | |
239 | + return; | |
240 | + } | |
241 | + } | |
242 | + els_data->event_type = FC_REG_ELS_EVENT; | |
243 | + switch (*payload) { | |
244 | case ELS_CMD_PLOGI: | |
245 | - els_data.subcategory = LPFC_EVENT_PLOGI_RCV; | |
246 | + els_data->subcategory = LPFC_EVENT_PLOGI_RCV; | |
247 | break; | |
248 | case ELS_CMD_PRLO: | |
249 | - els_data.subcategory = LPFC_EVENT_PRLO_RCV; | |
250 | + els_data->subcategory = LPFC_EVENT_PRLO_RCV; | |
251 | break; | |
252 | case ELS_CMD_ADISC: | |
253 | - els_data.subcategory = LPFC_EVENT_ADISC_RCV; | |
254 | + els_data->subcategory = LPFC_EVENT_ADISC_RCV; | |
255 | + break; | |
256 | + case ELS_CMD_LOGO: | |
257 | + els_data->subcategory = LPFC_EVENT_LOGO_RCV; | |
258 | + /* Copy the WWPN in the LOGO payload */ | |
259 | + memcpy(logo_data->logo_wwpn, &payload[2], | |
260 | + sizeof(struct lpfc_name)); | |
261 | break; | |
262 | default: | |
263 | return; | |
264 | } | |
265 | - memcpy(els_data.wwpn, &ndlp->nlp_portname, sizeof(struct lpfc_name)); | |
266 | - memcpy(els_data.wwnn, &ndlp->nlp_nodename, sizeof(struct lpfc_name)); | |
267 | - fc_host_post_vendor_event(shost, | |
268 | - fc_get_event_number(), | |
269 | - sizeof(els_data), | |
270 | - (char *)&els_data, | |
271 | - SCSI_NL_VID_TYPE_PCI | PCI_VENDOR_ID_EMULEX); | |
272 | + memcpy(els_data->wwpn, &ndlp->nlp_portname, sizeof(struct lpfc_name)); | |
273 | + memcpy(els_data->wwnn, &ndlp->nlp_nodename, sizeof(struct lpfc_name)); | |
274 | + if (*payload == ELS_CMD_LOGO) { | |
275 | + fc_host_post_vendor_event(shost, | |
276 | + fc_get_event_number(), | |
277 | + sizeof(struct lpfc_logo_event), | |
278 | + (char *)logo_data, | |
279 | + LPFC_NL_VENDOR_ID); | |
280 | + kfree(logo_data); | |
281 | + } else { | |
282 | + fc_host_post_vendor_event(shost, | |
283 | + fc_get_event_number(), | |
284 | + sizeof(struct lpfc_els_event_header), | |
285 | + (char *)els_data, | |
286 | + LPFC_NL_VENDOR_ID); | |
287 | + kfree(els_data); | |
288 | + } | |
289 | ||
290 | return; | |
291 | } | |
292 | @@ -5700,7 +5790,7 @@ lpfc_els_unsol_buffer(struct lpfc_hba *phba, struct lpfc_sli_ring *pring, | |
293 | phba->fc_stat.elsRcvPLOGI++; | |
294 | ndlp = lpfc_plogi_confirm_nport(phba, payload, ndlp); | |
295 | ||
296 | - lpfc_send_els_event(vport, ndlp, cmd); | |
297 | + lpfc_send_els_event(vport, ndlp, payload); | |
298 | if (vport->port_state < LPFC_DISC_AUTH) { | |
299 | if (!(phba->pport->fc_flag & FC_PT2PT) || | |
300 | (phba->pport->fc_flag & FC_PT2PT_PLOGI)) { | |
301 | @@ -5738,6 +5828,7 @@ lpfc_els_unsol_buffer(struct lpfc_hba *phba, struct lpfc_sli_ring *pring, | |
302 | did, vport->port_state, ndlp->nlp_flag); | |
303 | ||
304 | phba->fc_stat.elsRcvLOGO++; | |
305 | + lpfc_send_els_event(vport, ndlp, payload); | |
306 | if (vport->port_state < LPFC_DISC_AUTH) { | |
307 | rjt_err = LSRJT_UNABLE_TPC; | |
308 | break; | |
309 | @@ -5750,7 +5841,7 @@ lpfc_els_unsol_buffer(struct lpfc_hba *phba, struct lpfc_sli_ring *pring, | |
310 | did, vport->port_state, ndlp->nlp_flag); | |
311 | ||
312 | phba->fc_stat.elsRcvPRLO++; | |
313 | - lpfc_send_els_event(vport, ndlp, cmd); | |
314 | + lpfc_send_els_event(vport, ndlp, payload); | |
315 | if (vport->port_state < LPFC_DISC_AUTH) { | |
316 | rjt_err = LSRJT_UNABLE_TPC; | |
317 | break; | |
318 | @@ -5768,7 +5859,7 @@ lpfc_els_unsol_buffer(struct lpfc_hba *phba, struct lpfc_sli_ring *pring, | |
319 | "RCV ADISC: did:x%x/ste:x%x flg:x%x", | |
320 | did, vport->port_state, ndlp->nlp_flag); | |
321 | ||
322 | - lpfc_send_els_event(vport, ndlp, cmd); | |
323 | + lpfc_send_els_event(vport, ndlp, payload); | |
324 | phba->fc_stat.elsRcvADISC++; | |
325 | if (vport->port_state < LPFC_DISC_AUTH) { | |
326 | rjt_err = LSRJT_UNABLE_TPC; | |
327 | diff --git a/drivers/scsi/lpfc/lpfc_hbadisc.c b/drivers/scsi/lpfc/lpfc_hbadisc.c | |
328 | index 3d825ff..502a9a5 100644 | |
329 | --- a/drivers/scsi/lpfc/lpfc_hbadisc.c | |
330 | +++ b/drivers/scsi/lpfc/lpfc_hbadisc.c | |
331 | @@ -391,7 +391,7 @@ lpfc_send_fastpath_evt(struct lpfc_hba *phba, | |
332 | evt_data_size = sizeof(fast_evt_data->un. | |
333 | read_check_error); | |
334 | } else if ((evt_sub_category == LPFC_EVENT_FABRIC_BUSY) || | |
335 | - (evt_sub_category == IOSTAT_NPORT_BSY)) { | |
336 | + (evt_sub_category == LPFC_EVENT_PORT_BUSY)) { | |
337 | evt_data = (char *) &fast_evt_data->un.fabric_evt; | |
338 | evt_data_size = sizeof(fast_evt_data->un.fabric_evt); | |
339 | } else { | |
340 | @@ -428,7 +428,7 @@ lpfc_send_fastpath_evt(struct lpfc_hba *phba, | |
341 | fc_get_event_number(), | |
342 | evt_data_size, | |
343 | evt_data, | |
344 | - SCSI_NL_VID_TYPE_PCI | PCI_VENDOR_ID_EMULEX); | |
345 | + LPFC_NL_VENDOR_ID); | |
346 | ||
347 | lpfc_free_fast_evt(phba, fast_evt_data); | |
348 | return; | |
349 | @@ -635,20 +635,25 @@ lpfc_do_work(void *p) | |
350 | set_user_nice(current, -20); | |
351 | phba->data_flags = 0; | |
352 | ||
353 | - while (1) { | |
354 | + while (!kthread_should_stop()) { | |
355 | /* wait and check worker queue activities */ | |
356 | rc = wait_event_interruptible(phba->work_waitq, | |
357 | (test_and_clear_bit(LPFC_DATA_READY, | |
358 | &phba->data_flags) | |
359 | || kthread_should_stop())); | |
360 | - BUG_ON(rc); | |
361 | - | |
362 | - if (kthread_should_stop()) | |
363 | + /* Signal wakeup shall terminate the worker thread */ | |
364 | + if (rc) { | |
365 | + lpfc_printf_log(phba, KERN_ERR, LOG_ELS, | |
366 | + "0433 Wakeup on signal: rc=x%x\n", rc); | |
367 | break; | |
368 | + } | |
369 | ||
370 | /* Attend pending lpfc data processing */ | |
371 | lpfc_work_done(phba); | |
372 | } | |
373 | + phba->worker_thread = NULL; | |
374 | + lpfc_printf_log(phba, KERN_INFO, LOG_ELS, | |
375 | + "0432 Worker thread stopped.\n"); | |
376 | return 0; | |
377 | } | |
378 | ||
379 | @@ -1895,6 +1900,36 @@ lpfc_disable_node(struct lpfc_vport *vport, struct lpfc_nodelist *ndlp) | |
380 | lpfc_nlp_state_cleanup(vport, ndlp, ndlp->nlp_state, | |
381 | NLP_STE_UNUSED_NODE); | |
382 | } | |
383 | +/** | |
384 | + * lpfc_initialize_node: Initialize all fields of node object. | |
385 | + * @vport: Pointer to Virtual Port object. | |
386 | + * @ndlp: Pointer to FC node object. | |
387 | + * @did: FC_ID of the node. | |
388 | + * This function is always called when node object need to | |
389 | + * be initialized. It initializes all the fields of the node | |
390 | + * object. | |
391 | + **/ | |
392 | +static inline void | |
393 | +lpfc_initialize_node(struct lpfc_vport *vport, struct lpfc_nodelist *ndlp, | |
394 | + uint32_t did) | |
395 | +{ | |
396 | + INIT_LIST_HEAD(&ndlp->els_retry_evt.evt_listp); | |
397 | + INIT_LIST_HEAD(&ndlp->dev_loss_evt.evt_listp); | |
398 | + INIT_LIST_HEAD(&ndlp->els_reauth_evt.evt_listp); | |
399 | + init_timer(&ndlp->nlp_delayfunc); | |
400 | + ndlp->nlp_delayfunc.function = lpfc_els_retry_delay; | |
401 | + ndlp->nlp_delayfunc.data = (unsigned long)ndlp; | |
402 | + init_timer(&ndlp->nlp_reauth_tmr); | |
403 | + ndlp->nlp_reauth_tmr.function = lpfc_reauth_node; | |
404 | + ndlp->nlp_reauth_tmr.data = (unsigned long)ndlp; | |
405 | + ndlp->nlp_DID = did; | |
406 | + ndlp->vport = vport; | |
407 | + ndlp->nlp_sid = NLP_NO_SID; | |
408 | + kref_init(&ndlp->kref); | |
409 | + NLP_INT_NODE_ACT(ndlp); | |
410 | + atomic_set(&ndlp->cmd_pending, 0); | |
411 | + ndlp->cmd_qdepth = LPFC_MAX_TGT_QDEPTH; | |
412 | +} | |
413 | ||
414 | struct lpfc_nodelist * | |
415 | lpfc_enable_node(struct lpfc_vport *vport, struct lpfc_nodelist *ndlp, | |
416 | @@ -1935,21 +1970,7 @@ lpfc_enable_node(struct lpfc_vport *vport, struct lpfc_nodelist *ndlp, | |
417 | /* re-initialize ndlp except of ndlp linked list pointer */ | |
418 | memset((((char *)ndlp) + sizeof (struct list_head)), 0, | |
419 | sizeof (struct lpfc_nodelist) - sizeof (struct list_head)); | |
420 | - INIT_LIST_HEAD(&ndlp->els_retry_evt.evt_listp); | |
421 | - INIT_LIST_HEAD(&ndlp->dev_loss_evt.evt_listp); | |
422 | - INIT_LIST_HEAD(&ndlp->els_reauth_evt.evt_listp); | |
423 | - init_timer(&ndlp->nlp_delayfunc); | |
424 | - ndlp->nlp_delayfunc.function = lpfc_els_retry_delay; | |
425 | - ndlp->nlp_delayfunc.data = (unsigned long)ndlp; | |
426 | - init_timer(&ndlp->nlp_reauth_tmr); | |
427 | - ndlp->nlp_reauth_tmr.function = lpfc_reauth_node; | |
428 | - ndlp->nlp_reauth_tmr.data = (unsigned long)ndlp; | |
429 | - ndlp->nlp_DID = did; | |
430 | - ndlp->vport = vport; | |
431 | - ndlp->nlp_sid = NLP_NO_SID; | |
432 | - /* ndlp management re-initialize */ | |
433 | - kref_init(&ndlp->kref); | |
434 | - NLP_INT_NODE_ACT(ndlp); | |
435 | + lpfc_initialize_node(vport, ndlp, did); | |
436 | ||
437 | spin_unlock_irqrestore(&phba->ndlp_lock, flags); | |
438 | ||
439 | @@ -2561,7 +2582,8 @@ lpfc_disc_list_loopmap(struct lpfc_vport *vport) | |
440 | alpa = lpfcAlpaArray[index]; | |
441 | if ((vport->fc_myDID & 0xff) == alpa) | |
442 | continue; | |
443 | - lpfc_setup_disc_node(vport, alpa); | |
444 | + if (!(phba->link_flag & LS_LOOPBACK_MODE)) | |
445 | + lpfc_setup_disc_node(vport, alpa); | |
446 | } | |
447 | } | |
448 | return; | |
449 | @@ -3204,23 +3226,8 @@ lpfc_nlp_init(struct lpfc_vport *vport, struct lpfc_nodelist *ndlp, | |
450 | uint32_t did) | |
451 | { | |
452 | memset(ndlp, 0, sizeof (struct lpfc_nodelist)); | |
453 | - INIT_LIST_HEAD(&ndlp->els_retry_evt.evt_listp); | |
454 | - INIT_LIST_HEAD(&ndlp->dev_loss_evt.evt_listp); | |
455 | - INIT_LIST_HEAD(&ndlp->els_reauth_evt.evt_listp); | |
456 | - init_timer(&ndlp->nlp_delayfunc); | |
457 | - ndlp->nlp_delayfunc.function = lpfc_els_retry_delay; | |
458 | - ndlp->nlp_delayfunc.data = (unsigned long)ndlp; | |
459 | - init_timer(&ndlp->nlp_reauth_tmr); | |
460 | - ndlp->nlp_reauth_tmr.function = lpfc_reauth_node; | |
461 | - ndlp->nlp_reauth_tmr.data = (unsigned long)ndlp; | |
462 | - ndlp->nlp_DID = did; | |
463 | - ndlp->vport = vport; | |
464 | - ndlp->nlp_sid = NLP_NO_SID; | |
465 | + lpfc_initialize_node(vport, ndlp, did); | |
466 | INIT_LIST_HEAD(&ndlp->nlp_listp); | |
467 | - kref_init(&ndlp->kref); | |
468 | - NLP_INT_NODE_ACT(ndlp); | |
469 | - atomic_set(&ndlp->cmd_pending, 0); | |
470 | - ndlp->cmd_qdepth = LPFC_MAX_TGT_QDEPTH; | |
471 | ||
472 | lpfc_debugfs_disc_trc(vport, LPFC_DISC_TRC_NODE, | |
473 | "node init: did:x%x", | |
474 | diff --git a/drivers/scsi/lpfc/lpfc_hw.h b/drivers/scsi/lpfc/lpfc_hw.h | |
475 | index 9fc50ef..90d0c5a 100644 | |
476 | --- a/drivers/scsi/lpfc/lpfc_hw.h | |
477 | +++ b/drivers/scsi/lpfc/lpfc_hw.h | |
478 | @@ -66,6 +66,9 @@ | |
479 | ||
480 | #define BUF_SZ_4K 4096 | |
481 | ||
482 | +/* vendor ID used in SCSI netlink calls */ | |
483 | +#define LPFC_NL_VENDOR_ID (SCSI_NL_VID_TYPE_PCI | PCI_VENDOR_ID_EMULEX) | |
484 | + | |
485 | /* Common Transport structures and definitions */ | |
486 | ||
487 | union CtRevisionId { | |
488 | @@ -891,6 +894,12 @@ typedef struct _D_ID { /* Structure is in Big Endian format */ | |
489 | } un; | |
490 | } D_ID; | |
491 | ||
492 | +#define RSCN_ADDRESS_FORMAT_PORT 0x0 | |
493 | +#define RSCN_ADDRESS_FORMAT_AREA 0x1 | |
494 | +#define RSCN_ADDRESS_FORMAT_DOMAIN 0x2 | |
495 | +#define RSCN_ADDRESS_FORMAT_FABRIC 0x3 | |
496 | +#define RSCN_ADDRESS_FORMAT_MASK 0x3 | |
497 | + | |
498 | /* | |
499 | * Structure to define all ELS Payload types | |
500 | */ | |
501 | diff --git a/drivers/scsi/lpfc/lpfc_init.c b/drivers/scsi/lpfc/lpfc_init.c | |
502 | index c19c631..c0ea4fc 100644 | |
503 | --- a/drivers/scsi/lpfc/lpfc_init.c | |
504 | +++ b/drivers/scsi/lpfc/lpfc_init.c | |
505 | @@ -879,8 +879,7 @@ lpfc_handle_eratt(struct lpfc_hba *phba) | |
506 | fc_host_post_vendor_event(shost, fc_get_event_number(), | |
507 | sizeof(board_event), | |
508 | (char *) &board_event, | |
509 | - SCSI_NL_VID_TYPE_PCI | |
510 | - | PCI_VENDOR_ID_EMULEX); | |
511 | + LPFC_NL_VENDOR_ID); | |
512 | ||
513 | if (phba->work_hs & HS_FFER6) { | |
514 | /* Re-establishing Link */ | |
515 | @@ -2383,6 +2382,98 @@ lpfc_disable_msix(struct lpfc_hba *phba) | |
516 | } | |
517 | ||
518 | /** | |
519 | + * lpfc_enable_intr: Enable device interrupt. | |
520 | + * @phba: pointer to lpfc hba data structure. | |
521 | + * | |
522 | + * This routine is invoked to enable device interrupt and associate driver's | |
523 | + * interrupt handler(s) to interrupt vector(s). Depends on the interrupt | |
524 | + * mode configured to the driver, the driver will try to fallback from the | |
525 | + * configured interrupt mode to an interrupt mode which is supported by the | |
526 | + * platform, kernel, and device in the order of: MSI-X -> MSI -> IRQ. | |
527 | + * | |
528 | + * Return codes | |
529 | + * 0 - sucessful | |
530 | + * other values - error | |
531 | + **/ | |
532 | +static int | |
533 | +lpfc_enable_intr(struct lpfc_hba *phba) | |
534 | +{ | |
535 | + int retval = 0; | |
536 | + | |
537 | + /* Starting point of configuring interrupt method */ | |
538 | + phba->intr_type = NONE; | |
539 | + | |
540 | + if (phba->cfg_use_msi == 2) { | |
541 | + /* Need to issue conf_port mbox cmd before conf_msi mbox cmd */ | |
542 | + retval = lpfc_sli_config_port(phba, 3); | |
543 | + if (retval) | |
544 | + lpfc_printf_log(phba, KERN_INFO, LOG_INIT, | |
545 | + "0478 Firmware not capable of SLI 3 mode.\n"); | |
546 | + else { | |
547 | + lpfc_printf_log(phba, KERN_INFO, LOG_INIT, | |
548 | + "0479 Firmware capable of SLI 3 mode.\n"); | |
549 | + /* Now, try to enable MSI-X interrupt mode */ | |
550 | + retval = lpfc_enable_msix(phba); | |
551 | + if (!retval) { | |
552 | + phba->intr_type = MSIX; | |
553 | + lpfc_printf_log(phba, KERN_INFO, LOG_INIT, | |
554 | + "0480 enable MSI-X mode.\n"); | |
555 | + } | |
556 | + } | |
557 | + } | |
558 | + | |
559 | + /* Fallback to MSI if MSI-X initialization failed */ | |
560 | + if (phba->cfg_use_msi >= 1 && phba->intr_type == NONE) { | |
561 | + retval = pci_enable_msi(phba->pcidev); | |
562 | + if (!retval) { | |
563 | + phba->intr_type = MSI; | |
564 | + lpfc_printf_log(phba, KERN_INFO, LOG_INIT, | |
565 | + "0481 enable MSI mode.\n"); | |
566 | + } else | |
567 | + lpfc_printf_log(phba, KERN_INFO, LOG_INIT, | |
568 | + "0470 enable IRQ mode.\n"); | |
569 | + } | |
570 | + | |
571 | + /* MSI-X is the only case the doesn't need to call request_irq */ | |
572 | + if (phba->intr_type != MSIX) { | |
573 | + retval = request_irq(phba->pcidev->irq, lpfc_intr_handler, | |
574 | + IRQF_SHARED, LPFC_DRIVER_NAME, phba); | |
575 | + if (retval) { | |
576 | + if (phba->intr_type == MSI) | |
577 | + pci_disable_msi(phba->pcidev); | |
578 | + lpfc_printf_log(phba, KERN_ERR, LOG_INIT, | |
579 | + "0471 Enable interrupt handler " | |
580 | + "failed\n"); | |
581 | + } else if (phba->intr_type != MSI) | |
582 | + phba->intr_type = INTx; | |
583 | + } | |
584 | + | |
585 | + return retval; | |
586 | +} | |
587 | + | |
588 | +/** | |
589 | + * lpfc_disable_intr: Disable device interrupt. | |
590 | + * @phba: pointer to lpfc hba data structure. | |
591 | + * | |
592 | + * This routine is invoked to disable device interrupt and disassociate the | |
593 | + * driver's interrupt handler(s) from interrupt vector(s). Depending on the | |
594 | + * interrupt mode, the driver will release the interrupt vector(s) for the | |
595 | + * message signaled interrupt. | |
596 | + **/ | |
597 | +static void | |
598 | +lpfc_disable_intr(struct lpfc_hba *phba) | |
599 | +{ | |
600 | + if (phba->intr_type == MSIX) | |
601 | + lpfc_disable_msix(phba); | |
602 | + else { | |
603 | + free_irq(phba->pcidev->irq, phba); | |
604 | + if (phba->intr_type == MSI) | |
605 | + pci_disable_msi(phba->pcidev); | |
606 | + } | |
607 | + return; | |
608 | +} | |
609 | + | |
610 | +/** | |
611 | * lpfc_pci_probe_one: lpfc PCI probe func to register device to PCI subsystem. | |
612 | * @pdev: pointer to PCI device | |
613 | * @pid: pointer to PCI device identifier | |
614 | @@ -2634,7 +2725,6 @@ lpfc_pci_probe_one(struct pci_dev *pdev, const struct pci_device_id *pid) | |
615 | lpfc_debugfs_initialize(vport); | |
616 | ||
617 | pci_set_drvdata(pdev, shost); | |
618 | - phba->intr_type = NONE; | |
619 | ||
620 | phba->MBslimaddr = phba->slim_memmap_p; | |
621 | phba->HAregaddr = phba->ctrl_regs_memmap_p + HA_REG_OFFSET; | |
622 | @@ -2643,48 +2733,11 @@ lpfc_pci_probe_one(struct pci_dev *pdev, const struct pci_device_id *pid) | |
623 | phba->HCregaddr = phba->ctrl_regs_memmap_p + HC_REG_OFFSET; | |
624 | ||
625 | /* Configure and enable interrupt */ | |
626 | - if (phba->cfg_use_msi == 2) { | |
627 | - /* Need to issue conf_port mbox cmd before conf_msi mbox cmd */ | |
628 | - error = lpfc_sli_config_port(phba, 3); | |
629 | - if (error) | |
630 | - lpfc_printf_log(phba, KERN_INFO, LOG_INIT, | |
631 | - "0427 Firmware not capable of SLI 3 mode.\n"); | |
632 | - else { | |
633 | - lpfc_printf_log(phba, KERN_INFO, LOG_INIT, | |
634 | - "0426 Firmware capable of SLI 3 mode.\n"); | |
635 | - /* Now, try to enable MSI-X interrupt mode */ | |
636 | - error = lpfc_enable_msix(phba); | |
637 | - if (!error) { | |
638 | - phba->intr_type = MSIX; | |
639 | - lpfc_printf_log(phba, KERN_INFO, LOG_INIT, | |
640 | - "0430 enable MSI-X mode.\n"); | |
641 | - } | |
642 | - } | |
643 | - } | |
644 | - | |
645 | - /* Fallback to MSI if MSI-X initialization failed */ | |
646 | - if (phba->cfg_use_msi >= 1 && phba->intr_type == NONE) { | |
647 | - retval = pci_enable_msi(phba->pcidev); | |
648 | - if (!retval) { | |
649 | - phba->intr_type = MSI; | |
650 | - lpfc_printf_log(phba, KERN_INFO, LOG_INIT, | |
651 | - "0473 enable MSI mode.\n"); | |
652 | - } else | |
653 | - lpfc_printf_log(phba, KERN_INFO, LOG_INIT, | |
654 | - "0452 enable IRQ mode.\n"); | |
655 | - } | |
656 | - | |
657 | - /* MSI-X is the only case the doesn't need to call request_irq */ | |
658 | - if (phba->intr_type != MSIX) { | |
659 | - retval = request_irq(phba->pcidev->irq, lpfc_intr_handler, | |
660 | - IRQF_SHARED, LPFC_DRIVER_NAME, phba); | |
661 | - if (retval) { | |
662 | - lpfc_printf_log(phba, KERN_ERR, LOG_INIT, "0451 Enable " | |
663 | - "interrupt handler failed\n"); | |
664 | - error = retval; | |
665 | - goto out_disable_msi; | |
666 | - } else if (phba->intr_type != MSI) | |
667 | - phba->intr_type = INTx; | |
668 | + error = lpfc_enable_intr(phba); | |
669 | + if (error) { | |
670 | + lpfc_printf_log(phba, KERN_ERR, LOG_INIT, | |
671 | + "0426 Failed to enable interrupt.\n"); | |
672 | + goto out_destroy_port; | |
673 | } | |
674 | ||
675 | phba->dfc_host = lpfcdfc_host_add(pdev, shost, phba); | |
676 | @@ -2731,7 +2784,7 @@ lpfc_pci_probe_one(struct pci_dev *pdev, const struct pci_device_id *pid) | |
677 | fc_host_post_vendor_event(shost, fc_get_event_number(), | |
678 | sizeof(adapter_event), | |
679 | (char *) &adapter_event, | |
680 | - SCSI_NL_VID_TYPE_PCI | PCI_VENDOR_ID_EMULEX); | |
681 | + LPFC_NL_VENDOR_ID); | |
682 | ||
683 | scsi_scan_host(shost); | |
684 | ||
685 | @@ -2747,15 +2800,8 @@ out_free_irq: | |
686 | lpfcdfc_host_del(phba->dfc_host); | |
687 | lpfc_stop_phba_timers(phba); | |
688 | phba->pport->work_port_events = 0; | |
689 | - | |
690 | - if (phba->intr_type == MSIX) | |
691 | - lpfc_disable_msix(phba); | |
692 | - else | |
693 | - free_irq(phba->pcidev->irq, phba); | |
694 | - | |
695 | -out_disable_msi: | |
696 | - if (phba->intr_type == MSI) | |
697 | - pci_disable_msi(phba->pcidev); | |
698 | + lpfc_disable_intr(phba); | |
699 | +out_destroy_port: | |
700 | destroy_port(vport); | |
701 | out_kthread_stop: | |
702 | kthread_stop(phba->worker_thread); | |
703 | @@ -2796,7 +2842,7 @@ out: | |
704 | * @pdev: pointer to PCI device | |
705 | * | |
706 | * This routine is to be registered to the kernel's PCI subsystem. When an | |
707 | - * Emulex HBA is removed from PCI bus. It perform all the necessary cleanup | |
708 | + * Emulex HBA is removed from PCI bus, it performs all the necessary cleanup | |
709 | * for the HBA device to be removed from the PCI subsystem properly. | |
710 | **/ | |
711 | static void __devexit | |
712 | @@ -2804,12 +2850,11 @@ lpfc_pci_remove_one(struct pci_dev *pdev) | |
713 | { | |
714 | struct Scsi_Host *shost = pci_get_drvdata(pdev); | |
715 | struct lpfc_vport *vport = (struct lpfc_vport *) shost->hostdata; | |
716 | + struct lpfc_vport **vports; | |
717 | struct lpfc_hba *phba = vport->phba; | |
718 | + int i; | |
719 | int bars = pci_select_bars(pdev, IORESOURCE_MEM); | |
720 | ||
721 | - /* In case PCI channel permanently disabled, rescan SCSI devices */ | |
722 | - if (pdev->error_state == pci_channel_io_perm_failure) | |
723 | - lpfc_scsi_dev_rescan(phba); | |
724 | lpfcdfc_host_del(phba->dfc_host); | |
725 | phba->dfc_host = NULL; | |
726 | ||
727 | @@ -2822,6 +2867,14 @@ lpfc_pci_remove_one(struct pci_dev *pdev) | |
728 | ||
729 | kthread_stop(phba->worker_thread); | |
730 | ||
731 | + /* Release all the vports against this physical port */ | |
732 | + vports = lpfc_create_vport_work_array(phba); | |
733 | + if (vports != NULL) | |
734 | + for (i = 1; i <= phba->max_vpi && vports[i] != NULL; i++) | |
735 | + fc_vport_terminate(vports[i]->fc_vport); | |
736 | + lpfc_destroy_vport_work_array(phba, vports); | |
737 | + | |
738 | + /* Remove FC host and then SCSI host with the physical port */ | |
739 | fc_remove_host(shost); | |
740 | scsi_remove_host(shost); | |
741 | lpfc_cleanup(vport); | |
742 | @@ -2841,13 +2894,8 @@ lpfc_pci_remove_one(struct pci_dev *pdev) | |
743 | ||
744 | lpfc_debugfs_terminate(vport); | |
745 | ||
746 | - if (phba->intr_type == MSIX) | |
747 | - lpfc_disable_msix(phba); | |
748 | - else { | |
749 | - free_irq(phba->pcidev->irq, phba); | |
750 | - if (phba->intr_type == MSI) | |
751 | - pci_disable_msi(phba->pcidev); | |
752 | - } | |
753 | + /* Disable interrupt */ | |
754 | + lpfc_disable_intr(phba); | |
755 | ||
756 | pci_set_drvdata(pdev, NULL); | |
757 | scsi_host_put(shost); | |
758 | @@ -2879,6 +2927,111 @@ lpfc_pci_remove_one(struct pci_dev *pdev) | |
759 | } | |
760 | ||
761 | /** | |
762 | + * lpfc_pci_suspend_one: lpfc PCI func to suspend device for power management. | |
763 | + * @pdev: pointer to PCI device | |
764 | + * @msg: power management message | |
765 | + * | |
766 | + * This routine is to be registered to the kernel's PCI subsystem to support | |
767 | + * system Power Management (PM). When PM invokes this method, it quiesces the | |
768 | + * device by stopping the driver's worker thread for the device, turning off | |
769 | + * device's interrupt and DMA, and bringing the device offline. Note that as | |
770 | + * driver implements the minimum PM requirements to a power-aware driver's PM | |
771 | + * support for suspend/resume -- all the possible PM messages (SUSPEND, | |
772 | + * HIBERNATE, FREEZE) to the suspend() method call will be treated as SUSPEND | |
773 | + * and the driver will fully reinitialize its device during resume() method | |
774 | + * call, the driver will set device to PCI_D3hot state in PCI config space | |
775 | + * instead of setting it according to the @msg provided by the PM. | |
776 | + * | |
777 | + * Return code | |
778 | + * 0 - driver suspended the device | |
779 | + * Error otherwise | |
780 | + **/ | |
781 | +static int | |
782 | +lpfc_pci_suspend_one(struct pci_dev *pdev, pm_message_t msg) | |
783 | +{ | |
784 | + struct Scsi_Host *shost = pci_get_drvdata(pdev); | |
785 | + struct lpfc_hba *phba = ((struct lpfc_vport *)shost->hostdata)->phba; | |
786 | + | |
787 | + lpfc_printf_log(phba, KERN_INFO, LOG_INIT, | |
788 | + "0473 PCI device Power Management suspend.\n"); | |
789 | + | |
790 | + /* Bring down the device */ | |
791 | + lpfc_offline_prep(phba); | |
792 | + lpfc_offline(phba); | |
793 | + kthread_stop(phba->worker_thread); | |
794 | + | |
795 | + /* Disable interrupt from device */ | |
796 | + lpfc_disable_intr(phba); | |
797 | + | |
798 | + /* Save device state to PCI config space */ | |
799 | + pci_save_state(pdev); | |
800 | + pci_set_power_state(pdev, PCI_D3hot); | |
801 | + | |
802 | + return 0; | |
803 | +} | |
804 | + | |
805 | +/** | |
806 | + * lpfc_pci_resume_one: lpfc PCI func to resume device for power management. | |
807 | + * @pdev: pointer to PCI device | |
808 | + * | |
809 | + * This routine is to be registered to the kernel's PCI subsystem to support | |
810 | + * system Power Management (PM). When PM invokes this method, it restores | |
811 | + * the device's PCI config space state and fully reinitializes the device | |
812 | + * and brings it online. Note that as the driver implements the minimum PM | |
813 | + * requirements to a power-aware driver's PM for suspend/resume -- all | |
814 | + * the possible PM messages (SUSPEND, HIBERNATE, FREEZE) to the suspend() | |
815 | + * method call will be treated as SUSPEND and the driver will fully | |
816 | + * reinitialize its device during resume() method call, the device will be | |
817 | + * set to PCI_D0 directly in PCI config space before restoring the state. | |
818 | + * | |
819 | + * Return code | |
820 | + * 0 - driver resumed the device | |
821 | + * Error otherwise | |
822 | + **/ | |
823 | +static int | |
824 | +lpfc_pci_resume_one(struct pci_dev *pdev) | |
825 | +{ | |
826 | + struct Scsi_Host *shost = pci_get_drvdata(pdev); | |
827 | + struct lpfc_hba *phba = ((struct lpfc_vport *)shost->hostdata)->phba; | |
828 | + int error; | |
829 | + | |
830 | + lpfc_printf_log(phba, KERN_INFO, LOG_INIT, | |
831 | + "0452 PCI device Power Management resume.\n"); | |
832 | + | |
833 | + /* Restore device state from PCI config space */ | |
834 | + pci_set_power_state(pdev, PCI_D0); | |
835 | + pci_restore_state(pdev); | |
836 | + if (pdev->is_busmaster) | |
837 | + pci_set_master(pdev); | |
838 | + | |
839 | + /* Startup the kernel thread for this host adapter. */ | |
840 | + phba->worker_thread = kthread_run(lpfc_do_work, phba, | |
841 | + "lpfc_worker_%d", phba->brd_no); | |
842 | + if (IS_ERR(phba->worker_thread)) { | |
843 | + error = PTR_ERR(phba->worker_thread); | |
844 | + lpfc_printf_log(phba, KERN_ERR, LOG_INIT, | |
845 | + "0434 PM resume failed to start worker " | |
846 | + "thread: error=x%x.\n", error); | |
847 | + return error; | |
848 | + } | |
849 | + | |
850 | + /* Enable interrupt from device */ | |
851 | + error = lpfc_enable_intr(phba); | |
852 | + if (error) { | |
853 | + lpfc_printf_log(phba, KERN_ERR, LOG_INIT, | |
854 | + "0430 PM resume Failed to enable interrupt: " | |
855 | + "error=x%x.\n", error); | |
856 | + return error; | |
857 | + } | |
858 | + | |
859 | + /* Restart HBA and bring it online */ | |
860 | + lpfc_sli_brdrestart(phba); | |
861 | + lpfc_online(phba); | |
862 | + | |
863 | + return 0; | |
864 | +} | |
865 | + | |
866 | +/** | |
867 | * lpfc_io_error_detected: Driver method for handling PCI I/O error detected. | |
868 | * @pdev: pointer to PCI device. | |
869 | * @state: the current PCI connection state. | |
870 | @@ -2921,13 +3074,8 @@ static pci_ers_result_t lpfc_io_error_detected(struct pci_dev *pdev, | |
871 | pring = &psli->ring[psli->fcp_ring]; | |
872 | lpfc_sli_abort_iocb_ring(phba, pring); | |
873 | ||
874 | - if (phba->intr_type == MSIX) | |
875 | - lpfc_disable_msix(phba); | |
876 | - else { | |
877 | - free_irq(phba->pcidev->irq, phba); | |
878 | - if (phba->intr_type == MSI) | |
879 | - pci_disable_msi(phba->pcidev); | |
880 | - } | |
881 | + /* Disable interrupt */ | |
882 | + lpfc_disable_intr(phba); | |
883 | ||
884 | /* Request a slot reset. */ | |
885 | return PCI_ERS_RESULT_NEED_RESET; | |
886 | @@ -2955,7 +3103,7 @@ static pci_ers_result_t lpfc_io_slot_reset(struct pci_dev *pdev) | |
887 | struct Scsi_Host *shost = pci_get_drvdata(pdev); | |
888 | struct lpfc_hba *phba = ((struct lpfc_vport *)shost->hostdata)->phba; | |
889 | struct lpfc_sli *psli = &phba->sli; | |
890 | - int error, retval; | |
891 | + int error; | |
892 | ||
893 | dev_printk(KERN_INFO, &pdev->dev, "recovering from a slot reset.\n"); | |
894 | if (pci_enable_device_mem(pdev)) { | |
895 | @@ -2971,48 +3119,12 @@ static pci_ers_result_t lpfc_io_slot_reset(struct pci_dev *pdev) | |
896 | spin_unlock_irq(&phba->hbalock); | |
897 | ||
898 | /* Enable configured interrupt method */ | |
899 | - phba->intr_type = NONE; | |
900 | - if (phba->cfg_use_msi == 2) { | |
901 | - /* Need to issue conf_port mbox cmd before conf_msi mbox cmd */ | |
902 | - error = lpfc_sli_config_port(phba, 3); | |
903 | - if (error) | |
904 | - lpfc_printf_log(phba, KERN_INFO, LOG_INIT, | |
905 | - "0478 Firmware not capable of SLI 3 mode.\n"); | |
906 | - else { | |
907 | - lpfc_printf_log(phba, KERN_INFO, LOG_INIT, | |
908 | - "0479 Firmware capable of SLI 3 mode.\n"); | |
909 | - /* Now, try to enable MSI-X interrupt mode */ | |
910 | - error = lpfc_enable_msix(phba); | |
911 | - if (!error) { | |
912 | - phba->intr_type = MSIX; | |
913 | - lpfc_printf_log(phba, KERN_INFO, LOG_INIT, | |
914 | - "0480 enable MSI-X mode.\n"); | |
915 | - } | |
916 | - } | |
917 | - } | |
918 | - | |
919 | - /* Fallback to MSI if MSI-X initialization failed */ | |
920 | - if (phba->cfg_use_msi >= 1 && phba->intr_type == NONE) { | |
921 | - retval = pci_enable_msi(phba->pcidev); | |
922 | - if (!retval) { | |
923 | - phba->intr_type = MSI; | |
924 | - lpfc_printf_log(phba, KERN_INFO, LOG_INIT, | |
925 | - "0481 enable MSI mode.\n"); | |
926 | - } else | |
927 | - lpfc_printf_log(phba, KERN_INFO, LOG_INIT, | |
928 | - "0470 enable IRQ mode.\n"); | |
929 | - } | |
930 | - | |
931 | - /* MSI-X is the only case the doesn't need to call request_irq */ | |
932 | - if (phba->intr_type != MSIX) { | |
933 | - retval = request_irq(phba->pcidev->irq, lpfc_intr_handler, | |
934 | - IRQF_SHARED, LPFC_DRIVER_NAME, phba); | |
935 | - if (retval) { | |
936 | - lpfc_printf_log(phba, KERN_ERR, LOG_INIT, | |
937 | - "0471 Enable interrupt handler " | |
938 | - "failed\n"); | |
939 | - } else if (phba->intr_type != MSI) | |
940 | - phba->intr_type = INTx; | |
941 | + error = lpfc_enable_intr(phba); | |
942 | + if (error) { | |
943 | + lpfc_printf_log(phba, KERN_ERR, LOG_INIT, | |
944 | + "0427 Cannot re-enable interrupt after " | |
945 | + "slot reset.\n"); | |
946 | + return PCI_ERS_RESULT_DISCONNECT; | |
947 | } | |
948 | ||
949 | /* Take device offline; this will perform cleanup */ | |
950 | @@ -3130,6 +3242,8 @@ static struct pci_driver lpfc_driver = { | |
951 | .id_table = lpfc_id_table, | |
952 | .probe = lpfc_pci_probe_one, | |
953 | .remove = __devexit_p(lpfc_pci_remove_one), | |
954 | + .suspend = lpfc_pci_suspend_one, | |
955 | + .resume = lpfc_pci_resume_one, | |
956 | .err_handler = &lpfc_err_handler, | |
957 | }; | |
958 | ||
959 | diff --git a/drivers/scsi/lpfc/lpfc_ioctl.c b/drivers/scsi/lpfc/lpfc_ioctl.c | |
960 | index 242bed3..e80d157 100644 | |
961 | --- a/drivers/scsi/lpfc/lpfc_ioctl.c | |
962 | +++ b/drivers/scsi/lpfc/lpfc_ioctl.c | |
963 | @@ -828,10 +828,10 @@ lpfc_ioctl_send_mgmt_cmd(struct lpfc_hba * phba, | |
964 | rc = EIO; | |
965 | ||
966 | send_mgmt_cmd_free_outdmp: | |
967 | - spin_lock_irq(shost->host_lock); | |
968 | dfc_cmd_data_free(phba, outdmp); | |
969 | send_mgmt_cmd_free_indmp: | |
970 | dfc_cmd_data_free(phba, indmp); | |
971 | + spin_lock_irq(shost->host_lock); | |
972 | send_mgmt_cmd_free_bmpvirt: | |
973 | lpfc_mbuf_free(phba, bmp->virt, bmp->phys); | |
974 | send_mgmt_cmd_free_bmp: | |
975 | @@ -2069,14 +2069,14 @@ __dfc_cmd_data_alloc(struct lpfc_hba * phba, | |
976 | cnt)) { | |
977 | goto out; | |
978 | } | |
979 | - | |
980 | + bpl->tus.f.bdeFlags = BUFF_TYPE_BDE_64; | |
981 | pci_dma_sync_single_for_device(phba->pcidev, | |
982 | dmp->dma.phys, LPFC_BPL_SIZE, PCI_DMA_TODEVICE); | |
983 | ||
984 | - } else | |
985 | + } else { | |
986 | memset((uint8_t *)dmp->dma.virt, 0, cnt); | |
987 | - bpl->tus.f.bdeFlags = BUFF_TYPE_BDE_64; | |
988 | - | |
989 | + bpl->tus.f.bdeFlags = BUFF_TYPE_BDE_64I; | |
990 | + } | |
991 | /* build buffer ptr list for IOCB */ | |
992 | bpl->addrLow = le32_to_cpu(putPaddrLow(dmp->dma.phys)); | |
993 | bpl->addrHigh = le32_to_cpu(putPaddrHigh(dmp->dma.phys)); | |
994 | diff --git a/drivers/scsi/lpfc/lpfc_menlo.c b/drivers/scsi/lpfc/lpfc_menlo.c | |
995 | index 60d3df8..aa36c16 100644 | |
996 | --- a/drivers/scsi/lpfc/lpfc_menlo.c | |
997 | +++ b/drivers/scsi/lpfc/lpfc_menlo.c | |
998 | @@ -42,6 +42,7 @@ | |
999 | #include "lpfc_vport.h" | |
1000 | ||
1001 | #define MENLO_CMD_FW_DOWNLOAD 0x00000002 | |
1002 | +#define MENLO_CMD_LOOPBACK 0x00000014 | |
1003 | ||
1004 | static void lpfc_menlo_iocb_timeout_cmpl(struct lpfc_hba *, | |
1005 | struct lpfc_iocbq *, struct lpfc_iocbq *); | |
1006 | @@ -686,6 +687,16 @@ lpfc_menlo_write(struct lpfc_hba *phba, | |
1007 | } else | |
1008 | memcpy((uint8_t *) mlast->dma.virt, buf, count); | |
1009 | ||
1010 | + if (sysfs_menlo->cmdhdr.cmd == MENLO_CMD_LOOPBACK) { | |
1011 | + if (mlast) { | |
1012 | + tmpptr = (uint32_t *)mlast->dma.virt; | |
1013 | + if (*(tmpptr+2)) | |
1014 | + phba->link_flag |= LS_LOOPBACK_MODE; | |
1015 | + else | |
1016 | + phba->link_flag &= ~LS_LOOPBACK_MODE; | |
1017 | + } | |
1018 | + } | |
1019 | + | |
1020 | if (sysfs_menlo->cmdhdr.cmd == MENLO_CMD_FW_DOWNLOAD | |
1021 | && genreq->offset < hdr_offset) { | |
1022 | if (sysfs_menlo->cr.indmp | |
1023 | diff --git a/drivers/scsi/lpfc/lpfc_nl.h b/drivers/scsi/lpfc/lpfc_nl.h | |
1024 | index 1accb5a..991ad53 100644 | |
1025 | --- a/drivers/scsi/lpfc/lpfc_nl.h | |
1026 | +++ b/drivers/scsi/lpfc/lpfc_nl.h | |
1027 | @@ -52,6 +52,13 @@ | |
1028 | * The payload sent via the fc transport is one-way driver->application. | |
1029 | */ | |
1030 | ||
1031 | +/* RSCN event header */ | |
1032 | +struct lpfc_rscn_event_header { | |
1033 | + uint32_t event_type; | |
1034 | + uint32_t payload_length; /* RSCN data length in bytes */ | |
1035 | + uint32_t rscn_payload[]; | |
1036 | +}; | |
1037 | + | |
1038 | /* els event header */ | |
1039 | struct lpfc_els_event_header { | |
1040 | uint32_t event_type; | |
1041 | @@ -65,6 +72,7 @@ struct lpfc_els_event_header { | |
1042 | #define LPFC_EVENT_PRLO_RCV 0x02 | |
1043 | #define LPFC_EVENT_ADISC_RCV 0x04 | |
1044 | #define LPFC_EVENT_LSRJT_RCV 0x08 | |
1045 | +#define LPFC_EVENT_LOGO_RCV 0x10 | |
1046 | ||
1047 | /* special els lsrjt event */ | |
1048 | struct lpfc_lsrjt_event { | |
1049 | @@ -74,6 +82,11 @@ struct lpfc_lsrjt_event { | |
1050 | uint32_t explanation; | |
1051 | }; | |
1052 | ||
1053 | +/* special els logo event */ | |
1054 | +struct lpfc_logo_event { | |
1055 | + struct lpfc_els_event_header header; | |
1056 | + uint8_t logo_wwpn[8]; | |
1057 | +}; | |
1058 | ||
1059 | /* fabric event header */ | |
1060 | struct lpfc_fabric_event_header { | |
1061 | @@ -125,6 +138,7 @@ struct lpfc_scsi_varqueuedepth_event { | |
1062 | /* special case scsi check condition event */ | |
1063 | struct lpfc_scsi_check_condition_event { | |
1064 | struct lpfc_scsi_event_header scsi_event; | |
1065 | + uint8_t opcode; | |
1066 | uint8_t sense_key; | |
1067 | uint8_t asc; | |
1068 | uint8_t ascq; | |
1069 | diff --git a/drivers/scsi/lpfc/lpfc_scsi.c b/drivers/scsi/lpfc/lpfc_scsi.c | |
1070 | index a116875..a7ea952 100644 | |
1071 | --- a/drivers/scsi/lpfc/lpfc_scsi.c | |
1072 | +++ b/drivers/scsi/lpfc/lpfc_scsi.c | |
1073 | @@ -147,12 +147,19 @@ lpfc_send_sdev_queuedepth_change_event(struct lpfc_hba *phba, | |
1074 | return; | |
1075 | } | |
1076 | ||
1077 | -/* | |
1078 | - * This function is called with no lock held when there is a resource | |
1079 | - * error in driver or in firmware. | |
1080 | - */ | |
1081 | +/** | |
1082 | + * lpfc_rampdown_queue_depth: Post RAMP_DOWN_QUEUE event to worker thread. | |
1083 | + * @phba: The Hba for which this call is being executed. | |
1084 | + * | |
1085 | + * This routine is called when there is resource error in driver or firmware. | |
1086 | + * This routine posts WORKER_RAMP_DOWN_QUEUE event for @phba. This routine | |
1087 | + * posts at most 1 event each second. This routine wakes up worker thread of | |
1088 | + * @phba to process the WORKER_RAMP_DOWN_QUEUE event. | |
1089 | + * | |
1090 | + * This routine should be called with no lock held. | |
1091 | + **/ | |
1092 | void | |
1093 | -lpfc_adjust_queue_depth(struct lpfc_hba *phba) | |
1094 | +lpfc_rampdown_queue_depth(struct lpfc_hba *phba) | |
1095 | { | |
1096 | unsigned long flags; | |
1097 | uint32_t evt_posted; | |
1098 | @@ -335,22 +342,6 @@ lpfc_scsi_dev_block(struct lpfc_hba *phba) | |
1099 | lpfc_destroy_vport_work_array(phba, vports); | |
1100 | } | |
1101 | ||
1102 | -void | |
1103 | -lpfc_scsi_dev_rescan(struct lpfc_hba *phba) | |
1104 | -{ | |
1105 | - struct lpfc_vport **vports; | |
1106 | - struct Scsi_Host *shost; | |
1107 | - int i; | |
1108 | - | |
1109 | - vports = lpfc_create_vport_work_array(phba); | |
1110 | - if (vports != NULL) | |
1111 | - for (i = 0; i <= phba->max_vpi && vports[i] != NULL; i++) { | |
1112 | - shost = lpfc_shost_from_vport(vports[i]); | |
1113 | - scsi_scan_host(shost); | |
1114 | - } | |
1115 | - lpfc_destroy_vport_work_array(phba, vports); | |
1116 | -} | |
1117 | - | |
1118 | /* | |
1119 | * This routine allocates a scsi buffer, which contains all the necessary | |
1120 | * information needed to initiate a SCSI I/O. The non-DMAable buffer region | |
1121 | @@ -861,7 +852,8 @@ lpfc_scsi_cmd_iocb_cmpl(struct lpfc_hba *phba, struct lpfc_iocbq *pIocbIn, | |
1122 | ||
1123 | lpfc_cmd->result = pIocbOut->iocb.un.ulpWord[4]; | |
1124 | lpfc_cmd->status = pIocbOut->iocb.ulpStatus; | |
1125 | - atomic_dec(&pnode->cmd_pending); | |
1126 | + if (pnode && NLP_CHK_NODE_ACT(pnode)) | |
1127 | + atomic_dec(&pnode->cmd_pending); | |
1128 | ||
1129 | if (lpfc_cmd->status) { | |
1130 | if (lpfc_cmd->status == IOSTAT_LOCAL_REJECT && | |
1131 | @@ -951,23 +943,31 @@ lpfc_scsi_cmd_iocb_cmpl(struct lpfc_hba *phba, struct lpfc_iocbq *pIocbIn, | |
1132 | time_after(jiffies, lpfc_cmd->start_time + | |
1133 | msecs_to_jiffies(vport->cfg_max_scsicmpl_time))) { | |
1134 | spin_lock_irqsave(sdev->host->host_lock, flags); | |
1135 | - if ((pnode->cmd_qdepth > atomic_read(&pnode->cmd_pending) && | |
1136 | - (atomic_read(&pnode->cmd_pending) > LPFC_MIN_TGT_QDEPTH) && | |
1137 | - ((cmd->cmnd[0] == READ_10) || (cmd->cmnd[0] == WRITE_10)))) | |
1138 | - pnode->cmd_qdepth = atomic_read(&pnode->cmd_pending); | |
1139 | - | |
1140 | - pnode->last_change_time = jiffies; | |
1141 | + if (pnode && NLP_CHK_NODE_ACT(pnode)) { | |
1142 | + if (pnode->cmd_qdepth > | |
1143 | + atomic_read(&pnode->cmd_pending) && | |
1144 | + (atomic_read(&pnode->cmd_pending) > | |
1145 | + LPFC_MIN_TGT_QDEPTH) && | |
1146 | + ((cmd->cmnd[0] == READ_10) || | |
1147 | + (cmd->cmnd[0] == WRITE_10))) | |
1148 | + pnode->cmd_qdepth = | |
1149 | + atomic_read(&pnode->cmd_pending); | |
1150 | + | |
1151 | + pnode->last_change_time = jiffies; | |
1152 | + } | |
1153 | spin_unlock_irqrestore(sdev->host->host_lock, flags); | |
1154 | - } else if ((pnode->cmd_qdepth < LPFC_MAX_TGT_QDEPTH) && | |
1155 | + } else if (pnode && NLP_CHK_NODE_ACT(pnode)) { | |
1156 | + if ((pnode->cmd_qdepth < LPFC_MAX_TGT_QDEPTH) && | |
1157 | time_after(jiffies, pnode->last_change_time + | |
1158 | - msecs_to_jiffies(LPFC_TGTQ_INTERVAL))) { | |
1159 | - spin_lock_irqsave(sdev->host->host_lock, flags); | |
1160 | - pnode->cmd_qdepth += pnode->cmd_qdepth * | |
1161 | - LPFC_TGTQ_RAMPUP_PCENT / 100; | |
1162 | - if (pnode->cmd_qdepth > LPFC_MAX_TGT_QDEPTH) | |
1163 | - pnode->cmd_qdepth = LPFC_MAX_TGT_QDEPTH; | |
1164 | - pnode->last_change_time = jiffies; | |
1165 | - spin_unlock_irqrestore(sdev->host->host_lock, flags); | |
1166 | + msecs_to_jiffies(LPFC_TGTQ_INTERVAL))) { | |
1167 | + spin_lock_irqsave(sdev->host->host_lock, flags); | |
1168 | + pnode->cmd_qdepth += pnode->cmd_qdepth * | |
1169 | + LPFC_TGTQ_RAMPUP_PCENT / 100; | |
1170 | + if (pnode->cmd_qdepth > LPFC_MAX_TGT_QDEPTH) | |
1171 | + pnode->cmd_qdepth = LPFC_MAX_TGT_QDEPTH; | |
1172 | + pnode->last_change_time = jiffies; | |
1173 | + spin_unlock_irqrestore(sdev->host->host_lock, flags); | |
1174 | + } | |
1175 | } | |
1176 | ||
1177 | lpfc_scsi_unprep_dma_buf(phba, lpfc_cmd); | |
1178 | @@ -1363,13 +1363,13 @@ lpfc_queuecommand(struct scsi_cmnd *cmnd, void (*done) (struct scsi_cmnd *)) | |
1179 | cmnd->result = ScsiResult(DID_TRANSPORT_DISRUPTED, 0); | |
1180 | goto out_fail_command; | |
1181 | } | |
1182 | - | |
1183 | - if (atomic_read(&ndlp->cmd_pending) >= ndlp->cmd_qdepth) | |
1184 | + if (vport->cfg_max_scsicmpl_time && | |
1185 | + (atomic_read(&ndlp->cmd_pending) >= ndlp->cmd_qdepth)) | |
1186 | goto out_host_busy; | |
1187 | ||
1188 | lpfc_cmd = lpfc_get_scsi_buf(phba); | |
1189 | if (lpfc_cmd == NULL) { | |
1190 | - lpfc_adjust_queue_depth(phba); | |
1191 | + lpfc_rampdown_queue_depth(phba); | |
1192 | ||
1193 | lpfc_printf_vlog(vport, KERN_INFO, LOG_FCP, | |
1194 | "0707 driver's buffer pool is empty, " | |
1195 | @@ -1397,9 +1397,10 @@ lpfc_queuecommand(struct scsi_cmnd *cmnd, void (*done) (struct scsi_cmnd *)) | |
1196 | atomic_inc(&ndlp->cmd_pending); | |
1197 | err = lpfc_sli_issue_iocb(phba, &phba->sli.ring[psli->fcp_ring], | |
1198 | &lpfc_cmd->cur_iocbq, SLI_IOCB_RET_IOCB); | |
1199 | - if (err) | |
1200 | + if (err) { | |
1201 | + atomic_dec(&ndlp->cmd_pending); | |
1202 | goto out_host_busy_free_buf; | |
1203 | - | |
1204 | + } | |
1205 | if (phba->cfg_poll & ENABLE_FCP_RING_POLLING) { | |
1206 | lpfc_sli_poll_fcp_ring(phba); | |
1207 | if (phba->cfg_poll & DISABLE_FCP_RING_INT) | |
1208 | @@ -1409,7 +1410,6 @@ lpfc_queuecommand(struct scsi_cmnd *cmnd, void (*done) (struct scsi_cmnd *)) | |
1209 | return 0; | |
1210 | ||
1211 | out_host_busy_free_buf: | |
1212 | - atomic_dec(&ndlp->cmd_pending); | |
1213 | lpfc_scsi_unprep_dma_buf(phba, lpfc_cmd); | |
1214 | lpfc_release_scsi_buf(phba, lpfc_cmd); | |
1215 | out_host_busy: | |
1216 | @@ -1575,7 +1575,7 @@ lpfc_device_reset_handler(struct scsi_cmnd *cmnd) | |
1217 | fc_get_event_number(), | |
1218 | sizeof(scsi_event), | |
1219 | (char *)&scsi_event, | |
1220 | - SCSI_NL_VID_TYPE_PCI | PCI_VENDOR_ID_EMULEX); | |
1221 | + LPFC_NL_VENDOR_ID); | |
1222 | ||
1223 | if (!rdata || pnode->nlp_state != NLP_STE_MAPPED_NODE) { | |
1224 | lpfc_printf_vlog(vport, KERN_ERR, LOG_FCP, | |
1225 | @@ -1672,7 +1672,7 @@ lpfc_bus_reset_handler(struct scsi_cmnd *cmnd) | |
1226 | fc_get_event_number(), | |
1227 | sizeof(scsi_event), | |
1228 | (char *)&scsi_event, | |
1229 | - SCSI_NL_VID_TYPE_PCI | PCI_VENDOR_ID_EMULEX); | |
1230 | + LPFC_NL_VENDOR_ID); | |
1231 | ||
1232 | lpfc_block_error_handler(cmnd); | |
1233 | /* | |
1234 | diff --git a/drivers/scsi/lpfc/lpfc_sli.c b/drivers/scsi/lpfc/lpfc_sli.c | |
1235 | index d4341df..ac78493 100644 | |
1236 | --- a/drivers/scsi/lpfc/lpfc_sli.c | |
1237 | +++ b/drivers/scsi/lpfc/lpfc_sli.c | |
1238 | @@ -1985,7 +1985,7 @@ lpfc_sli_handle_fast_ring_event(struct lpfc_hba *phba, | |
1239 | if ((irsp->ulpStatus == IOSTAT_LOCAL_REJECT) && | |
1240 | (irsp->un.ulpWord[4] == IOERR_NO_RESOURCES)) { | |
1241 | spin_unlock_irqrestore(&phba->hbalock, iflag); | |
1242 | - lpfc_adjust_queue_depth(phba); | |
1243 | + lpfc_rampdown_queue_depth(phba); | |
1244 | spin_lock_irqsave(&phba->hbalock, iflag); | |
1245 | } | |
1246 | ||
1247 | @@ -2228,7 +2228,7 @@ lpfc_sli_handle_slow_ring_event(struct lpfc_hba *phba, | |
1248 | if ((irsp->ulpStatus == IOSTAT_LOCAL_REJECT) && | |
1249 | (irsp->un.ulpWord[4] == IOERR_NO_RESOURCES)) { | |
1250 | spin_unlock_irqrestore(&phba->hbalock, iflag); | |
1251 | - lpfc_adjust_queue_depth(phba); | |
1252 | + lpfc_rampdown_queue_depth(phba); | |
1253 | spin_lock_irqsave(&phba->hbalock, iflag); | |
1254 | } | |
1255 | ||
1256 | @@ -2793,7 +2793,6 @@ lpfc_sli_brdrestart(struct lpfc_hba *phba) | |
1257 | { | |
1258 | MAILBOX_t *mb; | |
1259 | struct lpfc_sli *psli; | |
1260 | - uint16_t skip_post; | |
1261 | volatile uint32_t word0; | |
1262 | void __iomem *to_slim; | |
1263 | ||
1264 | @@ -2818,13 +2817,10 @@ lpfc_sli_brdrestart(struct lpfc_hba *phba) | |
1265 | readl(to_slim); /* flush */ | |
1266 | ||
1267 | /* Only skip post after fc_ffinit is completed */ | |
1268 | - if (phba->pport->port_state) { | |
1269 | - skip_post = 1; | |
1270 | + if (phba->pport->port_state) | |
1271 | word0 = 1; /* This is really setting up word1 */ | |
1272 | - } else { | |
1273 | - skip_post = 0; | |
1274 | + else | |
1275 | word0 = 0; /* This is really setting up word1 */ | |
1276 | - } | |
1277 | to_slim = phba->MBslimaddr + sizeof (uint32_t); | |
1278 | writel(*(uint32_t *) mb, to_slim); | |
1279 | readl(to_slim); /* flush */ | |
1280 | @@ -2838,10 +2834,8 @@ lpfc_sli_brdrestart(struct lpfc_hba *phba) | |
1281 | memset(&psli->lnk_stat_offsets, 0, sizeof(psli->lnk_stat_offsets)); | |
1282 | psli->stats_start = get_seconds(); | |
1283 | ||
1284 | - if (skip_post) | |
1285 | - mdelay(100); | |
1286 | - else | |
1287 | - mdelay(2000); | |
1288 | + /* Give the INITFF and Post time to settle. */ | |
1289 | + mdelay(100); | |
1290 | ||
1291 | lpfc_hba_down_post(phba); | |
1292 | ||
1293 | @@ -3087,7 +3081,6 @@ lpfc_sli_config_port(struct lpfc_hba *phba, int sli_mode) | |
1294 | spin_unlock_irq(&phba->hbalock); | |
1295 | phba->pport->port_state = LPFC_VPORT_UNKNOWN; | |
1296 | lpfc_sli_brdrestart(phba); | |
1297 | - msleep(2500); | |
1298 | rc = lpfc_sli_chipset_init(phba); | |
1299 | if (rc) | |
1300 | break; | |
1301 | @@ -4041,7 +4034,7 @@ lpfc_sli_async_event_handler(struct lpfc_hba * phba, | |
1302 | shost = lpfc_shost_from_vport(phba->pport); | |
1303 | fc_host_post_vendor_event(shost, fc_get_event_number(), | |
1304 | sizeof(temp_event_data), (char *) &temp_event_data, | |
1305 | - SCSI_NL_VID_TYPE_PCI | PCI_VENDOR_ID_EMULEX); | |
1306 | + LPFC_NL_VENDOR_ID); | |
1307 | ||
1308 | } | |
1309 | ||
1310 | @@ -5220,6 +5213,10 @@ lpfc_sli_check_eratt(struct lpfc_hba *phba) | |
1311 | { | |
1312 | uint32_t ha_copy; | |
1313 | ||
1314 | + /* If PCI channel is offline, don't process it */ | |
1315 | + if (unlikely(pci_channel_offline(phba->pcidev))) | |
1316 | + return 0; | |
1317 | + | |
1318 | /* If somebody is waiting to handle an eratt, don't process it | |
1319 | * here. The brdkill function will do this. | |
1320 | */ | |
1321 | diff --git a/drivers/scsi/lpfc/lpfc_version.h b/drivers/scsi/lpfc/lpfc_version.h | |
1322 | index 899a337..a42cef2 100644 | |
1323 | --- a/drivers/scsi/lpfc/lpfc_version.h | |
1324 | +++ b/drivers/scsi/lpfc/lpfc_version.h | |
1325 | @@ -18,7 +18,7 @@ | |
1326 | * included with this package. * | |
1327 | *******************************************************************/ | |
1328 | ||
1329 | -#define LPFC_DRIVER_VERSION "8.2.8.1" | |
1330 | +#define LPFC_DRIVER_VERSION "8.2.8.3" | |
1331 | ||
1332 | #define LPFC_DRIVER_NAME "lpfc" | |
1333 | #define LPFC_SP_DRIVER_HANDLER_NAME "lpfc:sp" | |
1334 | diff --git a/drivers/scsi/lpfc/lpfc_vport.c b/drivers/scsi/lpfc/lpfc_vport.c | |
1335 | index 8761840..c3a3f6e 100644 | |
1336 | --- a/drivers/scsi/lpfc/lpfc_vport.c | |
1337 | +++ b/drivers/scsi/lpfc/lpfc_vport.c | |
1338 | @@ -605,6 +605,8 @@ lpfc_vport_delete(struct fc_vport *fc_vport) | |
1339 | spin_unlock_irq(&phba->hbalock); | |
1340 | kfree(vport->vname); | |
1341 | lpfc_debugfs_terminate(vport); | |
1342 | + | |
1343 | + /* Remove FC host and then SCSI host with the vport */ | |
1344 | fc_remove_host(lpfc_shost_from_vport(vport)); | |
1345 | scsi_remove_host(lpfc_shost_from_vport(vport)); | |
1346 | ||
1347 | @@ -689,8 +691,6 @@ lpfc_vport_delete(struct fc_vport *fc_vport) | |
1348 | } | |
1349 | vport->unreg_vpi_cmpl = VPORT_INVAL; | |
1350 | timeout = msecs_to_jiffies(phba->fc_ratov * 2000); | |
1351 | - if (ndlp->nlp_state == NLP_STE_UNUSED_NODE) | |
1352 | - goto skip_logo; | |
1353 | if (!lpfc_issue_els_npiv_logo(vport, ndlp)) | |
1354 | while (vport->unreg_vpi_cmpl == VPORT_INVAL && timeout) | |
1355 | timeout = schedule_timeout(timeout); |