]> git.ipfire.org Git - people/teissler/ipfire-2.x.git/blob - src/patches/suse-2.6.27.31/patches.drivers/lpfc-8.2.8.11-update
Reenabled linux-xen, added patches for Xen Kernel Version 2.6.27.31,
[people/teissler/ipfire-2.x.git] / src / patches / suse-2.6.27.31 / patches.drivers / lpfc-8.2.8.11-update
1 From: Jamie Wellnitz <jamie.wellnitz@emulex.com>
2 Subject: Update lpfc from 8.2.8.10 to 8.2.8.11
3 References: bnc#464662
4
5 Changes from 8.2.8.10 to 8.2.8.11:
6
7 * Changed version number to 8.2.8.11
8 * Implemented host memory based HGP pointers (CR 87327)
9 * Removed de-reference of scsi device after scsi_done is called
10 (CR 87269)
11 * Fixed system panic due to ndlp indirect reference to phba through
12 vport (CR 86370)
13 * Fixed nodelist not empty when unloading the driver after target
14 reboot test (CR 86213)
15 * Fixed a panic in mailbox timeout handler (CR 85228)
16
17 Signed-off-by: Jamie Wellnitz <Jamie.Wellnitz@emulex.com>
18 Signed-off-by: Hannes Reinecke <hare@suse.de>
19
20 --
21 diff -urpN a/drivers/scsi/lpfc/lpfc_attr.c b/drivers/scsi/lpfc/lpfc_attr.c
22 --- a/drivers/scsi/lpfc/lpfc_attr.c 2009-01-08 16:17:47.894022000 -0500
23 +++ b/drivers/scsi/lpfc/lpfc_attr.c 2009-01-08 16:17:48.134023000 -0500
24 @@ -2277,6 +2277,16 @@ LPFC_VPORT_ATTR_R(lun_queue_depth, 30, 1
25 "Max number of FCP commands we can queue to a specific LUN");
26
27 /*
28 +# hostmem_hgp: This parameter is used to force driver to keep host group
29 +# pointers in host memory. When the parameter is set to zero, the driver
30 +# keeps the host group pointers in HBA memory otherwise the host group
31 +# pointers are kept in the host memory. Value range is [0,1]. Default value
32 +# is 0.
33 +*/
34 +LPFC_ATTR_R(hostmem_hgp, 0, 0, 1,
35 + "Use host memory for host group pointers.");
36 +
37 +/*
38 # hba_queue_depth: This parameter is used to limit the number of outstanding
39 # commands per lpfc HBA. Value range is [32,8192]. If this parameter
40 # value is greater than the maximum number of exchanges supported by the HBA,
41 @@ -3191,6 +3201,7 @@ struct device_attribute *lpfc_hba_attrs[
42 &dev_attr_lpfc_sg_seg_cnt,
43 &dev_attr_lpfc_max_scsicmpl_time,
44 &dev_attr_lpfc_stat_data_ctrl,
45 + &dev_attr_lpfc_hostmem_hgp,
46 NULL,
47 };
48
49 @@ -4881,6 +4892,7 @@ lpfc_get_cfgparam(struct lpfc_hba *phba)
50 lpfc_use_msi_init(phba, lpfc_use_msi);
51 lpfc_enable_hba_reset_init(phba, lpfc_enable_hba_reset);
52 lpfc_enable_hba_heartbeat_init(phba, lpfc_enable_hba_heartbeat);
53 + lpfc_hostmem_hgp_init(phba, lpfc_hostmem_hgp);
54 phba->cfg_poll = lpfc_poll;
55 phba->cfg_soft_wwnn = 0L;
56 phba->cfg_soft_wwpn = 0L;
57 diff -urpN a/drivers/scsi/lpfc/lpfc_disc.h b/drivers/scsi/lpfc/lpfc_disc.h
58 --- a/drivers/scsi/lpfc/lpfc_disc.h 2009-01-08 16:17:47.932022000 -0500
59 +++ b/drivers/scsi/lpfc/lpfc_disc.h 2009-01-08 16:17:48.172023000 -0500
60 @@ -101,6 +101,7 @@ struct lpfc_nodelist {
61
62 struct timer_list nlp_delayfunc; /* Used for delayed ELS cmds */
63 struct timer_list nlp_reauth_tmr; /* Used for re-authentication */
64 + struct lpfc_hba *phba;
65 struct fc_rport *rport; /* Corresponding FC transport
66 port structure */
67 struct lpfc_vport *vport;
68 diff -urpN a/drivers/scsi/lpfc/lpfc_els.c b/drivers/scsi/lpfc/lpfc_els.c
69 --- a/drivers/scsi/lpfc/lpfc_els.c 2009-01-08 16:17:47.955022000 -0500
70 +++ b/drivers/scsi/lpfc/lpfc_els.c 2009-01-08 16:17:48.195023000 -0500
71 @@ -6978,7 +6978,7 @@ static void lpfc_fabric_abort_vport(stru
72 void lpfc_fabric_abort_nport(struct lpfc_nodelist *ndlp)
73 {
74 LIST_HEAD(completions);
75 - struct lpfc_hba *phba = ndlp->vport->phba;
76 + struct lpfc_hba *phba = ndlp->phba;
77 struct lpfc_iocbq *tmp_iocb, *piocb;
78 struct lpfc_sli_ring *pring = &phba->sli.ring[LPFC_ELS_RING];
79 IOCB_t *cmd;
80 diff -urpN a/drivers/scsi/lpfc/lpfc.h b/drivers/scsi/lpfc/lpfc.h
81 --- a/drivers/scsi/lpfc/lpfc.h 2009-01-08 16:17:47.958022000 -0500
82 +++ b/drivers/scsi/lpfc/lpfc.h 2009-01-08 16:17:48.198024000 -0500
83 @@ -634,6 +634,7 @@ struct lpfc_hba {
84 uint32_t cfg_enable_hba_reset;
85 uint32_t cfg_enable_hba_heartbeat;
86 uint32_t cfg_pci_max_read;
87 + uint32_t cfg_hostmem_hgp;
88
89 lpfc_vpd_t vpd; /* vital product data */
90
91 diff -urpN a/drivers/scsi/lpfc/lpfc_hbadisc.c b/drivers/scsi/lpfc/lpfc_hbadisc.c
92 --- a/drivers/scsi/lpfc/lpfc_hbadisc.c 2009-01-08 16:17:47.970022000 -0500
93 +++ b/drivers/scsi/lpfc/lpfc_hbadisc.c 2009-01-08 16:17:48.210025000 -0500
94 @@ -120,7 +120,7 @@ lpfc_terminate_rport_io(struct fc_rport
95 return;
96 }
97
98 - phba = ndlp->vport->phba;
99 + phba = ndlp->phba;
100
101 lpfc_debugfs_disc_trc(ndlp->vport, LPFC_DISC_TRC_RPORT,
102 "rport terminate: sid:x%x did:x%x flg:x%x",
103 @@ -1912,9 +1912,14 @@ lpfc_disable_node(struct lpfc_vport *vpo
104 * @vport: Pointer to Virtual Port object.
105 * @ndlp: Pointer to FC node object.
106 * @did: FC_ID of the node.
107 - * This function is always called when node object need to
108 - * be initialized. It initializes all the fields of the node
109 - * object.
110 + *
111 + * This function is always called when the node object needs to be initialized.
112 + * It initializes all the fields of the node object. Although the reference
113 + * to phba from @ndlp can be obtained indirectly through its reference to
114 + * @vport, a direct reference to phba is taken here by @ndlp. This is
115 + * because the life-span of the @ndlp might go beyond the existence of @vport as
116 + * the final release of ndlp is determined by its reference count. And, the
117 + * operation on @ndlp needs the reference to phba.
118 **/
119 static inline void
120 lpfc_initialize_node(struct lpfc_vport *vport, struct lpfc_nodelist *ndlp,
121 @@ -1931,6 +1936,7 @@ lpfc_initialize_node(struct lpfc_vport *
122 ndlp->nlp_reauth_tmr.data = (unsigned long)ndlp;
123 ndlp->nlp_DID = did;
124 ndlp->vport = vport;
125 + ndlp->phba = vport->phba;
126 ndlp->nlp_sid = NLP_NO_SID;
127 kref_init(&ndlp->kref);
128 NLP_INT_NODE_ACT(ndlp);
129 @@ -3268,7 +3274,7 @@ lpfc_nlp_release(struct kref *kref)
130 lpfc_nlp_remove(ndlp->vport, ndlp);
131
132 /* clear the ndlp active flag for all release cases */
133 - phba = ndlp->vport->phba;
134 + phba = ndlp->phba;
135 spin_lock_irqsave(&phba->ndlp_lock, flags);
136 NLP_CLR_NODE_ACT(ndlp);
137 spin_unlock_irqrestore(&phba->ndlp_lock, flags);
138 @@ -3276,7 +3282,7 @@ lpfc_nlp_release(struct kref *kref)
139 /* free ndlp memory for final ndlp release */
140 if (NLP_CHK_FREE_REQ(ndlp)) {
141 kfree(ndlp->lat_data);
142 - mempool_free(ndlp, ndlp->vport->phba->nlp_mem_pool);
143 + mempool_free(ndlp, ndlp->phba->nlp_mem_pool);
144 }
145 }
146
147 @@ -3299,7 +3305,7 @@ lpfc_nlp_get(struct lpfc_nodelist *ndlp)
148 * ndlp reference count that is in the process of being
149 * released.
150 */
151 - phba = ndlp->vport->phba;
152 + phba = ndlp->phba;
153 spin_lock_irqsave(&phba->ndlp_lock, flags);
154 if (!NLP_CHK_NODE_ACT(ndlp) || NLP_CHK_FREE_ACK(ndlp)) {
155 spin_unlock_irqrestore(&phba->ndlp_lock, flags);
156 @@ -3335,7 +3341,7 @@ lpfc_nlp_put(struct lpfc_nodelist *ndlp)
157 "node put: did:x%x flg:x%x refcnt:x%x",
158 ndlp->nlp_DID, ndlp->nlp_flag,
159 atomic_read(&ndlp->kref.refcount));
160 - phba = ndlp->vport->phba;
161 + phba = ndlp->phba;
162 spin_lock_irqsave(&phba->ndlp_lock, flags);
163 /* Check the ndlp memory free acknowledge flag to avoid the
164 * possible race condition that kref_put got invoked again
165 diff -urpN a/drivers/scsi/lpfc/lpfc_init.c b/drivers/scsi/lpfc/lpfc_init.c
166 --- a/drivers/scsi/lpfc/lpfc_init.c 2009-01-08 16:17:48.011024000 -0500
167 +++ b/drivers/scsi/lpfc/lpfc_init.c 2009-01-08 16:17:48.260024000 -0500
168 @@ -3087,8 +3087,6 @@ lpfc_pci_remove_one(struct pci_dev *pdev
169
170 lpfc_free_sysfs_attr(vport);
171
172 - kthread_stop(phba->worker_thread);
173 -
174 /* Release all the vports against this physical port */
175 vports = lpfc_create_vport_work_array(phba);
176 if (vports != NULL)
177 @@ -3106,7 +3104,12 @@ lpfc_pci_remove_one(struct pci_dev *pdev
178 * clears the rings, discards all mailbox commands, and resets
179 * the HBA.
180 */
181 +
182 +	/* HBA interrupt will be disabled after this call */
183 lpfc_sli_hba_down(phba);
184 +	/* Stopping the kthread will trigger work_done one more time */
185 + kthread_stop(phba->worker_thread);
186 + /* Final cleanup of txcmplq and reset the HBA */
187 lpfc_sli_brdrestart(phba);
188
189 lpfc_stop_phba_timers(phba);
190 diff -urpN a/drivers/scsi/lpfc/lpfc_mbox.c b/drivers/scsi/lpfc/lpfc_mbox.c
191 --- a/drivers/scsi/lpfc/lpfc_mbox.c 2009-01-08 16:17:48.031024000 -0500
192 +++ b/drivers/scsi/lpfc/lpfc_mbox.c 2009-01-08 16:17:48.281022000 -0500
193 @@ -1127,9 +1127,6 @@ lpfc_config_port(struct lpfc_hba *phba,
194 mb->un.varCfgPort.pcbLow = putPaddrLow(pdma_addr);
195 mb->un.varCfgPort.pcbHigh = putPaddrHigh(pdma_addr);
196
197 - /* Always Host Group Pointer is in SLIM */
198 - mb->un.varCfgPort.hps = 1;
199 -
200 /* If HBA supports SLI=3 ask for it */
201
202 if (phba->sli_rev == 3 && phba->vpd.sli3Feat.cerbm) {
203 @@ -1208,28 +1205,41 @@ lpfc_config_port(struct lpfc_hba *phba,
204 *
205 */
206
207 - if (phba->sli_rev == 3) {
208 - phba->host_gp = &mb_slim->us.s3.host[0];
209 - phba->hbq_put = &mb_slim->us.s3.hbq_put[0];
210 - } else {
211 - phba->host_gp = &mb_slim->us.s2.host[0];
212 + if (phba->cfg_hostmem_hgp && phba->sli_rev != 3) {
213 + phba->host_gp = &phba->mbox->us.s2.host[0];
214 phba->hbq_put = NULL;
215 - }
216 + offset = (uint8_t *)&phba->mbox->us.s2.host -
217 + (uint8_t *)phba->slim2p.virt;
218 + pdma_addr = phba->slim2p.phys + offset;
219 + phba->pcb->hgpAddrHigh = putPaddrHigh(pdma_addr);
220 + phba->pcb->hgpAddrLow = putPaddrLow(pdma_addr);
221 + } else {
222 + /* Always Host Group Pointer is in SLIM */
223 + mb->un.varCfgPort.hps = 1;
224
225 - /* mask off BAR0's flag bits 0 - 3 */
226 - phba->pcb->hgpAddrLow = (bar_low & PCI_BASE_ADDRESS_MEM_MASK) +
227 - (void __iomem *)phba->host_gp -
228 - (void __iomem *)phba->MBslimaddr;
229 - if (bar_low & PCI_BASE_ADDRESS_MEM_TYPE_64)
230 - phba->pcb->hgpAddrHigh = bar_high;
231 - else
232 - phba->pcb->hgpAddrHigh = 0;
233 - /* write HGP data to SLIM at the required longword offset */
234 - memset(&hgp, 0, sizeof(struct lpfc_hgp));
235 + if (phba->sli_rev == 3) {
236 + phba->host_gp = &mb_slim->us.s3.host[0];
237 + phba->hbq_put = &mb_slim->us.s3.hbq_put[0];
238 + } else {
239 + phba->host_gp = &mb_slim->us.s2.host[0];
240 + phba->hbq_put = NULL;
241 + }
242 +
243 + /* mask off BAR0's flag bits 0 - 3 */
244 + phba->pcb->hgpAddrLow = (bar_low & PCI_BASE_ADDRESS_MEM_MASK) +
245 + (void __iomem *)phba->host_gp -
246 + (void __iomem *)phba->MBslimaddr;
247 + if (bar_low & PCI_BASE_ADDRESS_MEM_TYPE_64)
248 + phba->pcb->hgpAddrHigh = bar_high;
249 + else
250 + phba->pcb->hgpAddrHigh = 0;
251 + /* write HGP data to SLIM at the required longword offset */
252 + memset(&hgp, 0, sizeof(struct lpfc_hgp));
253
254 - for (i=0; i < phba->sli.num_rings; i++) {
255 - lpfc_memcpy_to_slim(phba->host_gp + i, &hgp,
256 + for (i = 0; i < phba->sli.num_rings; i++) {
257 + lpfc_memcpy_to_slim(phba->host_gp + i, &hgp,
258 sizeof(*phba->host_gp));
259 + }
260 }
261
262 /* Setup Port Group offset */
263 diff -urpN a/drivers/scsi/lpfc/lpfc_scsi.c b/drivers/scsi/lpfc/lpfc_scsi.c
264 --- a/drivers/scsi/lpfc/lpfc_scsi.c 2009-01-08 16:17:48.061026000 -0500
265 +++ b/drivers/scsi/lpfc/lpfc_scsi.c 2009-01-08 16:17:48.310022000 -0500
266 @@ -198,14 +198,14 @@ lpfc_rampdown_queue_depth(struct lpfc_hb
267 */
268 static inline void
269 lpfc_rampup_queue_depth(struct lpfc_vport *vport,
270 - struct scsi_device *sdev)
271 + uint32_t queue_depth)
272 {
273 unsigned long flags;
274 struct lpfc_hba *phba = vport->phba;
275 uint32_t evt_posted;
276 atomic_inc(&phba->num_cmd_success);
277
278 - if (vport->cfg_lun_queue_depth <= sdev->queue_depth)
279 + if (vport->cfg_lun_queue_depth <= queue_depth)
280 return;
281 spin_lock_irqsave(&phba->hbalock, flags);
282 if (((phba->last_ramp_up_time + QUEUE_RAMP_UP_INTERVAL) > jiffies) ||
283 @@ -849,10 +849,12 @@ lpfc_scsi_cmd_iocb_cmpl(struct lpfc_hba
284 struct lpfc_nodelist *pnode = rdata->pnode;
285 struct scsi_cmnd *cmd = lpfc_cmd->pCmd;
286 int result;
287 - struct scsi_device *sdev, *tmp_sdev;
288 + struct scsi_device *tmp_sdev;
289 int depth = 0;
290 unsigned long flags;
291 struct lpfc_fast_path_event *fast_path_evt;
292 + struct Scsi_Host *shost = cmd->device->host;
293 + uint32_t queue_depth, scsi_id;
294
295 lpfc_cmd->result = pIocbOut->iocb.un.ulpWord[4];
296 lpfc_cmd->status = pIocbOut->iocb.ulpStatus;
297 @@ -942,11 +944,10 @@ lpfc_scsi_cmd_iocb_cmpl(struct lpfc_hba
298
299 lpfc_update_stats(phba, lpfc_cmd);
300 result = cmd->result;
301 - sdev = cmd->device;
302 if (vport->cfg_max_scsicmpl_time &&
303 time_after(jiffies, lpfc_cmd->start_time +
304 msecs_to_jiffies(vport->cfg_max_scsicmpl_time))) {
305 - spin_lock_irqsave(sdev->host->host_lock, flags);
306 + spin_lock_irqsave(shost->host_lock, flags);
307 if (pnode && NLP_CHK_NODE_ACT(pnode)) {
308 if (pnode->cmd_qdepth >
309 atomic_read(&pnode->cmd_pending) &&
310 @@ -959,22 +960,26 @@ lpfc_scsi_cmd_iocb_cmpl(struct lpfc_hba
311
312 pnode->last_change_time = jiffies;
313 }
314 - spin_unlock_irqrestore(sdev->host->host_lock, flags);
315 + spin_unlock_irqrestore(shost->host_lock, flags);
316 } else if (pnode && NLP_CHK_NODE_ACT(pnode)) {
317 if ((pnode->cmd_qdepth < LPFC_MAX_TGT_QDEPTH) &&
318 time_after(jiffies, pnode->last_change_time +
319 msecs_to_jiffies(LPFC_TGTQ_INTERVAL))) {
320 - spin_lock_irqsave(sdev->host->host_lock, flags);
321 + spin_lock_irqsave(shost->host_lock, flags);
322 pnode->cmd_qdepth += pnode->cmd_qdepth *
323 LPFC_TGTQ_RAMPUP_PCENT / 100;
324 if (pnode->cmd_qdepth > LPFC_MAX_TGT_QDEPTH)
325 pnode->cmd_qdepth = LPFC_MAX_TGT_QDEPTH;
326 pnode->last_change_time = jiffies;
327 - spin_unlock_irqrestore(sdev->host->host_lock, flags);
328 + spin_unlock_irqrestore(shost->host_lock, flags);
329 }
330 }
331
332 lpfc_scsi_unprep_dma_buf(phba, lpfc_cmd);
333 +
334 + /* The sdev is not guaranteed to be valid post scsi_done upcall. */
335 + queue_depth = cmd->device->queue_depth;
336 + scsi_id = cmd->device->id;
337 cmd->scsi_done(cmd);
338
339 if (phba->cfg_poll & ENABLE_FCP_RING_POLLING) {
340 @@ -982,28 +987,28 @@ lpfc_scsi_cmd_iocb_cmpl(struct lpfc_hba
341 * If there is a thread waiting for command completion
342 * wake up the thread.
343 */
344 - spin_lock_irqsave(sdev->host->host_lock, flags);
345 + spin_lock_irqsave(shost->host_lock, flags);
346 lpfc_cmd->pCmd = NULL;
347 if (lpfc_cmd->waitq)
348 wake_up(lpfc_cmd->waitq);
349 - spin_unlock_irqrestore(sdev->host->host_lock, flags);
350 + spin_unlock_irqrestore(shost->host_lock, flags);
351 lpfc_release_scsi_buf(phba, lpfc_cmd);
352 return;
353 }
354
355
356 if (!result)
357 - lpfc_rampup_queue_depth(vport, sdev);
358 + lpfc_rampup_queue_depth(vport, queue_depth);
359
360 if (!result && pnode && NLP_CHK_NODE_ACT(pnode) &&
361 ((jiffies - pnode->last_ramp_up_time) >
362 LPFC_Q_RAMP_UP_INTERVAL * HZ) &&
363 ((jiffies - pnode->last_q_full_time) >
364 LPFC_Q_RAMP_UP_INTERVAL * HZ) &&
365 - (vport->cfg_lun_queue_depth > sdev->queue_depth)) {
366 - shost_for_each_device(tmp_sdev, sdev->host) {
367 + (vport->cfg_lun_queue_depth > queue_depth)) {
368 + shost_for_each_device(tmp_sdev, shost) {
369 if (vport->cfg_lun_queue_depth > tmp_sdev->queue_depth){
370 - if (tmp_sdev->id != sdev->id)
371 + if (tmp_sdev->id != scsi_id)
372 continue;
373 if (tmp_sdev->ordered_tags)
374 scsi_adjust_queue_depth(tmp_sdev,
375 @@ -1019,7 +1024,7 @@ lpfc_scsi_cmd_iocb_cmpl(struct lpfc_hba
376 }
377 lpfc_send_sdev_queuedepth_change_event(phba, vport, pnode,
378 0xFFFFFFFF,
379 - sdev->queue_depth - 1, sdev->queue_depth);
380 +			queue_depth, queue_depth + 1);
381 }
382
383 /*
384 @@ -1030,8 +1035,8 @@ lpfc_scsi_cmd_iocb_cmpl(struct lpfc_hba
385 NLP_CHK_NODE_ACT(pnode)) {
386 pnode->last_q_full_time = jiffies;
387
388 - shost_for_each_device(tmp_sdev, sdev->host) {
389 - if (tmp_sdev->id != sdev->id)
390 + shost_for_each_device(tmp_sdev, shost) {
391 + if (tmp_sdev->id != scsi_id)
392 continue;
393 depth = scsi_track_queue_full(tmp_sdev,
394 tmp_sdev->queue_depth - 1);
395 @@ -1043,7 +1048,7 @@ lpfc_scsi_cmd_iocb_cmpl(struct lpfc_hba
396 * scsi_track_queue_full.
397 */
398 if (depth == -1)
399 - depth = sdev->host->cmd_per_lun;
400 + depth = shost->cmd_per_lun;
401
402 if (depth) {
403 lpfc_printf_vlog(vport, KERN_WARNING, LOG_FCP,
404 @@ -1059,11 +1064,11 @@ lpfc_scsi_cmd_iocb_cmpl(struct lpfc_hba
405 * If there is a thread waiting for command completion
406 * wake up the thread.
407 */
408 - spin_lock_irqsave(sdev->host->host_lock, flags);
409 + spin_lock_irqsave(shost->host_lock, flags);
410 lpfc_cmd->pCmd = NULL;
411 if (lpfc_cmd->waitq)
412 wake_up(lpfc_cmd->waitq);
413 - spin_unlock_irqrestore(sdev->host->host_lock, flags);
414 + spin_unlock_irqrestore(shost->host_lock, flags);
415
416 lpfc_release_scsi_buf(phba, lpfc_cmd);
417 }
418 diff -urpN a/drivers/scsi/lpfc/lpfc_security.c b/drivers/scsi/lpfc/lpfc_security.c
419 --- a/drivers/scsi/lpfc/lpfc_security.c 2009-01-08 16:17:48.067023000 -0500
420 +++ b/drivers/scsi/lpfc/lpfc_security.c 2009-01-08 16:17:48.316022000 -0500
421 @@ -230,7 +230,7 @@ lpfc_reauth_node(unsigned long ptr)
422 struct lpfc_work_evt *evtp = &ndlp->els_reauth_evt;
423
424 ndlp = (struct lpfc_nodelist *) ptr;
425 - phba = ndlp->vport->phba;
426 + phba = ndlp->phba;
427
428 spin_lock_irqsave(&phba->hbalock, flags);
429 if (!list_empty(&evtp->evt_listp)) {
430 diff -urpN a/drivers/scsi/lpfc/lpfc_sli.c b/drivers/scsi/lpfc/lpfc_sli.c
431 --- a/drivers/scsi/lpfc/lpfc_sli.c 2009-01-08 16:17:48.087024000 -0500
432 +++ b/drivers/scsi/lpfc/lpfc_sli.c 2009-01-08 16:17:48.335025000 -0500
433 @@ -3327,6 +3327,21 @@ lpfc_mbox_timeout_handler(struct lpfc_hb
434 struct lpfc_sli *psli = &phba->sli;
435 struct lpfc_sli_ring *pring;
436
437 + /* Check the pmbox pointer first. There is a race condition
438 + * between the mbox timeout handler getting executed in the
439 + * worklist and the mailbox actually completing. When this
440 + * race condition occurs, the mbox_active will be NULL.
441 + */
442 + spin_lock_irq(&phba->hbalock);
443 + if (pmbox == NULL) {
444 + lpfc_printf_log(phba, KERN_WARNING,
445 + LOG_MBOX | LOG_SLI,
446 + "0353 Active Mailbox cleared - mailbox timeout "
447 + "exiting\n");
448 + spin_unlock_irq(&phba->hbalock);
449 + return;
450 + }
451 +
452 /* Mbox cmd <mbxCommand> timeout */
453 lpfc_printf_log(phba, KERN_ERR, LOG_MBOX | LOG_SLI,
454 "0310 Mailbox command x%x timeout Data: x%x x%x x%p\n",
455 @@ -3334,6 +3349,7 @@ lpfc_mbox_timeout_handler(struct lpfc_hb
456 phba->pport->port_state,
457 phba->sli.sli_flag,
458 phba->sli.mbox_active);
459 + spin_unlock_irq(&phba->hbalock);
460
461 /* Setting state unknown so lpfc_sli_abort_iocb_ring
462 * would get IOCB_ERROR from lpfc_sli_issue_iocb, allowing
463 diff -urpN a/drivers/scsi/lpfc/lpfc_version.h b/drivers/scsi/lpfc/lpfc_version.h
464 --- a/drivers/scsi/lpfc/lpfc_version.h 2009-01-08 16:17:48.093023000 -0500
465 +++ b/drivers/scsi/lpfc/lpfc_version.h 2009-01-08 16:17:48.341024000 -0500
466 @@ -1,7 +1,7 @@
467 /*******************************************************************
468 * This file is part of the Emulex Linux Device Driver for *
469 * Fibre Channel Host Bus Adapters. *
470 - * Copyright (C) 2004-2008 Emulex. All rights reserved. *
471 + * Copyright (C) 2004-2009 Emulex. All rights reserved. *
472 * EMULEX and SLI are trademarks of Emulex. *
473 * www.emulex.com *
474 * *
475 @@ -18,7 +18,7 @@
476 * included with this package. *
477 *******************************************************************/
478
479 -#define LPFC_DRIVER_VERSION "8.2.8.10"
480 +#define LPFC_DRIVER_VERSION "8.2.8.11"
481
482 #define LPFC_DRIVER_NAME "lpfc"
483 #define LPFC_SP_DRIVER_HANDLER_NAME "lpfc:sp"
484 @@ -26,4 +26,4 @@
485
486 #define LPFC_MODULE_DESC "Emulex LightPulse Fibre Channel SCSI driver " \
487 LPFC_DRIVER_VERSION
488 -#define LPFC_COPYRIGHT "Copyright(c) 2004-2008 Emulex. All rights reserved."
489 +#define LPFC_COPYRIGHT "Copyright(c) 2004-2009 Emulex. All rights reserved."