/*******************************************************************
 * This file is part of the Emulex Linux Device Driver for         *
 * Fibre Channel Host Bus Adapters.                                *
 * Copyright (C) 2004-2008 Emulex.  All rights reserved.           *
 * EMULEX and SLI are trademarks of Emulex.                        *
 * www.emulex.com                                                  *
 * Portions Copyright (C) 2004-2005 Christoph Hellwig              *
 *                                                                 *
 * This program is free software; you can redistribute it and/or   *
 * modify it under the terms of version 2 of the GNU General       *
 * Public License as published by the Free Software Foundation.    *
 * This program is distributed in the hope that it will be useful. *
 * ALL EXPRESS OR IMPLIED CONDITIONS, REPRESENTATIONS AND          *
 * WARRANTIES, INCLUDING ANY IMPLIED WARRANTY OF MERCHANTABILITY,  *
 * FITNESS FOR A PARTICULAR PURPOSE, OR NON-INFRINGEMENT, ARE      *
 * DISCLAIMED, EXCEPT TO THE EXTENT THAT SUCH DISCLAIMERS ARE HELD *
 * TO BE LEGALLY INVALID.  See the GNU General Public License for  *
 * more details, a copy of which can be found in the file COPYING  *
 * included with this package.                                     *
 *******************************************************************/
#include <linux/pci.h>
#include <linux/interrupt.h>
#include <linux/delay.h>
#include <asm/unaligned.h>

#include <scsi/scsi.h>
#include <scsi/scsi_device.h>
#include <scsi/scsi_eh.h>
#include <scsi/scsi_host.h>
#include <scsi/scsi_tcq.h>
#include <scsi/scsi_transport_fc.h>

#include "lpfc_version.h"
#include "lpfc_hw.h"
#include "lpfc_sli.h"
#include "lpfc_nl.h"
#include "lpfc_disc.h"
#include "lpfc_scsi.h"
#include "lpfc.h"
#include "lpfc_logmsg.h"
#include "lpfc_crtn.h"
#include "lpfc_vport.h"

#define LPFC_RESET_WAIT  2
#define LPFC_ABORT_WAIT  2

int _dump_buf_done;

static char *dif_op_str[] = {
	"SCSI_PROT_NORMAL",
	"SCSI_PROT_READ_INSERT",
	"SCSI_PROT_WRITE_STRIP",
	"SCSI_PROT_READ_STRIP",
	"SCSI_PROT_WRITE_INSERT",
	"SCSI_PROT_READ_PASS",
	"SCSI_PROT_WRITE_PASS",
	"SCSI_PROT_READ_CONVERT",
	"SCSI_PROT_WRITE_CONVERT"
};

static void
lpfc_debug_save_data(struct scsi_cmnd *cmnd)
{
	void *src, *dst;
	struct scatterlist *sgde = scsi_sglist(cmnd);

	if (!_dump_buf_data) {
		printk(KERN_ERR "BLKGRD ERROR %s _dump_buf_data is NULL\n",
			__func__);
		return;
	}

	if (!sgde) {
		printk(KERN_ERR "BLKGRD ERROR: data scatterlist is null\n");
		return;
	}

	dst = (void *) _dump_buf_data;
	while (sgde) {
		src = sg_virt(sgde);
		memcpy(dst, src, sgde->length);
		dst += sgde->length;
		sgde = sg_next(sgde);
	}
}

static void
lpfc_debug_save_dif(struct scsi_cmnd *cmnd)
{
	void *src, *dst;
	struct scatterlist *sgde = scsi_prot_sglist(cmnd);

	if (!_dump_buf_dif) {
		printk(KERN_ERR "BLKGRD ERROR %s _dump_buf_dif is NULL\n",
			__func__);
		return;
	}

	if (!sgde) {
		printk(KERN_ERR "BLKGRD ERROR: prot scatterlist is null\n");
		return;
	}

	dst = _dump_buf_dif;
	while (sgde) {
		src = sg_virt(sgde);
		memcpy(dst, src, sgde->length);
		dst += sgde->length;
		sgde = sg_next(sgde);
	}
}

/**
 * lpfc_update_stats - Update statistical data for the command completion
 * @phba: Pointer to HBA object.
 * @lpfc_cmd: lpfc scsi command object pointer.
 *
 * This function is called on command completion to update the statistical
 * data for the command.
 **/
static void
lpfc_update_stats(struct lpfc_hba *phba, struct lpfc_scsi_buf *lpfc_cmd)
{
	struct lpfc_rport_data *rdata = lpfc_cmd->rdata;
	struct lpfc_nodelist *pnode = rdata->pnode;
	struct scsi_cmnd *cmd = lpfc_cmd->pCmd;
	unsigned long flags;
	struct Scsi_Host *shost = cmd->device->host;
	struct lpfc_vport *vport = (struct lpfc_vport *) shost->hostdata;
	unsigned long latency;
	int i;

	if (cmd->result)
		return;

	latency = jiffies_to_msecs((long)jiffies - (long)lpfc_cmd->start_time);

	spin_lock_irqsave(shost->host_lock, flags);
	if (!vport->stat_data_enabled ||
		vport->stat_data_blocked ||
		!pnode->lat_data ||
		(phba->bucket_type == LPFC_NO_BUCKET)) {
		spin_unlock_irqrestore(shost->host_lock, flags);
		return;
	}

	if (phba->bucket_type == LPFC_LINEAR_BUCKET) {
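		/* Linear buckets: the index is (latency - base) / step,
		 * rounded up, then clamped to the table bounds below.
		 */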
		i = (latency + phba->bucket_step - 1 - phba->bucket_base)/
			phba->bucket_step;
		/* check array subscript bounds */
		if (i < 0)
			i = 0;
		else if (i >= LPFC_MAX_BUCKET_COUNT)
			i = LPFC_MAX_BUCKET_COUNT - 1;
	} else {
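		/* Exponential buckets: bucket i covers latencies up to
		 * base + (2^i * step); fall through to the last bucket
		 * when no boundary matches.
		 */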
		for (i = 0; i < LPFC_MAX_BUCKET_COUNT-1; i++)
			if (latency <= (phba->bucket_base +
				((1<<i)*phba->bucket_step)))
				break;
	}

	pnode->lat_data[i].cmd_count++;
	spin_unlock_irqrestore(shost->host_lock, flags);
}

/**
 * lpfc_send_sdev_queuedepth_change_event - Posts a queuedepth change event
 * @phba: Pointer to HBA context object.
 * @vport: Pointer to vport object.
 * @ndlp: Pointer to FC node associated with the target.
 * @lun: Lun number of the scsi device.
 * @old_val: Old value of the queue depth.
 * @new_val: New value of the queue depth.
 *
 * This function sends an event to the mgmt application indicating
 * there is a change in the scsi device queue depth.
 **/
static void
lpfc_send_sdev_queuedepth_change_event(struct lpfc_hba *phba,
		struct lpfc_vport *vport,
		struct lpfc_nodelist *ndlp,
		uint32_t lun,
		uint32_t old_val,
		uint32_t new_val)
{
	struct lpfc_fast_path_event *fast_path_evt;
	unsigned long flags;

	fast_path_evt = lpfc_alloc_fast_evt(phba);
	if (!fast_path_evt)
		return;

	fast_path_evt->un.queue_depth_evt.scsi_event.event_type =
		FC_REG_SCSI_EVENT;
	fast_path_evt->un.queue_depth_evt.scsi_event.subcategory =
		LPFC_EVENT_VARQUEDEPTH;

	/* Report all luns with change in queue depth */
	fast_path_evt->un.queue_depth_evt.scsi_event.lun = lun;
	if (ndlp && NLP_CHK_NODE_ACT(ndlp)) {
		memcpy(&fast_path_evt->un.queue_depth_evt.scsi_event.wwpn,
			&ndlp->nlp_portname, sizeof(struct lpfc_name));
		memcpy(&fast_path_evt->un.queue_depth_evt.scsi_event.wwnn,
			&ndlp->nlp_nodename, sizeof(struct lpfc_name));
	}

	fast_path_evt->un.queue_depth_evt.oldval = old_val;
	fast_path_evt->un.queue_depth_evt.newval = new_val;
	fast_path_evt->vport = vport;

	fast_path_evt->work_evt.evt = LPFC_EVT_FASTPATH_MGMT_EVT;
	spin_lock_irqsave(&phba->hbalock, flags);
	list_add_tail(&fast_path_evt->work_evt.evt_listp, &phba->work_list);
	spin_unlock_irqrestore(&phba->hbalock, flags);
	lpfc_worker_wake_up(phba);

	return;
}

/**
 * lpfc_rampdown_queue_depth - Post RAMP_DOWN_QUEUE event to worker thread
 * @phba: The HBA for which this call is being executed.
 *
 * This routine is called when there is a resource error in the driver or
 * firmware.  It posts a WORKER_RAMP_DOWN_QUEUE event for @phba, at most
 * once per second, and wakes up the worker thread of @phba to process
 * the event.
 *
 * This routine should be called with no lock held.
 **/
void
lpfc_rampdown_queue_depth(struct lpfc_hba *phba)
{
	unsigned long flags;
	uint32_t evt_posted;

	spin_lock_irqsave(&phba->hbalock, flags);
	atomic_inc(&phba->num_rsrc_err);
	phba->last_rsrc_error_time = jiffies;

	if ((phba->last_ramp_down_time + QUEUE_RAMP_DOWN_INTERVAL) > jiffies) {
		spin_unlock_irqrestore(&phba->hbalock, flags);
		return;
	}

	phba->last_ramp_down_time = jiffies;

	spin_unlock_irqrestore(&phba->hbalock, flags);

	spin_lock_irqsave(&phba->pport->work_port_lock, flags);
	evt_posted = phba->pport->work_port_events & WORKER_RAMP_DOWN_QUEUE;
	if (!evt_posted)
		phba->pport->work_port_events |= WORKER_RAMP_DOWN_QUEUE;
	spin_unlock_irqrestore(&phba->pport->work_port_lock, flags);

	if (!evt_posted)
		lpfc_worker_wake_up(phba);
	return;
}

/**
 * lpfc_rampup_queue_depth - Post RAMP_UP_QUEUE event for worker thread
 * @vport: The virtual port for which this call is being executed.
 * @queue_depth: Current queue depth of the scsi device.
 *
 * This routine posts a WORKER_RAMP_UP_QUEUE event for the @vport phba, at
 * most once every 5 minutes after last_ramp_up_time or
 * last_rsrc_error_time, and wakes up the worker thread to process the
 * event.
 *
 * This routine should be called with no lock held.
 **/
static inline void
lpfc_rampup_queue_depth(struct lpfc_vport *vport,
			uint32_t queue_depth)
{
	unsigned long flags;
	struct lpfc_hba *phba = vport->phba;
	uint32_t evt_posted;
	atomic_inc(&phba->num_cmd_success);

	if (vport->cfg_lun_queue_depth <= queue_depth)
		return;
	spin_lock_irqsave(&phba->hbalock, flags);
	if (((phba->last_ramp_up_time + QUEUE_RAMP_UP_INTERVAL) > jiffies) ||
	    ((phba->last_rsrc_error_time + QUEUE_RAMP_UP_INTERVAL) > jiffies)) {
		spin_unlock_irqrestore(&phba->hbalock, flags);
		return;
	}
	phba->last_ramp_up_time = jiffies;
	spin_unlock_irqrestore(&phba->hbalock, flags);

	spin_lock_irqsave(&phba->pport->work_port_lock, flags);
	evt_posted = phba->pport->work_port_events & WORKER_RAMP_UP_QUEUE;
	if (!evt_posted)
		phba->pport->work_port_events |= WORKER_RAMP_UP_QUEUE;
	spin_unlock_irqrestore(&phba->pport->work_port_lock, flags);

	if (!evt_posted)
		lpfc_worker_wake_up(phba);
	return;
}

/**
 * lpfc_ramp_down_queue_handler - WORKER_RAMP_DOWN_QUEUE event handler
 * @phba: The HBA for which this call is being executed.
 *
 * This routine is called to process the WORKER_RAMP_DOWN_QUEUE event for
 * the worker thread.  It reduces the queue depth of every scsi device on
 * each vport associated with @phba.
 **/
void
lpfc_ramp_down_queue_handler(struct lpfc_hba *phba)
{
	struct lpfc_vport **vports;
	struct Scsi_Host *shost;
	struct scsi_device *sdev;
	unsigned long new_queue_depth, old_queue_depth;
	unsigned long num_rsrc_err, num_cmd_success;
	int i;
	struct lpfc_rport_data *rdata;

	num_rsrc_err = atomic_read(&phba->num_rsrc_err);
	num_cmd_success = atomic_read(&phba->num_cmd_success);

	vports = lpfc_create_vport_work_array(phba);
	if (vports != NULL)
		for (i = 0; i <= phba->max_vpi && vports[i] != NULL; i++) {
			shost = lpfc_shost_from_vport(vports[i]);
			shost_for_each_device(sdev, shost) {
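				/*
				 * Scale the queue depth down in proportion
				 * to the share of resource errors seen since
				 * the last ramp event, dropping by at least
				 * one.
				 */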
				new_queue_depth =
					sdev->queue_depth * num_rsrc_err /
					(num_rsrc_err + num_cmd_success);
				if (!new_queue_depth)
					new_queue_depth = sdev->queue_depth - 1;
				else
					new_queue_depth = sdev->queue_depth -
								new_queue_depth;
				old_queue_depth = sdev->queue_depth;
				if (sdev->ordered_tags)
					scsi_adjust_queue_depth(sdev,
							MSG_ORDERED_TAG,
							new_queue_depth);
				else
					scsi_adjust_queue_depth(sdev,
							MSG_SIMPLE_TAG,
							new_queue_depth);
				rdata = sdev->hostdata;
				if (rdata)
					lpfc_send_sdev_queuedepth_change_event(
						phba, vports[i],
						rdata->pnode,
						sdev->lun, old_queue_depth,
						new_queue_depth);
			}
		}
	lpfc_destroy_vport_work_array(phba, vports);
	atomic_set(&phba->num_rsrc_err, 0);
	atomic_set(&phba->num_cmd_success, 0);
}

/**
 * lpfc_ramp_up_queue_handler - WORKER_RAMP_UP_QUEUE event handler
 * @phba: The HBA for which this call is being executed.
 *
 * This routine is called to process the WORKER_RAMP_UP_QUEUE event for the
 * worker thread.  It increases the queue depth of every scsi device on each
 * vport associated with @phba by 1, and resets the @phba num_rsrc_err and
 * num_cmd_success counters to zero.
 **/
void
lpfc_ramp_up_queue_handler(struct lpfc_hba *phba)
{
	struct lpfc_vport **vports;
	struct Scsi_Host *shost;
	struct scsi_device *sdev;
	int i;
	struct lpfc_rport_data *rdata;

	vports = lpfc_create_vport_work_array(phba);
	if (vports != NULL)
		for (i = 0; i <= phba->max_vpi && vports[i] != NULL; i++) {
			shost = lpfc_shost_from_vport(vports[i]);
			shost_for_each_device(sdev, shost) {
				if (vports[i]->cfg_lun_queue_depth <=
				    sdev->queue_depth)
					continue;
				if (sdev->ordered_tags)
					scsi_adjust_queue_depth(sdev,
							MSG_ORDERED_TAG,
							sdev->queue_depth+1);
				else
					scsi_adjust_queue_depth(sdev,
							MSG_SIMPLE_TAG,
							sdev->queue_depth+1);
				rdata = sdev->hostdata;
				if (rdata)
					lpfc_send_sdev_queuedepth_change_event(
						phba, vports[i],
						rdata->pnode,
						sdev->lun,
						sdev->queue_depth - 1,
						sdev->queue_depth);
			}
		}
	lpfc_destroy_vport_work_array(phba, vports);
	atomic_set(&phba->num_rsrc_err, 0);
	atomic_set(&phba->num_cmd_success, 0);
}

/**
 * lpfc_scsi_dev_block - set all scsi hosts to block state
 * @phba: Pointer to HBA context object.
 *
 * This function walks the vport list and sets each SCSI host to block state
 * by invoking fc_remote_port_delete() on every remote port.  It is invoked
 * by EEH when the device's PCI slot has been permanently disabled.
 **/
void
lpfc_scsi_dev_block(struct lpfc_hba *phba)
{
	struct lpfc_vport **vports;
	struct Scsi_Host *shost;
	struct scsi_device *sdev;
	struct fc_rport *rport;
	int i;

	vports = lpfc_create_vport_work_array(phba);
	if (vports != NULL)
		for (i = 0; i <= phba->max_vpi && vports[i] != NULL; i++) {
			shost = lpfc_shost_from_vport(vports[i]);
			shost_for_each_device(sdev, shost) {
				rport = starget_to_rport(scsi_target(sdev));
				fc_remote_port_delete(rport);
			}
		}
	lpfc_destroy_vport_work_array(phba, vports);
}

/**
 * lpfc_new_scsi_buf - Scsi buffer allocator
 * @vport: The virtual port for which this call is being executed.
 *
 * This routine allocates a scsi buffer, which contains all the necessary
 * information needed to initiate a SCSI I/O.  The non-DMAable buffer region
 * contains information to build the IOCB.  The DMAable region contains
 * memory for the FCP CMND, FCP RSP, and the initial BPL.  In addition to
 * allocating memory, the FCP CMND and FCP RSP BDEs are set up in the BPL
 * and the BPL BDE is set up in the IOCB.
 *
 * Return codes:
 *   NULL - Error
 *   Pointer to lpfc_scsi_buf data structure - Success
 **/
static struct lpfc_scsi_buf *
lpfc_new_scsi_buf(struct lpfc_vport *vport)
{
	struct lpfc_hba *phba = vport->phba;
	struct lpfc_scsi_buf *psb;
	struct ulp_bde64 *bpl;
	IOCB_t *iocb;
	dma_addr_t pdma_phys_fcp_cmd;
	dma_addr_t pdma_phys_fcp_rsp;
	dma_addr_t pdma_phys_bpl;
	uint16_t iotag;

	psb = kzalloc(sizeof(struct lpfc_scsi_buf), GFP_KERNEL);
	if (!psb)
		return NULL;

	/*
	 * Get memory from the pci pool to map the virt space to pci bus space
	 * for an I/O.  The DMA buffer includes space for the struct fcp_cmnd,
	 * struct fcp_rsp and the number of bde's necessary to support the
	 * sg_tablesize.
	 */
	psb->data = pci_pool_alloc(phba->lpfc_scsi_dma_buf_pool, GFP_KERNEL,
				   &psb->dma_handle);
	if (!psb->data) {
		kfree(psb);
		return NULL;
	}

	/* Initialize virtual ptrs to dma_buf region. */
	memset(psb->data, 0, phba->cfg_sg_dma_buf_size);

	/* Allocate iotag for psb->cur_iocbq. */
	iotag = lpfc_sli_next_iotag(phba, &psb->cur_iocbq);
	if (iotag == 0) {
		pci_pool_free(phba->lpfc_scsi_dma_buf_pool,
			      psb->data, psb->dma_handle);
		kfree(psb);
		return NULL;
	}
	psb->cur_iocbq.iocb_flag |= LPFC_IO_FCP;

	psb->fcp_cmnd = psb->data;
	psb->fcp_rsp = psb->data + sizeof(struct fcp_cmnd);
	psb->fcp_bpl = psb->data + sizeof(struct fcp_cmnd) +
		       sizeof(struct fcp_rsp);

	/* Initialize local short-hand pointers. */
	bpl = psb->fcp_bpl;
	pdma_phys_fcp_cmd = psb->dma_handle;
	pdma_phys_fcp_rsp = psb->dma_handle + sizeof(struct fcp_cmnd);
	pdma_phys_bpl = psb->dma_handle + sizeof(struct fcp_cmnd) +
			sizeof(struct fcp_rsp);

	/*
	 * The first two bdes are the FCP_CMD and FCP_RSP.  The balance are sg
	 * list bdes.  Initialize the first two and leave the rest for
	 * queuecommand.
	 */
	bpl[0].addrHigh = le32_to_cpu(putPaddrHigh(pdma_phys_fcp_cmd));
	bpl[0].addrLow = le32_to_cpu(putPaddrLow(pdma_phys_fcp_cmd));
	bpl[0].tus.f.bdeSize = sizeof(struct fcp_cmnd);
	bpl[0].tus.f.bdeFlags = BUFF_TYPE_BDE_64;
	bpl[0].tus.w = le32_to_cpu(bpl[0].tus.w);

	/* Setup the physical region for the FCP RSP */
	bpl[1].addrHigh = le32_to_cpu(putPaddrHigh(pdma_phys_fcp_rsp));
	bpl[1].addrLow = le32_to_cpu(putPaddrLow(pdma_phys_fcp_rsp));
	bpl[1].tus.f.bdeSize = sizeof(struct fcp_rsp);
	bpl[1].tus.f.bdeFlags = BUFF_TYPE_BDE_64;
	bpl[1].tus.w = le32_to_cpu(bpl[1].tus.w);

	/*
	 * Since the IOCB for the FCP I/O is built into this lpfc_scsi_buf,
	 * initialize it with all known data now.
	 */
	iocb = &psb->cur_iocbq.iocb;
	iocb->un.fcpi64.bdl.ulpIoTag32 = 0;
	if ((phba->sli_rev == 3) &&
	    !(phba->sli3_options & LPFC_SLI3_BG_ENABLED)) {
		/* fill in immediate fcp command BDE */
		iocb->un.fcpi64.bdl.bdeFlags = BUFF_TYPE_BDE_IMMED;
		iocb->un.fcpi64.bdl.bdeSize = sizeof(struct fcp_cmnd);
		iocb->un.fcpi64.bdl.addrLow = offsetof(IOCB_t,
						       unsli3.fcp_ext.icd);
		iocb->un.fcpi64.bdl.addrHigh = 0;
		iocb->ulpBdeCount = 0;
		iocb->ulpLe = 0;
		/* fill in response BDE */
		iocb->unsli3.fcp_ext.rbde.tus.f.bdeFlags = BUFF_TYPE_BDE_64;
		iocb->unsli3.fcp_ext.rbde.tus.f.bdeSize =
						sizeof(struct fcp_rsp);
		iocb->unsli3.fcp_ext.rbde.addrLow =
						putPaddrLow(pdma_phys_fcp_rsp);
		iocb->unsli3.fcp_ext.rbde.addrHigh =
						putPaddrHigh(pdma_phys_fcp_rsp);
	} else {
		iocb->un.fcpi64.bdl.bdeFlags = BUFF_TYPE_BLP_64;
		iocb->un.fcpi64.bdl.bdeSize = (2 * sizeof(struct ulp_bde64));
		iocb->un.fcpi64.bdl.addrLow = putPaddrLow(pdma_phys_bpl);
		iocb->un.fcpi64.bdl.addrHigh = putPaddrHigh(pdma_phys_bpl);
		iocb->ulpBdeCount = 1;
		iocb->ulpLe = 1;
	}
	iocb->ulpClass = CLASS3;

	return psb;
}

/**
 * lpfc_get_scsi_buf - Get a scsi buffer from lpfc_scsi_buf_list list of Hba
 * @phba: The HBA for which this call is being executed.
 *
 * This routine removes a scsi buffer from the head of the @phba
 * lpfc_scsi_buf_list list and returns it to the caller.
 *
 * Return codes:
 *   NULL - Error
 *   Pointer to lpfc_scsi_buf - Success
 **/
static struct lpfc_scsi_buf*
lpfc_get_scsi_buf(struct lpfc_hba *phba)
{
	struct lpfc_scsi_buf *lpfc_cmd = NULL;
	struct list_head *scsi_buf_list = &phba->lpfc_scsi_buf_list;
	unsigned long iflag = 0;

	spin_lock_irqsave(&phba->scsi_buf_list_lock, iflag);
	list_remove_head(scsi_buf_list, lpfc_cmd, struct lpfc_scsi_buf, list);
	if (lpfc_cmd) {
		lpfc_cmd->seg_cnt = 0;
		lpfc_cmd->nonsg_phys = 0;
		lpfc_cmd->prot_seg_cnt = 0;
	}
	spin_unlock_irqrestore(&phba->scsi_buf_list_lock, iflag);
	return lpfc_cmd;
}

/**
 * lpfc_release_scsi_buf - Return a scsi buffer back to hba's lpfc_scsi_buf_list
 * @phba: The HBA for which this call is being executed.
 * @psb: The scsi buffer which is being released.
 *
 * This routine releases the @psb scsi buffer by adding it to the tail of the
 * @phba lpfc_scsi_buf_list list.
 **/
static void
lpfc_release_scsi_buf(struct lpfc_hba *phba, struct lpfc_scsi_buf *psb)
{
	unsigned long iflag = 0;

	spin_lock_irqsave(&phba->scsi_buf_list_lock, iflag);
	psb->pCmd = NULL;
	list_add_tail(&psb->list, &phba->lpfc_scsi_buf_list);
	spin_unlock_irqrestore(&phba->scsi_buf_list_lock, iflag);
}

/**
 * lpfc_scsi_prep_dma_buf - Routine to do DMA mapping for scsi buffer
 * @phba: The HBA for which this call is being executed.
 * @lpfc_cmd: The scsi buffer which is going to be mapped.
 *
 * This routine does the pci dma mapping for the scatter-gather list of the
 * scsi cmnd field of @lpfc_cmd.  It scans through the sg elements, formats
 * the BDEs, and initializes all IOCB fields which are dependent on the scsi
 * command request buffer.
 *
 * Return codes:
 *   1 - Error
 *   0 - Success
 **/
static int
lpfc_scsi_prep_dma_buf(struct lpfc_hba *phba, struct lpfc_scsi_buf *lpfc_cmd)
{
	struct scsi_cmnd *scsi_cmnd = lpfc_cmd->pCmd;
	struct scatterlist *sgel = NULL;
	struct fcp_cmnd *fcp_cmnd = lpfc_cmd->fcp_cmnd;
	struct ulp_bde64 *bpl = lpfc_cmd->fcp_bpl;
	IOCB_t *iocb_cmd = &lpfc_cmd->cur_iocbq.iocb;
	struct ulp_bde64 *data_bde = iocb_cmd->unsli3.fcp_ext.dbde;
	dma_addr_t physaddr;
	uint32_t num_bde = 0;
	int nseg, datadir = scsi_cmnd->sc_data_direction;

	/*
	 * There are three possibilities here - use scatter-gather segment, use
	 * the single mapping, or neither.  Start the lpfc command prep by
	 * bumping the bpl beyond the fcp_cmnd and fcp_rsp regions to the first
	 * data bde entry.
	 */
	bpl += 2;
	if (scsi_sg_count(scsi_cmnd)) {
		/*
		 * The driver stores the segment count returned from pci_map_sg
		 * because this is a count of dma-mappings used to map the
		 * use_sg pages.  They are not guaranteed to be the same for
		 * those architectures that implement an IOMMU.
		 */

		nseg = dma_map_sg(&phba->pcidev->dev, scsi_sglist(scsi_cmnd),
				  scsi_sg_count(scsi_cmnd), datadir);
		if (unlikely(!nseg))
			return 1;

		lpfc_cmd->seg_cnt = nseg;
		if (lpfc_cmd->seg_cnt > phba->cfg_sg_seg_cnt) {
			printk(KERN_ERR "%s: Too many sg segments from "
			       "dma_map_sg.  Config %d, seg_cnt %d\n",
			       __func__, phba->cfg_sg_seg_cnt,
			       lpfc_cmd->seg_cnt);
			scsi_dma_unmap(scsi_cmnd);
			return 1;
		}

		/*
		 * The driver established a maximum scatter-gather segment
		 * count during probe that limits the number of sg elements
		 * in any single scsi command.  Just run through the seg_cnt
		 * and format the bde's.
		 * When using SLI-3 the driver will try to fit all the BDEs
		 * into the IOCB.  If it can't then the BDEs get added to a
		 * BPL as it does for SLI-2 mode.
		 */
		scsi_for_each_sg(scsi_cmnd, sgel, nseg, num_bde) {
			physaddr = sg_dma_address(sgel);
			if (phba->sli_rev == 3 &&
			    !(phba->sli3_options & LPFC_SLI3_BG_ENABLED) &&
			    nseg <= LPFC_EXT_DATA_BDE_COUNT) {
				data_bde->tus.f.bdeFlags = BUFF_TYPE_BDE_64;
				data_bde->tus.f.bdeSize = sg_dma_len(sgel);
				data_bde->addrLow = putPaddrLow(physaddr);
				data_bde->addrHigh = putPaddrHigh(physaddr);
				data_bde++;
			} else {
				bpl->tus.f.bdeFlags = BUFF_TYPE_BDE_64;
				bpl->tus.f.bdeSize = sg_dma_len(sgel);
				bpl->tus.w = le32_to_cpu(bpl->tus.w);
				bpl->addrLow =
					le32_to_cpu(putPaddrLow(physaddr));
				bpl->addrHigh =
					le32_to_cpu(putPaddrHigh(physaddr));
				bpl++;
			}
		}
	}

	/*
	 * Finish initializing those IOCB fields that are dependent on the
	 * scsi_cmnd request_buffer.  Note that for SLI-2 the bdeSize is
	 * explicitly reinitialized and for SLI-3 the extended bde count is
	 * explicitly reinitialized since all iocb memory resources are reused.
	 */
	if (phba->sli_rev == 3 &&
	    !(phba->sli3_options & LPFC_SLI3_BG_ENABLED)) {
		if (num_bde > LPFC_EXT_DATA_BDE_COUNT) {
			/*
			 * The extended IOCB format can only fit 3 BDE or a BPL.
			 * This I/O has more than 3 BDE so the 1st data bde will
			 * be a BPL that is filled in here.
			 */
			physaddr = lpfc_cmd->dma_handle;
			data_bde->tus.f.bdeFlags = BUFF_TYPE_BLP_64;
			data_bde->tus.f.bdeSize = (num_bde *
						   sizeof(struct ulp_bde64));
			physaddr += (sizeof(struct fcp_cmnd) +
				     sizeof(struct fcp_rsp) +
				     (2 * sizeof(struct ulp_bde64)));
			data_bde->addrHigh = putPaddrHigh(physaddr);
			data_bde->addrLow = putPaddrLow(physaddr);
			/* ebde count includes the response bde and data bpl */
			iocb_cmd->unsli3.fcp_ext.ebde_count = 2;
		} else {
			/* ebde count includes the response bde and data bdes */
			iocb_cmd->unsli3.fcp_ext.ebde_count = (num_bde + 1);
		}
	} else {
		iocb_cmd->un.fcpi64.bdl.bdeSize =
			((num_bde + 2) * sizeof(struct ulp_bde64));
	}
	fcp_cmnd->fcpDl = cpu_to_be32(scsi_bufflen(scsi_cmnd));

	/*
	 * Due to difference in data length between DIF/non-DIF paths,
	 * we need to set word 4 of IOCB here
	 */
	iocb_cmd->un.fcpi.fcpi_parm = scsi_bufflen(scsi_cmnd);
	return 0;
}

/*
 * Given a scsi cmnd, determine the BlockGuard profile to be used
 * with the cmd.
 */
static int
lpfc_sc_to_sli_prof(struct scsi_cmnd *sc)
{
	uint8_t guard_type = scsi_host_get_guard(sc->device->host);
	uint8_t ret_prof = LPFC_PROF_INVALID;

	if (guard_type == SHOST_DIX_GUARD_IP) {
		switch (scsi_get_prot_op(sc)) {
		case SCSI_PROT_READ_INSERT:
		case SCSI_PROT_WRITE_STRIP:
			ret_prof = LPFC_PROF_AST2;
			break;

		case SCSI_PROT_READ_STRIP:
		case SCSI_PROT_WRITE_INSERT:
			ret_prof = LPFC_PROF_A1;
			break;

		case SCSI_PROT_READ_CONVERT:
		case SCSI_PROT_WRITE_CONVERT:
			ret_prof = LPFC_PROF_AST1;
			break;

		case SCSI_PROT_READ_PASS:
		case SCSI_PROT_WRITE_PASS:
		case SCSI_PROT_NORMAL:
		default:
			printk(KERN_ERR "Bad op/guard:%d/%d combination\n",
					scsi_get_prot_op(sc), guard_type);
			break;

		}
	} else if (guard_type == SHOST_DIX_GUARD_CRC) {
		switch (scsi_get_prot_op(sc)) {
		case SCSI_PROT_READ_STRIP:
		case SCSI_PROT_WRITE_INSERT:
			ret_prof = LPFC_PROF_A1;
			break;

		case SCSI_PROT_READ_PASS:
		case SCSI_PROT_WRITE_PASS:
			ret_prof = LPFC_PROF_C1;
			break;

		case SCSI_PROT_READ_CONVERT:
		case SCSI_PROT_WRITE_CONVERT:
		case SCSI_PROT_READ_INSERT:
		case SCSI_PROT_WRITE_STRIP:
		case SCSI_PROT_NORMAL:
		default:
			printk(KERN_ERR "Bad op/guard:%d/%d combination\n",
					scsi_get_prot_op(sc), guard_type);
			break;
		}
	} else {
		/* unsupported format */
		BUG();
	}

	return ret_prof;
}

struct scsi_dif_tuple {
	__be16 guard_tag;	/* Checksum */
	__be16 app_tag;		/* Opaque storage */
	__be32 ref_tag;		/* Target LBA or indirect LBA */
};

static inline unsigned
lpfc_cmd_blksize(struct scsi_cmnd *sc)
{
	return sc->device->sector_size;
}

/**
 * lpfc_get_cmd_dif_parms - Extract DIF parameters from SCSI command
 * @sc: in: SCSI command
 * @apptagmask: out: app tag mask
 * @apptagval: out: app tag value
 * @reftag: out: ref tag (reference tag)
 *
 * Description:
 *   Extract DIF parameters from the command if possible.  Otherwise,
 *   use default parameters.
 *
 **/
static inline void
lpfc_get_cmd_dif_parms(struct scsi_cmnd *sc, uint16_t *apptagmask,
		uint16_t *apptagval, uint32_t *reftag)
{
	struct scsi_dif_tuple *spt;
	unsigned char op = scsi_get_prot_op(sc);
	unsigned int protcnt = scsi_prot_sg_count(sc);
	static int cnt;

	if (protcnt && (op == SCSI_PROT_WRITE_STRIP ||
			op == SCSI_PROT_WRITE_PASS ||
			op == SCSI_PROT_WRITE_CONVERT)) {
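
		/*
		 * Host-supplied protection data accompanies this write;
		 * seed the ref tag from the first DIF tuple in the
		 * protection scatterlist.
		 */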
		cnt++;
		spt = page_address(sg_page(scsi_prot_sglist(sc))) +
			scsi_prot_sglist(sc)[0].offset;
		*apptagmask = 0;
		*apptagval = 0;
		*reftag = cpu_to_be32(spt->ref_tag);

	} else {
		/* SBC defines ref tag to be lower 32bits of LBA */
		*reftag = (uint32_t) (0xffffffff & scsi_get_lba(sc));
		*apptagmask = 0;
		*apptagval = 0;
	}
}

/*
 * This function sets up buffer list for protection groups of
 * type LPFC_PG_TYPE_NO_DIF
 *
 * This is usually used when the HBA is instructed to generate
 * DIFs and insert them into data stream (or strip DIF from
 * incoming data stream)
 *
 * The buffer list consists of just one protection group described
 * below:
 *                             +-------------------------+
 *   start of prot group  -->  |          PDE_1          |
 *                             +-------------------------+
 *                             |         Data BDE        |
 *                             +-------------------------+
 *                             |more Data BDE's ... (opt)|
 *                             +-------------------------+
 *
 * @sc: pointer to scsi command we're working on
 * @bpl: pointer to buffer list for protection groups
 * @datasegcnt: number of segments of data that have been dma mapped
 *
 * Note: Data s/g buffers have been dma mapped
 */
static int
lpfc_bg_setup_bpl(struct lpfc_hba *phba, struct scsi_cmnd *sc,
		struct ulp_bde64 *bpl, int datasegcnt)
{
	struct scatterlist *sgde = NULL; /* s/g data entry */
	struct lpfc_pde *pde1 = NULL;
	dma_addr_t physaddr;
	int i = 0, num_bde = 0;
	int datadir = sc->sc_data_direction;
	int prof = LPFC_PROF_INVALID;
	unsigned blksize;
	uint32_t reftag;
	uint16_t apptagmask, apptagval;

	pde1 = (struct lpfc_pde *) bpl;
	prof = lpfc_sc_to_sli_prof(sc);

	if (prof == LPFC_PROF_INVALID)
		goto out;

	/* extract some info from the scsi command for PDE1 */
	blksize = lpfc_cmd_blksize(sc);
	lpfc_get_cmd_dif_parms(sc, &apptagmask, &apptagval, &reftag);

	/* setup PDE1 with what we have */
	lpfc_pde_set_bg_parms(pde1, LPFC_PDE1_DESCRIPTOR, prof, blksize,
			BG_EC_STOP_ERR);
	lpfc_pde_set_dif_parms(pde1, apptagmask, apptagval, reftag);

	num_bde++;
	bpl++;

	/* assumption: caller has already run dma_map_sg on command data */
	scsi_for_each_sg(sc, sgde, datasegcnt, i) {
		physaddr = sg_dma_address(sgde);
		bpl->addrLow = le32_to_cpu(putPaddrLow(physaddr));
		bpl->addrHigh = le32_to_cpu(putPaddrHigh(physaddr));
		bpl->tus.f.bdeSize = sg_dma_len(sgde);
		if (datadir == DMA_TO_DEVICE)
			bpl->tus.f.bdeFlags = BUFF_TYPE_BDE_64;
		else
			bpl->tus.f.bdeFlags = BUFF_TYPE_BDE_64I;
		bpl->tus.w = le32_to_cpu(bpl->tus.w);
		bpl++;
		num_bde++;
	}

out:
	return num_bde;
}

/*
 * This function sets up buffer list for protection groups of
 * type LPFC_PG_TYPE_DIF_BUF
 *
 * This is usually used when DIFs are in their own buffers,
 * separate from the data.  The HBA can then be instructed
 * to place the DIFs in the outgoing stream.  For read operations,
 * the HBA could extract the DIFs and place them in DIF buffers.
 *
 * The buffer list for this type consists of one or more of the
 * protection groups described below:
 *                                   +-------------------------+
 *   start of first prot group  -->  |          PDE_1          |
 *                                   +-------------------------+
 *                                   |    PDE_3 (Prot BDE)     |
 *                                   +-------------------------+
 *                                   |        Data BDE         |
 *                                   +-------------------------+
 *                                   |more Data BDE's ... (opt)|
 *                                   +-------------------------+
 *   start of new prot group  -->    |          PDE_1          |
 *                                   +-------------------------+
 *                                   |          ...            |
 *                                   +-------------------------+
 *
 * @sc: pointer to scsi command we're working on
 * @bpl: pointer to buffer list for protection groups
 * @datacnt: number of segments of data that have been dma mapped
 * @protcnt: number of segment of protection data that have been dma mapped
 *
 * Note: It is assumed that both data and protection s/g buffers have been
 *       mapped for DMA
 */
static int
lpfc_bg_setup_bpl_prot(struct lpfc_hba *phba, struct scsi_cmnd *sc,
		struct ulp_bde64 *bpl, int datacnt, int protcnt)
{
	struct scatterlist *sgde = NULL; /* s/g data entry */
	struct scatterlist *sgpe = NULL; /* s/g prot entry */
	struct lpfc_pde *pde1 = NULL;
	struct ulp_bde64 *prot_bde = NULL;
	dma_addr_t dataphysaddr, protphysaddr;
	unsigned short curr_data = 0, curr_prot = 0;
	unsigned int split_offset, protgroup_len;
	unsigned int protgrp_blks, protgrp_bytes;
	unsigned int remainder, subtotal;
	int prof = LPFC_PROF_INVALID;
	int datadir = sc->sc_data_direction;
	unsigned char pgdone = 0, alldone = 0;
	unsigned blksize;
	uint32_t reftag;
	uint16_t apptagmask, apptagval;
	int num_bde = 0;

	sgpe = scsi_prot_sglist(sc);
	sgde = scsi_sglist(sc);

	if (!sgpe || !sgde) {
		lpfc_printf_log(phba, KERN_ERR, LOG_FCP,
				"9020 Invalid s/g entry: data=0x%p prot=0x%p\n",
				sgpe, sgde);
		return 0;
	}

	prof = lpfc_sc_to_sli_prof(sc);
	if (prof == LPFC_PROF_INVALID)
		goto out;

	/* extract some info from the scsi command for PDE1 */
	blksize = lpfc_cmd_blksize(sc);
	lpfc_get_cmd_dif_parms(sc, &apptagmask, &apptagval, &reftag);

	split_offset = 0;
	do {
		/* setup the first PDE_1 */
		pde1 = (struct lpfc_pde *) bpl;

		lpfc_pde_set_bg_parms(pde1, LPFC_PDE1_DESCRIPTOR, prof, blksize,
				BG_EC_STOP_ERR);
		lpfc_pde_set_dif_parms(pde1, apptagmask, apptagval, reftag);

		num_bde++;
		bpl++;

		/* setup the first BDE that points to protection buffer */
		prot_bde = (struct ulp_bde64 *) bpl;
		protphysaddr = sg_dma_address(sgpe);
		prot_bde->addrLow = le32_to_cpu(putPaddrLow(protphysaddr));
		prot_bde->addrHigh = le32_to_cpu(putPaddrHigh(protphysaddr));
		protgroup_len = sg_dma_len(sgpe);

		/* must be integer multiple of the DIF block length */
		BUG_ON(protgroup_len % 8);

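		/* each 8-byte DIF tuple guards one logical block of
		 * blksize bytes
		 */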
		protgrp_blks = protgroup_len / 8;
		protgrp_bytes = protgrp_blks * blksize;

		prot_bde->tus.f.bdeSize = protgroup_len;
		if (datadir == DMA_TO_DEVICE)
			prot_bde->tus.f.bdeFlags = BUFF_TYPE_BDE_64;
		else
			prot_bde->tus.f.bdeFlags = BUFF_TYPE_BDE_64I;
		prot_bde->tus.w = le32_to_cpu(bpl->tus.w);

		curr_prot++;
		num_bde++;

		/* setup BDE's for data blocks associated with DIF data */
		pgdone = 0;
		subtotal = 0; /* total bytes processed for current prot grp */
		while (!pgdone) {
			if (!sgde) {
				printk(KERN_ERR "%s Invalid data segment\n",
						__func__);
				return 0;
			}
			bpl++;
			dataphysaddr = sg_dma_address(sgde) + split_offset;
			bpl->addrLow = le32_to_cpu(putPaddrLow(dataphysaddr));
			bpl->addrHigh = le32_to_cpu(putPaddrHigh(dataphysaddr));

			remainder = sg_dma_len(sgde) - split_offset;

			if ((subtotal + remainder) <= protgrp_bytes) {
				/* we can use this whole buffer */
				bpl->tus.f.bdeSize = remainder;
				split_offset = 0;

				if ((subtotal + remainder) == protgrp_bytes)
					pgdone = 1;
			} else {
				/* must split this buffer with next prot grp */
				bpl->tus.f.bdeSize = protgrp_bytes - subtotal;
				split_offset += bpl->tus.f.bdeSize;
			}

			subtotal += bpl->tus.f.bdeSize;

			if (datadir == DMA_TO_DEVICE)
				bpl->tus.f.bdeFlags = BUFF_TYPE_BDE_64;
			else
				bpl->tus.f.bdeFlags = BUFF_TYPE_BDE_64I;
			bpl->tus.w = le32_to_cpu(bpl->tus.w);

			num_bde++;
			curr_data++;

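			/* this data buffer spills into the next protection
			 * group; break out and start a new PDE_1 for the
			 * remainder of it
			 */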
			if (split_offset)
				break;

			/* Move to the next s/g segment if possible */
			sgde = sg_next(sgde);
		}

		/* are we done ? */
		if (curr_prot == protcnt) {
			alldone = 1;
		} else if (curr_prot < protcnt) {
			/* advance to next prot buffer */
			sgpe = sg_next(sgpe);
			bpl++;

			/* update the reference tag */
			reftag += protgrp_blks;
		} else {
			/* if we're here, we have a bug */
			printk(KERN_ERR "BLKGRD: bug in %s\n", __func__);
		}

	} while (!alldone);

out:
	return num_bde;
}

/*
 * Given a SCSI command that supports DIF, determine composition of protection
 * groups involved in setting up buffer lists
 *
 * Returns:
 *	the protection group type (LPFC_PG_TYPE_*) to use for the command,
 *	for both reads and writes
 */
static int
lpfc_prot_group_type(struct lpfc_hba *phba, struct scsi_cmnd *sc)
{
	int ret = LPFC_PG_TYPE_INVALID;
	unsigned char op = scsi_get_prot_op(sc);

	switch (op) {
	case SCSI_PROT_READ_STRIP:
	case SCSI_PROT_WRITE_INSERT:
		ret = LPFC_PG_TYPE_NO_DIF;
		break;
	case SCSI_PROT_READ_INSERT:
	case SCSI_PROT_WRITE_STRIP:
	case SCSI_PROT_READ_PASS:
	case SCSI_PROT_WRITE_PASS:
	case SCSI_PROT_WRITE_CONVERT:
	case SCSI_PROT_READ_CONVERT:
		ret = LPFC_PG_TYPE_DIF_BUF;
		break;
	default:
		lpfc_printf_log(phba, KERN_ERR, LOG_FCP,
				"9021 Unsupported protection op:%d\n", op);
		break;
	}

	return ret;
}

/*
 * This is the protection/DIF aware version of
 * lpfc_scsi_prep_dma_buf().  It may be a good idea to combine the
 * two functions eventually, but for now, it's here
 */
static int
lpfc_bg_scsi_prep_dma_buf(struct lpfc_hba *phba,
		struct lpfc_scsi_buf *lpfc_cmd)
{
	struct scsi_cmnd *scsi_cmnd = lpfc_cmd->pCmd;
	struct fcp_cmnd *fcp_cmnd = lpfc_cmd->fcp_cmnd;
	struct ulp_bde64 *bpl = lpfc_cmd->fcp_bpl;
	IOCB_t *iocb_cmd = &lpfc_cmd->cur_iocbq.iocb;
	uint32_t num_bde = 0;
	int datasegcnt, protsegcnt, datadir = scsi_cmnd->sc_data_direction;
	int prot_group_type = 0;
	int diflen, fcpdl;
	unsigned blksize;

	/*
	 * Start the lpfc command prep by bumping the bpl beyond fcp_cmnd
	 * fcp_rsp regions to the first data bde entry
	 */
	bpl += 2;
	if (scsi_sg_count(scsi_cmnd)) {
		/*
		 * The driver stores the segment count returned from pci_map_sg
		 * because this is a count of dma-mappings used to map the
		 * use_sg pages.  They are not guaranteed to be the same for
		 * those architectures that implement an IOMMU.
		 */
		datasegcnt = dma_map_sg(&phba->pcidev->dev,
					scsi_sglist(scsi_cmnd),
					scsi_sg_count(scsi_cmnd), datadir);
		if (unlikely(!datasegcnt))
			return 1;

		lpfc_cmd->seg_cnt = datasegcnt;
		if (lpfc_cmd->seg_cnt > phba->cfg_sg_seg_cnt) {
			printk(KERN_ERR "%s: Too many sg segments from "
					"dma_map_sg.  Config %d, seg_cnt %d\n",
					__func__, phba->cfg_sg_seg_cnt,
					lpfc_cmd->seg_cnt);
			scsi_dma_unmap(scsi_cmnd);
			return 1;
		}

		prot_group_type = lpfc_prot_group_type(phba, scsi_cmnd);

		switch (prot_group_type) {
		case LPFC_PG_TYPE_NO_DIF:
			num_bde = lpfc_bg_setup_bpl(phba, scsi_cmnd, bpl,
					datasegcnt);
			/* we should have 2 or more entries in buffer list */
			if (num_bde < 2)
				goto err;
			break;
		case LPFC_PG_TYPE_DIF_BUF:{
			/*
			 * This type indicates that protection buffers are
			 * passed to the driver, so that needs to be prepared
			 * for DMA
			 */
			protsegcnt = dma_map_sg(&phba->pcidev->dev,
					scsi_prot_sglist(scsi_cmnd),
					scsi_prot_sg_count(scsi_cmnd), datadir);
			if (unlikely(!protsegcnt)) {
				scsi_dma_unmap(scsi_cmnd);
				return 1;
			}

			lpfc_cmd->prot_seg_cnt = protsegcnt;
			if (lpfc_cmd->prot_seg_cnt
			    > phba->cfg_prot_sg_seg_cnt) {
				printk(KERN_ERR "%s: Too many prot sg segments "
						"from dma_map_sg.  Config %d, "
						"prot_seg_cnt %d\n", __func__,
						phba->cfg_prot_sg_seg_cnt,
						lpfc_cmd->prot_seg_cnt);
				dma_unmap_sg(&phba->pcidev->dev,
					     scsi_prot_sglist(scsi_cmnd),
					     scsi_prot_sg_count(scsi_cmnd),
					     datadir);
				scsi_dma_unmap(scsi_cmnd);
				return 1;
			}

			num_bde = lpfc_bg_setup_bpl_prot(phba, scsi_cmnd, bpl,
					datasegcnt, protsegcnt);
			/* we should have 3 or more entries in buffer list */
			if (num_bde < 3)
				goto err;
			break;
		}
		case LPFC_PG_TYPE_INVALID:
		default:
			lpfc_printf_log(phba, KERN_ERR, LOG_FCP,
					"9022 Unexpected protection group %i\n",
					prot_group_type);
			return 1;
		}
	}

	/*
	 * Finish initializing those IOCB fields that are dependent on the
	 * scsi_cmnd request_buffer.  Note that the bdeSize is explicitly
	 * reinitialized since all iocb memory resources are used many times
	 * for transmit, receive, and continuation bpl's.
	 */
	iocb_cmd->un.fcpi64.bdl.bdeSize = (2 * sizeof(struct ulp_bde64));
	iocb_cmd->un.fcpi64.bdl.bdeSize += (num_bde * sizeof(struct ulp_bde64));
	iocb_cmd->ulpBdeCount = 1;
	iocb_cmd->ulpLe = 1;

	fcpdl = scsi_bufflen(scsi_cmnd);

	if (scsi_get_prot_type(scsi_cmnd) == SCSI_PROT_DIF_TYPE1) {
		/*
		 * We are in DIF Type 1 mode.
		 * Every data block has an 8 byte DIF (trailer)
		 * attached to it.  Must adjust the FCP data length
		 * to account for them.
		 */
		blksize = lpfc_cmd_blksize(scsi_cmnd);
		diflen = (fcpdl / blksize) * 8;
		fcpdl += diflen;
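		/* e.g. a 64kB transfer of 512-byte blocks carries 128
		 * DIF tuples, so fcpdl becomes 65536 + 128 * 8 bytes
		 */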
	}
	fcp_cmnd->fcpDl = cpu_to_be32(fcpdl);

	/*
	 * Due to difference in data length between DIF/non-DIF paths,
	 * we need to set word 4 of IOCB here
	 */
	iocb_cmd->un.fcpi.fcpi_parm = fcpdl;

	return 0;
err:
	lpfc_printf_log(phba, KERN_ERR, LOG_FCP,
			"9023 Could not setup all needed BDE's "
			"prot_group_type=%d, num_bde=%d\n",
			prot_group_type, num_bde);
	return 1;
}

/*
 * This function checks for BlockGuard errors detected by
 * the HBA.  In case of errors, the ASC/ASCQ fields in the
 * sense buffer will be set accordingly, paired with
 * ILLEGAL_REQUEST to signal to the kernel that the HBA
 * detected corruption.
 *
 * Returns:
 *  0 - No error found
 *  1 - BlockGuard error found
 * -1 - Internal error (bad profile, ...etc)
 */
static int
lpfc_parse_bg_err(struct lpfc_hba *phba, struct lpfc_scsi_buf *lpfc_cmd,
			struct lpfc_iocbq *pIocbOut)
{
	struct scsi_cmnd *cmd = lpfc_cmd->pCmd;
	struct sli3_bg_fields *bgf = &pIocbOut->iocb.unsli3.sli3_bg;
	int ret = 0;
	uint32_t bghm = bgf->bghm;
	uint32_t bgstat = bgf->bgstat;
	uint64_t failing_sector = 0;

	printk(KERN_ERR "BG ERROR in cmd 0x%x lba 0x%llx blk cnt 0x%lx "
			"bgstat=0x%x bghm=0x%x\n",
			cmd->cmnd[0], (unsigned long long)scsi_get_lba(cmd),
			cmd->request->nr_sectors, bgstat, bghm);

	spin_lock(&_dump_buf_lock);
	if (!_dump_buf_done) {
		printk(KERN_ERR "Saving Data for %u blocks to debugfs\n",
				(cmd->cmnd[7] << 8 | cmd->cmnd[8]));
		lpfc_debug_save_data(cmd);

		/* If we have a prot sgl, save the DIF buffer */
		if (lpfc_prot_group_type(phba, cmd) ==
				LPFC_PG_TYPE_DIF_BUF) {
			printk(KERN_ERR "Saving DIF for %u blocks to debugfs\n",
					(cmd->cmnd[7] << 8 | cmd->cmnd[8]));
			lpfc_debug_save_dif(cmd);
		}

		_dump_buf_done = 1;
	}
	spin_unlock(&_dump_buf_lock);

	if (lpfc_bgs_get_invalid_prof(bgstat)) {
		cmd->result = ScsiResult(DID_ERROR, 0);
		printk(KERN_ERR "Invalid BlockGuard profile. bgstat:0x%x\n",
				bgstat);
		ret = (-1);
		goto out;
	}

	if (lpfc_bgs_get_uninit_dif_block(bgstat)) {
		cmd->result = ScsiResult(DID_ERROR, 0);
		printk(KERN_ERR "Invalid BlockGuard DIF Block. bgstat:0x%x\n",
				bgstat);
		ret = (-1);
		goto out;
	}

	if (lpfc_bgs_get_guard_err(bgstat)) {
		ret = 1;

		scsi_build_sense_buffer(1, cmd->sense_buffer, ILLEGAL_REQUEST,
				0x10, 0x1);
		cmd->result = DRIVER_SENSE << 24
			| ScsiResult(DID_ABORT, SAM_STAT_CHECK_CONDITION);
		phba->bg_guard_err_cnt++;
		printk(KERN_ERR "BLKGRD: guard_tag error\n");
	}

	if (lpfc_bgs_get_reftag_err(bgstat)) {
		ret = 1;

		scsi_build_sense_buffer(1, cmd->sense_buffer, ILLEGAL_REQUEST,
				0x10, 0x3);
		cmd->result = DRIVER_SENSE << 24
			| ScsiResult(DID_ABORT, SAM_STAT_CHECK_CONDITION);

		phba->bg_reftag_err_cnt++;
		printk(KERN_ERR "BLKGRD: ref_tag error\n");
	}

	if (lpfc_bgs_get_apptag_err(bgstat)) {
		ret = 1;

		scsi_build_sense_buffer(1, cmd->sense_buffer, ILLEGAL_REQUEST,
				0x10, 0x2);
		cmd->result = DRIVER_SENSE << 24
			| ScsiResult(DID_ABORT, SAM_STAT_CHECK_CONDITION);

		phba->bg_apptag_err_cnt++;
		printk(KERN_ERR "BLKGRD: app_tag error\n");
	}

	if (lpfc_bgs_get_hi_water_mark_present(bgstat)) {
		/*
		 * setup sense data descriptor 0 per SPC-4 as an information
		 * field, and put the failing LBA in it
		 */
		cmd->sense_buffer[8] = 0;     /* Information */
		cmd->sense_buffer[9] = 0xa;   /* Add. length */
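		/* the high water mark is reported in bytes; convert it
		 * to a block count before adding it to the starting LBA
		 */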
		bghm /= cmd->device->sector_size;

		failing_sector = scsi_get_lba(cmd);
		failing_sector += bghm;

		put_unaligned_be64(failing_sector, &cmd->sense_buffer[10]);
	}

	if (!ret) {
		/* No error was reported - problem in FW? */
		cmd->result = ScsiResult(DID_ERROR, 0);
		printk(KERN_ERR "BLKGRD: no errors reported!\n");
	}

out:
	return ret;
}

/**
 * lpfc_send_scsi_error_event - Posts an event when there is SCSI error
 * @phba: Pointer to hba context object.
 * @vport: Pointer to vport object.
 * @lpfc_cmd: Pointer to lpfc scsi command which reported the error.
 * @rsp_iocb: Pointer to response iocb object which reported error.
 *
 * This function posts an event when there is a SCSI command reporting
 * error from the scsi device.
 **/
static void
lpfc_send_scsi_error_event(struct lpfc_hba *phba, struct lpfc_vport *vport,
		struct lpfc_scsi_buf *lpfc_cmd, struct lpfc_iocbq *rsp_iocb)
{
	struct scsi_cmnd *cmnd = lpfc_cmd->pCmd;
	struct fcp_rsp *fcprsp = lpfc_cmd->fcp_rsp;
	uint32_t resp_info = fcprsp->rspStatus2;
	uint32_t scsi_status = fcprsp->rspStatus3;
	uint32_t fcpi_parm = rsp_iocb->iocb.un.fcpi.fcpi_parm;
	struct lpfc_fast_path_event *fast_path_evt = NULL;
	struct lpfc_nodelist *pnode = lpfc_cmd->rdata->pnode;
	unsigned long flags;

	/* If there is queuefull or busy condition send a scsi event */
	if ((cmnd->result == SAM_STAT_TASK_SET_FULL) ||
	    (cmnd->result == SAM_STAT_BUSY)) {
		fast_path_evt = lpfc_alloc_fast_evt(phba);
		if (!fast_path_evt)
			return;
		fast_path_evt->un.scsi_evt.event_type =
			FC_REG_SCSI_EVENT;
		fast_path_evt->un.scsi_evt.subcategory =
			(cmnd->result == SAM_STAT_TASK_SET_FULL) ?
			LPFC_EVENT_QFULL : LPFC_EVENT_DEVBSY;
		fast_path_evt->un.scsi_evt.lun = cmnd->device->lun;
		memcpy(&fast_path_evt->un.scsi_evt.wwpn,
			&pnode->nlp_portname, sizeof(struct lpfc_name));
		memcpy(&fast_path_evt->un.scsi_evt.wwnn,
			&pnode->nlp_nodename, sizeof(struct lpfc_name));
	} else if ((resp_info & SNS_LEN_VALID) && fcprsp->rspSnsLen &&
		((cmnd->cmnd[0] == READ_10) || (cmnd->cmnd[0] == WRITE_10))) {
		fast_path_evt = lpfc_alloc_fast_evt(phba);
		if (!fast_path_evt)
			return;
		fast_path_evt->un.check_cond_evt.scsi_event.event_type =
			FC_REG_SCSI_EVENT;
		fast_path_evt->un.check_cond_evt.scsi_event.subcategory =
			LPFC_EVENT_CHECK_COND;
		fast_path_evt->un.check_cond_evt.scsi_event.lun =
			cmnd->device->lun;
		memcpy(&fast_path_evt->un.check_cond_evt.scsi_event.wwpn,
			&pnode->nlp_portname, sizeof(struct lpfc_name));
		memcpy(&fast_path_evt->un.check_cond_evt.scsi_event.wwnn,
			&pnode->nlp_nodename, sizeof(struct lpfc_name));
		fast_path_evt->un.check_cond_evt.sense_key =
			cmnd->sense_buffer[2] & 0xf;
		fast_path_evt->un.check_cond_evt.asc = cmnd->sense_buffer[12];
		fast_path_evt->un.check_cond_evt.ascq = cmnd->sense_buffer[13];
	} else if ((cmnd->sc_data_direction == DMA_FROM_DEVICE) &&
		     fcpi_parm &&
		     ((be32_to_cpu(fcprsp->rspResId) != fcpi_parm) ||
			((scsi_status == SAM_STAT_GOOD) &&
			!(resp_info & (RESID_UNDER | RESID_OVER))))) {
		/*
		 * If the status is good or the resid does not match the
		 * fcpi_parm while fcpi_parm is valid, then a read check
		 * error has occurred.
		 */
		fast_path_evt = lpfc_alloc_fast_evt(phba);
		if (!fast_path_evt)
			return;
		fast_path_evt->un.read_check_error.header.event_type =
			FC_REG_FABRIC_EVENT;
		fast_path_evt->un.read_check_error.header.subcategory =
			LPFC_EVENT_FCPRDCHKERR;
		memcpy(&fast_path_evt->un.read_check_error.header.wwpn,
			&pnode->nlp_portname, sizeof(struct lpfc_name));
		memcpy(&fast_path_evt->un.read_check_error.header.wwnn,
			&pnode->nlp_nodename, sizeof(struct lpfc_name));
		fast_path_evt->un.read_check_error.lun = cmnd->device->lun;
		fast_path_evt->un.read_check_error.opcode = cmnd->cmnd[0];
		fast_path_evt->un.read_check_error.fcpiparam =
			fcpi_parm;
	} else
		return;

	fast_path_evt->vport = vport;
	spin_lock_irqsave(&phba->hbalock, flags);
	list_add_tail(&fast_path_evt->work_evt.evt_listp, &phba->work_list);
	spin_unlock_irqrestore(&phba->hbalock, flags);
	lpfc_worker_wake_up(phba);
	return;
}

/**
 * lpfc_scsi_unprep_dma_buf - Routine to un-map DMA mapping of scatter gather
 * @phba: The HBA for which this call is being executed.
 * @psb: The scsi buffer which is going to be un-mapped.
 *
 * This routine does DMA un-mapping of the scatter gather list of the scsi
 * command field of @psb.
 **/
static void
lpfc_scsi_unprep_dma_buf(struct lpfc_hba *phba, struct lpfc_scsi_buf *psb)
{
	/*
	 * There are only two special cases to consider.  (1) the scsi command
	 * requested scatter-gather usage or (2) the scsi command allocated
	 * a request buffer, but did not request use_sg.  There is a third
	 * case, but it does not require resource deallocation.
	 */
	if (psb->seg_cnt > 0)
		scsi_dma_unmap(psb->pCmd);
	if (psb->prot_seg_cnt > 0)
		dma_unmap_sg(&phba->pcidev->dev, scsi_prot_sglist(psb->pCmd),
				scsi_prot_sg_count(psb->pCmd),
				psb->pCmd->sc_data_direction);
}

9bad7671 1531/**
3621a710 1532 * lpfc_handler_fcp_err - FCP response handler
9bad7671
JS
1533 * @vport: The virtual port for which this call is being executed.
1534 * @lpfc_cmd: Pointer to lpfc_scsi_buf data structure.
1535 * @rsp_iocb: The response IOCB which contains FCP error.
1536 *
1537 * This routine is called to process response IOCB with status field
1538 * IOSTAT_FCP_RSP_ERROR. This routine sets result field of scsi command
1539 * based upon SCSI and FCP error.
1540 **/
dea3101e 1541static void
2e0fef85
JS
1542lpfc_handle_fcp_err(struct lpfc_vport *vport, struct lpfc_scsi_buf *lpfc_cmd,
1543 struct lpfc_iocbq *rsp_iocb)
dea3101e
JB
1544{
1545 struct scsi_cmnd *cmnd = lpfc_cmd->pCmd;
1546 struct fcp_cmnd *fcpcmd = lpfc_cmd->fcp_cmnd;
1547 struct fcp_rsp *fcprsp = lpfc_cmd->fcp_rsp;
7054a606 1548 uint32_t fcpi_parm = rsp_iocb->iocb.un.fcpi.fcpi_parm;
dea3101e
JB
1549 uint32_t resp_info = fcprsp->rspStatus2;
1550 uint32_t scsi_status = fcprsp->rspStatus3;
c7743956 1551 uint32_t *lp;
dea3101e
JB
1552 uint32_t host_status = DID_OK;
1553 uint32_t rsplen = 0;
c7743956 1554 uint32_t logit = LOG_FCP | LOG_FCP_ERROR;
dea3101e 1555
ea2151b4 1556
dea3101e
JB
1557 /*
1558 * If this is a task management command, there is no
1559 * scsi packet associated with this lpfc_cmd. The driver
1560 * consumes it.
1561 */
1562 if (fcpcmd->fcpCntl2) {
1563 scsi_status = 0;
1564 goto out;
1565 }
1566
c7743956
JS
1567 if ((resp_info & SNS_LEN_VALID) && fcprsp->rspSnsLen) {
1568 uint32_t snslen = be32_to_cpu(fcprsp->rspSnsLen);
1569 if (snslen > SCSI_SENSE_BUFFERSIZE)
1570 snslen = SCSI_SENSE_BUFFERSIZE;
1571
1572 if (resp_info & RSP_LEN_VALID)
1573 rsplen = be32_to_cpu(fcprsp->rspRspLen);
1574 memcpy(cmnd->sense_buffer, &fcprsp->rspInfo0 + rsplen, snslen);
1575 }
1576 lp = (uint32_t *)cmnd->sense_buffer;
1577
1578 if (!scsi_status && (resp_info & RESID_UNDER))
1579 logit = LOG_FCP;
1580
e8b62011 1581 lpfc_printf_vlog(vport, KERN_WARNING, logit,
e2a0a9d6 1582 "9024 FCP command x%x failed: x%x SNS x%x x%x "
e8b62011
JS
1583 "Data: x%x x%x x%x x%x x%x\n",
1584 cmnd->cmnd[0], scsi_status,
1585 be32_to_cpu(*lp), be32_to_cpu(*(lp + 3)), resp_info,
1586 be32_to_cpu(fcprsp->rspResId),
1587 be32_to_cpu(fcprsp->rspSnsLen),
1588 be32_to_cpu(fcprsp->rspRspLen),
1589 fcprsp->rspInfo3);
dea3101e
JB
1590
1591 if (resp_info & RSP_LEN_VALID) {
1592 rsplen = be32_to_cpu(fcprsp->rspRspLen);
1593 if ((rsplen != 0 && rsplen != 4 && rsplen != 8) ||
1594 (fcprsp->rspInfo3 != RSP_NO_FAILURE)) {
1595 host_status = DID_ERROR;
1596 goto out;
1597 }
1598 }
1599
a0b4f78f 1600 scsi_set_resid(cmnd, 0);
dea3101e 1601 if (resp_info & RESID_UNDER) {
a0b4f78f 1602 scsi_set_resid(cmnd, be32_to_cpu(fcprsp->rspResId));
dea3101e 1603
e8b62011 1604 lpfc_printf_vlog(vport, KERN_INFO, LOG_FCP,
e2a0a9d6 1605 "9025 FCP Read Underrun, expected %d, "
e8b62011
JS
1606 "residual %d Data: x%x x%x x%x\n",
1607 be32_to_cpu(fcpcmd->fcpDl),
1608 scsi_get_resid(cmnd), fcpi_parm, cmnd->cmnd[0],
1609 cmnd->underflow);
dea3101e 1610
7054a606
JS
1611 /*
1612 * If there is an under run check if under run reported by
1613 * storage array is same as the under run reported by HBA.
1614 * If this is not same, there is a dropped frame.
1615 */
1616 if ((cmnd->sc_data_direction == DMA_FROM_DEVICE) &&
1617 fcpi_parm &&
a0b4f78f 1618 (scsi_get_resid(cmnd) != fcpi_parm)) {
e8b62011
JS
1619 lpfc_printf_vlog(vport, KERN_WARNING,
1620 LOG_FCP | LOG_FCP_ERROR,
e2a0a9d6 1621 "9026 FCP Read Check Error "
e8b62011
JS
1622 "and Underrun Data: x%x x%x x%x x%x\n",
1623 be32_to_cpu(fcpcmd->fcpDl),
1624 scsi_get_resid(cmnd), fcpi_parm,
1625 cmnd->cmnd[0]);
a0b4f78f 1626 scsi_set_resid(cmnd, scsi_bufflen(cmnd));
7054a606
JS
1627 host_status = DID_ERROR;
1628 }
dea3101e
JB
1629 /*
1630 * The cmnd->underflow is the minimum number of bytes that must
1631 * be transfered for this command. Provided a sense condition
1632 * is not present, make sure the actual amount transferred is at
1633 * least the underflow value or fail.
1634 */
1635 if (!(resp_info & SNS_LEN_VALID) &&
1636 (scsi_status == SAM_STAT_GOOD) &&
a0b4f78f
FT
1637 (scsi_bufflen(cmnd) - scsi_get_resid(cmnd)
1638 < cmnd->underflow)) {
e8b62011 1639 lpfc_printf_vlog(vport, KERN_INFO, LOG_FCP,
e2a0a9d6 1640 "9027 FCP command x%x residual "
e8b62011
JS
1641 "underrun converted to error "
1642 "Data: x%x x%x x%x\n",
66dbfbe6 1643 cmnd->cmnd[0], scsi_bufflen(cmnd),
e8b62011 1644 scsi_get_resid(cmnd), cmnd->underflow);
dea3101e
JB
1645 host_status = DID_ERROR;
1646 }
1647 } else if (resp_info & RESID_OVER) {
e8b62011 1648 lpfc_printf_vlog(vport, KERN_WARNING, LOG_FCP,
e2a0a9d6 1649 "9028 FCP command x%x residual overrun error. "
e8b62011
JS
1650 "Data: x%x x%x \n", cmnd->cmnd[0],
1651 scsi_bufflen(cmnd), scsi_get_resid(cmnd));
dea3101e
JB
1652 host_status = DID_ERROR;
1653
1654 /*
1655 * Check the SLI validation that the entire transfer was actually
1656 * done (fcpi_parm should be zero). Apply this check only to reads.
1657 */
1658 } else if ((scsi_status == SAM_STAT_GOOD) && fcpi_parm &&
1659 (cmnd->sc_data_direction == DMA_FROM_DEVICE)) {
e8b62011 1660 lpfc_printf_vlog(vport, KERN_WARNING, LOG_FCP | LOG_FCP_ERROR,
e2a0a9d6 1661 "9029 FCP Read Check Error Data: "
e8b62011
JS
1662 "x%x x%x x%x x%x\n",
1663 be32_to_cpu(fcpcmd->fcpDl),
1664 be32_to_cpu(fcprsp->rspResId),
1665 fcpi_parm, cmnd->cmnd[0]);
dea3101e 1666 host_status = DID_ERROR;
a0b4f78f 1667 scsi_set_resid(cmnd, scsi_bufflen(cmnd));
dea3101e
JB
1668 }
1669
1670 out:
1671 cmnd->result = ScsiResult(host_status, scsi_status);
ea2151b4 1672 lpfc_send_scsi_error_event(vport->phba, vport, lpfc_cmd, rsp_iocb);
dea3101e
JB
1673}
1674
9bad7671 1675/**
3621a710 1676 * lpfc_scsi_cmd_iocb_cmpl - Scsi cmnd IOCB completion routine
9bad7671
JS
1677 * @phba: The Hba for which this call is being executed.
1678 * @pIocbIn: The command IOCBQ for the scsi cmnd.
1679 * @pIocbOut: The response IOCBQ for the scsi cmnd.
1680 *
1681 * This routine derives the scsi command result from the response IOCB
1682 * status field. It also handles the QUEUE FULL condition by ramping
1683 * down the device queue depth.
1684 **/
dea3101e
JB
1685static void
1686lpfc_scsi_cmd_iocb_cmpl(struct lpfc_hba *phba, struct lpfc_iocbq *pIocbIn,
1687 struct lpfc_iocbq *pIocbOut)
1688{
1689 struct lpfc_scsi_buf *lpfc_cmd =
1690 (struct lpfc_scsi_buf *) pIocbIn->context1;
2e0fef85 1691 struct lpfc_vport *vport = pIocbIn->vport;
dea3101e
JB
1692 struct lpfc_rport_data *rdata = lpfc_cmd->rdata;
1693 struct lpfc_nodelist *pnode = rdata->pnode;
1694 struct scsi_cmnd *cmd = lpfc_cmd->pCmd;
445cf4f4 1695 int result;
a257bf90 1696 struct scsi_device *tmp_sdev;
445cf4f4 1697 int depth = 0;
fa61a54e 1698 unsigned long flags;
ea2151b4 1699 struct lpfc_fast_path_event *fast_path_evt;
a257bf90
JS
1700 struct Scsi_Host *shost = cmd->device->host;
1701 uint32_t queue_depth, scsi_id;
dea3101e
JB
1702
1703 lpfc_cmd->result = pIocbOut->iocb.un.ulpWord[4];
1704 lpfc_cmd->status = pIocbOut->iocb.ulpStatus;
109f6ed0
JS
1705 if (pnode && NLP_CHK_NODE_ACT(pnode))
1706 atomic_dec(&pnode->cmd_pending);
dea3101e
JB
1707
1708 if (lpfc_cmd->status) {
1709 if (lpfc_cmd->status == IOSTAT_LOCAL_REJECT &&
1710 (lpfc_cmd->result & IOERR_DRVR_MASK))
1711 lpfc_cmd->status = IOSTAT_DRIVER_REJECT;
1712 else if (lpfc_cmd->status >= IOSTAT_CNT)
1713 lpfc_cmd->status = IOSTAT_DEFAULT;
1714
e8b62011 1715 lpfc_printf_vlog(vport, KERN_WARNING, LOG_FCP,
e2a0a9d6 1716 "9030 FCP cmd x%x failed <%d/%d> "
e8b62011
JS
1717 "status: x%x result: x%x Data: x%x x%x\n",
1718 cmd->cmnd[0],
1719 cmd->device ? cmd->device->id : 0xffff,
1720 cmd->device ? cmd->device->lun : 0xffff,
1721 lpfc_cmd->status, lpfc_cmd->result,
1722 pIocbOut->iocb.ulpContext,
1723 lpfc_cmd->cur_iocbq.iocb.ulpIoTag);
dea3101e
JB
1724
1725 switch (lpfc_cmd->status) {
1726 case IOSTAT_FCP_RSP_ERROR:
1727 /* Call FCP RSP handler to determine result */
2e0fef85 1728 lpfc_handle_fcp_err(vport, lpfc_cmd, pIocbOut);
dea3101e
JB
1729 break;
1730 case IOSTAT_NPORT_BSY:
1731 case IOSTAT_FABRIC_BSY:
0f1f53a7 1732 cmd->result = ScsiResult(DID_TRANSPORT_DISRUPTED, 0);
ea2151b4
JS
1733 fast_path_evt = lpfc_alloc_fast_evt(phba);
1734 if (!fast_path_evt)
1735 break;
1736 fast_path_evt->un.fabric_evt.event_type =
1737 FC_REG_FABRIC_EVENT;
1738 fast_path_evt->un.fabric_evt.subcategory =
1739 (lpfc_cmd->status == IOSTAT_NPORT_BSY) ?
1740 LPFC_EVENT_PORT_BUSY : LPFC_EVENT_FABRIC_BUSY;
1741 if (pnode && NLP_CHK_NODE_ACT(pnode)) {
1742 memcpy(&fast_path_evt->un.fabric_evt.wwpn,
1743 &pnode->nlp_portname,
1744 sizeof(struct lpfc_name));
1745 memcpy(&fast_path_evt->un.fabric_evt.wwnn,
1746 &pnode->nlp_nodename,
1747 sizeof(struct lpfc_name));
1748 }
1749 fast_path_evt->vport = vport;
1750 fast_path_evt->work_evt.evt =
1751 LPFC_EVT_FASTPATH_MGMT_EVT;
1752 spin_lock_irqsave(&phba->hbalock, flags);
1753 list_add_tail(&fast_path_evt->work_evt.evt_listp,
1754 &phba->work_list);
1755 spin_unlock_irqrestore(&phba->hbalock, flags);
1756 lpfc_worker_wake_up(phba);
dea3101e 1757 break;
92d7f7b0 1758 case IOSTAT_LOCAL_REJECT:
d7c255b2 1759 if (lpfc_cmd->result == IOERR_INVALID_RPI ||
92d7f7b0 1760 lpfc_cmd->result == IOERR_NO_RESOURCES ||
d7c255b2 1761 lpfc_cmd->result == IOERR_ABORT_REQUESTED) {
92d7f7b0 1762 cmd->result = ScsiResult(DID_REQUEUE, 0);
58da1ffb 1763 break;
e2a0a9d6
JS
1764 }
1765
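/*
 * A DMA failure that also carries a non-zero BlockGuard status
 * (bgstat) means the HBA flagged a protection-data (DIF) error
 * for this I/O rather than a plain transport failure.
 */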
1766 if ((lpfc_cmd->result == IOERR_RX_DMA_FAILED ||
1767 lpfc_cmd->result == IOERR_TX_DMA_FAILED) &&
1768 pIocbOut->iocb.unsli3.sli3_bg.bgstat) {
1769 if (scsi_get_prot_op(cmd) != SCSI_PROT_NORMAL) {
1770 /*
1771 * This is a response for a BG enabled
1772 * cmd. Parse BG error
1773 */
1774 lpfc_parse_bg_err(phba, lpfc_cmd,
1775 pIocbOut);
1776 break;
1777 } else {
1778 lpfc_printf_vlog(vport, KERN_WARNING,
1779 LOG_BG,
1780 "9031 non-zero BGSTAT "
1781 "on unprotected cmd");
1782 }
1783 }
1784
1785 /* else: fall through */
dea3101e
JB
1786 default:
1787 cmd->result = ScsiResult(DID_ERROR, 0);
1788 break;
1789 }
1790
58da1ffb 1791 if (!pnode || !NLP_CHK_NODE_ACT(pnode)
19a7b4ae 1792 || (pnode->nlp_state != NLP_STE_MAPPED_NODE))
0f1f53a7
JS
1793 cmd->result = ScsiResult(DID_TRANSPORT_DISRUPTED,
1794 SAM_STAT_BUSY);
dea3101e
JB
1795 } else {
1796 cmd->result = ScsiResult(DID_OK, 0);
1797 }
1798
1799 if (cmd->result || lpfc_cmd->fcp_rsp->rspSnsLen) {
1800 uint32_t *lp = (uint32_t *)cmd->sense_buffer;
1801
e8b62011
JS
1802 lpfc_printf_vlog(vport, KERN_INFO, LOG_FCP,
1803 "0710 Iodone <%d/%d> cmd %p, error "
1804 "x%x SNS x%x x%x Data: x%x x%x\n",
1805 cmd->device->id, cmd->device->lun, cmd,
1806 cmd->result, *lp, *(lp + 3), cmd->retries,
1807 scsi_get_resid(cmd));
dea3101e
JB
1808 }
1809
ea2151b4 1810 lpfc_update_stats(phba, lpfc_cmd);
445cf4f4 1811 result = cmd->result;
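/*
 * Per-target queue depth management: if this command's round trip
 * exceeded cfg_max_scsicmpl_time, clamp the node's cmd_qdepth down
 * to the number of commands currently outstanding (READ_10/WRITE_10
 * only, never below LPFC_MIN_TGT_QDEPTH); otherwise ramp cmd_qdepth
 * back up by LPFC_TGTQ_RAMPUP_PCENT percent once LPFC_TGTQ_INTERVAL
 * has elapsed since the last change.
 */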
977b5a0a
JS
1812 if (vport->cfg_max_scsicmpl_time &&
1813 time_after(jiffies, lpfc_cmd->start_time +
1814 msecs_to_jiffies(vport->cfg_max_scsicmpl_time))) {
a257bf90 1815 spin_lock_irqsave(shost->host_lock, flags);
109f6ed0
JS
1816 if (pnode && NLP_CHK_NODE_ACT(pnode)) {
1817 if (pnode->cmd_qdepth >
1818 atomic_read(&pnode->cmd_pending) &&
1819 (atomic_read(&pnode->cmd_pending) >
1820 LPFC_MIN_TGT_QDEPTH) &&
1821 ((cmd->cmnd[0] == READ_10) ||
1822 (cmd->cmnd[0] == WRITE_10)))
1823 pnode->cmd_qdepth =
1824 atomic_read(&pnode->cmd_pending);
1825
1826 pnode->last_change_time = jiffies;
1827 }
a257bf90 1828 spin_unlock_irqrestore(shost->host_lock, flags);
109f6ed0
JS
1829 } else if (pnode && NLP_CHK_NODE_ACT(pnode)) {
1830 if ((pnode->cmd_qdepth < LPFC_MAX_TGT_QDEPTH) &&
977b5a0a 1831 time_after(jiffies, pnode->last_change_time +
109f6ed0 1832 msecs_to_jiffies(LPFC_TGTQ_INTERVAL))) {
a257bf90 1833 spin_lock_irqsave(shost->host_lock, flags);
109f6ed0
JS
1834 pnode->cmd_qdepth += pnode->cmd_qdepth *
1835 LPFC_TGTQ_RAMPUP_PCENT / 100;
1836 if (pnode->cmd_qdepth > LPFC_MAX_TGT_QDEPTH)
1837 pnode->cmd_qdepth = LPFC_MAX_TGT_QDEPTH;
1838 pnode->last_change_time = jiffies;
a257bf90 1839 spin_unlock_irqrestore(shost->host_lock, flags);
109f6ed0 1840 }
977b5a0a
JS
1841 }
1842
1dcb58e5 1843 lpfc_scsi_unprep_dma_buf(phba, lpfc_cmd);
a257bf90
JS
1844
1845 /* The sdev is not guaranteed to be valid post scsi_done upcall. */
1846 queue_depth = cmd->device->queue_depth;
1847 scsi_id = cmd->device->id;
0bd4ca25
JSEC
1848 cmd->scsi_done(cmd);
1849
b808608b 1850 if (phba->cfg_poll & ENABLE_FCP_RING_POLLING) {
fa61a54e
JS
1851 /*
1852 * If there is a thread waiting for command completion
1853 * wake up the thread.
1854 */
a257bf90 1855 spin_lock_irqsave(shost->host_lock, flags);
495a714c 1856 lpfc_cmd->pCmd = NULL;
fa61a54e
JS
1857 if (lpfc_cmd->waitq)
1858 wake_up(lpfc_cmd->waitq);
a257bf90 1859 spin_unlock_irqrestore(shost->host_lock, flags);
b808608b
JW
1860 lpfc_release_scsi_buf(phba, lpfc_cmd);
1861 return;
1862 }
1863
92d7f7b0
JS
1864
1865 if (!result)
a257bf90 1866 lpfc_rampup_queue_depth(vport, queue_depth);
92d7f7b0 1867
58da1ffb 1868 if (!result && pnode && NLP_CHK_NODE_ACT(pnode) &&
445cf4f4
JSEC
1869 ((jiffies - pnode->last_ramp_up_time) >
1870 LPFC_Q_RAMP_UP_INTERVAL * HZ) &&
1871 ((jiffies - pnode->last_q_full_time) >
1872 LPFC_Q_RAMP_UP_INTERVAL * HZ) &&
a257bf90
JS
1873 (vport->cfg_lun_queue_depth > queue_depth)) {
1874 shost_for_each_device(tmp_sdev, shost) {
3de2a653 1875 if (vport->cfg_lun_queue_depth > tmp_sdev->queue_depth) {
a257bf90 1876 if (tmp_sdev->id != scsi_id)
445cf4f4
JSEC
1877 continue;
1878 if (tmp_sdev->ordered_tags)
1879 scsi_adjust_queue_depth(tmp_sdev,
1880 MSG_ORDERED_TAG,
1881 tmp_sdev->queue_depth+1);
1882 else
1883 scsi_adjust_queue_depth(tmp_sdev,
1884 MSG_SIMPLE_TAG,
1885 tmp_sdev->queue_depth+1);
1886
1887 pnode->last_ramp_up_time = jiffies;
1888 }
1889 }
ea2151b4
JS
1890 lpfc_send_sdev_queuedepth_change_event(phba, vport, pnode,
1891 0xFFFFFFFF,
a257bf90 1892 queue_depth, queue_depth + 1);
445cf4f4
JSEC
1893 }
1894
1895 /*
1896 * Check for queue full. If the lun is reporting queue full, then
1897 * back off the lun queue depth to prevent target overloads.
1898 */
58da1ffb
JS
1899 if (result == SAM_STAT_TASK_SET_FULL && pnode &&
1900 NLP_CHK_NODE_ACT(pnode)) {
445cf4f4
JSEC
1901 pnode->last_q_full_time = jiffies;
1902
a257bf90
JS
1903 shost_for_each_device(tmp_sdev, shost) {
1904 if (tmp_sdev->id != scsi_id)
445cf4f4
JSEC
1905 continue;
1906 depth = scsi_track_queue_full(tmp_sdev,
1907 tmp_sdev->queue_depth - 1);
1908 }
1909 /*
2e0fef85 1910 * The queue depth cannot be lowered any more.
445cf4f4
JSEC
1911 * Modify the returned error code to store
1912 * the final depth value set by
1913 * scsi_track_queue_full.
1914 */
1915 if (depth == -1)
a257bf90 1916 depth = shost->cmd_per_lun;
445cf4f4
JSEC
1917
1918 if (depth) {
e8b62011
JS
1919 lpfc_printf_vlog(vport, KERN_WARNING, LOG_FCP,
1920 "0711 detected queue full - lun queue "
1921 "depth adjusted to %d.\n", depth);
ea2151b4
JS
1922 lpfc_send_sdev_queuedepth_change_event(phba, vport,
1923 pnode, 0xFFFFFFFF,
1924 depth+1, depth);
445cf4f4
JSEC
1925 }
1926 }
1927
fa61a54e
JS
1928 /*
1929 * If there is a thread waiting for command completion
1930 * wake up the thread.
1931 */
a257bf90 1932 spin_lock_irqsave(shost->host_lock, flags);
495a714c 1933 lpfc_cmd->pCmd = NULL;
fa61a54e
JS
1934 if (lpfc_cmd->waitq)
1935 wake_up(lpfc_cmd->waitq);
a257bf90 1936 spin_unlock_irqrestore(shost->host_lock, flags);
fa61a54e 1937
0bd4ca25 1938 lpfc_release_scsi_buf(phba, lpfc_cmd);
dea3101e
JB
1939}
1940
34b02dcd 1941/**
3621a710 1942 * lpfc_fcpcmd_to_iocb - copy the fcp_cmnd data into the IOCB
34b02dcd
JS
1943 * @data: A pointer to the immediate command data portion of the IOCB.
1944 * @fcp_cmnd: The FCP Command that is provided by the SCSI layer.
1945 *
1946 * The routine copies the entire FCP command from @fcp_cmnd to @data while
1947 * byte swapping the data to big endian format for transmission on the wire.
1948 **/
1949static void
1950lpfc_fcpcmd_to_iocb(uint8_t *data, struct fcp_cmnd *fcp_cmnd)
1951{
1952 int i, j;
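/* Copy one 32-bit word at a time, byte-swapping each to big endian. */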
1953 for (i = 0, j = 0; i < sizeof(struct fcp_cmnd);
1954 i += sizeof(uint32_t), j++) {
1955 ((uint32_t *)data)[j] = cpu_to_be32(((uint32_t *)fcp_cmnd)[j]);
1956 }
1957}
1958
9bad7671 1959/**
3621a710 1960 * lpfc_scsi_prep_cmnd - Routine to convert scsi cmnd to FCP information unit
9bad7671
JS
1961 * @vport: The virtual port for which this call is being executed.
1962 * @lpfc_cmd: The scsi buffer holding the command to be sent.
1963 * @pnode: Pointer to lpfc_nodelist.
1964 *
1965 * This routine initializes the fcp_cmnd and iocb data structures from
1966 * the scsi command being issued.
1967 **/
dea3101e 1968static void
2e0fef85
JS
1969lpfc_scsi_prep_cmnd(struct lpfc_vport *vport, struct lpfc_scsi_buf *lpfc_cmd,
1970 struct lpfc_nodelist *pnode)
dea3101e 1971{
2e0fef85 1972 struct lpfc_hba *phba = vport->phba;
dea3101e
JB
1973 struct scsi_cmnd *scsi_cmnd = lpfc_cmd->pCmd;
1974 struct fcp_cmnd *fcp_cmnd = lpfc_cmd->fcp_cmnd;
1975 IOCB_t *iocb_cmd = &lpfc_cmd->cur_iocbq.iocb;
1976 struct lpfc_iocbq *piocbq = &(lpfc_cmd->cur_iocbq);
1977 int datadir = scsi_cmnd->sc_data_direction;
7e2b19fb 1978 char tag[2];
dea3101e 1979
58da1ffb
JS
1980 if (!pnode || !NLP_CHK_NODE_ACT(pnode))
1981 return;
1982
dea3101e 1983 lpfc_cmd->fcp_rsp->rspSnsLen = 0;
69859dc4
JSEC
1984 /* clear task management bits */
1985 lpfc_cmd->fcp_cmnd->fcpCntl2 = 0;
dea3101e 1986
91886523
JSEC
1987 int_to_scsilun(lpfc_cmd->pCmd->device->lun,
1988 &lpfc_cmd->fcp_cmnd->fcp_lun);
dea3101e
JB
1989
1990 memcpy(&fcp_cmnd->fcpCdb[0], scsi_cmnd->cmnd, 16);
1991
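/* Map the midlayer's queue-tag message onto the FCP task attribute. */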
7e2b19fb
JS
1992 if (scsi_populate_tag_msg(scsi_cmnd, tag)) {
1993 switch (tag[0]) {
dea3101e
JB
1994 case HEAD_OF_QUEUE_TAG:
1995 fcp_cmnd->fcpCntl1 = HEAD_OF_Q;
1996 break;
1997 case ORDERED_QUEUE_TAG:
1998 fcp_cmnd->fcpCntl1 = ORDERED_Q;
1999 break;
2000 default:
2001 fcp_cmnd->fcpCntl1 = SIMPLE_Q;
2002 break;
2003 }
2004 } else
2005 fcp_cmnd->fcpCntl1 = 0;
2006
2007 /*
2008 * There are three possibilities here - use scatter-gather segment, use
2009 * the single mapping, or neither. Start the lpfc command prep by
2010 * bumping the bpl beyond the fcp_cmnd and fcp_rsp regions to the first
2011 * data bde entry.
2012 */
a0b4f78f 2013 if (scsi_sg_count(scsi_cmnd)) {
dea3101e
JB
2014 if (datadir == DMA_TO_DEVICE) {
2015 iocb_cmd->ulpCommand = CMD_FCP_IWRITE64_CR;
2016 iocb_cmd->un.fcpi.fcpi_parm = 0;
2017 iocb_cmd->ulpPU = 0;
2018 fcp_cmnd->fcpCntl3 = WRITE_DATA;
2019 phba->fc4OutputRequests++;
2020 } else {
2021 iocb_cmd->ulpCommand = CMD_FCP_IREAD64_CR;
2022 iocb_cmd->ulpPU = PARM_READ_CHECK;
dea3101e
JB
2023 fcp_cmnd->fcpCntl3 = READ_DATA;
2024 phba->fc4InputRequests++;
2025 }
2026 } else {
2027 iocb_cmd->ulpCommand = CMD_FCP_ICMND64_CR;
2028 iocb_cmd->un.fcpi.fcpi_parm = 0;
2029 iocb_cmd->ulpPU = 0;
2030 fcp_cmnd->fcpCntl3 = 0;
2031 phba->fc4ControlRequests++;
2032 }
e2a0a9d6
JS
2033 if (phba->sli_rev == 3 &&
2034 !(phba->sli3_options & LPFC_SLI3_BG_ENABLED))
34b02dcd 2035 lpfc_fcpcmd_to_iocb(iocb_cmd->unsli3.fcp_ext.icd, fcp_cmnd);
dea3101e
JB
2036 /*
2037 * Finish initializing those IOCB fields that are independent
2038 * of the scsi_cmnd request_buffer
2039 */
2040 piocbq->iocb.ulpContext = pnode->nlp_rpi;
2041 if (pnode->nlp_fcp_info & NLP_FCP_2_DEVICE)
2042 piocbq->iocb.ulpFCP2Rcvy = 1;
09372820
JS
2043 else
2044 piocbq->iocb.ulpFCP2Rcvy = 0;
dea3101e
JB
2045
2046 piocbq->iocb.ulpClass = (pnode->nlp_fcp_info & 0x0f);
2047 piocbq->context1 = lpfc_cmd;
2048 piocbq->iocb_cmpl = lpfc_scsi_cmd_iocb_cmpl;
2049 piocbq->iocb.ulpTimeout = lpfc_cmd->timeout;
2e0fef85 2050 piocbq->vport = vport;
dea3101e
JB
2051}
2052
9bad7671 2053/**
3621a710 2054 * lpfc_scsi_prep_task_mgmt_cmnd - Convert scsi TM cmnd to FCP information unit
9bad7671
JS
2055 * @vport: The virtual port for which this call is being executed.
2056 * @lpfc_cmd: Pointer to lpfc_scsi_buf data structure.
2057 * @lun: Logical unit number.
2058 * @task_mgmt_cmd: SCSI task management command.
2059 *
2060 * This routine creates the FCP information unit corresponding to @task_mgmt_cmd.
2061 *
2062 * Return codes:
2063 * 0 - Error
2064 * 1 - Success
2065 **/
dea3101e 2066static int
2e0fef85 2067lpfc_scsi_prep_task_mgmt_cmd(struct lpfc_vport *vport,
dea3101e 2068 struct lpfc_scsi_buf *lpfc_cmd,
420b630d 2069 unsigned int lun,
dea3101e
JB
2070 uint8_t task_mgmt_cmd)
2071{
dea3101e
JB
2072 struct lpfc_iocbq *piocbq;
2073 IOCB_t *piocb;
2074 struct fcp_cmnd *fcp_cmnd;
0b18ac42 2075 struct lpfc_rport_data *rdata = lpfc_cmd->rdata;
dea3101e
JB
2076 struct lpfc_nodelist *ndlp = rdata->pnode;
2077
58da1ffb
JS
2078 if (!ndlp || !NLP_CHK_NODE_ACT(ndlp) ||
2079 ndlp->nlp_state != NLP_STE_MAPPED_NODE)
dea3101e 2080 return 0;
dea3101e 2081
dea3101e 2082 piocbq = &(lpfc_cmd->cur_iocbq);
2e0fef85
JS
2083 piocbq->vport = vport;
2084
dea3101e
JB
2085 piocb = &piocbq->iocb;
2086
2087 fcp_cmnd = lpfc_cmd->fcp_cmnd;
34b02dcd
JS
2088 /* Clear out any old data in the FCP command area */
2089 memset(fcp_cmnd, 0, sizeof(struct fcp_cmnd));
2090 int_to_scsilun(lun, &fcp_cmnd->fcp_lun);
dea3101e 2091 fcp_cmnd->fcpCntl2 = task_mgmt_cmd;
e2a0a9d6
JS
2092 if (vport->phba->sli_rev == 3 &&
2093 !(vport->phba->sli3_options & LPFC_SLI3_BG_ENABLED))
34b02dcd 2094 lpfc_fcpcmd_to_iocb(piocb->unsli3.fcp_ext.icd, fcp_cmnd);
dea3101e 2095 piocb->ulpCommand = CMD_FCP_ICMND64_CR;
dea3101e
JB
2096 piocb->ulpContext = ndlp->nlp_rpi;
2097 if (ndlp->nlp_fcp_info & NLP_FCP_2_DEVICE) {
2098 piocb->ulpFCP2Rcvy = 1;
2099 }
2100 piocb->ulpClass = (ndlp->nlp_fcp_info & 0x0f);
2101
2102 /* ulpTimeout is only one byte */
2103 if (lpfc_cmd->timeout > 0xff) {
2104 /*
2105 * Do not timeout the command at the firmware level.
2106 * The driver will provide the timeout mechanism.
2107 */
2108 piocb->ulpTimeout = 0;
2109 } else {
2110 piocb->ulpTimeout = lpfc_cmd->timeout;
2111 }
2112
2e0fef85 2113 return 1;
dea3101e
JB
2114}
2115
9bad7671 2116/**
3621a710 2117 * lpfc_taskmgmt_def_cmpl - IOCB completion routine for task management command
9bad7671
JS
2118 * @phba: The Hba for which this call is being executed.
2119 * @cmdiocbq: Pointer to lpfc_iocbq data structure.
2120 * @rspiocbq: Pointer to lpfc_iocbq data structure.
2121 *
2122 * This routine is the IOCB completion routine for the device reset and
2123 * target reset routines. It releases the scsi buffer associated with lpfc_cmd.
2124 **/
7054a606
JS
2125static void
2126lpfc_tskmgmt_def_cmpl(struct lpfc_hba *phba,
2127 struct lpfc_iocbq *cmdiocbq,
2128 struct lpfc_iocbq *rspiocbq)
2129{
2130 struct lpfc_scsi_buf *lpfc_cmd =
2131 (struct lpfc_scsi_buf *) cmdiocbq->context1;
2132 if (lpfc_cmd)
2133 lpfc_release_scsi_buf(phba, lpfc_cmd);
2134 return;
2135}
2136
9bad7671 2137/**
3621a710 2138 * lpfc_scsi_tgt_reset - Target reset handler
9bad7671
JS
2139 * @lpfc_cmd: Pointer to lpfc_scsi_buf data structure
2140 * @vport: The virtual port for which this call is being executed.
2141 * @tgt_id: Target ID.
2142 * @lun: Lun number.
2143 * @rdata: Pointer to lpfc_rport_data.
2144 *
2145 * This routine issues a TARGET RESET iocb to reset a target with @tgt_id ID.
2146 *
2147 * Return Code:
2148 * 0x2003 - Error
2149 * 0x2002 - Success.
2150 **/
dea3101e 2151static int
2e0fef85 2152lpfc_scsi_tgt_reset(struct lpfc_scsi_buf *lpfc_cmd, struct lpfc_vport *vport,
420b630d
JS
2153 unsigned tgt_id, unsigned int lun,
2154 struct lpfc_rport_data *rdata)
dea3101e 2155{
2e0fef85 2156 struct lpfc_hba *phba = vport->phba;
dea3101e 2157 struct lpfc_iocbq *iocbq;
0bd4ca25 2158 struct lpfc_iocbq *iocbqrsp;
dea3101e 2159 int ret;
915caaaf 2160 int status;
dea3101e 2161
58da1ffb 2162 if (!rdata->pnode || !NLP_CHK_NODE_ACT(rdata->pnode))
f5603511
JS
2163 return FAILED;
2164
0b18ac42 2165 lpfc_cmd->rdata = rdata;
915caaaf 2166 status = lpfc_scsi_prep_task_mgmt_cmd(vport, lpfc_cmd, lun,
420b630d 2167 FCP_TARGET_RESET);
915caaaf 2168 if (!status)
dea3101e
JB
2169 return FAILED;
2170
dea3101e 2171 iocbq = &lpfc_cmd->cur_iocbq;
0bd4ca25
JSEC
2172 iocbqrsp = lpfc_sli_get_iocbq(phba);
2173
dea3101e
JB
2174 if (!iocbqrsp)
2175 return FAILED;
dea3101e 2176
0b18ac42 2177 /* Issue Target Reset to TGT <num> */
e8b62011
JS
2178 lpfc_printf_vlog(vport, KERN_INFO, LOG_FCP,
2179 "0702 Issue Target Reset to TGT %d Data: x%x x%x\n",
2180 tgt_id, rdata->pnode->nlp_rpi, rdata->pnode->nlp_flag);
915caaaf 2181 status = lpfc_sli_issue_iocb_wait(phba,
68876920
JSEC
2182 &phba->sli.ring[phba->sli.fcp_ring],
2183 iocbq, iocbqrsp, lpfc_cmd->timeout);
915caaaf
JS
2184 if (status != IOCB_SUCCESS) {
2185 if (status == IOCB_TIMEDOUT) {
7054a606 2186 iocbq->iocb_cmpl = lpfc_tskmgmt_def_cmpl;
915caaaf
JS
2187 ret = TIMEOUT_ERROR;
2188 } else
2189 ret = FAILED;
dea3101e 2190 lpfc_cmd->status = IOSTAT_DRIVER_REJECT;
dea3101e
JB
2191 } else {
2192 ret = SUCCESS;
2193 lpfc_cmd->result = iocbqrsp->iocb.un.ulpWord[4];
2194 lpfc_cmd->status = iocbqrsp->iocb.ulpStatus;
2195 if (lpfc_cmd->status == IOSTAT_LOCAL_REJECT &&
2196 (lpfc_cmd->result & IOERR_DRVR_MASK))
2197 lpfc_cmd->status = IOSTAT_DRIVER_REJECT;
2198 }
2199
604a3e30 2200 lpfc_sli_release_iocbq(phba, iocbqrsp);
dea3101e
JB
2201 return ret;
2202}
2203
9bad7671 2204/**
3621a710 2205 * lpfc_info - Info entry point of scsi_host_template data structure
9bad7671
JS
2206 * @host: The scsi host for which this call is being executed.
2207 *
2208 * This routine provides module information about the hba.
2209 *
2210 * Return code:
2211 * Pointer to char - Success.
2212 **/
dea3101e
JB
2213const char *
2214lpfc_info(struct Scsi_Host *host)
2215{
2e0fef85
JS
2216 struct lpfc_vport *vport = (struct lpfc_vport *) host->hostdata;
2217 struct lpfc_hba *phba = vport->phba;
dea3101e
JB
2218 int len;
2219 static char lpfcinfobuf[384];
2220
2221 memset(lpfcinfobuf, 0, 384);
2222 if (phba && phba->pcidev) {
2223 strncpy(lpfcinfobuf, phba->ModelDesc, 256);
2224 len = strlen(lpfcinfobuf);
2225 snprintf(lpfcinfobuf + len,
2226 384-len,
2227 " on PCI bus %02x device %02x irq %d",
2228 phba->pcidev->bus->number,
2229 phba->pcidev->devfn,
2230 phba->pcidev->irq);
2231 len = strlen(lpfcinfobuf);
2232 if (phba->Port[0]) {
2233 snprintf(lpfcinfobuf + len,
2234 384-len,
2235 " port %s",
2236 phba->Port);
2237 }
2238 }
2239 return lpfcinfobuf;
2240}
2241
9bad7671 2242/**
3621a710 2243 * lpfc_poll_rearm_timer - Routine to modify the fcp_poll timer of the hba
9bad7671
JS
2244 * @phba: The Hba for which this call is being executed.
2245 *
2246 * This routine rearms the fcp_poll_timer of @phba using cfg_poll_tmo.
2247 * The default value of cfg_poll_tmo is 10 milliseconds.
2248 **/
875fbdfe
JSEC
2249static __inline__ void lpfc_poll_rearm_timer(struct lpfc_hba * phba)
2250{
2251 unsigned long poll_tmo_expires =
2252 (jiffies + msecs_to_jiffies(phba->cfg_poll_tmo));
2253
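/* Rearm only while commands remain outstanding on the FCP ring. */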
2254 if (phba->sli.ring[LPFC_FCP_RING].txcmplq_cnt)
2255 mod_timer(&phba->fcp_poll_timer,
2256 poll_tmo_expires);
2257}
2258
9bad7671 2259/**
3621a710 2260 * lpfc_poll_start_timer - Routine to start fcp_poll_timer of HBA
9bad7671
JS
2261 * @phba: The Hba for which this call is being executed.
2262 *
2263 * This routine starts the fcp_poll_timer of @phba.
2264 **/
875fbdfe
JSEC
2265void lpfc_poll_start_timer(struct lpfc_hba * phba)
2266{
2267 lpfc_poll_rearm_timer(phba);
2268}
2269
9bad7671 2270/**
3621a710 2271 * lpfc_poll_timeout - Restart polling timer
9bad7671
JS
2272 * @ptr: Pointer to the lpfc_hba data structure, cast to unsigned long.
2273 *
2274 * This routine restarts the fcp_poll timer when FCP ring polling is enabled
2275 * and the FCP ring interrupt is disabled.
2276 **/
2277
875fbdfe
JSEC
2278void lpfc_poll_timeout(unsigned long ptr)
2279{
2e0fef85 2280 struct lpfc_hba *phba = (struct lpfc_hba *) ptr;
875fbdfe
JSEC
2281
2282 if (phba->cfg_poll & ENABLE_FCP_RING_POLLING) {
2283 lpfc_sli_poll_fcp_ring(phba);
2284 if (phba->cfg_poll & DISABLE_FCP_RING_INT)
2285 lpfc_poll_rearm_timer(phba);
2286 }
875fbdfe
JSEC
2287}
2288
9bad7671 2289/**
3621a710 2290 * lpfc_queuecommand - scsi_host_template queuecommand entry point
9bad7671
JS
2291 * @cmnd: Pointer to scsi_cmnd data structure.
2292 * @done: Pointer to done routine.
2293 *
2294 * The driver registers this routine with the scsi midlayer to submit a @cmnd
2295 * for processing. It prepares an IOCB from the scsi command and hands it to
2296 * the firmware. The @done callback is invoked once processing completes.
2297 *
2298 * Return value :
2299 * 0 - Success
2300 * SCSI_MLQUEUE_HOST_BUSY - Block all devices served by this host temporarily.
2301 **/
dea3101e
JB
2302static int
2303lpfc_queuecommand(struct scsi_cmnd *cmnd, void (*done) (struct scsi_cmnd *))
2304{
2e0fef85
JS
2305 struct Scsi_Host *shost = cmnd->device->host;
2306 struct lpfc_vport *vport = (struct lpfc_vport *) shost->hostdata;
2307 struct lpfc_hba *phba = vport->phba;
2308 struct lpfc_sli *psli = &phba->sli;
dea3101e
JB
2309 struct lpfc_rport_data *rdata = cmnd->device->hostdata;
2310 struct lpfc_nodelist *ndlp = rdata->pnode;
0bd4ca25 2311 struct lpfc_scsi_buf *lpfc_cmd;
19a7b4ae 2312 struct fc_rport *rport = starget_to_rport(scsi_target(cmnd->device));
19a7b4ae 2313 int err;
dea3101e 2314
19a7b4ae
JSEC
2315 err = fc_remote_port_chkready(rport);
2316 if (err) {
2317 cmnd->result = err;
dea3101e
JB
2318 goto out_fail_command;
2319 }
2320
e2a0a9d6
JS
2321 if (!(phba->sli3_options & LPFC_SLI3_BG_ENABLED) &&
2322 scsi_get_prot_op(cmnd) != SCSI_PROT_NORMAL) {
2323
2324 printk(KERN_ERR "BLKGRD ERROR: rcvd protected cmd:%02x op:%02x "
2325 "str=%s without registering for BlockGuard - "
2326 "Rejecting command\n",
2327 cmnd->cmnd[0], scsi_get_prot_op(cmnd),
2328 dif_op_str[scsi_get_prot_op(cmnd)]);
2329 goto out_fail_command;
2330 }
2331
dea3101e 2332 /*
19a7b4ae
JSEC
2333 * Catch race where our node has transitioned, but the
2334 * transport is still transitioning.
dea3101e 2335 */
b522d7d4
JS
2336 if (!ndlp || !NLP_CHK_NODE_ACT(ndlp)) {
2337 cmnd->result = ScsiResult(DID_TRANSPORT_DISRUPTED, 0);
2338 goto out_fail_command;
2339 }
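/*
 * When a max completion time is configured, busy the host if this
 * target already has cmd_qdepth commands outstanding (cmd_qdepth is
 * adjusted dynamically in lpfc_scsi_cmd_iocb_cmpl).
 */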
109f6ed0
JS
2340 if (vport->cfg_max_scsicmpl_time &&
2341 (atomic_read(&ndlp->cmd_pending) >= ndlp->cmd_qdepth))
977b5a0a 2342 goto out_host_busy;
a93ce024 2343
ed957684 2344 lpfc_cmd = lpfc_get_scsi_buf(phba);
dea3101e 2345 if (lpfc_cmd == NULL) {
eaf15d5b 2346 lpfc_rampdown_queue_depth(phba);
92d7f7b0 2347
e8b62011
JS
2348 lpfc_printf_vlog(vport, KERN_INFO, LOG_FCP,
2349 "0707 driver's buffer pool is empty, "
2350 "IO busied\n");
dea3101e
JB
2351 goto out_host_busy;
2352 }
2353
2354 /*
2355 * Store the midlayer's command structure for the completion phase
2356 * and complete the command initialization.
2357 */
2358 lpfc_cmd->pCmd = cmnd;
2359 lpfc_cmd->rdata = rdata;
2360 lpfc_cmd->timeout = 0;
977b5a0a 2361 lpfc_cmd->start_time = jiffies;
dea3101e
JB
2362 cmnd->host_scribble = (unsigned char *)lpfc_cmd;
2363 cmnd->scsi_done = done;
2364
e2a0a9d6
JS
2365 if (scsi_get_prot_op(cmnd) != SCSI_PROT_NORMAL) {
2366 lpfc_printf_vlog(vport, KERN_WARNING, LOG_BG,
2367 "9033 BLKGRD: rcvd protected cmd:%02x op:%02x "
2368 "str=%s\n",
2369 cmnd->cmnd[0], scsi_get_prot_op(cmnd),
2370 dif_op_str[scsi_get_prot_op(cmnd)]);
2371 lpfc_printf_vlog(vport, KERN_WARNING, LOG_BG,
2372 "9034 BLKGRD: CDB: %02x %02x %02x %02x %02x "
2373 "%02x %02x %02x %02x %02x \n",
2374 cmnd->cmnd[0], cmnd->cmnd[1], cmnd->cmnd[2],
2375 cmnd->cmnd[3], cmnd->cmnd[4], cmnd->cmnd[5],
2376 cmnd->cmnd[6], cmnd->cmnd[7], cmnd->cmnd[8],
2377 cmnd->cmnd[9]);
2378 if (cmnd->cmnd[0] == READ_10)
2379 lpfc_printf_vlog(vport, KERN_WARNING, LOG_BG,
2380 "9035 BLKGRD: READ @ sector %llu, "
2381 "count %lu\n",
87b5c328 2382 (unsigned long long)scsi_get_lba(cmnd),
e2a0a9d6
JS
2383 cmnd->request->nr_sectors);
2384 else if (cmnd->cmnd[0] == WRITE_10)
2385 lpfc_printf_vlog(vport, KERN_WARNING, LOG_BG,
2386 "9036 BLKGRD: WRITE @ sector %llu, "
2387 "count %lu cmd=%p\n",
87b5c328 2388 (unsigned long long)scsi_get_lba(cmnd),
e2a0a9d6
JS
2389 cmnd->request->nr_sectors,
2390 cmnd);
2391
2392 err = lpfc_bg_scsi_prep_dma_buf(phba, lpfc_cmd);
2393 } else {
2394 lpfc_printf_vlog(vport, KERN_WARNING, LOG_BG,
2395 "9038 BLKGRD: rcvd unprotected cmd:%02x op:%02x"
2396 " str=%s\n",
2397 cmnd->cmnd[0], scsi_get_prot_op(cmnd),
2398 dif_op_str[scsi_get_prot_op(cmnd)]);
2399 lpfc_printf_vlog(vport, KERN_WARNING, LOG_BG,
2400 "9039 BLKGRD: CDB: %02x %02x %02x %02x %02x "
2401 "%02x %02x %02x %02x %02x \n",
2402 cmnd->cmnd[0], cmnd->cmnd[1], cmnd->cmnd[2],
2403 cmnd->cmnd[3], cmnd->cmnd[4], cmnd->cmnd[5],
2404 cmnd->cmnd[6], cmnd->cmnd[7], cmnd->cmnd[8],
2405 cmnd->cmnd[9]);
2406 if (cmnd->cmnd[0] == READ_10)
2407 lpfc_printf_vlog(vport, KERN_WARNING, LOG_BG,
2408 "9040 dbg: READ @ sector %llu, "
87b5c328
JS
2409 "count %lu\n",
2410 (unsigned long long)scsi_get_lba(cmnd),
e2a0a9d6
JS
2411 cmnd->request->nr_sectors);
2412 else if (cmnd->cmnd[0] == WRITE_10)
2413 lpfc_printf_vlog(vport, KERN_WARNING, LOG_BG,
2414 "9041 dbg: WRITE @ sector %llu, "
2415 "count %lu cmd=%p\n",
87b5c328 2416 (unsigned long long)scsi_get_lba(cmnd),
e2a0a9d6
JS
2417 cmnd->request->nr_sectors, cmnd);
2418 else
2419 lpfc_printf_vlog(vport, KERN_WARNING, LOG_BG,
2420 "9042 dbg: parser not implemented\n");
2421 err = lpfc_scsi_prep_dma_buf(phba, lpfc_cmd);
2422 }
2423
dea3101e
JB
2424 if (err)
2425 goto out_host_busy_free_buf;
2426
2e0fef85 2427 lpfc_scsi_prep_cmnd(vport, lpfc_cmd, ndlp);
dea3101e 2428
977b5a0a 2429 atomic_inc(&ndlp->cmd_pending);
dea3101e 2430 err = lpfc_sli_issue_iocb(phba, &phba->sli.ring[psli->fcp_ring],
92d7f7b0 2431 &lpfc_cmd->cur_iocbq, SLI_IOCB_RET_IOCB);
eaf15d5b
JS
2432 if (err) {
2433 atomic_dec(&ndlp->cmd_pending);
dea3101e 2434 goto out_host_busy_free_buf;
eaf15d5b 2435 }
875fbdfe
JSEC
2436 if (phba->cfg_poll & ENABLE_FCP_RING_POLLING) {
2437 lpfc_sli_poll_fcp_ring(phba);
2438 if (phba->cfg_poll & DISABLE_FCP_RING_INT)
2439 lpfc_poll_rearm_timer(phba);
2440 }
2441
dea3101e
JB
2442 return 0;
2443
2444 out_host_busy_free_buf:
bcf4dbfa 2445 lpfc_scsi_unprep_dma_buf(phba, lpfc_cmd);
0bd4ca25 2446 lpfc_release_scsi_buf(phba, lpfc_cmd);
dea3101e
JB
2447 out_host_busy:
2448 return SCSI_MLQUEUE_HOST_BUSY;
2449
2450 out_fail_command:
2451 done(cmnd);
2452 return 0;
2453}
2454
9bad7671 2455/**
3621a710 2456 * lpfc_block_error_handler - Routine to block error handler
9bad7671
JS
2457 * @cmnd: Pointer to scsi_cmnd data structure.
2458 *
2459 * This routine blocks execution until the fc_rport state is no longer FC_PORTSTATE_BLOCKED.
2460 **/
a90f5684
JS
2461static void
2462lpfc_block_error_handler(struct scsi_cmnd *cmnd)
2463{
2464 struct Scsi_Host *shost = cmnd->device->host;
2465 struct fc_rport *rport = starget_to_rport(scsi_target(cmnd->device));
2466
2467 spin_lock_irq(shost->host_lock);
2468 while (rport->port_state == FC_PORTSTATE_BLOCKED) {
2469 spin_unlock_irq(shost->host_lock);
2470 msleep(1000);
2471 spin_lock_irq(shost->host_lock);
2472 }
2473 spin_unlock_irq(shost->host_lock);
2474 return;
2475}
63c59c3b 2476
9bad7671 2477/**
3621a710 2478 * lpfc_abort_handler - scsi_host_template eh_abort_handler entry point
9bad7671
JS
2479 * @cmnd: Pointer to scsi_cmnd data structure.
2480 *
2481 * This routine aborts the @cmnd pending in the base driver.
2482 *
2483 * Return code :
2484 * 0x2003 - Error
2485 * 0x2002 - Success
2486 **/
dea3101e 2487static int
63c59c3b 2488lpfc_abort_handler(struct scsi_cmnd *cmnd)
dea3101e 2489{
2e0fef85
JS
2490 struct Scsi_Host *shost = cmnd->device->host;
2491 struct lpfc_vport *vport = (struct lpfc_vport *) shost->hostdata;
2492 struct lpfc_hba *phba = vport->phba;
dea3101e 2493 struct lpfc_sli_ring *pring = &phba->sli.ring[phba->sli.fcp_ring];
0bd4ca25
JSEC
2494 struct lpfc_iocbq *iocb;
2495 struct lpfc_iocbq *abtsiocb;
dea3101e 2496 struct lpfc_scsi_buf *lpfc_cmd;
dea3101e 2497 IOCB_t *cmd, *icmd;
0bd4ca25 2498 int ret = SUCCESS;
fa61a54e 2499 DECLARE_WAIT_QUEUE_HEAD_ONSTACK(waitq);
dea3101e 2500
a90f5684 2501 lpfc_block_error_handler(cmnd);
0bd4ca25
JSEC
2502 lpfc_cmd = (struct lpfc_scsi_buf *)cmnd->host_scribble;
2503 BUG_ON(!lpfc_cmd);
dea3101e 2504
0bd4ca25
JSEC
2505 /*
2506 * If pCmd field of the corresponding lpfc_scsi_buf structure
2507 * points to a different SCSI command, then the driver has
2508 * already completed this command, but the midlayer did not
2509 * see the completion before the eh fired. Just return
2510 * SUCCESS.
2511 */
2512 iocb = &lpfc_cmd->cur_iocbq;
2513 if (lpfc_cmd->pCmd != cmnd)
2514 goto out;
dea3101e 2515
0bd4ca25 2516 BUG_ON(iocb->context1 != lpfc_cmd);
dea3101e 2517
0bd4ca25
JSEC
2518 abtsiocb = lpfc_sli_get_iocbq(phba);
2519 if (abtsiocb == NULL) {
2520 ret = FAILED;
dea3101e
JB
2521 goto out;
2522 }
2523
dea3101e 2524 /*
0bd4ca25
JSEC
2525 * The scsi command cannot be in the txq: it is in flight, because
2526 * pCmd is still pointing at the SCSI command we have to abort. There
2527 * is no need to search the txcmplq. Just send an abort to the FW.
dea3101e 2528 */
dea3101e 2529
0bd4ca25
JSEC
2530 cmd = &iocb->iocb;
2531 icmd = &abtsiocb->iocb;
2532 icmd->un.acxri.abortType = ABORT_TYPE_ABTS;
2533 icmd->un.acxri.abortContextTag = cmd->ulpContext;
2534 icmd->un.acxri.abortIoTag = cmd->ulpIoTag;
dea3101e 2535
0bd4ca25
JSEC
2536 icmd->ulpLe = 1;
2537 icmd->ulpClass = cmd->ulpClass;
2e0fef85 2538 if (lpfc_is_link_up(phba))
0bd4ca25
JSEC
2539 icmd->ulpCommand = CMD_ABORT_XRI_CN;
2540 else
2541 icmd->ulpCommand = CMD_CLOSE_XRI_CN;
dea3101e 2542
0bd4ca25 2543 abtsiocb->iocb_cmpl = lpfc_sli_abort_fcp_cmpl;
2e0fef85 2544 abtsiocb->vport = vport;
0bd4ca25
JSEC
2545 if (lpfc_sli_issue_iocb(phba, pring, abtsiocb, 0) == IOCB_ERROR) {
2546 lpfc_sli_release_iocbq(phba, abtsiocb);
2547 ret = FAILED;
2548 goto out;
2549 }
dea3101e 2550
875fbdfe
JSEC
2551 if (phba->cfg_poll & DISABLE_FCP_RING_INT)
2552 lpfc_sli_poll_fcp_ring(phba);
2553
fa61a54e 2554 lpfc_cmd->waitq = &waitq;
0bd4ca25 2555 /* Wait for abort to complete */
fa61a54e
JS
2556 wait_event_timeout(waitq,
2557 (lpfc_cmd->pCmd != cmnd),
2558 (2*vport->cfg_devloss_tmo*HZ));
875fbdfe 2559
fa61a54e
JS
2560 spin_lock_irq(shost->host_lock);
2561 lpfc_cmd->waitq = NULL;
2562 spin_unlock_irq(shost->host_lock);
dea3101e 2563
0bd4ca25
JSEC
2564 if (lpfc_cmd->pCmd == cmnd) {
2565 ret = FAILED;
e8b62011
JS
2566 lpfc_printf_vlog(vport, KERN_ERR, LOG_FCP,
2567 "0748 abort handler timed out waiting "
2568 "for abort to complete: ret %#x, ID %d, "
2569 "LUN %d, snum %#lx\n",
2570 ret, cmnd->device->id, cmnd->device->lun,
2571 cmnd->serial_number);
dea3101e
JB
2572 }
2573
2574 out:
e8b62011
JS
2575 lpfc_printf_vlog(vport, KERN_WARNING, LOG_FCP,
2576 "0749 SCSI Layer I/O Abort Request Status x%x ID %d "
2577 "LUN %d snum %#lx\n", ret, cmnd->device->id,
2578 cmnd->device->lun, cmnd->serial_number);
63c59c3b 2579 return ret;
8fa728a2
JG
2580}
2581
9bad7671 2582/**
3621a710 2583 * lpfc_device_reset_handler - scsi_host_template eh_device_reset entry point
9bad7671
JS
2584 * @cmnd: Pointer to scsi_cmnd data structure.
2585 *
2586 * This routine does a device reset by sending a TARGET_RESET task management
2587 * command.
2588 *
2589 * Return code :
2590 * 0x2003 - Error
3621a710 2591 * 0x2002 - Success
9bad7671 2592 **/
dea3101e 2593static int
7054a606 2594lpfc_device_reset_handler(struct scsi_cmnd *cmnd)
dea3101e 2595{
2e0fef85
JS
2596 struct Scsi_Host *shost = cmnd->device->host;
2597 struct lpfc_vport *vport = (struct lpfc_vport *) shost->hostdata;
2598 struct lpfc_hba *phba = vport->phba;
0bd4ca25
JSEC
2599 struct lpfc_scsi_buf *lpfc_cmd;
2600 struct lpfc_iocbq *iocbq, *iocbqrsp;
dea3101e
JB
2601 struct lpfc_rport_data *rdata = cmnd->device->hostdata;
2602 struct lpfc_nodelist *pnode = rdata->pnode;
915caaaf
JS
2603 unsigned long later;
2604 int ret = SUCCESS;
2605 int status;
2606 int cnt;
ea2151b4 2607 struct lpfc_scsi_event_header scsi_event;
dea3101e 2608
a90f5684 2609 lpfc_block_error_handler(cmnd);
dea3101e
JB
2610 /*
2611 * If target is not in a MAPPED state, delay the reset until
c01f3208 2612 * target is rediscovered or devloss timeout expires.
dea3101e 2613 */
915caaaf
JS
2614 later = msecs_to_jiffies(2 * vport->cfg_devloss_tmo * 1000) + jiffies;
2615 while (time_after(later, jiffies)) {
58da1ffb 2616 if (!pnode || !NLP_CHK_NODE_ACT(pnode))
915caaaf 2617 return FAILED;
f5603511 2618 if (pnode->nlp_state == NLP_STE_MAPPED_NODE)
dea3101e 2619 break;
915caaaf
JS
2620 schedule_timeout_uninterruptible(msecs_to_jiffies(500));
2621 rdata = cmnd->device->hostdata;
2622 if (!rdata)
2623 break;
2624 pnode = rdata->pnode;
2625 }
ea2151b4
JS
2626
2627 scsi_event.event_type = FC_REG_SCSI_EVENT;
2628 scsi_event.subcategory = LPFC_EVENT_TGTRESET;
2629 scsi_event.lun = 0;
2630 memcpy(scsi_event.wwpn, &pnode->nlp_portname, sizeof(struct lpfc_name));
2631 memcpy(scsi_event.wwnn, &pnode->nlp_nodename, sizeof(struct lpfc_name));
2632
2633 fc_host_post_vendor_event(shost,
2634 fc_get_event_number(),
2635 sizeof(scsi_event),
2636 (char *)&scsi_event,
ddcc50f0 2637 LPFC_NL_VENDOR_ID);
ea2151b4 2638
915caaaf
JS
2639 if (!rdata || pnode->nlp_state != NLP_STE_MAPPED_NODE) {
2640 lpfc_printf_vlog(vport, KERN_ERR, LOG_FCP,
2641 "0721 LUN Reset rport "
2642 "failure: msec x%x rdata x%p\n",
2643 jiffies_to_msecs(jiffies - later), rdata);
2644 return FAILED;
dea3101e 2645 }
2e0fef85 2646 lpfc_cmd = lpfc_get_scsi_buf(phba);
dea3101e 2647 if (lpfc_cmd == NULL)
915caaaf 2648 return FAILED;
dea3101e 2649 lpfc_cmd->timeout = 60;
0b18ac42 2650 lpfc_cmd->rdata = rdata;
dea3101e 2651
915caaaf
JS
2652 status = lpfc_scsi_prep_task_mgmt_cmd(vport, lpfc_cmd,
2653 cmnd->device->lun,
2654 FCP_TARGET_RESET);
2655 if (!status) {
2656 lpfc_release_scsi_buf(phba, lpfc_cmd);
2657 return FAILED;
2658 }
dea3101e
JB
2659 iocbq = &lpfc_cmd->cur_iocbq;
2660
2661 /* get a buffer for this IOCB command response */
0bd4ca25 2662 iocbqrsp = lpfc_sli_get_iocbq(phba);
915caaaf
JS
2663 if (iocbqrsp == NULL) {
2664 lpfc_release_scsi_buf(phba, lpfc_cmd);
2665 return FAILED;
2666 }
e8b62011
JS
2667 lpfc_printf_vlog(vport, KERN_INFO, LOG_FCP,
2668 "0703 Issue target reset to TGT %d LUN %d "
2669 "rpi x%x nlp_flag x%x\n", cmnd->device->id,
2670 cmnd->device->lun, pnode->nlp_rpi, pnode->nlp_flag);
915caaaf
JS
2671 status = lpfc_sli_issue_iocb_wait(phba,
2672 &phba->sli.ring[phba->sli.fcp_ring],
2673 iocbq, iocbqrsp, lpfc_cmd->timeout);
2674 if (status == IOCB_TIMEDOUT) {
7054a606 2675 iocbq->iocb_cmpl = lpfc_tskmgmt_def_cmpl;
915caaaf
JS
2676 ret = TIMEOUT_ERROR;
2677 } else {
2678 if (status != IOCB_SUCCESS)
2679 ret = FAILED;
2680 lpfc_release_scsi_buf(phba, lpfc_cmd);
2681 }
2682 lpfc_printf_vlog(vport, KERN_ERR, LOG_FCP,
2683 "0713 SCSI layer issued device reset (%d, %d) "
2684 "return x%x status x%x result x%x\n",
2685 cmnd->device->id, cmnd->device->lun, ret,
2686 iocbqrsp->iocb.ulpStatus,
2687 iocbqrsp->iocb.un.ulpWord[4]);
6175c02a 2688 lpfc_sli_release_iocbq(phba, iocbqrsp);
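/*
 * Flush any I/O still outstanding on this target: count the pending
 * IOCBs, abort them, then poll until they drain or the
 * 2 * devloss_tmo window expires.
 */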
51ef4c26 2689 cnt = lpfc_sli_sum_iocb(vport, cmnd->device->id, cmnd->device->lun,
915caaaf 2690 LPFC_CTX_TGT);
6175c02a 2691 if (cnt)
51ef4c26 2692 lpfc_sli_abort_iocb(vport, &phba->sli.ring[phba->sli.fcp_ring],
6175c02a 2693 cmnd->device->id, cmnd->device->lun,
915caaaf
JS
2694 LPFC_CTX_TGT);
2695 later = msecs_to_jiffies(2 * vport->cfg_devloss_tmo * 1000) + jiffies;
2696 while (time_after(later, jiffies) && cnt) {
2697 schedule_timeout_uninterruptible(msecs_to_jiffies(20));
51ef4c26 2698 cnt = lpfc_sli_sum_iocb(vport, cmnd->device->id,
915caaaf 2699 cmnd->device->lun, LPFC_CTX_TGT);
dea3101e 2700 }
dea3101e 2701 if (cnt) {
e8b62011
JS
2702 lpfc_printf_vlog(vport, KERN_ERR, LOG_FCP,
2703 "0719 device reset I/O flush failure: "
2704 "cnt x%x\n", cnt);
0bd4ca25 2705 ret = FAILED;
dea3101e 2706 }
dea3101e
JB
2707 return ret;
2708}
2709
9bad7671 2710/**
3621a710 2711 * lpfc_bus_reset_handler - scsi_host_template eh_bus_reset_handler entry point
9bad7671
JS
2712 * @cmnd: Pointer to scsi_cmnd data structure.
2713 *
2714 * This routine does a target reset on every target of @cmnd->device->host.
2715 *
2716 * Return Code:
2717 * 0x2003 - Error
2718 * 0x2002 - Success
2719 **/
94d0e7b8 2720static int
7054a606 2721lpfc_bus_reset_handler(struct scsi_cmnd *cmnd)
dea3101e 2722{
2e0fef85
JS
2723 struct Scsi_Host *shost = cmnd->device->host;
2724 struct lpfc_vport *vport = (struct lpfc_vport *) shost->hostdata;
2725 struct lpfc_hba *phba = vport->phba;
dea3101e
JB
2726 struct lpfc_nodelist *ndlp = NULL;
2727 int match;
d7c255b2 2728 int ret = SUCCESS, status = SUCCESS, i;
915caaaf 2729 int cnt;
0bd4ca25 2730 struct lpfc_scsi_buf * lpfc_cmd;
915caaaf 2731 unsigned long later;
ea2151b4
JS
2732 struct lpfc_scsi_event_header scsi_event;
2733
2734 scsi_event.event_type = FC_REG_SCSI_EVENT;
2735 scsi_event.subcategory = LPFC_EVENT_BUSRESET;
2736 scsi_event.lun = 0;
2737 memcpy(scsi_event.wwpn, &vport->fc_portname, sizeof(struct lpfc_name));
2738 memcpy(scsi_event.wwnn, &vport->fc_nodename, sizeof(struct lpfc_name));
2739
2740 fc_host_post_vendor_event(shost,
2741 fc_get_event_number(),
2742 sizeof(scsi_event),
2743 (char *)&scsi_event,
ddcc50f0 2744 LPFC_NL_VENDOR_ID);
dea3101e 2745
a90f5684 2746 lpfc_block_error_handler(cmnd);
dea3101e
JB
2747 /*
2748 * Since the driver manages a single bus device, reset all
2749 * targets known to the driver. Should any target reset
2750 * fail, this routine returns failure to the midlayer.
2751 */
e17da18e 2752 for (i = 0; i < LPFC_MAX_TARGET; i++) {
685f0bf7 2753 /* Search for mapped node by target ID */
dea3101e 2754 match = 0;
2e0fef85
JS
2755 spin_lock_irq(shost->host_lock);
2756 list_for_each_entry(ndlp, &vport->fc_nodes, nlp_listp) {
e47c9093
JS
2757 if (!NLP_CHK_NODE_ACT(ndlp))
2758 continue;
685f0bf7 2759 if (ndlp->nlp_state == NLP_STE_MAPPED_NODE &&
915caaaf 2760 ndlp->nlp_sid == i &&
685f0bf7 2761 ndlp->rport) {
dea3101e
JB
2762 match = 1;
2763 break;
2764 }
2765 }
2e0fef85 2766 spin_unlock_irq(shost->host_lock);
dea3101e
JB
2767 if (!match)
2768 continue;
915caaaf
JS
2769 lpfc_cmd = lpfc_get_scsi_buf(phba);
2770 if (lpfc_cmd) {
2771 lpfc_cmd->timeout = 60;
2772 status = lpfc_scsi_tgt_reset(lpfc_cmd, vport, i,
2773 cmnd->device->lun,
2774 ndlp->rport->dd_data);
2775 if (status != TIMEOUT_ERROR)
2776 lpfc_release_scsi_buf(phba, lpfc_cmd);
2777 }
2778 if (!lpfc_cmd || status != SUCCESS) {
e8b62011
JS
2779 lpfc_printf_vlog(vport, KERN_ERR, LOG_FCP,
2780 "0700 Bus Reset on target %d failed\n",
2781 i);
915caaaf 2782 ret = FAILED;
dea3101e
JB
2783 }
2784 }
6175c02a
JSEC
2785 /*
2786 * All outstanding txcmplq I/Os should have been aborted by
2787 * the targets. Unfortunately, some targets do not abide by
2788 * this forcing the driver to double check.
2789 */
51ef4c26 2790 cnt = lpfc_sli_sum_iocb(vport, 0, 0, LPFC_CTX_HOST);
6175c02a 2791 if (cnt)
51ef4c26
JS
2792 lpfc_sli_abort_iocb(vport, &phba->sli.ring[phba->sli.fcp_ring],
2793 0, 0, LPFC_CTX_HOST);
915caaaf
JS
2794 later = msecs_to_jiffies(2 * vport->cfg_devloss_tmo * 1000) + jiffies;
2795 while (time_after(later, jiffies) && cnt) {
2796 schedule_timeout_uninterruptible(msecs_to_jiffies(20));
51ef4c26 2797 cnt = lpfc_sli_sum_iocb(vport, 0, 0, LPFC_CTX_HOST);
dea3101e 2798 }
dea3101e 2799 if (cnt) {
e8b62011
JS
2800 lpfc_printf_vlog(vport, KERN_ERR, LOG_FCP,
2801 "0715 Bus Reset I/O flush failure: "
2802 "cnt x%x left x%x\n", cnt, i);
0bd4ca25 2803 ret = FAILED;
6175c02a 2804 }
e8b62011
JS
2805 lpfc_printf_vlog(vport, KERN_ERR, LOG_FCP,
2806 "0714 SCSI layer issued Bus Reset Data: x%x\n", ret);
dea3101e
JB
2807 return ret;
2808}
2809
9bad7671 2810/**
3621a710 2811 * lpfc_slave_alloc - scsi_host_template slave_alloc entry point
9bad7671
JS
2812 * @sdev: Pointer to scsi_device.
2813 *
2814 * This routine adds lun_queue_depth + 2 scsi_bufs to this host's globally
2815 * available list of scsi buffers, and makes sure no more scsi buffers are
2816 * allocated than the HBA limit conveyed to the midlayer. This list of
2817 * scsi buffers exists for the lifetime of the driver.
2818 *
2819 * Return codes:
2820 * non-0 - Error
2821 * 0 - Success
2822 **/
dea3101e
JB
2823static int
2824lpfc_slave_alloc(struct scsi_device *sdev)
2825{
2e0fef85
JS
2826 struct lpfc_vport *vport = (struct lpfc_vport *) sdev->host->hostdata;
2827 struct lpfc_hba *phba = vport->phba;
dea3101e 2828 struct lpfc_scsi_buf *scsi_buf = NULL;
19a7b4ae 2829 struct fc_rport *rport = starget_to_rport(scsi_target(sdev));
dea3101e
JB
2830 uint32_t total = 0, i;
2831 uint32_t num_to_alloc = 0;
2832 unsigned long flags;
dea3101e 2833
19a7b4ae 2834 if (!rport || fc_remote_port_chkready(rport))
dea3101e
JB
2835 return -ENXIO;
2836
19a7b4ae 2837 sdev->hostdata = rport->dd_data;
dea3101e
JB
2838
2839 /*
2840 * Populate the cmds_per_lun count scsi_bufs into this host's globally
2841 * available list of scsi buffers. Don't allocate more than the
a784efbf
JSEC
2842 * HBA limit conveyed to the midlayer via the host structure. The
2843 * formula accounts for the lun_queue_depth + error handlers + 1
2844 * extra. This list of scsi bufs exists for the lifetime of the driver.
dea3101e
JB
2845 */
2846 total = phba->total_scsi_bufs;
3de2a653 2847 num_to_alloc = vport->cfg_lun_queue_depth + 2;
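/* e.g. (assuming cfg_lun_queue_depth = 30): request 32 buffers; the
 * checks below clamp the running total at cfg_hba_queue_depth. */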
92d7f7b0
JS
2848
2849 /* Allow some exchanges to be available always to complete discovery */
2850 if (total >= phba->cfg_hba_queue_depth - LPFC_DISC_IOCB_BUFF_COUNT) {
e8b62011
JS
2851 lpfc_printf_vlog(vport, KERN_WARNING, LOG_FCP,
2852 "0704 At limitation of %d preallocated "
2853 "command buffers\n", total);
dea3101e 2854 return 0;
92d7f7b0
JS
2855 /* Allow some exchanges to be available always to complete discovery */
2856 } else if (total + num_to_alloc >
2857 phba->cfg_hba_queue_depth - LPFC_DISC_IOCB_BUFF_COUNT) {
e8b62011
JS
2858 lpfc_printf_vlog(vport, KERN_WARNING, LOG_FCP,
2859 "0705 Allocation request of %d "
2860 "command buffers will exceed max of %d. "
2861 "Reducing allocation request to %d.\n",
2862 num_to_alloc, phba->cfg_hba_queue_depth,
2863 (phba->cfg_hba_queue_depth - total));
dea3101e
JB
2864 num_to_alloc = phba->cfg_hba_queue_depth - total;
2865 }
2866
2867 for (i = 0; i < num_to_alloc; i++) {
2e0fef85 2868 scsi_buf = lpfc_new_scsi_buf(vport);
dea3101e 2869 if (!scsi_buf) {
e8b62011
JS
2870 lpfc_printf_vlog(vport, KERN_ERR, LOG_FCP,
2871 "0706 Failed to allocate "
2872 "command buffer\n");
dea3101e
JB
2873 break;
2874 }
2875
875fbdfe 2876 spin_lock_irqsave(&phba->scsi_buf_list_lock, flags);
dea3101e
JB
2877 phba->total_scsi_bufs++;
2878 list_add_tail(&scsi_buf->list, &phba->lpfc_scsi_buf_list);
875fbdfe 2879 spin_unlock_irqrestore(&phba->scsi_buf_list_lock, flags);
dea3101e
JB
2880 }
2881 return 0;
2882}
2883
9bad7671 2884/**
3621a710 2885 * lpfc_slave_configure - scsi_host_template slave_configure entry point
9bad7671
JS
2886 * @sdev: Pointer to scsi_device.
2887 *
2888 * This routine configures the following items:
2889 * - Tag command queuing support for @sdev if supported.
2890 * - Dev loss time out value of fc_rport.
2891 * - Enable SLI polling for fcp ring if ENABLE_FCP_RING_POLLING flag is set.
2892 *
2893 * Return codes:
2894 * 0 - Success
2895 **/
dea3101e
JB
2896static int
2897lpfc_slave_configure(struct scsi_device *sdev)
2898{
2e0fef85
JS
2899 struct lpfc_vport *vport = (struct lpfc_vport *) sdev->host->hostdata;
2900 struct lpfc_hba *phba = vport->phba;
2901 struct fc_rport *rport = starget_to_rport(sdev->sdev_target);
dea3101e
JB
2902
2903 if (sdev->tagged_supported)
3de2a653 2904 scsi_activate_tcq(sdev, vport->cfg_lun_queue_depth);
dea3101e 2905 else
3de2a653 2906 scsi_deactivate_tcq(sdev, vport->cfg_lun_queue_depth);
dea3101e
JB
2907
2908 /*
2909 * Initialize the fc transport attributes for the target
2910 * containing this scsi device. Also note that the driver's
2911 * target pointer is stored in the starget_data for the
2912 * driver's sysfs entry point functions.
2913 */
3de2a653 2914 rport->dev_loss_tmo = vport->cfg_devloss_tmo;
dea3101e 2915
875fbdfe
JSEC
2916 if (phba->cfg_poll & ENABLE_FCP_RING_POLLING) {
2917 lpfc_sli_poll_fcp_ring(phba);
2918 if (phba->cfg_poll & DISABLE_FCP_RING_INT)
2919 lpfc_poll_rearm_timer(phba);
2920 }
2921
dea3101e
JB
2922 return 0;
2923}
2924
9bad7671 2925/**
3621a710 2926 * lpfc_slave_destroy - slave_destroy entry point of SHT data structure
9bad7671
JS
2927 * @sdev: Pointer to scsi_device.
2928 *
2929 * This routine sets the @sdev hostdata field to null.
2930 **/
dea3101e
JB
2931static void
2932lpfc_slave_destroy(struct scsi_device *sdev)
2933{
2934 sdev->hostdata = NULL;
2935 return;
2936}
2937
92d7f7b0 2938
dea3101e
JB
2939struct scsi_host_template lpfc_template = {
2940 .module = THIS_MODULE,
2941 .name = LPFC_DRIVER_NAME,
2942 .info = lpfc_info,
2943 .queuecommand = lpfc_queuecommand,
2944 .eh_abort_handler = lpfc_abort_handler,
7054a606
JS
2945 .eh_device_reset_handler= lpfc_device_reset_handler,
2946 .eh_bus_reset_handler = lpfc_bus_reset_handler,
dea3101e
JB
2947 .slave_alloc = lpfc_slave_alloc,
2948 .slave_configure = lpfc_slave_configure,
2949 .slave_destroy = lpfc_slave_destroy,
47a8617c 2950 .scan_finished = lpfc_scan_finished,
dea3101e 2951 .this_id = -1,
83108bd3 2952 .sg_tablesize = LPFC_DEFAULT_SG_SEG_CNT,
dea3101e
JB
2953 .cmd_per_lun = LPFC_CMD_PER_LUN,
2954 .use_clustering = ENABLE_CLUSTERING,
2e0fef85 2955 .shost_attrs = lpfc_hba_attrs,
564b2960 2956 .max_sectors = 0xFFFF,
dea3101e 2957};
3de2a653
JS
2958
2959struct scsi_host_template lpfc_vport_template = {
2960 .module = THIS_MODULE,
2961 .name = LPFC_DRIVER_NAME,
2962 .info = lpfc_info,
2963 .queuecommand = lpfc_queuecommand,
2964 .eh_abort_handler = lpfc_abort_handler,
2965 .eh_device_reset_handler= lpfc_device_reset_handler,
2966 .eh_bus_reset_handler = lpfc_bus_reset_handler,
2967 .slave_alloc = lpfc_slave_alloc,
2968 .slave_configure = lpfc_slave_configure,
2969 .slave_destroy = lpfc_slave_destroy,
2970 .scan_finished = lpfc_scan_finished,
2971 .this_id = -1,
83108bd3 2972 .sg_tablesize = LPFC_DEFAULT_SG_SEG_CNT,
3de2a653
JS
2973 .cmd_per_lun = LPFC_CMD_PER_LUN,
2974 .use_clustering = ENABLE_CLUSTERING,
2975 .shost_attrs = lpfc_vport_attrs,
2976 .max_sectors = 0xFFFF,
2977};