]> git.ipfire.org Git - thirdparty/kernel/stable.git/blob
2494000
[thirdparty/kernel/stable.git] /
1 // SPDX-License-Identifier: GPL-2.0-only
2 /*
3 * QLogic Fibre Channel HBA Driver
4 * Copyright (c) 2003-2014 QLogic Corporation
5 */
#include "qla_def.h"
#include "qla_gbl.h"

#include <linux/bsg-lib.h>
#include <linux/delay.h>
#include <linux/kthread.h>
#include <linux/stddef.h>
#include <linux/vmalloc.h>
13
14 static void qla2xxx_free_fcport_work(struct work_struct *work)
15 {
16 struct fc_port *fcport = container_of(work, typeof(*fcport),
17 free_work);
18
19 qla2x00_free_fcport(fcport);
20 }
21
22 /* BSG support for ELS/CT pass through */
23 void qla2x00_bsg_job_done(srb_t *sp, int res)
24 {
25 struct bsg_job *bsg_job = sp->u.bsg_job;
26 struct fc_bsg_reply *bsg_reply = bsg_job->reply;
27 struct completion *comp = sp->comp;
28
29 ql_dbg(ql_dbg_user, sp->vha, 0x7009,
30 "%s: sp hdl %x, result=%x bsg ptr %p\n",
31 __func__, sp->handle, res, bsg_job);
32
33 /* ref: INIT */
34 kref_put(&sp->cmd_kref, qla2x00_sp_release);
35
36 bsg_reply->result = res;
37 bsg_job_done(bsg_job, bsg_reply->result,
38 bsg_reply->reply_payload_rcv_len);
39
40 if (comp)
41 complete(comp);
42 }
43
/*
 * Release resources attached to a completed BSG srb: undo the DMA
 * mappings (or free the remapped pool buffers), schedule teardown of
 * any dummy fcport allocated for the command, then release the srb.
 *
 * Installed as sp->free for all BSG pass-through srb types.
 */
void qla2x00_bsg_sp_free(srb_t *sp)
{
	struct qla_hw_data *ha = sp->vha->hw;
	struct bsg_job *bsg_job = sp->u.bsg_job;
	struct fc_bsg_request *bsg_request = bsg_job->request;
	struct qla_mt_iocb_rqst_fx00 *piocb_rqst;

	if (sp->type == SRB_FXIOCB_BCMD) {
		/* ISPFX00 pass-through: only unmap what was actually
		 * mapped, as recorded by the flags embedded in the
		 * vendor command. */
		piocb_rqst = (struct qla_mt_iocb_rqst_fx00 *)
		    &bsg_request->rqst_data.h_vendor.vendor_cmd[1];

		if (piocb_rqst->flags & SRB_FXDISC_REQ_DMA_VALID)
			dma_unmap_sg(&ha->pdev->dev,
			    bsg_job->request_payload.sg_list,
			    bsg_job->request_payload.sg_cnt, DMA_TO_DEVICE);

		if (piocb_rqst->flags & SRB_FXDISC_RESP_DMA_VALID)
			dma_unmap_sg(&ha->pdev->dev,
			    bsg_job->reply_payload.sg_list,
			    bsg_job->reply_payload.sg_cnt, DMA_FROM_DEVICE);
	} else {

		if (sp->remap.remapped) {
			/* Payload was copied into purex pool buffers;
			 * free those instead of unmapping SG lists. */
			dma_pool_free(ha->purex_dma_pool, sp->remap.rsp.buf,
			    sp->remap.rsp.dma);
			dma_pool_free(ha->purex_dma_pool, sp->remap.req.buf,
			    sp->remap.req.dma);
		} else {
			dma_unmap_sg(&ha->pdev->dev,
			    bsg_job->request_payload.sg_list,
			    bsg_job->request_payload.sg_cnt, DMA_TO_DEVICE);

			dma_unmap_sg(&ha->pdev->dev,
			    bsg_job->reply_payload.sg_list,
			    bsg_job->reply_payload.sg_cnt, DMA_FROM_DEVICE);
		}
	}

	/* These srb types ride on a dummy/temporary fcport; free it
	 * from workqueue context rather than here. */
	if (sp->type == SRB_CT_CMD ||
	    sp->type == SRB_FXIOCB_BCMD ||
	    sp->type == SRB_ELS_CMD_HST) {
		INIT_WORK(&sp->fcport->free_work, qla2xxx_free_fcport_work);
		queue_work(ha->wq, &sp->fcport->free_work);
	}

	qla2x00_rel_sp(sp);
}
89
90 int
91 qla24xx_fcp_prio_cfg_valid(scsi_qla_host_t *vha,
92 struct qla_fcp_prio_cfg *pri_cfg, uint8_t flag)
93 {
94 int i, ret, num_valid;
95 uint8_t *bcode;
96 struct qla_fcp_prio_entry *pri_entry;
97 uint32_t *bcode_val_ptr, bcode_val;
98
99 ret = 1;
100 num_valid = 0;
101 bcode = (uint8_t *)pri_cfg;
102 bcode_val_ptr = (uint32_t *)pri_cfg;
103 bcode_val = (uint32_t)(*bcode_val_ptr);
104
105 if (bcode_val == 0xFFFFFFFF) {
106 /* No FCP Priority config data in flash */
107 ql_dbg(ql_dbg_user, vha, 0x7051,
108 "No FCP Priority config data.\n");
109 return 0;
110 }
111
112 if (memcmp(bcode, "HQOS", 4)) {
113 /* Invalid FCP priority data header*/
114 ql_dbg(ql_dbg_user, vha, 0x7052,
115 "Invalid FCP Priority data header. bcode=0x%x.\n",
116 bcode_val);
117 return 0;
118 }
119 if (flag != 1)
120 return ret;
121
122 pri_entry = &pri_cfg->entry[0];
123 for (i = 0; i < pri_cfg->num_entries; i++) {
124 if (pri_entry->flags & FCP_PRIO_ENTRY_TAG_VALID)
125 num_valid++;
126 pri_entry++;
127 }
128
129 if (num_valid == 0) {
130 /* No valid FCP priority data entries */
131 ql_dbg(ql_dbg_user, vha, 0x7053,
132 "No valid FCP Priority data entries.\n");
133 ret = 0;
134 } else {
135 /* FCP priority data is valid */
136 ql_dbg(ql_dbg_user, vha, 0x7054,
137 "Valid FCP priority data. num entries = %d.\n",
138 num_valid);
139 }
140
141 return ret;
142 }
143
/*
 * Process the FCP priority configuration vendor command.
 *
 * The sub-command in vendor_cmd[1] selects enable/disable of FCP
 * priority handling or get/set of the priority config blob cached in
 * ha->fcp_prio_cfg.  Completes the bsg job and returns 0 on success;
 * returns a negative errno otherwise (bsg_reply->result carries the
 * SCSI-level status where it was set).
 */
static int
qla24xx_proc_fcp_prio_cfg_cmd(struct bsg_job *bsg_job)
{
	struct Scsi_Host *host = fc_bsg_to_shost(bsg_job);
	struct fc_bsg_request *bsg_request = bsg_job->request;
	struct fc_bsg_reply *bsg_reply = bsg_job->reply;
	scsi_qla_host_t *vha = shost_priv(host);
	struct qla_hw_data *ha = vha->hw;
	int ret = 0;
	uint32_t len;
	uint32_t oper;

	/* FCP priority is only supported on 24xx/25xx/P3P adapters. */
	if (!(IS_QLA24XX_TYPE(ha) || IS_QLA25XX(ha) || IS_P3P_TYPE(ha))) {
		ret = -EINVAL;
		goto exit_fcp_prio_cfg;
	}

	/* Get the sub command */
	oper = bsg_request->rqst_data.h_vendor.vendor_cmd[1];

	/* Only set config is allowed if config memory is not allocated */
	if (!ha->fcp_prio_cfg && (oper != QLFC_FCP_PRIO_SET_CONFIG)) {
		ret = -EINVAL;
		goto exit_fcp_prio_cfg;
	}
	switch (oper) {
	case QLFC_FCP_PRIO_DISABLE:
		if (ha->flags.fcp_prio_enabled) {
			ha->flags.fcp_prio_enabled = 0;
			ha->fcp_prio_cfg->attributes &=
			    ~FCP_PRIO_ATTR_ENABLE;
			/* Push the new (disabled) state to all ports. */
			qla24xx_update_all_fcp_prio(vha);
			bsg_reply->result = DID_OK;
		} else {
			ret = -EINVAL;
			bsg_reply->result = (DID_ERROR << 16);
			goto exit_fcp_prio_cfg;
		}
		break;

	case QLFC_FCP_PRIO_ENABLE:
		if (!ha->flags.fcp_prio_enabled) {
			/* Can only enable if a config was loaded before. */
			if (ha->fcp_prio_cfg) {
				ha->flags.fcp_prio_enabled = 1;
				ha->fcp_prio_cfg->attributes |=
				    FCP_PRIO_ATTR_ENABLE;
				qla24xx_update_all_fcp_prio(vha);
				bsg_reply->result = DID_OK;
			} else {
				ret = -EINVAL;
				bsg_reply->result = (DID_ERROR << 16);
				goto exit_fcp_prio_cfg;
			}
		}
		break;

	case QLFC_FCP_PRIO_GET_CONFIG:
		len = bsg_job->reply_payload.payload_len;
		if (!len || len > FCP_PRIO_CFG_SIZE) {
			ret = -EINVAL;
			bsg_reply->result = (DID_ERROR << 16);
			goto exit_fcp_prio_cfg;
		}

		/* Copy the cached config out to the caller's SG list. */
		bsg_reply->result = DID_OK;
		bsg_reply->reply_payload_rcv_len =
		    sg_copy_from_buffer(
			bsg_job->reply_payload.sg_list,
			bsg_job->reply_payload.sg_cnt, ha->fcp_prio_cfg,
			len);

		break;

	case QLFC_FCP_PRIO_SET_CONFIG:
		len = bsg_job->request_payload.payload_len;
		if (!len || len > FCP_PRIO_CFG_SIZE) {
			bsg_reply->result = (DID_ERROR << 16);
			ret = -EINVAL;
			goto exit_fcp_prio_cfg;
		}

		/* Allocate the cache buffer lazily, on first set. */
		if (!ha->fcp_prio_cfg) {
			ha->fcp_prio_cfg = vmalloc(FCP_PRIO_CFG_SIZE);
			if (!ha->fcp_prio_cfg) {
				ql_log(ql_log_warn, vha, 0x7050,
				    "Unable to allocate memory for fcp prio "
				    "config data (%x).\n", FCP_PRIO_CFG_SIZE);
				bsg_reply->result = (DID_ERROR << 16);
				ret = -ENOMEM;
				goto exit_fcp_prio_cfg;
			}
		}

		memset(ha->fcp_prio_cfg, 0, FCP_PRIO_CFG_SIZE);
		sg_copy_to_buffer(bsg_job->request_payload.sg_list,
		    bsg_job->request_payload.sg_cnt, ha->fcp_prio_cfg,
		    FCP_PRIO_CFG_SIZE);

		/* validate fcp priority data */

		if (!qla24xx_fcp_prio_cfg_valid(vha, ha->fcp_prio_cfg, 1)) {
			bsg_reply->result = (DID_ERROR << 16);
			ret = -EINVAL;
			/* If the buffer is invalid, the cached
			 * fcp_prio_cfg is of no use
			 */
			vfree(ha->fcp_prio_cfg);
			ha->fcp_prio_cfg = NULL;
			goto exit_fcp_prio_cfg;
		}

		/* Apply the enable bit carried inside the new config. */
		ha->flags.fcp_prio_enabled = 0;
		if (ha->fcp_prio_cfg->attributes & FCP_PRIO_ATTR_ENABLE)
			ha->flags.fcp_prio_enabled = 1;
		qla24xx_update_all_fcp_prio(vha);
		bsg_reply->result = DID_OK;
		break;
	default:
		ret = -EINVAL;
		break;
	}
exit_fcp_prio_cfg:
	/* Only complete the job when no errno is being returned. */
	if (!ret)
		bsg_job_done(bsg_job, bsg_reply->result,
		    bsg_reply->reply_payload_rcv_len);
	return ret;
}
271
/*
 * Handle an ELS pass-through BSG request, either rport-directed
 * (FC_BSG_RPT_ELS, using the rport's existing fcport) or host-based
 * (FC_BSG_HST_ELS_NOLOGIN, using a dummy fcport allocated here).
 *
 * On success the srb owns the SG mappings and the (possibly dummy)
 * fcport; both are released by qla2x00_bsg_sp_free() on completion.
 * Returns 0 on successful submission, negative errno otherwise.
 */
static int
qla2x00_process_els(struct bsg_job *bsg_job)
{
	struct fc_bsg_request *bsg_request = bsg_job->request;
	struct fc_rport *rport;
	fc_port_t *fcport = NULL;
	struct Scsi_Host *host;
	scsi_qla_host_t *vha;
	struct qla_hw_data *ha;
	srb_t *sp;
	const char *type;
	int req_sg_cnt, rsp_sg_cnt;
	int rval = (DID_ERROR << 16);
	uint32_t els_cmd = 0;
	/* Set once we allocate a dummy fcport that we must free on error. */
	int qla_port_allocated = 0;

	if (bsg_request->msgcode == FC_BSG_RPT_ELS) {
		/* rport-directed ELS: reuse the rport's fcport. */
		rport = fc_bsg_to_rport(bsg_job);
		if (!rport) {
			rval = -ENOMEM;
			goto done;
		}
		fcport = *(fc_port_t **) rport->dd_data;
		host = rport_to_shost(rport);
		vha = shost_priv(host);
		ha = vha->hw;
		type = "FC_BSG_RPT_ELS";
	} else {
		host = fc_bsg_to_shost(bsg_job);
		vha = shost_priv(host);
		ha = vha->hw;
		type = "FC_BSG_HST_ELS_NOLOGIN";
		els_cmd = bsg_request->rqst_data.h_els.command_code;
		/* AUTH ELS frames are handled by the EDIF path. */
		if (els_cmd == ELS_AUTH_ELS)
			return qla_edif_process_els(vha, bsg_job);
	}

	if (!vha->flags.online) {
		ql_log(ql_log_warn, vha, 0x7005, "Host not online.\n");
		rval = -EIO;
		goto done;
	}

	/* pass through is supported only for ISP 4Gb or higher */
	if (!IS_FWI2_CAPABLE(ha)) {
		ql_dbg(ql_dbg_user, vha, 0x7001,
		    "ELS passthru not supported for ISP23xx based adapters.\n");
		rval = -EPERM;
		goto done;
	}

	/* Multiple SG's are not supported for ELS requests */
	if (bsg_job->request_payload.sg_cnt > 1 ||
	    bsg_job->reply_payload.sg_cnt > 1) {
		ql_dbg(ql_dbg_user, vha, 0x7002,
		    "Multiple SG's are not supported for ELS requests, "
		    "request_sg_cnt=%x reply_sg_cnt=%x.\n",
		    bsg_job->request_payload.sg_cnt,
		    bsg_job->reply_payload.sg_cnt);
		rval = -ENOBUFS;
		goto done;
	}

	/* ELS request for rport */
	if (bsg_request->msgcode == FC_BSG_RPT_ELS) {
		/* make sure the rport is logged in,
		 * if not perform fabric login
		 */
		if (atomic_read(&fcport->state) != FCS_ONLINE) {
			ql_dbg(ql_dbg_user, vha, 0x7003,
			    "Port %06X is not online for ELS passthru.\n",
			    fcport->d_id.b24);
			rval = -EIO;
			goto done;
		}
	} else {
		/* Allocate a dummy fcport structure, since functions
		 * preparing the IOCB and mailbox command retrieves port
		 * specific information from fcport structure. For Host based
		 * ELS commands there will be no fcport structure allocated
		 */
		fcport = qla2x00_alloc_fcport(vha, GFP_KERNEL);
		if (!fcport) {
			rval = -ENOMEM;
			goto done;
		}

		qla_port_allocated = 1;
		/* Initialize all required fields of fcport */
		fcport->vha = vha;
		/* Destination N_Port ID comes from the BSG request. */
		fcport->d_id.b.al_pa =
		    bsg_request->rqst_data.h_els.port_id[0];
		fcport->d_id.b.area =
		    bsg_request->rqst_data.h_els.port_id[1];
		fcport->d_id.b.domain =
		    bsg_request->rqst_data.h_els.port_id[2];
		/* 0xFD is the well-known fabric controller address. */
		fcport->loop_id =
		    (fcport->d_id.b.al_pa == 0xFD) ?
		    NPH_FABRIC_CONTROLLER : NPH_F_PORT;
	}

	req_sg_cnt =
	    dma_map_sg(&ha->pdev->dev, bsg_job->request_payload.sg_list,
	    bsg_job->request_payload.sg_cnt, DMA_TO_DEVICE);
	if (!req_sg_cnt) {
		dma_unmap_sg(&ha->pdev->dev, bsg_job->request_payload.sg_list,
		    bsg_job->request_payload.sg_cnt, DMA_TO_DEVICE);
		rval = -ENOMEM;
		goto done_free_fcport;
	}

	rsp_sg_cnt = dma_map_sg(&ha->pdev->dev, bsg_job->reply_payload.sg_list,
	    bsg_job->reply_payload.sg_cnt, DMA_FROM_DEVICE);
	if (!rsp_sg_cnt) {
		dma_unmap_sg(&ha->pdev->dev, bsg_job->reply_payload.sg_list,
		    bsg_job->reply_payload.sg_cnt, DMA_FROM_DEVICE);
		rval = -ENOMEM;
		goto done_free_fcport;
	}

	/* The IOCB builder cannot cope with coalesced mappings. */
	if ((req_sg_cnt != bsg_job->request_payload.sg_cnt) ||
	    (rsp_sg_cnt != bsg_job->reply_payload.sg_cnt)) {
		ql_log(ql_log_warn, vha, 0x7008,
		    "dma mapping resulted in different sg counts, "
		    "request_sg_cnt: %x dma_request_sg_cnt:%x reply_sg_cnt:%x "
		    "dma_reply_sg_cnt:%x.\n", bsg_job->request_payload.sg_cnt,
		    req_sg_cnt, bsg_job->reply_payload.sg_cnt, rsp_sg_cnt);
		rval = -EAGAIN;
		goto done_unmap_sg;
	}

	/* Alloc SRB structure */
	sp = qla2x00_get_sp(vha, fcport, GFP_KERNEL);
	if (!sp) {
		rval = -ENOMEM;
		goto done_unmap_sg;
	}

	sp->type =
	    (bsg_request->msgcode == FC_BSG_RPT_ELS ?
	    SRB_ELS_CMD_RPT : SRB_ELS_CMD_HST);
	sp->name =
	    (bsg_request->msgcode == FC_BSG_RPT_ELS ?
	    "bsg_els_rpt" : "bsg_els_hst");
	sp->u.bsg_job = bsg_job;
	sp->free = qla2x00_bsg_sp_free;
	sp->done = qla2x00_bsg_job_done;

	ql_dbg(ql_dbg_user, vha, 0x700a,
	    "bsg rqst type: %s els type: %x - loop-id=%x "
	    "portid=%-2x%02x%02x.\n", type,
	    bsg_request->rqst_data.h_els.command_code, fcport->loop_id,
	    fcport->d_id.b.domain, fcport->d_id.b.area, fcport->d_id.b.al_pa);

	rval = qla2x00_start_sp(sp);
	if (rval != QLA_SUCCESS) {
		ql_log(ql_log_warn, vha, 0x700e,
		    "qla2x00_start_sp failed = %d\n", rval);
		qla2x00_rel_sp(sp);
		rval = -EIO;
		goto done_unmap_sg;
	}
	/* Ownership of mappings/fcport passed to the srb. */
	return rval;

done_unmap_sg:
	dma_unmap_sg(&ha->pdev->dev, bsg_job->request_payload.sg_list,
	    bsg_job->request_payload.sg_cnt, DMA_TO_DEVICE);
	dma_unmap_sg(&ha->pdev->dev, bsg_job->reply_payload.sg_list,
	    bsg_job->reply_payload.sg_cnt, DMA_FROM_DEVICE);
	goto done_free_fcport;

done_free_fcport:
	/* Only free the fcport if we allocated a dummy one here. */
	if (qla_port_allocated)
		qla2x00_free_fcport(fcport);
done:
	return rval;
}
449
/*
 * Compute how many IOCBs a CT command with @dsds data segment
 * descriptors needs: the command IOCB holds two descriptors and each
 * continuation IOCB holds up to five more.
 */
static inline uint16_t
qla24xx_calc_ct_iocbs(uint16_t dsds)
{
	uint16_t extra = 0;

	if (dsds > 2)
		extra = (dsds - 2 + 4) / 5;	/* ceil((dsds - 2) / 5) */

	return 1 + extra;
}
463
/*
 * Handle a host-based CT (Common Transport) pass-through BSG request.
 *
 * Maps the request/reply SG lists, allocates a dummy fcport addressed
 * at the directory server (0xFC) or management server (0xFA), and
 * submits an SRB_CT_CMD srb.  On success the srb owns the mappings and
 * the fcport (released by qla2x00_bsg_sp_free()).  Returns 0 on
 * successful submission, negative errno otherwise.
 */
static int
qla2x00_process_ct(struct bsg_job *bsg_job)
{
	srb_t *sp;
	struct fc_bsg_request *bsg_request = bsg_job->request;
	struct Scsi_Host *host = fc_bsg_to_shost(bsg_job);
	scsi_qla_host_t *vha = shost_priv(host);
	struct qla_hw_data *ha = vha->hw;
	int rval = (DID_ERROR << 16);
	int req_sg_cnt, rsp_sg_cnt;
	uint16_t loop_id;
	struct fc_port *fcport;
	char *type = "FC_BSG_HST_CT";

	req_sg_cnt =
	    dma_map_sg(&ha->pdev->dev, bsg_job->request_payload.sg_list,
	    bsg_job->request_payload.sg_cnt, DMA_TO_DEVICE);
	if (!req_sg_cnt) {
		ql_log(ql_log_warn, vha, 0x700f,
		    "dma_map_sg return %d for request\n", req_sg_cnt);
		rval = -ENOMEM;
		goto done;
	}

	rsp_sg_cnt = dma_map_sg(&ha->pdev->dev, bsg_job->reply_payload.sg_list,
	    bsg_job->reply_payload.sg_cnt, DMA_FROM_DEVICE);
	if (!rsp_sg_cnt) {
		ql_log(ql_log_warn, vha, 0x7010,
		    "dma_map_sg return %d for reply\n", rsp_sg_cnt);
		rval = -ENOMEM;
		goto done;
	}

	/* The IOCB builder cannot cope with coalesced mappings. */
	if ((req_sg_cnt != bsg_job->request_payload.sg_cnt) ||
	    (rsp_sg_cnt != bsg_job->reply_payload.sg_cnt)) {
		ql_log(ql_log_warn, vha, 0x7011,
		    "request_sg_cnt: %x dma_request_sg_cnt: %x reply_sg_cnt:%x "
		    "dma_reply_sg_cnt: %x\n", bsg_job->request_payload.sg_cnt,
		    req_sg_cnt, bsg_job->reply_payload.sg_cnt, rsp_sg_cnt);
		rval = -EAGAIN;
		goto done_unmap_sg;
	}

	if (!vha->flags.online) {
		ql_log(ql_log_warn, vha, 0x7012,
		    "Host is not online.\n");
		rval = -EIO;
		goto done_unmap_sg;
	}

	/* Destination well-known address lives in the CT preamble. */
	loop_id =
	    (bsg_request->rqst_data.h_ct.preamble_word1 & 0xFF000000)
	    >> 24;
	switch (loop_id) {
	case 0xFC:
		/* Directory (name) server. */
		loop_id = NPH_SNS;
		break;
	case 0xFA:
		/* Management server. */
		loop_id = vha->mgmt_svr_loop_id;
		break;
	default:
		ql_dbg(ql_dbg_user, vha, 0x7013,
		    "Unknown loop id: %x.\n", loop_id);
		rval = -EINVAL;
		goto done_unmap_sg;
	}

	/* Allocate a dummy fcport structure, since functions preparing the
	 * IOCB and mailbox command retrieves port specific information
	 * from fcport structure. For Host based ELS commands there will be
	 * no fcport structure allocated
	 */
	fcport = qla2x00_alloc_fcport(vha, GFP_KERNEL);
	if (!fcport) {
		ql_log(ql_log_warn, vha, 0x7014,
		    "Failed to allocate fcport.\n");
		rval = -ENOMEM;
		goto done_unmap_sg;
	}

	/* Initialize all required fields of fcport */
	fcport->vha = vha;
	fcport->d_id.b.al_pa = bsg_request->rqst_data.h_ct.port_id[0];
	fcport->d_id.b.area = bsg_request->rqst_data.h_ct.port_id[1];
	fcport->d_id.b.domain = bsg_request->rqst_data.h_ct.port_id[2];
	fcport->loop_id = loop_id;

	/* Alloc SRB structure */
	sp = qla2x00_get_sp(vha, fcport, GFP_KERNEL);
	if (!sp) {
		ql_log(ql_log_warn, vha, 0x7015,
		    "qla2x00_get_sp failed.\n");
		rval = -ENOMEM;
		goto done_free_fcport;
	}

	sp->type = SRB_CT_CMD;
	sp->name = "bsg_ct";
	sp->iocbs = qla24xx_calc_ct_iocbs(req_sg_cnt + rsp_sg_cnt);
	sp->u.bsg_job = bsg_job;
	sp->free = qla2x00_bsg_sp_free;
	sp->done = qla2x00_bsg_job_done;

	ql_dbg(ql_dbg_user, vha, 0x7016,
	    "bsg rqst type: %s else type: %x - "
	    "loop-id=%x portid=%02x%02x%02x.\n", type,
	    (bsg_request->rqst_data.h_ct.preamble_word2 >> 16),
	    fcport->loop_id, fcport->d_id.b.domain, fcport->d_id.b.area,
	    fcport->d_id.b.al_pa);

	rval = qla2x00_start_sp(sp);
	if (rval != QLA_SUCCESS) {
		ql_log(ql_log_warn, vha, 0x7017,
		    "qla2x00_start_sp failed=%d.\n", rval);
		qla2x00_rel_sp(sp);
		rval = -EIO;
		goto done_free_fcport;
	}
	/* Ownership of mappings/fcport passed to the srb. */
	return rval;

done_free_fcport:
	qla2x00_free_fcport(fcport);
done_unmap_sg:
	dma_unmap_sg(&ha->pdev->dev, bsg_job->request_payload.sg_list,
	    bsg_job->request_payload.sg_cnt, DMA_TO_DEVICE);
	dma_unmap_sg(&ha->pdev->dev, bsg_job->reply_payload.sg_list,
	    bsg_job->reply_payload.sg_cnt, DMA_FROM_DEVICE);
done:
	return rval;
}
594
/*
 * Disable loopback mode on ISP81xx/8031/8044 class adapters.
 *
 * @config: current 4-word port configuration (word 0 carries the
 *          loopback bits)
 * @wait:   wait for the DCBX-complete async event after the change
 * @wait2:  additionally wait for the link/port-up async event
 *
 * Returns 0 on success or -EINVAL on set/wait failure.  No-op (and
 * returns 0) on other adapter families or when no loopback bit is set.
 */
static inline int
qla81xx_reset_loopback_mode(scsi_qla_host_t *vha, uint16_t *config,
    int wait, int wait2)
{
	int ret = 0;
	int rval = 0;
	uint16_t new_config[4];
	struct qla_hw_data *ha = vha->hw;

	if (!IS_QLA81XX(ha) && !IS_QLA8031(ha) && !IS_QLA8044(ha))
		goto done_reset_internal;

	memset(new_config, 0, sizeof(new_config));
	/* Only act when internal or external loopback is enabled. */
	if ((config[0] & INTERNAL_LOOPBACK_MASK) >> 1 ==
	    ENABLE_INTERNAL_LOOPBACK ||
	    (config[0] & INTERNAL_LOOPBACK_MASK) >> 1 ==
	    ENABLE_EXTERNAL_LOOPBACK) {
		/* Clear the loopback bits, keep the remaining words. */
		new_config[0] = config[0] & ~INTERNAL_LOOPBACK_MASK;
		ql_dbg(ql_dbg_user, vha, 0x70bf, "new_config[0]=%02x\n",
		    (new_config[0] & INTERNAL_LOOPBACK_MASK));
		memcpy(&new_config[1], &config[1], sizeof(uint16_t) * 3);

		/* Tell the async-event handlers which completions to fire. */
		ha->notify_dcbx_comp = wait;
		ha->notify_lb_portup_comp = wait2;

		ret = qla81xx_set_port_config(vha, new_config);
		if (ret != QLA_SUCCESS) {
			ql_log(ql_log_warn, vha, 0x7025,
			    "Set port config failed.\n");
			ha->notify_dcbx_comp = 0;
			ha->notify_lb_portup_comp = 0;
			rval = -EINVAL;
			goto done_reset_internal;
		}

		/* Wait for DCBX complete event */
		if (wait && !wait_for_completion_timeout(&ha->dcbx_comp,
		    (DCBX_COMP_TIMEOUT * HZ))) {
			ql_dbg(ql_dbg_user, vha, 0x7026,
			    "DCBX completion not received.\n");
			ha->notify_dcbx_comp = 0;
			ha->notify_lb_portup_comp = 0;
			rval = -EINVAL;
			goto done_reset_internal;
		} else
			ql_dbg(ql_dbg_user, vha, 0x7027,
			    "DCBX completion received.\n");

		/* Optionally also wait for the port to come back up. */
		if (wait2 &&
		    !wait_for_completion_timeout(&ha->lb_portup_comp,
		    (LB_PORTUP_COMP_TIMEOUT * HZ))) {
			ql_dbg(ql_dbg_user, vha, 0x70c5,
			    "Port up completion not received.\n");
			ha->notify_lb_portup_comp = 0;
			rval = -EINVAL;
			goto done_reset_internal;
		} else
			ql_dbg(ql_dbg_user, vha, 0x70c6,
			    "Port up completion received.\n");

		ha->notify_dcbx_comp = 0;
		ha->notify_lb_portup_comp = 0;
	}
done_reset_internal:
	return rval;
}
662
/*
 * Set the port configuration to enable the internal or external loopback
 * depending on the loopback mode.
 *
 * @config:     current 4-word port configuration
 * @new_config: filled in here with the loopback-enabled configuration;
 *              the caller uses it later to revert the port
 * @mode:       INTERNAL_LOOPBACK or EXTERNAL_LOOPBACK
 *
 * Returns 0 on success, -EINVAL on failure (after attempting to revert
 * the config, and escalating to an ISP abort if the revert also fails).
 * No-op (returns 0) on adapter families without port-config support.
 */
static inline int
qla81xx_set_loopback_mode(scsi_qla_host_t *vha, uint16_t *config,
    uint16_t *new_config, uint16_t mode)
{
	int ret = 0;
	int rval = 0;
	unsigned long rem_tmo = 0, current_tmo = 0;
	struct qla_hw_data *ha = vha->hw;

	if (!IS_QLA81XX(ha) && !IS_QLA8031(ha) && !IS_QLA8044(ha))
		goto done_set_internal;

	if (mode == INTERNAL_LOOPBACK)
		new_config[0] = config[0] | (ENABLE_INTERNAL_LOOPBACK << 1);
	else if (mode == EXTERNAL_LOOPBACK)
		new_config[0] = config[0] | (ENABLE_EXTERNAL_LOOPBACK << 1);
	ql_dbg(ql_dbg_user, vha, 0x70be,
	    "new_config[0]=%02x\n", (new_config[0] & INTERNAL_LOOPBACK_MASK));

	memcpy(&new_config[1], &config[1], sizeof(uint16_t) * 3);

	/* Ask the async-event handler to complete dcbx_comp for us. */
	ha->notify_dcbx_comp = 1;
	ret = qla81xx_set_port_config(vha, new_config);
	if (ret != QLA_SUCCESS) {
		ql_log(ql_log_warn, vha, 0x7021,
		    "set port config failed.\n");
		ha->notify_dcbx_comp = 0;
		rval = -EINVAL;
		goto done_set_internal;
	}

	/* Wait for DCBX complete event */
	current_tmo = DCBX_COMP_TIMEOUT * HZ;
	while (1) {
		rem_tmo = wait_for_completion_timeout(&ha->dcbx_comp,
		    current_tmo);
		/* Firmware may ask for more time via idc_extend_tmo;
		 * keep waiting until it stops extending or we complete. */
		if (!ha->idc_extend_tmo || rem_tmo) {
			ha->idc_extend_tmo = 0;
			break;
		}
		current_tmo = ha->idc_extend_tmo * HZ;
		ha->idc_extend_tmo = 0;
	}

	if (!rem_tmo) {
		ql_dbg(ql_dbg_user, vha, 0x7022,
		    "DCBX completion not received.\n");
		ret = qla81xx_reset_loopback_mode(vha, new_config, 0, 0);
		/*
		 * If the reset of the loopback mode doesn't work take a FCoE
		 * dump and reset the chip.
		 */
		if (ret) {
			qla2xxx_dump_fw(vha);
			set_bit(ISP_ABORT_NEEDED, &vha->dpc_flags);
		}
		rval = -EINVAL;
	} else {
		if (ha->flags.idc_compl_status) {
			ql_dbg(ql_dbg_user, vha, 0x70c3,
			    "Bad status in IDC Completion AEN\n");
			rval = -EINVAL;
			ha->flags.idc_compl_status = 0;
		} else
			ql_dbg(ql_dbg_user, vha, 0x7023,
			    "DCBX completion received.\n");
	}

	ha->notify_dcbx_comp = 0;
	ha->idc_extend_tmo = 0;

done_set_internal:
	return rval;
}
741
/*
 * Handle the loopback/echo diagnostic vendor BSG command.
 *
 * The request payload is bounced through coherent DMA buffers; either
 * an ELS echo test (fabric / 81xx external-loopback special case) or a
 * firmware loopback test is run, with the 81xx-class adapters having
 * their port configuration switched into loopback mode first and
 * reverted afterwards.  Returns 0 (job completed, result in
 * bsg_reply->result) or a negative errno for setup failures.
 */
static int
qla2x00_process_loopback(struct bsg_job *bsg_job)
{
	struct fc_bsg_request *bsg_request = bsg_job->request;
	struct fc_bsg_reply *bsg_reply = bsg_job->reply;
	struct Scsi_Host *host = fc_bsg_to_shost(bsg_job);
	scsi_qla_host_t *vha = shost_priv(host);
	struct qla_hw_data *ha = vha->hw;
	int rval;
	uint8_t command_sent;
	char *type;
	struct msg_echo_lb elreq;
	uint16_t response[MAILBOX_REGISTER_COUNT];
	uint16_t config[4], new_config[4];
	uint8_t *fw_sts_ptr;
	void *req_data = NULL;
	dma_addr_t req_data_dma;
	uint32_t req_data_len;
	uint8_t *rsp_data = NULL;
	dma_addr_t rsp_data_dma;
	uint32_t rsp_data_len;

	if (!vha->flags.online) {
		ql_log(ql_log_warn, vha, 0x7019, "Host is not online.\n");
		return -EIO;
	}

	memset(&elreq, 0, sizeof(elreq));

	elreq.req_sg_cnt = dma_map_sg(&ha->pdev->dev,
	    bsg_job->request_payload.sg_list, bsg_job->request_payload.sg_cnt,
	    DMA_TO_DEVICE);

	if (!elreq.req_sg_cnt) {
		ql_log(ql_log_warn, vha, 0x701a,
		    "dma_map_sg returned %d for request.\n", elreq.req_sg_cnt);
		return -ENOMEM;
	}

	elreq.rsp_sg_cnt = dma_map_sg(&ha->pdev->dev,
	    bsg_job->reply_payload.sg_list, bsg_job->reply_payload.sg_cnt,
	    DMA_FROM_DEVICE);

	if (!elreq.rsp_sg_cnt) {
		ql_log(ql_log_warn, vha, 0x701b,
		    "dma_map_sg returned %d for reply.\n", elreq.rsp_sg_cnt);
		rval = -ENOMEM;
		goto done_unmap_req_sg;
	}

	if ((elreq.req_sg_cnt != bsg_job->request_payload.sg_cnt) ||
	    (elreq.rsp_sg_cnt != bsg_job->reply_payload.sg_cnt)) {
		ql_log(ql_log_warn, vha, 0x701c,
		    "dma mapping resulted in different sg counts, "
		    "request_sg_cnt: %x dma_request_sg_cnt: %x "
		    "reply_sg_cnt: %x dma_reply_sg_cnt: %x.\n",
		    bsg_job->request_payload.sg_cnt, elreq.req_sg_cnt,
		    bsg_job->reply_payload.sg_cnt, elreq.rsp_sg_cnt);
		rval = -EAGAIN;
		goto done_unmap_sg;
	}
	/* Bounce buffers: test data is staged in coherent memory. */
	req_data_len = rsp_data_len = bsg_job->request_payload.payload_len;
	req_data = dma_alloc_coherent(&ha->pdev->dev, req_data_len,
	    &req_data_dma, GFP_KERNEL);
	if (!req_data) {
		ql_log(ql_log_warn, vha, 0x701d,
		    "dma alloc failed for req_data.\n");
		rval = -ENOMEM;
		goto done_unmap_sg;
	}

	rsp_data = dma_alloc_coherent(&ha->pdev->dev, rsp_data_len,
	    &rsp_data_dma, GFP_KERNEL);
	if (!rsp_data) {
		ql_log(ql_log_warn, vha, 0x7004,
		    "dma alloc failed for rsp_data.\n");
		rval = -ENOMEM;
		goto done_free_dma_req;
	}

	/* Copy the request buffer in req_data now */
	sg_copy_to_buffer(bsg_job->request_payload.sg_list,
	    bsg_job->request_payload.sg_cnt, req_data, req_data_len);

	elreq.send_dma = req_data_dma;
	elreq.rcv_dma = rsp_data_dma;
	elreq.transfer_size = req_data_len;

	elreq.options = bsg_request->rqst_data.h_vendor.vendor_cmd[1];
	elreq.iteration_count =
	    bsg_request->rqst_data.h_vendor.vendor_cmd[2];

	/* Use the ELS echo test on fabric topology, or on 81xx-class
	 * parts when the payload is a max-size ELS frame requesting
	 * external loopback. */
	if (atomic_read(&vha->loop_state) == LOOP_READY &&
	    ((ha->current_topology == ISP_CFG_F && (elreq.options & 7) >= 2) ||
	    ((IS_QLA81XX(ha) || IS_QLA8031(ha) || IS_QLA8044(ha)) &&
	    get_unaligned_le32(req_data) == ELS_OPCODE_BYTE &&
	    req_data_len == MAX_ELS_FRAME_PAYLOAD &&
	    elreq.options == EXTERNAL_LOOPBACK))) {
		type = "FC_BSG_HST_VENDOR_ECHO_DIAG";
		ql_dbg(ql_dbg_user, vha, 0x701e,
		    "BSG request type: %s.\n", type);
		command_sent = INT_DEF_LB_ECHO_CMD;
		rval = qla2x00_echo_test(vha, &elreq, response);
	} else {
		if (IS_QLA81XX(ha) || IS_QLA8031(ha) || IS_QLA8044(ha)) {
			/* These parts need the port config switched into
			 * loopback mode first, and reverted afterwards. */
			memset(config, 0, sizeof(config));
			memset(new_config, 0, sizeof(new_config));

			if (qla81xx_get_port_config(vha, config)) {
				ql_log(ql_log_warn, vha, 0x701f,
				    "Get port config failed.\n");
				rval = -EPERM;
				goto done_free_dma_rsp;
			}

			if ((config[0] & INTERNAL_LOOPBACK_MASK) != 0) {
				ql_dbg(ql_dbg_user, vha, 0x70c4,
				    "Loopback operation already in "
				    "progress.\n");
				rval = -EAGAIN;
				goto done_free_dma_rsp;
			}

			ql_dbg(ql_dbg_user, vha, 0x70c0,
			    "elreq.options=%04x\n", elreq.options);

			/* External loopback on 81xx (but not 8031/8044)
			 * only needs the current loopback bits cleared. */
			if (elreq.options == EXTERNAL_LOOPBACK)
				if (IS_QLA8031(ha) || IS_QLA8044(ha))
					rval = qla81xx_set_loopback_mode(vha,
					    config, new_config, elreq.options);
				else
					rval = qla81xx_reset_loopback_mode(vha,
					    config, 1, 0);
			else
				rval = qla81xx_set_loopback_mode(vha, config,
				    new_config, elreq.options);

			if (rval) {
				rval = -EPERM;
				goto done_free_dma_rsp;
			}

			type = "FC_BSG_HST_VENDOR_LOOPBACK";
			ql_dbg(ql_dbg_user, vha, 0x7028,
			    "BSG request type: %s.\n", type);

			command_sent = INT_DEF_LB_LOOPBACK_CMD;
			rval = qla2x00_loopback_test(vha, &elreq, response);

			/* Firmware reset the link; abort the ISP (and the
			 * MPI on 81xx) to recover. */
			if (response[0] == MBS_COMMAND_ERROR &&
			    response[1] == MBS_LB_RESET) {
				ql_log(ql_log_warn, vha, 0x7029,
				    "MBX command error, Aborting ISP.\n");
				set_bit(ISP_ABORT_NEEDED, &vha->dpc_flags);
				qla2xxx_wake_dpc(vha);
				qla2x00_wait_for_chip_reset(vha);
				/* Also reset the MPI */
				if (IS_QLA81XX(ha)) {
					if (qla81xx_restart_mpi_firmware(vha) !=
					    QLA_SUCCESS) {
						ql_log(ql_log_warn, vha, 0x702a,
						    "MPI reset failed.\n");
					}
				}

				rval = -EIO;
				goto done_free_dma_rsp;
			}

			if (new_config[0]) {
				int ret;

				/* Revert back to original port config
				 * Also clear internal loopback
				 */
				ret = qla81xx_reset_loopback_mode(vha,
				    new_config, 0, 1);
				if (ret) {
					/*
					 * If the reset of the loopback mode
					 * doesn't work take FCoE dump and then
					 * reset the chip.
					 */
					qla2xxx_dump_fw(vha);
					set_bit(ISP_ABORT_NEEDED,
					    &vha->dpc_flags);
				}

			}

		} else {
			type = "FC_BSG_HST_VENDOR_LOOPBACK";
			ql_dbg(ql_dbg_user, vha, 0x702b,
			    "BSG request type: %s.\n", type);
			command_sent = INT_DEF_LB_LOOPBACK_CMD;
			rval = qla2x00_loopback_test(vha, &elreq, response);
		}
	}

	if (rval) {
		ql_log(ql_log_warn, vha, 0x702c,
		    "Vendor request %s failed.\n", type);

		/* Test failure is reported via bsg_reply, not errno. */
		rval = 0;
		bsg_reply->result = (DID_ERROR << 16);
		bsg_reply->reply_payload_rcv_len = 0;
	} else {
		ql_dbg(ql_dbg_user, vha, 0x702d,
		    "Vendor request %s completed.\n", type);
		bsg_reply->result = (DID_OK << 16);
		sg_copy_from_buffer(bsg_job->reply_payload.sg_list,
		    bsg_job->reply_payload.sg_cnt, rsp_data,
		    rsp_data_len);
	}

	/* Append the raw mailbox response and the command code after the
	 * fc_bsg_reply so user space can inspect firmware status. */
	bsg_job->reply_len = sizeof(struct fc_bsg_reply) +
	    sizeof(response) + sizeof(uint8_t);
	fw_sts_ptr = bsg_job->reply + sizeof(struct fc_bsg_reply);
	memcpy(bsg_job->reply + sizeof(struct fc_bsg_reply), response,
	    sizeof(response));
	fw_sts_ptr += sizeof(response);
	*fw_sts_ptr = command_sent;

done_free_dma_rsp:
	dma_free_coherent(&ha->pdev->dev, rsp_data_len,
	    rsp_data, rsp_data_dma);
done_free_dma_req:
	dma_free_coherent(&ha->pdev->dev, req_data_len,
	    req_data, req_data_dma);
done_unmap_sg:
	dma_unmap_sg(&ha->pdev->dev,
	    bsg_job->reply_payload.sg_list,
	    bsg_job->reply_payload.sg_cnt, DMA_FROM_DEVICE);
done_unmap_req_sg:
	dma_unmap_sg(&ha->pdev->dev,
	    bsg_job->request_payload.sg_list,
	    bsg_job->request_payload.sg_cnt, DMA_TO_DEVICE);
	if (!rval)
		bsg_job_done(bsg_job, bsg_reply->result,
		    bsg_reply->reply_payload_rcv_len);
	return rval;
}
984
985 static int
986 qla84xx_reset(struct bsg_job *bsg_job)
987 {
988 struct fc_bsg_request *bsg_request = bsg_job->request;
989 struct Scsi_Host *host = fc_bsg_to_shost(bsg_job);
990 struct fc_bsg_reply *bsg_reply = bsg_job->reply;
991 scsi_qla_host_t *vha = shost_priv(host);
992 struct qla_hw_data *ha = vha->hw;
993 int rval = 0;
994 uint32_t flag;
995
996 if (!IS_QLA84XX(ha)) {
997 ql_dbg(ql_dbg_user, vha, 0x702f, "Not 84xx, exiting.\n");
998 return -EINVAL;
999 }
1000
1001 flag = bsg_request->rqst_data.h_vendor.vendor_cmd[1];
1002
1003 rval = qla84xx_reset_chip(vha, flag == A84_ISSUE_RESET_DIAG_FW);
1004
1005 if (rval) {
1006 ql_log(ql_log_warn, vha, 0x7030,
1007 "Vendor request 84xx reset failed.\n");
1008 rval = (DID_ERROR << 16);
1009
1010 } else {
1011 ql_dbg(ql_dbg_user, vha, 0x7031,
1012 "Vendor request 84xx reset completed.\n");
1013 bsg_reply->result = DID_OK;
1014 bsg_job_done(bsg_job, bsg_reply->result,
1015 bsg_reply->reply_payload_rcv_len);
1016 }
1017
1018 return rval;
1019 }
1020
/*
 * BSG vendor command: download new firmware to the ISP84xx
 * co-processor via a VERIFY CHIP IOCB.
 *
 * The firmware image arrives in the request payload; it is staged in a
 * coherent DMA buffer and described to the chip by a single data
 * segment descriptor.  vendor_cmd[1] selects diagnostic vs operational
 * firmware.  Returns 0 (job completed) or negative errno for setup
 * failures; IOCB failure is reported as DID_ERROR << 16.
 */
static int
qla84xx_updatefw(struct bsg_job *bsg_job)
{
	struct fc_bsg_request *bsg_request = bsg_job->request;
	struct fc_bsg_reply *bsg_reply = bsg_job->reply;
	struct Scsi_Host *host = fc_bsg_to_shost(bsg_job);
	scsi_qla_host_t *vha = shost_priv(host);
	struct qla_hw_data *ha = vha->hw;
	struct verify_chip_entry_84xx *mn = NULL;
	dma_addr_t mn_dma, fw_dma;
	void *fw_buf = NULL;
	int rval = 0;
	uint32_t sg_cnt;
	uint32_t data_len;
	uint16_t options;
	uint32_t flag;
	uint32_t fw_ver;

	if (!IS_QLA84XX(ha)) {
		ql_dbg(ql_dbg_user, vha, 0x7032,
		    "Not 84xx, exiting.\n");
		return -EINVAL;
	}

	sg_cnt = dma_map_sg(&ha->pdev->dev, bsg_job->request_payload.sg_list,
	    bsg_job->request_payload.sg_cnt, DMA_TO_DEVICE);
	if (!sg_cnt) {
		ql_log(ql_log_warn, vha, 0x7033,
		    "dma_map_sg returned %d for request.\n", sg_cnt);
		return -ENOMEM;
	}

	if (sg_cnt != bsg_job->request_payload.sg_cnt) {
		ql_log(ql_log_warn, vha, 0x7034,
		    "DMA mapping resulted in different sg counts, "
		    "request_sg_cnt: %x dma_request_sg_cnt: %x.\n",
		    bsg_job->request_payload.sg_cnt, sg_cnt);
		rval = -EAGAIN;
		goto done_unmap_sg;
	}

	/* Stage the firmware image in a coherent buffer. */
	data_len = bsg_job->request_payload.payload_len;
	fw_buf = dma_alloc_coherent(&ha->pdev->dev, data_len,
	    &fw_dma, GFP_KERNEL);
	if (!fw_buf) {
		ql_log(ql_log_warn, vha, 0x7035,
		    "DMA alloc failed for fw_buf.\n");
		rval = -ENOMEM;
		goto done_unmap_sg;
	}

	sg_copy_to_buffer(bsg_job->request_payload.sg_list,
	    bsg_job->request_payload.sg_cnt, fw_buf, data_len);

	mn = dma_pool_zalloc(ha->s_dma_pool, GFP_KERNEL, &mn_dma);
	if (!mn) {
		ql_log(ql_log_warn, vha, 0x7036,
		    "DMA alloc failed for fw buffer.\n");
		rval = -ENOMEM;
		goto done_free_fw_buf;
	}

	flag = bsg_request->rqst_data.h_vendor.vendor_cmd[1];
	/* Firmware version is the third 32-bit word of the image. */
	fw_ver = get_unaligned_le32((uint32_t *)fw_buf + 2);

	/* Build the VERIFY CHIP IOCB describing the download. */
	mn->entry_type = VERIFY_CHIP_IOCB_TYPE;
	mn->entry_count = 1;

	options = VCO_FORCE_UPDATE | VCO_END_OF_DATA;
	if (flag == A84_ISSUE_UPDATE_DIAGFW_CMD)
		options |= VCO_DIAG_FW;

	mn->options = cpu_to_le16(options);
	mn->fw_ver = cpu_to_le32(fw_ver);
	mn->fw_size = cpu_to_le32(data_len);
	mn->fw_seq_size = cpu_to_le32(data_len);
	put_unaligned_le64(fw_dma, &mn->dsd.address);
	mn->dsd.length = cpu_to_le32(data_len);
	mn->data_seg_cnt = cpu_to_le16(1);

	/* Firmware download can be slow; allow up to 120 seconds. */
	rval = qla2x00_issue_iocb_timeout(vha, mn, mn_dma, 0, 120);

	if (rval) {
		ql_log(ql_log_warn, vha, 0x7037,
		    "Vendor request 84xx updatefw failed.\n");

		rval = (DID_ERROR << 16);
	} else {
		ql_dbg(ql_dbg_user, vha, 0x7038,
		    "Vendor request 84xx updatefw completed.\n");

		bsg_job->reply_len = sizeof(struct fc_bsg_reply);
		bsg_reply->result = DID_OK;
	}

	dma_pool_free(ha->s_dma_pool, mn, mn_dma);

done_free_fw_buf:
	dma_free_coherent(&ha->pdev->dev, data_len, fw_buf, fw_dma);

done_unmap_sg:
	dma_unmap_sg(&ha->pdev->dev, bsg_job->request_payload.sg_list,
	    bsg_job->request_payload.sg_cnt, DMA_TO_DEVICE);

	if (!rval)
		bsg_job_done(bsg_job, bsg_reply->result,
		    bsg_reply->reply_payload_rcv_len);
	return rval;
}
1130
/*
 * qla84xx_mgmt_cmd() - BSG vendor command: ISP84xx management pass-through.
 * @bsg_job: fc bsg job; a struct qla_bsg_a84_mgmt immediately follows the
 *	generic fc_bsg_request header in the request buffer.
 *
 * Builds an ACCESS_CHIP IOCB for the requested sub-command (read memory,
 * write memory, get info, change config) and issues it synchronously via
 * qla2x00_issue_iocb().  Read-type commands DMA-map the reply payload,
 * the write command maps the request payload; data travels through a
 * single coherent bounce buffer (mgmt_b) described by one DSD.
 *
 * Return: 0 on success (reply completed via bsg_job_done()), a negative
 * errno on setup failure, or (DID_ERROR << 16) when the IOCB itself fails.
 */
static int
qla84xx_mgmt_cmd(struct bsg_job *bsg_job)
{
	struct fc_bsg_request *bsg_request = bsg_job->request;
	struct fc_bsg_reply *bsg_reply = bsg_job->reply;
	struct Scsi_Host *host = fc_bsg_to_shost(bsg_job);
	scsi_qla_host_t *vha = shost_priv(host);
	struct qla_hw_data *ha = vha->hw;
	struct access_chip_84xx *mn = NULL;
	dma_addr_t mn_dma, mgmt_dma;
	void *mgmt_b = NULL;
	int rval = 0;
	struct qla_bsg_a84_mgmt *ql84_mgmt;
	uint32_t sg_cnt;
	uint32_t data_len = 0;
	/* Tracks which payload was mapped so the unwind path matches it. */
	uint32_t dma_direction = DMA_NONE;

	if (!IS_QLA84XX(ha)) {
		ql_log(ql_log_warn, vha, 0x703a,
		    "Not 84xx, exiting.\n");
		return -EINVAL;
	}

	/* IOCB itself lives in a small coherent pool allocation. */
	mn = dma_pool_zalloc(ha->s_dma_pool, GFP_KERNEL, &mn_dma);
	if (!mn) {
		ql_log(ql_log_warn, vha, 0x703c,
		    "DMA alloc failed for fw buffer.\n");
		return -ENOMEM;
	}

	mn->entry_type = ACCESS_CHIP_IOCB_TYPE;
	mn->entry_count = 1;
	/* Vendor payload follows the generic bsg request header. */
	ql84_mgmt = (void *)bsg_request + sizeof(struct fc_bsg_request);
	switch (ql84_mgmt->mgmt.cmd) {
	case QLA84_MGMT_READ_MEM:
	case QLA84_MGMT_GET_INFO:
		/* Data flows chip -> caller: map the reply scatterlist. */
		sg_cnt = dma_map_sg(&ha->pdev->dev,
		    bsg_job->reply_payload.sg_list,
		    bsg_job->reply_payload.sg_cnt, DMA_FROM_DEVICE);
		if (!sg_cnt) {
			ql_log(ql_log_warn, vha, 0x703d,
			    "dma_map_sg returned %d for reply.\n", sg_cnt);
			rval = -ENOMEM;
			goto exit_mgmt;
		}

		dma_direction = DMA_FROM_DEVICE;

		/* A coalesced mapping would desync IOCB DSD accounting. */
		if (sg_cnt != bsg_job->reply_payload.sg_cnt) {
			ql_log(ql_log_warn, vha, 0x703e,
			    "DMA mapping resulted in different sg counts, "
			    "reply_sg_cnt: %x dma_reply_sg_cnt: %x.\n",
			    bsg_job->reply_payload.sg_cnt, sg_cnt);
			rval = -EAGAIN;
			goto done_unmap_sg;
		}

		data_len = bsg_job->reply_payload.payload_len;

		mgmt_b = dma_alloc_coherent(&ha->pdev->dev, data_len,
		    &mgmt_dma, GFP_KERNEL);
		if (!mgmt_b) {
			ql_log(ql_log_warn, vha, 0x703f,
			    "DMA alloc failed for mgmt_b.\n");
			rval = -ENOMEM;
			goto done_unmap_sg;
		}

		if (ql84_mgmt->mgmt.cmd == QLA84_MGMT_READ_MEM) {
			mn->options = cpu_to_le16(ACO_DUMP_MEMORY);
			mn->parameter1 =
			    cpu_to_le32(
				ql84_mgmt->mgmt.mgmtp.u.mem.start_addr);

		} else if (ql84_mgmt->mgmt.cmd == QLA84_MGMT_GET_INFO) {
			mn->options = cpu_to_le16(ACO_REQUEST_INFO);
			mn->parameter1 =
			    cpu_to_le32(ql84_mgmt->mgmt.mgmtp.u.info.type);

			mn->parameter2 =
			    cpu_to_le32(
				ql84_mgmt->mgmt.mgmtp.u.info.context);
		}
		break;

	case QLA84_MGMT_WRITE_MEM:
		/* Data flows caller -> chip: map the request scatterlist. */
		sg_cnt = dma_map_sg(&ha->pdev->dev,
		    bsg_job->request_payload.sg_list,
		    bsg_job->request_payload.sg_cnt, DMA_TO_DEVICE);

		if (!sg_cnt) {
			ql_log(ql_log_warn, vha, 0x7040,
			    "dma_map_sg returned %d.\n", sg_cnt);
			rval = -ENOMEM;
			goto exit_mgmt;
		}

		dma_direction = DMA_TO_DEVICE;

		if (sg_cnt != bsg_job->request_payload.sg_cnt) {
			ql_log(ql_log_warn, vha, 0x7041,
			    "DMA mapping resulted in different sg counts, "
			    "request_sg_cnt: %x dma_request_sg_cnt: %x.\n",
			    bsg_job->request_payload.sg_cnt, sg_cnt);
			rval = -EAGAIN;
			goto done_unmap_sg;
		}

		data_len = bsg_job->request_payload.payload_len;
		mgmt_b = dma_alloc_coherent(&ha->pdev->dev, data_len,
		    &mgmt_dma, GFP_KERNEL);
		if (!mgmt_b) {
			ql_log(ql_log_warn, vha, 0x7042,
			    "DMA alloc failed for mgmt_b.\n");
			rval = -ENOMEM;
			goto done_unmap_sg;
		}

		/* Stage the caller's data into the bounce buffer. */
		sg_copy_to_buffer(bsg_job->request_payload.sg_list,
		    bsg_job->request_payload.sg_cnt, mgmt_b, data_len);

		mn->options = cpu_to_le16(ACO_LOAD_MEMORY);
		mn->parameter1 =
		    cpu_to_le32(ql84_mgmt->mgmt.mgmtp.u.mem.start_addr);
		break;

	case QLA84_MGMT_CHNG_CONFIG:
		/* Pure control command: no payload mapping needed. */
		mn->options = cpu_to_le16(ACO_CHANGE_CONFIG_PARAM);
		mn->parameter1 =
		    cpu_to_le32(ql84_mgmt->mgmt.mgmtp.u.config.id);

		mn->parameter2 =
		    cpu_to_le32(ql84_mgmt->mgmt.mgmtp.u.config.param0);

		mn->parameter3 =
		    cpu_to_le32(ql84_mgmt->mgmt.mgmtp.u.config.param1);
		break;

	default:
		rval = -EIO;
		goto exit_mgmt;
	}

	/* Every data-bearing command describes the bounce buffer in 1 DSD. */
	if (ql84_mgmt->mgmt.cmd != QLA84_MGMT_CHNG_CONFIG) {
		mn->total_byte_cnt = cpu_to_le32(ql84_mgmt->mgmt.len);
		mn->dseg_count = cpu_to_le16(1);
		put_unaligned_le64(mgmt_dma, &mn->dsd.address);
		mn->dsd.length = cpu_to_le32(ql84_mgmt->mgmt.len);
	}

	rval = qla2x00_issue_iocb(vha, mn, mn_dma, 0);

	if (rval) {
		ql_log(ql_log_warn, vha, 0x7043,
		    "Vendor request 84xx mgmt failed.\n");

		rval = (DID_ERROR << 16);

	} else {
		ql_dbg(ql_dbg_user, vha, 0x7044,
		    "Vendor request 84xx mgmt completed.\n");

		bsg_job->reply_len = sizeof(struct fc_bsg_reply);
		bsg_reply->result = DID_OK;

		/* Read-type commands: copy chip data back to the caller. */
		if ((ql84_mgmt->mgmt.cmd == QLA84_MGMT_READ_MEM) ||
		    (ql84_mgmt->mgmt.cmd == QLA84_MGMT_GET_INFO)) {
			bsg_reply->reply_payload_rcv_len =
			    bsg_job->reply_payload.payload_len;

			sg_copy_from_buffer(bsg_job->reply_payload.sg_list,
			    bsg_job->reply_payload.sg_cnt, mgmt_b,
			    data_len);
		}
	}

done_unmap_sg:
	if (mgmt_b)
		dma_free_coherent(&ha->pdev->dev, data_len, mgmt_b, mgmt_dma);

	/* Unmap exactly the payload that was mapped above. */
	if (dma_direction == DMA_TO_DEVICE)
		dma_unmap_sg(&ha->pdev->dev, bsg_job->request_payload.sg_list,
		    bsg_job->request_payload.sg_cnt, DMA_TO_DEVICE);
	else if (dma_direction == DMA_FROM_DEVICE)
		dma_unmap_sg(&ha->pdev->dev, bsg_job->reply_payload.sg_list,
		    bsg_job->reply_payload.sg_cnt, DMA_FROM_DEVICE);

exit_mgmt:
	dma_pool_free(ha->s_dma_pool, mn, mn_dma);

	if (!rval)
		bsg_job_done(bsg_job, bsg_reply->result,
		    bsg_reply->reply_payload_rcv_len);
	return rval;
}
1326
1327 static int
1328 qla24xx_iidma(struct bsg_job *bsg_job)
1329 {
1330 struct fc_bsg_request *bsg_request = bsg_job->request;
1331 struct fc_bsg_reply *bsg_reply = bsg_job->reply;
1332 struct Scsi_Host *host = fc_bsg_to_shost(bsg_job);
1333 scsi_qla_host_t *vha = shost_priv(host);
1334 int rval = 0;
1335 struct qla_port_param *port_param = NULL;
1336 fc_port_t *fcport = NULL;
1337 int found = 0;
1338 uint16_t mb[MAILBOX_REGISTER_COUNT];
1339 uint8_t *rsp_ptr = NULL;
1340
1341 if (!IS_IIDMA_CAPABLE(vha->hw)) {
1342 ql_log(ql_log_info, vha, 0x7046, "iiDMA not supported.\n");
1343 return -EINVAL;
1344 }
1345
1346 port_param = (void *)bsg_request + sizeof(struct fc_bsg_request);
1347 if (port_param->fc_scsi_addr.dest_type != EXT_DEF_TYPE_WWPN) {
1348 ql_log(ql_log_warn, vha, 0x7048,
1349 "Invalid destination type.\n");
1350 return -EINVAL;
1351 }
1352
1353 list_for_each_entry(fcport, &vha->vp_fcports, list) {
1354 if (fcport->port_type != FCT_TARGET)
1355 continue;
1356
1357 if (memcmp(port_param->fc_scsi_addr.dest_addr.wwpn,
1358 fcport->port_name, sizeof(fcport->port_name)))
1359 continue;
1360
1361 found = 1;
1362 break;
1363 }
1364
1365 if (!found) {
1366 ql_log(ql_log_warn, vha, 0x7049,
1367 "Failed to find port.\n");
1368 return -EINVAL;
1369 }
1370
1371 if (atomic_read(&fcport->state) != FCS_ONLINE) {
1372 ql_log(ql_log_warn, vha, 0x704a,
1373 "Port is not online.\n");
1374 return -EINVAL;
1375 }
1376
1377 if (fcport->flags & FCF_LOGIN_NEEDED) {
1378 ql_log(ql_log_warn, vha, 0x704b,
1379 "Remote port not logged in flags = 0x%x.\n", fcport->flags);
1380 return -EINVAL;
1381 }
1382
1383 if (port_param->mode)
1384 rval = qla2x00_set_idma_speed(vha, fcport->loop_id,
1385 port_param->speed, mb);
1386 else
1387 rval = qla2x00_get_idma_speed(vha, fcport->loop_id,
1388 &port_param->speed, mb);
1389
1390 if (rval) {
1391 ql_log(ql_log_warn, vha, 0x704c,
1392 "iiDMA cmd failed for %8phN -- "
1393 "%04x %x %04x %04x.\n", fcport->port_name,
1394 rval, fcport->fp_speed, mb[0], mb[1]);
1395 rval = (DID_ERROR << 16);
1396 } else {
1397 if (!port_param->mode) {
1398 bsg_job->reply_len = sizeof(struct fc_bsg_reply) +
1399 sizeof(struct qla_port_param);
1400
1401 rsp_ptr = ((uint8_t *)bsg_reply) +
1402 sizeof(struct fc_bsg_reply);
1403
1404 memcpy(rsp_ptr, port_param,
1405 sizeof(struct qla_port_param));
1406 }
1407
1408 bsg_reply->result = DID_OK;
1409 bsg_job_done(bsg_job, bsg_reply->result,
1410 bsg_reply->reply_payload_rcv_len);
1411 }
1412
1413 return rval;
1414 }
1415
/*
 * qla2x00_optrom_setup() - validate a BSG option-ROM request and stage state.
 * @bsg_job: the bsg read/update request.
 * @vha: host adapter.
 * @is_update: non-zero for a flash write (update), zero for a read.
 *
 * Claims the one-transfer-at-a-time optrom state machine (caller must hold
 * ha->optrom_mutex), validates the starting offset for updates, and
 * allocates the vzalloc staging buffer.  On success ha->optrom_state is
 * QLA_SWRITING or QLA_SREADING and ha->optrom_buffer/region_start/
 * region_size describe the transfer; the region is clamped so it never
 * runs past the end of the flash.
 *
 * Return: 0 on success, -EINVAL/-EBUSY/-ENOMEM on failure (state restored
 * to QLA_SWAITING on the allocation-failure path).
 */
static int
qla2x00_optrom_setup(struct bsg_job *bsg_job, scsi_qla_host_t *vha,
	uint8_t is_update)
{
	struct fc_bsg_request *bsg_request = bsg_job->request;
	uint32_t start = 0;
	int valid = 0;
	struct qla_hw_data *ha = vha->hw;

	if (unlikely(pci_channel_offline(ha->pdev)))
		return -EINVAL;

	/* Flash offset is passed in the second vendor command word. */
	start = bsg_request->rqst_data.h_vendor.vendor_cmd[1];
	if (start > ha->optrom_size) {
		ql_log(ql_log_warn, vha, 0x7055,
		    "start %d > optrom_size %d.\n", start, ha->optrom_size);
		return -EINVAL;
	}

	/* Only one optrom transfer may be staged at a time. */
	if (ha->optrom_state != QLA_SWAITING) {
		ql_log(ql_log_info, vha, 0x7056,
		    "optrom_state %d.\n", ha->optrom_state);
		return -EBUSY;
	}

	ha->optrom_region_start = start;
	ql_dbg(ql_dbg_user, vha, 0x7057, "is_update=%d.\n", is_update);
	if (is_update) {
		/*
		 * Updates may only start at the flash base (2300-class
		 * parts), at a known FLT region boundary, or anywhere on
		 * ISP types that permit arbitrary-offset writes.
		 */
		if (ha->optrom_size == OPTROM_SIZE_2300 && start == 0)
			valid = 1;
		else if (start == (ha->flt_region_boot * 4) ||
		    start == (ha->flt_region_fw * 4))
			valid = 1;
		else if (IS_QLA24XX_TYPE(ha) || IS_QLA25XX(ha) ||
		    IS_CNA_CAPABLE(ha) || IS_QLA2031(ha) || IS_QLA27XX(ha) ||
		    IS_QLA28XX(ha))
			valid = 1;
		if (!valid) {
			ql_log(ql_log_warn, vha, 0x7058,
			    "Invalid start region 0x%x/0x%x.\n", start,
			    bsg_job->request_payload.payload_len);
			return -EINVAL;
		}

		/* Clamp the write so it cannot extend past end of flash. */
		ha->optrom_region_size = start +
		    bsg_job->request_payload.payload_len > ha->optrom_size ?
		    ha->optrom_size - start :
		    bsg_job->request_payload.payload_len;
		ha->optrom_state = QLA_SWRITING;
	} else {
		/* Same clamping for reads, sized by the reply payload. */
		ha->optrom_region_size = start +
		    bsg_job->reply_payload.payload_len > ha->optrom_size ?
		    ha->optrom_size - start :
		    bsg_job->reply_payload.payload_len;
		ha->optrom_state = QLA_SREADING;
	}

	ha->optrom_buffer = vzalloc(ha->optrom_region_size);
	if (!ha->optrom_buffer) {
		ql_log(ql_log_warn, vha, 0x7059,
		    "Read: Unable to allocate memory for optrom retrieval "
		    "(%x)\n", ha->optrom_region_size);

		/* Re-open the state machine for the next caller. */
		ha->optrom_state = QLA_SWAITING;
		return -ENOMEM;
	}

	return 0;
}
1485
/*
 * qla2x00_read_optrom() - BSG vendor command: read the option ROM / flash.
 *
 * Serializes on ha->optrom_mutex, stages the region through
 * qla2x00_optrom_setup(), reads the flash into the staging buffer, copies
 * it into the caller's reply payload, then tears down the staging state.
 *
 * Return: 0 on success (job completed via bsg_job_done()), negative errno
 * if setup fails or the NIC-core reset handler owns the flash.
 */
static int
qla2x00_read_optrom(struct bsg_job *bsg_job)
{
	struct fc_bsg_reply *bsg_reply = bsg_job->reply;
	struct Scsi_Host *host = fc_bsg_to_shost(bsg_job);
	scsi_qla_host_t *vha = shost_priv(host);
	struct qla_hw_data *ha = vha->hw;
	int rval = 0;

	/* Flash access is off-limits while the core reset handler runs. */
	if (ha->flags.nic_core_reset_hdlr_active)
		return -EBUSY;

	mutex_lock(&ha->optrom_mutex);
	rval = qla2x00_optrom_setup(bsg_job, vha, 0);
	if (rval) {
		mutex_unlock(&ha->optrom_mutex);
		return rval;
	}

	ha->isp_ops->read_optrom(vha, ha->optrom_buffer,
	    ha->optrom_region_start, ha->optrom_region_size);

	sg_copy_from_buffer(bsg_job->reply_payload.sg_list,
	    bsg_job->reply_payload.sg_cnt, ha->optrom_buffer,
	    ha->optrom_region_size);

	bsg_reply->reply_payload_rcv_len = ha->optrom_region_size;
	bsg_reply->result = DID_OK;
	/* Release the staging buffer and re-open the optrom state machine. */
	vfree(ha->optrom_buffer);
	ha->optrom_buffer = NULL;
	ha->optrom_state = QLA_SWAITING;
	mutex_unlock(&ha->optrom_mutex);
	bsg_job_done(bsg_job, bsg_reply->result,
	    bsg_reply->reply_payload_rcv_len);
	return rval;
}
1522
/*
 * qla2x00_update_optrom() - BSG vendor command: write the option ROM / flash.
 *
 * Serializes on ha->optrom_mutex, stages the region through
 * qla2x00_optrom_setup(), copies the caller's request payload into the
 * staging buffer and burns it to flash, then tears down the staging state.
 *
 * Return: 0 on success (job completed via bsg_job_done()), -EINVAL if the
 * flash write fails, or the setup error.
 */
static int
qla2x00_update_optrom(struct bsg_job *bsg_job)
{
	struct fc_bsg_reply *bsg_reply = bsg_job->reply;
	struct Scsi_Host *host = fc_bsg_to_shost(bsg_job);
	scsi_qla_host_t *vha = shost_priv(host);
	struct qla_hw_data *ha = vha->hw;
	int rval = 0;

	mutex_lock(&ha->optrom_mutex);
	rval = qla2x00_optrom_setup(bsg_job, vha, 1);
	if (rval) {
		mutex_unlock(&ha->optrom_mutex);
		return rval;
	}

	/* Set the isp82xx_no_md_cap not to capture minidump */
	ha->flags.isp82xx_no_md_cap = 1;

	sg_copy_to_buffer(bsg_job->request_payload.sg_list,
	    bsg_job->request_payload.sg_cnt, ha->optrom_buffer,
	    ha->optrom_region_size);

	rval = ha->isp_ops->write_optrom(vha, ha->optrom_buffer,
	    ha->optrom_region_start, ha->optrom_region_size);

	if (rval) {
		bsg_reply->result = -EINVAL;
		rval = -EINVAL;
	} else {
		bsg_reply->result = DID_OK;
	}
	/* Release the staging buffer and re-open the optrom state machine. */
	vfree(ha->optrom_buffer);
	ha->optrom_buffer = NULL;
	ha->optrom_state = QLA_SWAITING;
	mutex_unlock(&ha->optrom_mutex);
	bsg_job_done(bsg_job, bsg_reply->result,
	    bsg_reply->reply_payload_rcv_len);
	return rval;
}
1563
1564 static int
1565 qla2x00_update_fru_versions(struct bsg_job *bsg_job)
1566 {
1567 struct fc_bsg_reply *bsg_reply = bsg_job->reply;
1568 struct Scsi_Host *host = fc_bsg_to_shost(bsg_job);
1569 scsi_qla_host_t *vha = shost_priv(host);
1570 struct qla_hw_data *ha = vha->hw;
1571 int rval = 0;
1572 uint8_t bsg[DMA_POOL_SIZE];
1573 struct qla_image_version_list *list = (void *)bsg;
1574 struct qla_image_version *image;
1575 uint32_t count;
1576 dma_addr_t sfp_dma;
1577 void *sfp = dma_pool_alloc(ha->s_dma_pool, GFP_KERNEL, &sfp_dma);
1578
1579 if (!sfp) {
1580 bsg_reply->reply_data.vendor_reply.vendor_rsp[0] =
1581 EXT_STATUS_NO_MEMORY;
1582 goto done;
1583 }
1584
1585 sg_copy_to_buffer(bsg_job->request_payload.sg_list,
1586 bsg_job->request_payload.sg_cnt, list, sizeof(bsg));
1587
1588 image = list->version;
1589 count = list->count;
1590 while (count--) {
1591 memcpy(sfp, &image->field_info, sizeof(image->field_info));
1592 rval = qla2x00_write_sfp(vha, sfp_dma, sfp,
1593 image->field_address.device, image->field_address.offset,
1594 sizeof(image->field_info), image->field_address.option);
1595 if (rval) {
1596 bsg_reply->reply_data.vendor_reply.vendor_rsp[0] =
1597 EXT_STATUS_MAILBOX;
1598 goto dealloc;
1599 }
1600 image++;
1601 }
1602
1603 bsg_reply->reply_data.vendor_reply.vendor_rsp[0] = 0;
1604
1605 dealloc:
1606 dma_pool_free(ha->s_dma_pool, sfp, sfp_dma);
1607
1608 done:
1609 bsg_job->reply_len = sizeof(struct fc_bsg_reply);
1610 bsg_reply->result = DID_OK << 16;
1611 bsg_job_done(bsg_job, bsg_reply->result,
1612 bsg_reply->reply_payload_rcv_len);
1613
1614 return 0;
1615 }
1616
1617 static int
1618 qla2x00_read_fru_status(struct bsg_job *bsg_job)
1619 {
1620 struct fc_bsg_reply *bsg_reply = bsg_job->reply;
1621 struct Scsi_Host *host = fc_bsg_to_shost(bsg_job);
1622 scsi_qla_host_t *vha = shost_priv(host);
1623 struct qla_hw_data *ha = vha->hw;
1624 int rval = 0;
1625 uint8_t bsg[DMA_POOL_SIZE];
1626 struct qla_status_reg *sr = (void *)bsg;
1627 dma_addr_t sfp_dma;
1628 uint8_t *sfp = dma_pool_alloc(ha->s_dma_pool, GFP_KERNEL, &sfp_dma);
1629
1630 if (!sfp) {
1631 bsg_reply->reply_data.vendor_reply.vendor_rsp[0] =
1632 EXT_STATUS_NO_MEMORY;
1633 goto done;
1634 }
1635
1636 sg_copy_to_buffer(bsg_job->request_payload.sg_list,
1637 bsg_job->request_payload.sg_cnt, sr, sizeof(*sr));
1638
1639 rval = qla2x00_read_sfp(vha, sfp_dma, sfp,
1640 sr->field_address.device, sr->field_address.offset,
1641 sizeof(sr->status_reg), sr->field_address.option);
1642 sr->status_reg = *sfp;
1643
1644 if (rval) {
1645 bsg_reply->reply_data.vendor_reply.vendor_rsp[0] =
1646 EXT_STATUS_MAILBOX;
1647 goto dealloc;
1648 }
1649
1650 sg_copy_from_buffer(bsg_job->reply_payload.sg_list,
1651 bsg_job->reply_payload.sg_cnt, sr, sizeof(*sr));
1652
1653 bsg_reply->reply_data.vendor_reply.vendor_rsp[0] = 0;
1654
1655 dealloc:
1656 dma_pool_free(ha->s_dma_pool, sfp, sfp_dma);
1657
1658 done:
1659 bsg_job->reply_len = sizeof(struct fc_bsg_reply);
1660 bsg_reply->reply_payload_rcv_len = sizeof(*sr);
1661 bsg_reply->result = DID_OK << 16;
1662 bsg_job_done(bsg_job, bsg_reply->result,
1663 bsg_reply->reply_payload_rcv_len);
1664
1665 return 0;
1666 }
1667
1668 static int
1669 qla2x00_write_fru_status(struct bsg_job *bsg_job)
1670 {
1671 struct fc_bsg_reply *bsg_reply = bsg_job->reply;
1672 struct Scsi_Host *host = fc_bsg_to_shost(bsg_job);
1673 scsi_qla_host_t *vha = shost_priv(host);
1674 struct qla_hw_data *ha = vha->hw;
1675 int rval = 0;
1676 uint8_t bsg[DMA_POOL_SIZE];
1677 struct qla_status_reg *sr = (void *)bsg;
1678 dma_addr_t sfp_dma;
1679 uint8_t *sfp = dma_pool_alloc(ha->s_dma_pool, GFP_KERNEL, &sfp_dma);
1680
1681 if (!sfp) {
1682 bsg_reply->reply_data.vendor_reply.vendor_rsp[0] =
1683 EXT_STATUS_NO_MEMORY;
1684 goto done;
1685 }
1686
1687 sg_copy_to_buffer(bsg_job->request_payload.sg_list,
1688 bsg_job->request_payload.sg_cnt, sr, sizeof(*sr));
1689
1690 *sfp = sr->status_reg;
1691 rval = qla2x00_write_sfp(vha, sfp_dma, sfp,
1692 sr->field_address.device, sr->field_address.offset,
1693 sizeof(sr->status_reg), sr->field_address.option);
1694
1695 if (rval) {
1696 bsg_reply->reply_data.vendor_reply.vendor_rsp[0] =
1697 EXT_STATUS_MAILBOX;
1698 goto dealloc;
1699 }
1700
1701 bsg_reply->reply_data.vendor_reply.vendor_rsp[0] = 0;
1702
1703 dealloc:
1704 dma_pool_free(ha->s_dma_pool, sfp, sfp_dma);
1705
1706 done:
1707 bsg_job->reply_len = sizeof(struct fc_bsg_reply);
1708 bsg_reply->result = DID_OK << 16;
1709 bsg_job_done(bsg_job, bsg_reply->result,
1710 bsg_reply->reply_payload_rcv_len);
1711
1712 return 0;
1713 }
1714
1715 static int
1716 qla2x00_write_i2c(struct bsg_job *bsg_job)
1717 {
1718 struct fc_bsg_reply *bsg_reply = bsg_job->reply;
1719 struct Scsi_Host *host = fc_bsg_to_shost(bsg_job);
1720 scsi_qla_host_t *vha = shost_priv(host);
1721 struct qla_hw_data *ha = vha->hw;
1722 int rval = 0;
1723 uint8_t bsg[DMA_POOL_SIZE];
1724 struct qla_i2c_access *i2c = (void *)bsg;
1725 dma_addr_t sfp_dma;
1726 uint8_t *sfp = dma_pool_alloc(ha->s_dma_pool, GFP_KERNEL, &sfp_dma);
1727
1728 if (!sfp) {
1729 bsg_reply->reply_data.vendor_reply.vendor_rsp[0] =
1730 EXT_STATUS_NO_MEMORY;
1731 goto done;
1732 }
1733
1734 sg_copy_to_buffer(bsg_job->request_payload.sg_list,
1735 bsg_job->request_payload.sg_cnt, i2c, sizeof(*i2c));
1736
1737 memcpy(sfp, i2c->buffer, i2c->length);
1738 rval = qla2x00_write_sfp(vha, sfp_dma, sfp,
1739 i2c->device, i2c->offset, i2c->length, i2c->option);
1740
1741 if (rval) {
1742 bsg_reply->reply_data.vendor_reply.vendor_rsp[0] =
1743 EXT_STATUS_MAILBOX;
1744 goto dealloc;
1745 }
1746
1747 bsg_reply->reply_data.vendor_reply.vendor_rsp[0] = 0;
1748
1749 dealloc:
1750 dma_pool_free(ha->s_dma_pool, sfp, sfp_dma);
1751
1752 done:
1753 bsg_job->reply_len = sizeof(struct fc_bsg_reply);
1754 bsg_reply->result = DID_OK << 16;
1755 bsg_job_done(bsg_job, bsg_reply->result,
1756 bsg_reply->reply_payload_rcv_len);
1757
1758 return 0;
1759 }
1760
1761 static int
1762 qla2x00_read_i2c(struct bsg_job *bsg_job)
1763 {
1764 struct fc_bsg_reply *bsg_reply = bsg_job->reply;
1765 struct Scsi_Host *host = fc_bsg_to_shost(bsg_job);
1766 scsi_qla_host_t *vha = shost_priv(host);
1767 struct qla_hw_data *ha = vha->hw;
1768 int rval = 0;
1769 uint8_t bsg[DMA_POOL_SIZE];
1770 struct qla_i2c_access *i2c = (void *)bsg;
1771 dma_addr_t sfp_dma;
1772 uint8_t *sfp = dma_pool_alloc(ha->s_dma_pool, GFP_KERNEL, &sfp_dma);
1773
1774 if (!sfp) {
1775 bsg_reply->reply_data.vendor_reply.vendor_rsp[0] =
1776 EXT_STATUS_NO_MEMORY;
1777 goto done;
1778 }
1779
1780 sg_copy_to_buffer(bsg_job->request_payload.sg_list,
1781 bsg_job->request_payload.sg_cnt, i2c, sizeof(*i2c));
1782
1783 rval = qla2x00_read_sfp(vha, sfp_dma, sfp,
1784 i2c->device, i2c->offset, i2c->length, i2c->option);
1785
1786 if (rval) {
1787 bsg_reply->reply_data.vendor_reply.vendor_rsp[0] =
1788 EXT_STATUS_MAILBOX;
1789 goto dealloc;
1790 }
1791
1792 memcpy(i2c->buffer, sfp, i2c->length);
1793 sg_copy_from_buffer(bsg_job->reply_payload.sg_list,
1794 bsg_job->reply_payload.sg_cnt, i2c, sizeof(*i2c));
1795
1796 bsg_reply->reply_data.vendor_reply.vendor_rsp[0] = 0;
1797
1798 dealloc:
1799 dma_pool_free(ha->s_dma_pool, sfp, sfp_dma);
1800
1801 done:
1802 bsg_job->reply_len = sizeof(struct fc_bsg_reply);
1803 bsg_reply->reply_payload_rcv_len = sizeof(*i2c);
1804 bsg_reply->result = DID_OK << 16;
1805 bsg_job_done(bsg_job, bsg_reply->result,
1806 bsg_reply->reply_payload_rcv_len);
1807
1808 return 0;
1809 }
1810
/*
 * qla24xx_process_bidir_cmd() - BSG vendor command: bidirectional
 * (loopback-style) data pass-through.
 *
 * Validates adapter capability and link/topology state, performs a
 * one-time fabric self-login (cached in vha->self_login_loop_id and
 * serialized by ha->selflogin_lock), DMA-maps both payloads and starts a
 * bidirectional IOCB through an SRB.  When the IOCB is started the job
 * completes later from the interrupt handler; every failure path instead
 * completes the job here with the failure in the vendor-specific status
 * and an overall DID_OK result.
 *
 * Return: EXT_STATUS_OK (0) when the IOCB was started (deferred
 * completion), otherwise 0 after completing the job with an error status.
 */
static int
qla24xx_process_bidir_cmd(struct bsg_job *bsg_job)
{
	struct fc_bsg_reply *bsg_reply = bsg_job->reply;
	struct Scsi_Host *host = fc_bsg_to_shost(bsg_job);
	scsi_qla_host_t *vha = shost_priv(host);
	struct qla_hw_data *ha = vha->hw;
	uint32_t rval = EXT_STATUS_OK;
	uint16_t req_sg_cnt = 0;
	uint16_t rsp_sg_cnt = 0;
	uint16_t nextlid = 0;
	uint32_t tot_dsds;
	srb_t *sp = NULL;
	uint32_t req_data_len;
	uint32_t rsp_data_len;

	/* Check the type of the adapter */
	if (!IS_BIDI_CAPABLE(ha)) {
		ql_log(ql_log_warn, vha, 0x70a0,
			"This adapter is not supported\n");
		rval = EXT_STATUS_NOT_SUPPORTED;
		goto done;
	}

	/* Reject while an ISP abort/reset is pending or in progress. */
	if (test_bit(ISP_ABORT_NEEDED, &vha->dpc_flags) ||
		test_bit(ABORT_ISP_ACTIVE, &vha->dpc_flags) ||
		test_bit(ISP_ABORT_RETRY, &vha->dpc_flags)) {
		rval = EXT_STATUS_BUSY;
		goto done;
	}

	/* Check if host is online */
	if (!vha->flags.online) {
		ql_log(ql_log_warn, vha, 0x70a1,
			"Host is not online\n");
		rval = EXT_STATUS_DEVICE_OFFLINE;
		goto done;
	}

	/* Check if cable is plugged in or not */
	if (vha->device_flags & DFLG_NO_CABLE) {
		ql_log(ql_log_warn, vha, 0x70a2,
			"Cable is unplugged...\n");
		rval = EXT_STATUS_INVALID_CFG;
		goto done;
	}

	/* Check if the switch is connected or not */
	if (ha->current_topology != ISP_CFG_F) {
		ql_log(ql_log_warn, vha, 0x70a3,
			"Host is not connected to the switch\n");
		rval = EXT_STATUS_INVALID_CFG;
		goto done;
	}

	/* Check if operating mode is P2P */
	if (ha->operating_mode != P2P) {
		ql_log(ql_log_warn, vha, 0x70a4,
			"Host operating mode is not P2p\n");
		rval = EXT_STATUS_INVALID_CFG;
		goto done;
	}

	/* Self-login is done once and cached; 0 means "not yet done". */
	mutex_lock(&ha->selflogin_lock);
	if (vha->self_login_loop_id == 0) {
		/* Initialize all required  fields of fcport */
		vha->bidir_fcport.vha = vha;
		vha->bidir_fcport.d_id.b.al_pa = vha->d_id.b.al_pa;
		vha->bidir_fcport.d_id.b.area = vha->d_id.b.area;
		vha->bidir_fcport.d_id.b.domain = vha->d_id.b.domain;
		vha->bidir_fcport.loop_id = vha->loop_id;

		if (qla2x00_fabric_login(vha, &(vha->bidir_fcport), &nextlid)) {
			ql_log(ql_log_warn, vha, 0x70a7,
			    "Failed to login port %06X for bidirectional IOCB\n",
			    vha->bidir_fcport.d_id.b24);
			mutex_unlock(&ha->selflogin_lock);
			rval = EXT_STATUS_MAILBOX;
			goto done;
		}
		vha->self_login_loop_id = nextlid - 1;

	}
	/* Assign the self login loop id to fcport */
	mutex_unlock(&ha->selflogin_lock);

	vha->bidir_fcport.loop_id = vha->self_login_loop_id;

	req_sg_cnt = dma_map_sg(&ha->pdev->dev,
		bsg_job->request_payload.sg_list,
		bsg_job->request_payload.sg_cnt,
		DMA_TO_DEVICE);

	if (!req_sg_cnt) {
		rval = EXT_STATUS_NO_MEMORY;
		goto done;
	}

	rsp_sg_cnt = dma_map_sg(&ha->pdev->dev,
		bsg_job->reply_payload.sg_list, bsg_job->reply_payload.sg_cnt,
		DMA_FROM_DEVICE);

	if (!rsp_sg_cnt) {
		rval = EXT_STATUS_NO_MEMORY;
		goto done_unmap_req_sg;
	}

	/* Coalesced mappings would desync the IOCB's DSD accounting. */
	if ((req_sg_cnt !=  bsg_job->request_payload.sg_cnt) ||
		(rsp_sg_cnt != bsg_job->reply_payload.sg_cnt)) {
		ql_dbg(ql_dbg_user, vha, 0x70a9,
		    "Dma mapping resulted in different sg counts "
		    "[request_sg_cnt: %x dma_request_sg_cnt: %x reply_sg_cnt: "
		    "%x dma_reply_sg_cnt: %x]\n",
		    bsg_job->request_payload.sg_cnt, req_sg_cnt,
		    bsg_job->reply_payload.sg_cnt, rsp_sg_cnt);
		rval = EXT_STATUS_NO_MEMORY;
		goto done_unmap_sg;
	}

	req_data_len = bsg_job->request_payload.payload_len;
	rsp_data_len = bsg_job->reply_payload.payload_len;

	/* Bidirectional transfers must be symmetric in length. */
	if (req_data_len != rsp_data_len) {
		rval = EXT_STATUS_BUSY;
		ql_log(ql_log_warn, vha, 0x70aa,
		    "req_data_len != rsp_data_len\n");
		goto done_unmap_sg;
	}

	/* Alloc SRB structure */
	sp = qla2x00_get_sp(vha, &(vha->bidir_fcport), GFP_KERNEL);
	if (!sp) {
		ql_dbg(ql_dbg_user, vha, 0x70ac,
		    "Alloc SRB structure failed\n");
		rval = EXT_STATUS_NO_MEMORY;
		goto done_unmap_sg;
	}

	/*Populate srb->ctx with bidir ctx*/
	sp->u.bsg_job = bsg_job;
	sp->free = qla2x00_bsg_sp_free;
	sp->type = SRB_BIDI_CMD;
	sp->done = qla2x00_bsg_job_done;

	/* Add the read and write sg count */
	tot_dsds = rsp_sg_cnt + req_sg_cnt;

	rval = qla2x00_start_bidir(sp, vha, tot_dsds);
	if (rval != EXT_STATUS_OK)
		goto done_free_srb;
	/* the bsg request will be completed in the interrupt handler */
	return rval;

done_free_srb:
	mempool_free(sp, ha->srb_mempool);
done_unmap_sg:
	dma_unmap_sg(&ha->pdev->dev,
	    bsg_job->reply_payload.sg_list,
	    bsg_job->reply_payload.sg_cnt, DMA_FROM_DEVICE);
done_unmap_req_sg:
	dma_unmap_sg(&ha->pdev->dev,
	    bsg_job->request_payload.sg_list,
	    bsg_job->request_payload.sg_cnt, DMA_TO_DEVICE);
done:

	/* Return an error vendor specific response
	 * and complete the bsg request
	 */
	bsg_reply->reply_data.vendor_reply.vendor_rsp[0] = rval;
	bsg_job->reply_len = sizeof(struct fc_bsg_reply);
	bsg_reply->reply_payload_rcv_len = 0;
	bsg_reply->result = (DID_OK) << 16;
	bsg_job_done(bsg_job, bsg_reply->result,
		       bsg_reply->reply_payload_rcv_len);
	/* Always return success, vendor rsp carries correct status */
	return 0;
}
1988
/*
 * qlafx00_mgmt_cmd() - BSG vendor command: ISPFx00 management pass-through.
 *
 * Extracts the caller's qla_mt_iocb_rqst_fx00 (which follows the first
 * vendor command word), DMA-maps whichever payloads the request flags
 * declare (SRB_FXDISC_REQ_DMA_VALID / SRB_FXDISC_RESP_DMA_VALID), and
 * starts an SRB_FXIOCB_BCMD SRB.  On success the job completes later from
 * qla2x00_bsg_job_done(); error paths unwind exactly the mappings and the
 * temporary fcport created here.
 *
 * Return: QLA_SUCCESS (0) when the SRB was started, otherwise a negative
 * errno.
 */
static int
qlafx00_mgmt_cmd(struct bsg_job *bsg_job)
{
	struct fc_bsg_request *bsg_request = bsg_job->request;
	struct Scsi_Host *host = fc_bsg_to_shost(bsg_job);
	scsi_qla_host_t *vha = shost_priv(host);
	struct qla_hw_data *ha = vha->hw;
	int rval = (DID_ERROR << 16);
	struct qla_mt_iocb_rqst_fx00 *piocb_rqst;
	srb_t *sp;
	int req_sg_cnt = 0, rsp_sg_cnt = 0;
	struct fc_port *fcport;
	char *type = "FC_BSG_HST_FX_MGMT";

	/* Copy the IOCB specific information */
	piocb_rqst = (struct qla_mt_iocb_rqst_fx00 *)
	    &bsg_request->rqst_data.h_vendor.vendor_cmd[1];

	/* Dump the vendor information */
	ql_dump_buffer(ql_dbg_user + ql_dbg_verbose , vha, 0x70cf,
	    piocb_rqst, sizeof(*piocb_rqst));

	if (!vha->flags.online) {
		ql_log(ql_log_warn, vha, 0x70d0,
		    "Host is not online.\n");
		rval = -EIO;
		goto done;
	}

	/* Map the request payload only if the caller flagged it valid. */
	if (piocb_rqst->flags & SRB_FXDISC_REQ_DMA_VALID) {
		req_sg_cnt = dma_map_sg(&ha->pdev->dev,
		    bsg_job->request_payload.sg_list,
		    bsg_job->request_payload.sg_cnt, DMA_TO_DEVICE);
		if (!req_sg_cnt) {
			ql_log(ql_log_warn, vha, 0x70c7,
			    "dma_map_sg return %d for request\n", req_sg_cnt);
			rval = -ENOMEM;
			goto done;
		}
	}

	/* Likewise for the response payload. */
	if (piocb_rqst->flags & SRB_FXDISC_RESP_DMA_VALID) {
		rsp_sg_cnt = dma_map_sg(&ha->pdev->dev,
		    bsg_job->reply_payload.sg_list,
		    bsg_job->reply_payload.sg_cnt, DMA_FROM_DEVICE);
		if (!rsp_sg_cnt) {
			ql_log(ql_log_warn, vha, 0x70c8,
			    "dma_map_sg return %d for reply\n", rsp_sg_cnt);
			rval = -ENOMEM;
			goto done_unmap_req_sg;
		}
	}

	ql_dbg(ql_dbg_user, vha, 0x70c9,
	    "request_sg_cnt: %x dma_request_sg_cnt: %x reply_sg_cnt:%x "
	    "dma_reply_sg_cnt: %x\n", bsg_job->request_payload.sg_cnt,
	    req_sg_cnt, bsg_job->reply_payload.sg_cnt, rsp_sg_cnt);

	/* Allocate a dummy fcport structure, since functions preparing the
	 * IOCB and mailbox command retrieves port specific information
	 * from fcport structure. For Host based ELS commands there will be
	 * no fcport structure allocated
	 */
	fcport = qla2x00_alloc_fcport(vha, GFP_KERNEL);
	if (!fcport) {
		ql_log(ql_log_warn, vha, 0x70ca,
		    "Failed to allocate fcport.\n");
		rval = -ENOMEM;
		goto done_unmap_rsp_sg;
	}

	/* Alloc SRB structure */
	sp = qla2x00_get_sp(vha, fcport, GFP_KERNEL);
	if (!sp) {
		ql_log(ql_log_warn, vha, 0x70cb,
		    "qla2x00_get_sp failed.\n");
		rval = -ENOMEM;
		goto done_free_fcport;
	}

	/* Initialize all required  fields of fcport */
	fcport->vha = vha;
	fcport->loop_id = le32_to_cpu(piocb_rqst->dataword);

	sp->type = SRB_FXIOCB_BCMD;
	sp->name = "bsg_fx_mgmt";
	sp->iocbs = qla24xx_calc_ct_iocbs(req_sg_cnt + rsp_sg_cnt);
	sp->u.bsg_job = bsg_job;
	sp->free = qla2x00_bsg_sp_free;
	sp->done = qla2x00_bsg_job_done;

	ql_dbg(ql_dbg_user, vha, 0x70cc,
	    "bsg rqst type: %s fx_mgmt_type: %x id=%x\n",
	    type, piocb_rqst->func_type, fcport->loop_id);

	rval = qla2x00_start_sp(sp);
	if (rval != QLA_SUCCESS) {
		ql_log(ql_log_warn, vha, 0x70cd,
		    "qla2x00_start_sp failed=%d.\n", rval);
		mempool_free(sp, ha->srb_mempool);
		rval = -EIO;
		goto done_free_fcport;
	}
	return rval;

done_free_fcport:
	qla2x00_free_fcport(fcport);

done_unmap_rsp_sg:
	if (piocb_rqst->flags & SRB_FXDISC_RESP_DMA_VALID)
		dma_unmap_sg(&ha->pdev->dev,
		    bsg_job->reply_payload.sg_list,
		    bsg_job->reply_payload.sg_cnt, DMA_FROM_DEVICE);
done_unmap_req_sg:
	if (piocb_rqst->flags & SRB_FXDISC_REQ_DMA_VALID)
		dma_unmap_sg(&ha->pdev->dev,
		    bsg_job->request_payload.sg_list,
		    bsg_job->request_payload.sg_cnt, DMA_TO_DEVICE);

done:
	return rval;
}
2111
2112 static int
2113 qla26xx_serdes_op(struct bsg_job *bsg_job)
2114 {
2115 struct fc_bsg_reply *bsg_reply = bsg_job->reply;
2116 struct Scsi_Host *host = fc_bsg_to_shost(bsg_job);
2117 scsi_qla_host_t *vha = shost_priv(host);
2118 int rval = 0;
2119 struct qla_serdes_reg sr;
2120
2121 memset(&sr, 0, sizeof(sr));
2122
2123 sg_copy_to_buffer(bsg_job->request_payload.sg_list,
2124 bsg_job->request_payload.sg_cnt, &sr, sizeof(sr));
2125
2126 switch (sr.cmd) {
2127 case INT_SC_SERDES_WRITE_REG:
2128 rval = qla2x00_write_serdes_word(vha, sr.addr, sr.val);
2129 bsg_reply->reply_payload_rcv_len = 0;
2130 break;
2131 case INT_SC_SERDES_READ_REG:
2132 rval = qla2x00_read_serdes_word(vha, sr.addr, &sr.val);
2133 sg_copy_from_buffer(bsg_job->reply_payload.sg_list,
2134 bsg_job->reply_payload.sg_cnt, &sr, sizeof(sr));
2135 bsg_reply->reply_payload_rcv_len = sizeof(sr);
2136 break;
2137 default:
2138 ql_dbg(ql_dbg_user, vha, 0x708c,
2139 "Unknown serdes cmd %x.\n", sr.cmd);
2140 rval = -EINVAL;
2141 break;
2142 }
2143
2144 bsg_reply->reply_data.vendor_reply.vendor_rsp[0] =
2145 rval ? EXT_STATUS_MAILBOX : 0;
2146
2147 bsg_job->reply_len = sizeof(struct fc_bsg_reply);
2148 bsg_reply->result = DID_OK << 16;
2149 bsg_job_done(bsg_job, bsg_reply->result,
2150 bsg_reply->reply_payload_rcv_len);
2151 return 0;
2152 }
2153
2154 static int
2155 qla8044_serdes_op(struct bsg_job *bsg_job)
2156 {
2157 struct fc_bsg_reply *bsg_reply = bsg_job->reply;
2158 struct Scsi_Host *host = fc_bsg_to_shost(bsg_job);
2159 scsi_qla_host_t *vha = shost_priv(host);
2160 int rval = 0;
2161 struct qla_serdes_reg_ex sr;
2162
2163 memset(&sr, 0, sizeof(sr));
2164
2165 sg_copy_to_buffer(bsg_job->request_payload.sg_list,
2166 bsg_job->request_payload.sg_cnt, &sr, sizeof(sr));
2167
2168 switch (sr.cmd) {
2169 case INT_SC_SERDES_WRITE_REG:
2170 rval = qla8044_write_serdes_word(vha, sr.addr, sr.val);
2171 bsg_reply->reply_payload_rcv_len = 0;
2172 break;
2173 case INT_SC_SERDES_READ_REG:
2174 rval = qla8044_read_serdes_word(vha, sr.addr, &sr.val);
2175 sg_copy_from_buffer(bsg_job->reply_payload.sg_list,
2176 bsg_job->reply_payload.sg_cnt, &sr, sizeof(sr));
2177 bsg_reply->reply_payload_rcv_len = sizeof(sr);
2178 break;
2179 default:
2180 ql_dbg(ql_dbg_user, vha, 0x7020,
2181 "Unknown serdes cmd %x.\n", sr.cmd);
2182 rval = -EINVAL;
2183 break;
2184 }
2185
2186 bsg_reply->reply_data.vendor_reply.vendor_rsp[0] =
2187 rval ? EXT_STATUS_MAILBOX : 0;
2188
2189 bsg_job->reply_len = sizeof(struct fc_bsg_reply);
2190 bsg_reply->result = DID_OK << 16;
2191 bsg_job_done(bsg_job, bsg_reply->result,
2192 bsg_reply->reply_payload_rcv_len);
2193 return 0;
2194 }
2195
2196 static int
2197 qla27xx_get_flash_upd_cap(struct bsg_job *bsg_job)
2198 {
2199 struct fc_bsg_reply *bsg_reply = bsg_job->reply;
2200 struct Scsi_Host *host = fc_bsg_to_shost(bsg_job);
2201 scsi_qla_host_t *vha = shost_priv(host);
2202 struct qla_hw_data *ha = vha->hw;
2203 struct qla_flash_update_caps cap;
2204
2205 if (!(IS_QLA27XX(ha)) && !IS_QLA28XX(ha))
2206 return -EPERM;
2207
2208 memset(&cap, 0, sizeof(cap));
2209 cap.capabilities = (uint64_t)ha->fw_attributes_ext[1] << 48 |
2210 (uint64_t)ha->fw_attributes_ext[0] << 32 |
2211 (uint64_t)ha->fw_attributes_h << 16 |
2212 (uint64_t)ha->fw_attributes;
2213
2214 sg_copy_from_buffer(bsg_job->reply_payload.sg_list,
2215 bsg_job->reply_payload.sg_cnt, &cap, sizeof(cap));
2216 bsg_reply->reply_payload_rcv_len = sizeof(cap);
2217
2218 bsg_reply->reply_data.vendor_reply.vendor_rsp[0] =
2219 EXT_STATUS_OK;
2220
2221 bsg_job->reply_len = sizeof(struct fc_bsg_reply);
2222 bsg_reply->result = DID_OK << 16;
2223 bsg_job_done(bsg_job, bsg_reply->result,
2224 bsg_reply->reply_payload_rcv_len);
2225 return 0;
2226 }
2227
2228 static int
2229 qla27xx_set_flash_upd_cap(struct bsg_job *bsg_job)
2230 {
2231 struct fc_bsg_reply *bsg_reply = bsg_job->reply;
2232 struct Scsi_Host *host = fc_bsg_to_shost(bsg_job);
2233 scsi_qla_host_t *vha = shost_priv(host);
2234 struct qla_hw_data *ha = vha->hw;
2235 uint64_t online_fw_attr = 0;
2236 struct qla_flash_update_caps cap;
2237
2238 if (!IS_QLA27XX(ha) && !IS_QLA28XX(ha))
2239 return -EPERM;
2240
2241 memset(&cap, 0, sizeof(cap));
2242 sg_copy_to_buffer(bsg_job->request_payload.sg_list,
2243 bsg_job->request_payload.sg_cnt, &cap, sizeof(cap));
2244
2245 online_fw_attr = (uint64_t)ha->fw_attributes_ext[1] << 48 |
2246 (uint64_t)ha->fw_attributes_ext[0] << 32 |
2247 (uint64_t)ha->fw_attributes_h << 16 |
2248 (uint64_t)ha->fw_attributes;
2249
2250 if (online_fw_attr != cap.capabilities) {
2251 bsg_reply->reply_data.vendor_reply.vendor_rsp[0] =
2252 EXT_STATUS_INVALID_PARAM;
2253 return -EINVAL;
2254 }
2255
2256 if (cap.outage_duration < MAX_LOOP_TIMEOUT) {
2257 bsg_reply->reply_data.vendor_reply.vendor_rsp[0] =
2258 EXT_STATUS_INVALID_PARAM;
2259 return -EINVAL;
2260 }
2261
2262 bsg_reply->reply_payload_rcv_len = 0;
2263
2264 bsg_reply->reply_data.vendor_reply.vendor_rsp[0] =
2265 EXT_STATUS_OK;
2266
2267 bsg_job->reply_len = sizeof(struct fc_bsg_reply);
2268 bsg_reply->result = DID_OK << 16;
2269 bsg_job_done(bsg_job, bsg_reply->result,
2270 bsg_reply->reply_payload_rcv_len);
2271 return 0;
2272 }
2273
/*
 * Report buffer-to-buffer credit recovery (BBCR) state to the caller.
 * Fills a qla_bbcr_data from vha->flags.bbcr_enable and the vha->bbcr
 * register word, then completes the bsg job with EXT_STATUS_OK.
 * Only supported on ISP27xx/28xx; returns -EPERM elsewhere.
 */
static int
qla27xx_get_bbcr_data(struct bsg_job *bsg_job)
{
	struct fc_bsg_reply *bsg_reply = bsg_job->reply;
	struct Scsi_Host *host = fc_bsg_to_shost(bsg_job);
	scsi_qla_host_t *vha = shost_priv(host);
	struct qla_hw_data *ha = vha->hw;
	struct qla_bbcr_data bbcr;
	uint16_t loop_id, topo, sw_cap;
	uint8_t domain, area, al_pa, state;
	int rval;

	if (!IS_QLA27XX(ha) && !IS_QLA28XX(ha))
		return -EPERM;

	memset(&bbcr, 0, sizeof(bbcr));

	if (vha->flags.bbcr_enable)
		bbcr.status = QLA_BBCR_STATUS_ENABLED;
	else
		bbcr.status = QLA_BBCR_STATUS_DISABLED;

	if (bbcr.status == QLA_BBCR_STATUS_ENABLED) {
		rval = qla2x00_get_adapter_id(vha, &loop_id, &al_pa,
			&area, &domain, &topo, &sw_cap);
		if (rval != QLA_SUCCESS) {
			bbcr.status = QLA_BBCR_STATUS_UNKNOWN;
			bbcr.state = QLA_BBCR_STATE_OFFLINE;
			/* NOTE(review): presumably loop_id carries mailbox
			 * register 1 on failure — confirm against
			 * qla2x00_get_adapter_id's error path. */
			bbcr.mbx1 = loop_id;
			goto done;
		}

		/* Bit 12 of the bbcr word: nonzero means BBCR is offline. */
		state = (vha->bbcr >> 12) & 0x1;

		if (state) {
			bbcr.state = QLA_BBCR_STATE_OFFLINE;
			bbcr.offline_reason_code = QLA_BBCR_REASON_LOGIN_REJECT;
		} else {
			bbcr.state = QLA_BBCR_STATE_ONLINE;
			/* Bits 11:8 hold the negotiated BB_SC_N value. */
			bbcr.negotiated_bbscn = (vha->bbcr >> 8) & 0xf;
		}

		/* Bits 3:0 hold the configured BB_SC_N value. */
		bbcr.configured_bbscn = vha->bbcr & 0xf;
	}

done:
	/* Reply is delivered even on the mailbox-failure path above. */
	sg_copy_from_buffer(bsg_job->reply_payload.sg_list,
		bsg_job->reply_payload.sg_cnt, &bbcr, sizeof(bbcr));
	bsg_reply->reply_payload_rcv_len = sizeof(bbcr);

	bsg_reply->reply_data.vendor_reply.vendor_rsp[0] = EXT_STATUS_OK;

	bsg_job->reply_len = sizeof(struct fc_bsg_reply);
	bsg_reply->result = DID_OK << 16;
	bsg_job_done(bsg_job, bsg_reply->result,
		bsg_reply->reply_payload_rcv_len);
	return 0;
}
2332
2333 static int
2334 qla2x00_get_priv_stats(struct bsg_job *bsg_job)
2335 {
2336 struct fc_bsg_request *bsg_request = bsg_job->request;
2337 struct fc_bsg_reply *bsg_reply = bsg_job->reply;
2338 struct Scsi_Host *host = fc_bsg_to_shost(bsg_job);
2339 scsi_qla_host_t *vha = shost_priv(host);
2340 struct qla_hw_data *ha = vha->hw;
2341 struct scsi_qla_host *base_vha = pci_get_drvdata(ha->pdev);
2342 struct link_statistics *stats = NULL;
2343 dma_addr_t stats_dma;
2344 int rval;
2345 uint32_t *cmd = bsg_request->rqst_data.h_vendor.vendor_cmd;
2346 uint options = cmd[0] == QL_VND_GET_PRIV_STATS_EX ? cmd[1] : 0;
2347
2348 if (test_bit(UNLOADING, &vha->dpc_flags))
2349 return -ENODEV;
2350
2351 if (unlikely(pci_channel_offline(ha->pdev)))
2352 return -ENODEV;
2353
2354 if (qla2x00_reset_active(vha))
2355 return -EBUSY;
2356
2357 if (!IS_FWI2_CAPABLE(ha))
2358 return -EPERM;
2359
2360 stats = dma_alloc_coherent(&ha->pdev->dev, sizeof(*stats), &stats_dma,
2361 GFP_KERNEL);
2362 if (!stats) {
2363 ql_log(ql_log_warn, vha, 0x70e2,
2364 "Failed to allocate memory for stats.\n");
2365 return -ENOMEM;
2366 }
2367
2368 rval = qla24xx_get_isp_stats(base_vha, stats, stats_dma, options);
2369
2370 if (rval == QLA_SUCCESS) {
2371 ql_dump_buffer(ql_dbg_user + ql_dbg_verbose, vha, 0x70e5,
2372 stats, sizeof(*stats));
2373 sg_copy_from_buffer(bsg_job->reply_payload.sg_list,
2374 bsg_job->reply_payload.sg_cnt, stats, sizeof(*stats));
2375 }
2376
2377 bsg_reply->reply_payload_rcv_len = sizeof(*stats);
2378 bsg_reply->reply_data.vendor_reply.vendor_rsp[0] =
2379 rval ? EXT_STATUS_MAILBOX : EXT_STATUS_OK;
2380
2381 bsg_job->reply_len = sizeof(*bsg_reply);
2382 bsg_reply->result = DID_OK << 16;
2383 bsg_job_done(bsg_job, bsg_reply->result,
2384 bsg_reply->reply_payload_rcv_len);
2385
2386 dma_free_coherent(&ha->pdev->dev, sizeof(*stats),
2387 stats, stats_dma);
2388
2389 return 0;
2390 }
2391
2392 static int
2393 qla2x00_do_dport_diagnostics(struct bsg_job *bsg_job)
2394 {
2395 struct fc_bsg_reply *bsg_reply = bsg_job->reply;
2396 struct Scsi_Host *host = fc_bsg_to_shost(bsg_job);
2397 scsi_qla_host_t *vha = shost_priv(host);
2398 int rval;
2399 struct qla_dport_diag *dd;
2400
2401 if (!IS_QLA83XX(vha->hw) && !IS_QLA27XX(vha->hw) &&
2402 !IS_QLA28XX(vha->hw))
2403 return -EPERM;
2404
2405 dd = kmalloc(sizeof(*dd), GFP_KERNEL);
2406 if (!dd) {
2407 ql_log(ql_log_warn, vha, 0x70db,
2408 "Failed to allocate memory for dport.\n");
2409 return -ENOMEM;
2410 }
2411
2412 sg_copy_to_buffer(bsg_job->request_payload.sg_list,
2413 bsg_job->request_payload.sg_cnt, dd, sizeof(*dd));
2414
2415 rval = qla26xx_dport_diagnostics(
2416 vha, dd->buf, sizeof(dd->buf), dd->options);
2417 if (rval == QLA_SUCCESS) {
2418 sg_copy_from_buffer(bsg_job->reply_payload.sg_list,
2419 bsg_job->reply_payload.sg_cnt, dd, sizeof(*dd));
2420 }
2421
2422 bsg_reply->reply_payload_rcv_len = sizeof(*dd);
2423 bsg_reply->reply_data.vendor_reply.vendor_rsp[0] =
2424 rval ? EXT_STATUS_MAILBOX : EXT_STATUS_OK;
2425
2426 bsg_job->reply_len = sizeof(*bsg_reply);
2427 bsg_reply->result = DID_OK << 16;
2428 bsg_job_done(bsg_job, bsg_reply->result,
2429 bsg_reply->reply_payload_rcv_len);
2430
2431 kfree(dd);
2432
2433 return 0;
2434 }
2435
2436 static int
2437 qla2x00_get_flash_image_status(struct bsg_job *bsg_job)
2438 {
2439 scsi_qla_host_t *vha = shost_priv(fc_bsg_to_shost(bsg_job));
2440 struct fc_bsg_reply *bsg_reply = bsg_job->reply;
2441 struct qla_hw_data *ha = vha->hw;
2442 struct qla_active_regions regions = { };
2443 struct active_regions active_regions = { };
2444
2445 qla27xx_get_active_image(vha, &active_regions);
2446 regions.global_image = active_regions.global;
2447
2448 if (IS_QLA28XX(ha)) {
2449 qla28xx_get_aux_images(vha, &active_regions);
2450 regions.board_config = active_regions.aux.board_config;
2451 regions.vpd_nvram = active_regions.aux.vpd_nvram;
2452 regions.npiv_config_0_1 = active_regions.aux.npiv_config_0_1;
2453 regions.npiv_config_2_3 = active_regions.aux.npiv_config_2_3;
2454 }
2455
2456 ql_dbg(ql_dbg_user, vha, 0x70e1,
2457 "%s(%lu): FW=%u BCFG=%u VPDNVR=%u NPIV01=%u NPIV02=%u\n",
2458 __func__, vha->host_no, regions.global_image,
2459 regions.board_config, regions.vpd_nvram,
2460 regions.npiv_config_0_1, regions.npiv_config_2_3);
2461
2462 sg_copy_from_buffer(bsg_job->reply_payload.sg_list,
2463 bsg_job->reply_payload.sg_cnt, &regions, sizeof(regions));
2464
2465 bsg_reply->reply_data.vendor_reply.vendor_rsp[0] = EXT_STATUS_OK;
2466 bsg_reply->reply_payload_rcv_len = sizeof(regions);
2467 bsg_reply->result = DID_OK << 16;
2468 bsg_job->reply_len = sizeof(struct fc_bsg_reply);
2469 bsg_job_done(bsg_job, bsg_reply->result,
2470 bsg_reply->reply_payload_rcv_len);
2471
2472 return 0;
2473 }
2474
2475 static int
2476 qla2x00_manage_host_stats(struct bsg_job *bsg_job)
2477 {
2478 scsi_qla_host_t *vha = shost_priv(fc_bsg_to_shost(bsg_job));
2479 struct fc_bsg_reply *bsg_reply = bsg_job->reply;
2480 struct ql_vnd_mng_host_stats_param *req_data;
2481 struct ql_vnd_mng_host_stats_resp rsp_data;
2482 u32 req_data_len;
2483 int ret = 0;
2484
2485 if (!vha->flags.online) {
2486 ql_log(ql_log_warn, vha, 0x0000, "Host is not online.\n");
2487 return -EIO;
2488 }
2489
2490 req_data_len = bsg_job->request_payload.payload_len;
2491
2492 if (req_data_len != sizeof(struct ql_vnd_mng_host_stats_param)) {
2493 ql_log(ql_log_warn, vha, 0x0000, "req_data_len invalid.\n");
2494 return -EIO;
2495 }
2496
2497 req_data = kzalloc(sizeof(*req_data), GFP_KERNEL);
2498 if (!req_data) {
2499 ql_log(ql_log_warn, vha, 0x0000, "req_data memory allocation failure.\n");
2500 return -ENOMEM;
2501 }
2502
2503 /* Copy the request buffer in req_data */
2504 sg_copy_to_buffer(bsg_job->request_payload.sg_list,
2505 bsg_job->request_payload.sg_cnt, req_data,
2506 req_data_len);
2507
2508 switch (req_data->action) {
2509 case QLA_STOP:
2510 ret = qla2xxx_stop_stats(vha->host, req_data->stat_type);
2511 break;
2512 case QLA_START:
2513 ret = qla2xxx_start_stats(vha->host, req_data->stat_type);
2514 break;
2515 case QLA_CLEAR:
2516 ret = qla2xxx_reset_stats(vha->host, req_data->stat_type);
2517 break;
2518 default:
2519 ql_log(ql_log_warn, vha, 0x0000, "Invalid action.\n");
2520 ret = -EIO;
2521 break;
2522 }
2523
2524 kfree(req_data);
2525
2526 /* Prepare response */
2527 rsp_data.status = ret;
2528 bsg_job->reply_payload.payload_len = sizeof(struct ql_vnd_mng_host_stats_resp);
2529
2530 bsg_reply->reply_data.vendor_reply.vendor_rsp[0] = EXT_STATUS_OK;
2531 bsg_reply->reply_payload_rcv_len =
2532 sg_copy_from_buffer(bsg_job->reply_payload.sg_list,
2533 bsg_job->reply_payload.sg_cnt,
2534 &rsp_data,
2535 sizeof(struct ql_vnd_mng_host_stats_resp));
2536
2537 bsg_reply->result = DID_OK;
2538 bsg_job_done(bsg_job, bsg_reply->result,
2539 bsg_reply->reply_payload_rcv_len);
2540
2541 return ret;
2542 }
2543
/*
 * Collect initiator (and optionally per-target link-down) statistics
 * and return them in the reply payload. The requested stat_type is a
 * bitmask; QLA2XX_TGT_SHT_LNK_DOWN adds one entry per connected target.
 * If the caller's reply buffer is too small, a short
 * ql_vnd_mng_host_stats_resp carrying EXT_STATUS_BUFFER_TOO_SMALL is
 * returned instead of the stats.
 */
static int
qla2x00_get_host_stats(struct bsg_job *bsg_job)
{
	scsi_qla_host_t *vha = shost_priv(fc_bsg_to_shost(bsg_job));
	struct fc_bsg_reply *bsg_reply = bsg_job->reply;
	struct ql_vnd_stats_param *req_data;
	struct ql_vnd_host_stats_resp rsp_data;
	u32 req_data_len;
	int ret = 0;
	u64 ini_entry_count = 0;
	u64 entry_count = 0;
	u64 tgt_num = 0;
	u64 tmp_stat_type = 0;
	u64 response_len = 0;
	void *data;

	req_data_len = bsg_job->request_payload.payload_len;

	if (req_data_len != sizeof(struct ql_vnd_stats_param)) {
		ql_log(ql_log_warn, vha, 0x0000, "req_data_len invalid.\n");
		return -EIO;
	}

	req_data = kzalloc(sizeof(*req_data), GFP_KERNEL);
	if (!req_data) {
		ql_log(ql_log_warn, vha, 0x0000, "req_data memory allocation failure.\n");
		return -ENOMEM;
	}

	/* Copy the request buffer in req_data */
	sg_copy_to_buffer(bsg_job->request_payload.sg_list,
			  bsg_job->request_payload.sg_cnt, req_data, req_data_len);

	/* Copy stat type to work on it */
	tmp_stat_type = req_data->stat_type;

	if (tmp_stat_type & QLA2XX_TGT_SHT_LNK_DOWN) {
		/* Num of tgts connected to this host */
		tgt_num = qla2x00_get_num_tgts(vha);
		/* unset BIT_17 */
		tmp_stat_type &= ~(1 << 17);
	}

	/* Total ini stats */
	ini_entry_count = qla2x00_count_set_bits(tmp_stat_type);

	/* Total number of entries */
	entry_count = ini_entry_count + tgt_num;

	/* Header plus one ql_vnd_stat_entry per requested statistic. */
	response_len = sizeof(struct ql_vnd_host_stats_resp) +
		(sizeof(struct ql_vnd_stat_entry) * entry_count);

	if (response_len > bsg_job->reply_payload.payload_len) {
		/* Caller's buffer cannot hold the stats; return only the
		 * short status structure flagging the size problem. */
		rsp_data.status = EXT_STATUS_BUFFER_TOO_SMALL;
		bsg_reply->reply_data.vendor_reply.vendor_rsp[0] = EXT_STATUS_BUFFER_TOO_SMALL;
		bsg_job->reply_payload.payload_len = sizeof(struct ql_vnd_mng_host_stats_resp);

		bsg_reply->reply_payload_rcv_len =
			sg_copy_from_buffer(bsg_job->reply_payload.sg_list,
					    bsg_job->reply_payload.sg_cnt, &rsp_data,
					    sizeof(struct ql_vnd_mng_host_stats_resp));

		bsg_reply->result = DID_OK;
		bsg_job_done(bsg_job, bsg_reply->result,
			     bsg_reply->reply_payload_rcv_len);
		goto host_stat_out;
	}

	data = kzalloc(response_len, GFP_KERNEL);
	if (!data) {
		ret = -ENOMEM;
		goto host_stat_out;
	}

	ret = qla2xxx_get_ini_stats(fc_bsg_to_shost(bsg_job), req_data->stat_type,
				    data, response_len);

	bsg_reply->reply_data.vendor_reply.vendor_rsp[0] = EXT_STATUS_OK;

	bsg_reply->reply_payload_rcv_len = sg_copy_from_buffer(bsg_job->reply_payload.sg_list,
							       bsg_job->reply_payload.sg_cnt,
							       data, response_len);
	bsg_reply->result = DID_OK;
	bsg_job_done(bsg_job, bsg_reply->result,
		     bsg_reply->reply_payload_rcv_len);

	kfree(data);
host_stat_out:
	kfree(req_data);
	return ret;
}
2636
2637 static struct fc_rport *
2638 qla2xxx_find_rport(scsi_qla_host_t *vha, uint32_t tgt_num)
2639 {
2640 fc_port_t *fcport = NULL;
2641
2642 list_for_each_entry(fcport, &vha->vp_fcports, list) {
2643 if (fcport->rport->number == tgt_num)
2644 return fcport->rport;
2645 }
2646 return NULL;
2647 }
2648
/*
 * Return link statistics for a single target (identified by its FC
 * transport rport number). The reply is a ql_vnd_tgt_stats_resp with
 * one ql_vnd_stat_entry appended. If the caller's reply buffer is too
 * small, a short ql_vnd_mng_host_stats_resp with
 * EXT_STATUS_BUFFER_TOO_SMALL is returned instead.
 */
static int
qla2x00_get_tgt_stats(struct bsg_job *bsg_job)
{
	scsi_qla_host_t *vha = shost_priv(fc_bsg_to_shost(bsg_job));
	struct fc_bsg_reply *bsg_reply = bsg_job->reply;
	struct ql_vnd_tgt_stats_param *req_data;
	u32 req_data_len;
	int ret = 0;
	u64 response_len = 0;
	struct ql_vnd_tgt_stats_resp *data = NULL;
	struct fc_rport *rport = NULL;

	if (!vha->flags.online) {
		ql_log(ql_log_warn, vha, 0x0000, "Host is not online.\n");
		return -EIO;
	}

	req_data_len = bsg_job->request_payload.payload_len;

	if (req_data_len != sizeof(struct ql_vnd_stat_entry)) {
		ql_log(ql_log_warn, vha, 0x0000, "req_data_len invalid.\n");
		return -EIO;
	}

	req_data = kzalloc(sizeof(*req_data), GFP_KERNEL);
	if (!req_data) {
		ql_log(ql_log_warn, vha, 0x0000, "req_data memory allocation failure.\n");
		return -ENOMEM;
	}

	/* Copy the request buffer in req_data */
	sg_copy_to_buffer(bsg_job->request_payload.sg_list,
			  bsg_job->request_payload.sg_cnt,
			  req_data, req_data_len);

	/* Response header plus exactly one stat entry. */
	response_len = sizeof(struct ql_vnd_tgt_stats_resp) +
		sizeof(struct ql_vnd_stat_entry);

	/* structure + size for one entry */
	data = kzalloc(response_len, GFP_KERNEL);
	if (!data) {
		kfree(req_data);
		return -ENOMEM;
	}

	if (response_len > bsg_job->reply_payload.payload_len) {
		/* Caller's buffer cannot hold the stats; report only the
		 * buffer-too-small status and complete the job. */
		data->status = EXT_STATUS_BUFFER_TOO_SMALL;
		bsg_reply->reply_data.vendor_reply.vendor_rsp[0] = EXT_STATUS_BUFFER_TOO_SMALL;
		bsg_job->reply_payload.payload_len = sizeof(struct ql_vnd_mng_host_stats_resp);

		bsg_reply->reply_payload_rcv_len =
			sg_copy_from_buffer(bsg_job->reply_payload.sg_list,
					    bsg_job->reply_payload.sg_cnt, data,
					    sizeof(struct ql_vnd_tgt_stats_resp));

		bsg_reply->result = DID_OK;
		bsg_job_done(bsg_job, bsg_reply->result,
			     bsg_reply->reply_payload_rcv_len);
		goto tgt_stat_out;
	}

	rport = qla2xxx_find_rport(vha, req_data->tgt_id);
	if (!rport) {
		ql_log(ql_log_warn, vha, 0x0000, "target %d not found.\n", req_data->tgt_id);
		ret = EXT_STATUS_INVALID_PARAM;
		data->status = EXT_STATUS_INVALID_PARAM;
		/* Still deliver the (error-status) response below. */
		goto reply;
	}

	ret = qla2xxx_get_tgt_stats(fc_bsg_to_shost(bsg_job), req_data->stat_type,
				    rport, (void *)data, response_len);

	bsg_reply->reply_data.vendor_reply.vendor_rsp[0] = EXT_STATUS_OK;
reply:
	bsg_reply->reply_payload_rcv_len =
		sg_copy_from_buffer(bsg_job->reply_payload.sg_list,
				    bsg_job->reply_payload.sg_cnt, data,
				    response_len);
	bsg_reply->result = DID_OK;
	bsg_job_done(bsg_job, bsg_reply->result,
		     bsg_reply->reply_payload_rcv_len);

tgt_stat_out:
	kfree(data);
	kfree(req_data);

	return ret;
}
2737
2738 static int
2739 qla2x00_manage_host_port(struct bsg_job *bsg_job)
2740 {
2741 scsi_qla_host_t *vha = shost_priv(fc_bsg_to_shost(bsg_job));
2742 struct fc_bsg_reply *bsg_reply = bsg_job->reply;
2743 struct ql_vnd_mng_host_port_param *req_data;
2744 struct ql_vnd_mng_host_port_resp rsp_data;
2745 u32 req_data_len;
2746 int ret = 0;
2747
2748 req_data_len = bsg_job->request_payload.payload_len;
2749
2750 if (req_data_len != sizeof(struct ql_vnd_mng_host_port_param)) {
2751 ql_log(ql_log_warn, vha, 0x0000, "req_data_len invalid.\n");
2752 return -EIO;
2753 }
2754
2755 req_data = kzalloc(sizeof(*req_data), GFP_KERNEL);
2756 if (!req_data) {
2757 ql_log(ql_log_warn, vha, 0x0000, "req_data memory allocation failure.\n");
2758 return -ENOMEM;
2759 }
2760
2761 /* Copy the request buffer in req_data */
2762 sg_copy_to_buffer(bsg_job->request_payload.sg_list,
2763 bsg_job->request_payload.sg_cnt, req_data, req_data_len);
2764
2765 switch (req_data->action) {
2766 case QLA_ENABLE:
2767 ret = qla2xxx_enable_port(vha->host);
2768 break;
2769 case QLA_DISABLE:
2770 ret = qla2xxx_disable_port(vha->host);
2771 break;
2772 default:
2773 ql_log(ql_log_warn, vha, 0x0000, "Invalid action.\n");
2774 ret = -EIO;
2775 break;
2776 }
2777
2778 kfree(req_data);
2779
2780 /* Prepare response */
2781 rsp_data.status = ret;
2782 bsg_reply->reply_data.vendor_reply.vendor_rsp[0] = EXT_STATUS_OK;
2783 bsg_job->reply_payload.payload_len = sizeof(struct ql_vnd_mng_host_port_resp);
2784
2785 bsg_reply->reply_payload_rcv_len =
2786 sg_copy_from_buffer(bsg_job->reply_payload.sg_list,
2787 bsg_job->reply_payload.sg_cnt, &rsp_data,
2788 sizeof(struct ql_vnd_mng_host_port_resp));
2789 bsg_reply->result = DID_OK;
2790 bsg_job_done(bsg_job, bsg_reply->result,
2791 bsg_reply->reply_payload_rcv_len);
2792
2793 return ret;
2794 }
2795
/*
 * Dispatch an FC_BSG_HST_VENDOR request to the matching QL_VND_*
 * handler based on vendor_cmd[0]. Each handler owns completion of the
 * bsg job; this function only routes. Returns the handler's status,
 * or -ENOSYS for an unrecognized vendor command.
 */
static int
qla2x00_process_vendor_specific(struct scsi_qla_host *vha, struct bsg_job *bsg_job)
{
	struct fc_bsg_request *bsg_request = bsg_job->request;

	ql_dbg(ql_dbg_edif, vha, 0x911b, "%s FC_BSG_HST_VENDOR cmd[0]=0x%x\n",
	    __func__, bsg_request->rqst_data.h_vendor.vendor_cmd[0]);

	switch (bsg_request->rqst_data.h_vendor.vendor_cmd[0]) {
	case QL_VND_LOOPBACK:
		return qla2x00_process_loopback(bsg_job);

	case QL_VND_A84_RESET:
		return qla84xx_reset(bsg_job);

	case QL_VND_A84_UPDATE_FW:
		return qla84xx_updatefw(bsg_job);

	case QL_VND_A84_MGMT_CMD:
		return qla84xx_mgmt_cmd(bsg_job);

	case QL_VND_IIDMA:
		return qla24xx_iidma(bsg_job);

	case QL_VND_FCP_PRIO_CFG_CMD:
		return qla24xx_proc_fcp_prio_cfg_cmd(bsg_job);

	case QL_VND_READ_FLASH:
		return qla2x00_read_optrom(bsg_job);

	case QL_VND_UPDATE_FLASH:
		return qla2x00_update_optrom(bsg_job);

	case QL_VND_SET_FRU_VERSION:
		return qla2x00_update_fru_versions(bsg_job);

	case QL_VND_READ_FRU_STATUS:
		return qla2x00_read_fru_status(bsg_job);

	case QL_VND_WRITE_FRU_STATUS:
		return qla2x00_write_fru_status(bsg_job);

	case QL_VND_WRITE_I2C:
		return qla2x00_write_i2c(bsg_job);

	case QL_VND_READ_I2C:
		return qla2x00_read_i2c(bsg_job);

	case QL_VND_DIAG_IO_CMD:
		return qla24xx_process_bidir_cmd(bsg_job);

	case QL_VND_FX00_MGMT_CMD:
		return qlafx00_mgmt_cmd(bsg_job);

	case QL_VND_SERDES_OP:
		return qla26xx_serdes_op(bsg_job);

	case QL_VND_SERDES_OP_EX:
		return qla8044_serdes_op(bsg_job);

	case QL_VND_GET_FLASH_UPDATE_CAPS:
		return qla27xx_get_flash_upd_cap(bsg_job);

	case QL_VND_SET_FLASH_UPDATE_CAPS:
		return qla27xx_set_flash_upd_cap(bsg_job);

	case QL_VND_GET_BBCR_DATA:
		return qla27xx_get_bbcr_data(bsg_job);

	case QL_VND_GET_PRIV_STATS:
	case QL_VND_GET_PRIV_STATS_EX:
		return qla2x00_get_priv_stats(bsg_job);

	case QL_VND_DPORT_DIAGNOSTICS:
		return qla2x00_do_dport_diagnostics(bsg_job);

	case QL_VND_EDIF_MGMT:
		return qla_edif_app_mgmt(bsg_job);

	case QL_VND_SS_GET_FLASH_IMAGE_STATUS:
		return qla2x00_get_flash_image_status(bsg_job);

	case QL_VND_MANAGE_HOST_STATS:
		return qla2x00_manage_host_stats(bsg_job);

	case QL_VND_GET_HOST_STATS:
		return qla2x00_get_host_stats(bsg_job);

	case QL_VND_GET_TGT_STATS:
		return qla2x00_get_tgt_stats(bsg_job);

	case QL_VND_MANAGE_HOST_PORT:
		return qla2x00_manage_host_port(bsg_job);

	default:
		/* Vendor command not known to this driver version. */
		return -ENOSYS;
	}
}
2894
/*
 * Entry point for all FC BSG requests on this host. Resolves the vha
 * from either the rport (FC_BSG_RPT_ELS) or the shost, gates on chip
 * health (unless the vendor command explicitly needs to run while the
 * port is down), and dispatches by msgcode.
 */
int
qla24xx_bsg_request(struct bsg_job *bsg_job)
{
	struct fc_bsg_request *bsg_request = bsg_job->request;
	struct fc_bsg_reply *bsg_reply = bsg_job->reply;
	int ret = -EINVAL;
	struct fc_rport *rport;
	struct Scsi_Host *host;
	scsi_qla_host_t *vha;

	/* In case no data transferred. */
	bsg_reply->reply_payload_rcv_len = 0;

	if (bsg_request->msgcode == FC_BSG_RPT_ELS) {
		rport = fc_bsg_to_rport(bsg_job);
		if (!rport)
			return ret;
		host = rport_to_shost(rport);
		vha = shost_priv(host);
	} else {
		host = fc_bsg_to_shost(bsg_job);
		vha = shost_priv(host);
	}

	/* Disable port will bring down the chip, allow enable command */
	/* NOTE(review): vendor_cmd[0] is read from the request union here
	 * before msgcode has been confirmed to be FC_BSG_HST_VENDOR —
	 * for non-vendor requests this reads overlapping union bytes;
	 * confirm this is intentional. */
	if (bsg_request->rqst_data.h_vendor.vendor_cmd[0] == QL_VND_MANAGE_HOST_PORT ||
	    bsg_request->rqst_data.h_vendor.vendor_cmd[0] == QL_VND_GET_HOST_STATS)
		goto skip_chip_chk;

	if (vha->hw->flags.port_isolated) {
		bsg_reply->result = DID_ERROR;
		/* operation not permitted */
		return -EPERM;
	}

	if (qla2x00_chip_is_down(vha)) {
		ql_dbg(ql_dbg_user, vha, 0x709f,
		    "BSG: ISP abort active/needed -- cmd=%d.\n",
		    bsg_request->msgcode);
		SET_DID_STATUS(bsg_reply->result, DID_ERROR);
		return -EBUSY;
	}

	if (test_bit(PFLG_DRIVER_REMOVING, &vha->pci_flags)) {
		SET_DID_STATUS(bsg_reply->result, DID_ERROR);
		return -EIO;
	}

skip_chip_chk:
	ql_dbg(ql_dbg_user + ql_dbg_verbose, vha, 0x7000,
	    "Entered %s msgcode=0x%x. bsg ptr %px\n",
	    __func__, bsg_request->msgcode, bsg_job);

	switch (bsg_request->msgcode) {
	case FC_BSG_RPT_ELS:
	case FC_BSG_HST_ELS_NOLOGIN:
		ret = qla2x00_process_els(bsg_job);
		break;
	case FC_BSG_HST_CT:
		ret = qla2x00_process_ct(bsg_job);
		break;
	case FC_BSG_HST_VENDOR:
		ret = qla2x00_process_vendor_specific(vha, bsg_job);
		break;
	case FC_BSG_HST_ADD_RPORT:
	case FC_BSG_HST_DEL_RPORT:
	case FC_BSG_RPT_CT:
	default:
		ql_log(ql_log_warn, vha, 0x705a, "Unsupported BSG request.\n");
		break;
	}

	ql_dbg(ql_dbg_user + ql_dbg_verbose, vha, 0x7000,
	    "%s done with return %x\n", __func__, ret);

	return ret;
}
2972
/*
 * Search one queue pair's outstanding commands for the srb that owns
 * bsg_job and try to abort it. Returns true when the job was found on
 * this qpair (regardless of abort outcome), false otherwise — the
 * caller uses this to stop scanning further qpairs.
 *
 * When the firmware does not return the command after an abort
 * (timeout or abort failure), the srb is reaped from the outstanding
 * array and the job is completed here with -ENXIO.
 */
static bool qla_bsg_found(struct qla_qpair *qpair, struct bsg_job *bsg_job)
{
	bool found, do_bsg_done;
	struct fc_bsg_reply *bsg_reply = bsg_job->reply;
	scsi_qla_host_t *vha = shost_priv(fc_bsg_to_shost(bsg_job));
	struct qla_hw_data *ha = vha->hw;
	srb_t *sp = NULL;
	int cnt;
	unsigned long flags;
	struct req_que *req;
	int rval;
	DECLARE_COMPLETION_ONSTACK(comp);
	uint32_t ratov_j;

	found = do_bsg_done = false;

	/* Scan the outstanding array under the qpair lock; attach our
	 * completion so a firmware return of the sp wakes us. */
	spin_lock_irqsave(qpair->qp_lock_ptr, flags);
	req = qpair->req;

	for (cnt = 1; cnt < req->num_outstanding_cmds; cnt++) {
		sp = req->outstanding_cmds[cnt];
		if (sp &&
		    (sp->type == SRB_CT_CMD ||
		     sp->type == SRB_ELS_CMD_HST ||
		     sp->type == SRB_ELS_CMD_HST_NOLOGIN) &&
		    sp->u.bsg_job == bsg_job) {

			found = true;
			sp->comp = &comp;
			break;
		}
	}
	spin_unlock_irqrestore(qpair->qp_lock_ptr, flags);

	if (!found)
		return false;

	if (ha->flags.eeh_busy) {
		/* skip over abort. EEH handling will return the bsg. Wait for it */
		rval = QLA_SUCCESS;
		ql_dbg(ql_dbg_user, vha, 0x802c,
		    "eeh encounter. bsg %p sp=%p handle=%x \n",
		    bsg_job, sp, sp->handle);
	} else {
		rval = ha->isp_ops->abort_command(sp);
		ql_dbg(ql_dbg_user, vha, 0x802c,
		    "Aborting bsg %p sp=%p handle=%x rval=%x\n",
		    bsg_job, sp, sp->handle, rval);
	}

	switch (rval) {
	case QLA_SUCCESS:
		/* Wait for the command completion. */
		/* Wait up to 4 x R_A_TOV (r_a_tov is in 100ms units). */
		ratov_j = ha->r_a_tov / 10 * 4 * 1000;
		ratov_j = msecs_to_jiffies(ratov_j);

		if (!wait_for_completion_timeout(&comp, ratov_j)) {
			ql_log(ql_log_info, vha, 0x7089,
			    "bsg abort timeout. bsg=%p sp=%p handle %#x .\n",
			    bsg_job, sp, sp->handle);

			/* Firmware never returned it; complete it ourselves. */
			do_bsg_done = true;
		} else {
			/* fw had returned the bsg */
			ql_dbg(ql_dbg_user, vha, 0x708a,
			    "bsg abort success. bsg %p sp=%p handle=%#x\n",
			    bsg_job, sp, sp->handle);
			do_bsg_done = false;
		}
		break;
	default:
		ql_log(ql_log_info, vha, 0x704f,
		    "bsg abort fail. bsg=%p sp=%p rval=%x.\n",
		    bsg_job, sp, rval);

		do_bsg_done = true;
		break;
	}

	if (!do_bsg_done)
		return true;

	spin_lock_irqsave(qpair->qp_lock_ptr, flags);
	/*
	 * recheck to make sure it's still the same bsg_job due to
	 * qp_lock_ptr was released earlier.
	 */
	if (req->outstanding_cmds[cnt] &&
	    req->outstanding_cmds[cnt]->u.bsg_job != bsg_job) {
		/* fw had returned the bsg */
		spin_unlock_irqrestore(qpair->qp_lock_ptr, flags);
		return true;
	}
	/* Reap the slot so the normal completion path cannot touch it. */
	req->outstanding_cmds[cnt] = NULL;
	spin_unlock_irqrestore(qpair->qp_lock_ptr, flags);

	/* ref: INIT */
	sp->comp = NULL;
	kref_put(&sp->cmd_kref, qla2x00_sp_release);
	bsg_reply->result = -ENXIO;
	bsg_reply->reply_payload_rcv_len = 0;

	ql_dbg(ql_dbg_user, vha, 0x7051,
	    "%s bsg_job_done : bsg %p result %#x sp %p.\n",
	    __func__, bsg_job, bsg_reply->result, sp);

	bsg_job_done(bsg_job, bsg_reply->result, bsg_reply->reply_payload_rcv_len);

	return true;
}
3083
3084 int
3085 qla24xx_bsg_timeout(struct bsg_job *bsg_job)
3086 {
3087 struct fc_bsg_request *bsg_request = bsg_job->request;
3088 scsi_qla_host_t *vha = shost_priv(fc_bsg_to_shost(bsg_job));
3089 struct qla_hw_data *ha = vha->hw;
3090 int i;
3091 struct qla_qpair *qpair;
3092
3093 ql_log(ql_log_info, vha, 0x708b,
3094 "%s CMD timeout. bsg ptr %p msgcode %x vendor cmd %x\n",
3095 __func__, bsg_job, bsg_request->msgcode,
3096 bsg_request->rqst_data.h_vendor.vendor_cmd[0]);
3097
3098 if (qla2x00_isp_reg_stat(ha)) {
3099 ql_log(ql_log_info, vha, 0x9007,
3100 "PCI/Register disconnect.\n");
3101 qla_pci_set_eeh_busy(vha);
3102 }
3103
3104 if (qla_bsg_found(ha->base_qpair, bsg_job))
3105 goto done;
3106
3107 /* find the bsg job from the active list of commands */
3108 for (i = 0; i < ha->max_qpairs; i++) {
3109 qpair = vha->hw->queue_pair_map[i];
3110 if (!qpair)
3111 continue;
3112 if (qla_bsg_found(qpair, bsg_job))
3113 goto done;
3114 }
3115
3116 ql_log(ql_log_info, vha, 0x708b, "SRB not found to abort.\n");
3117
3118 done:
3119 return 0;
3120 }