1 // SPDX-License-Identifier: GPL-2.0-only
3 * QLogic Fibre Channel HBA Driver
4 * Copyright (c) 2003-2014 QLogic Corporation
9 #include <linux/kthread.h>
10 #include <linux/vmalloc.h>
11 #include <linux/delay.h>
12 #include <linux/bsg-lib.h>
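/*
 * Work-queue handler used by qla2x00_bsg_sp_free() to release a
 * temporary fcport from process context once a BSG command completes.
 */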
14 static void qla2xxx_free_fcport_work(struct work_struct *work)
16 struct fc_port *fcport = container_of(work, typeof(*fcport),
19 qla2x00_free_fcport(fcport);
22 /* BSG support for ELS/CT pass through */
23 void qla2x00_bsg_job_done(srb_t *sp, int res)
25 struct bsg_job *bsg_job = sp->u.bsg_job;
26 struct fc_bsg_reply *bsg_reply = bsg_job->reply;
27 struct completion *comp = sp->comp;
29 ql_dbg(ql_dbg_user, sp->vha, 0x7009,
30 "%s: sp hdl %x, result=%x bsg ptr %p\n",
31 __func__, sp->handle, res, bsg_job);
34 kref_put(&sp->cmd_kref, qla2x00_sp_release);
36 bsg_reply->result = res;
37 bsg_job_done(bsg_job, bsg_reply->result,
38 bsg_reply->reply_payload_rcv_len);
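/*
 * Undo the DMA mappings set up for a BSG request and, for command
 * types that used a temporary fcport (CT, FXIOCB and host-based ELS),
 * queue that fcport for deferred freeing on the driver work queue.
 */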
44 void qla2x00_bsg_sp_free(srb_t *sp)
46 struct qla_hw_data *ha = sp->vha->hw;
47 struct bsg_job *bsg_job = sp->u.bsg_job;
48 struct fc_bsg_request *bsg_request = bsg_job->request;
49 struct qla_mt_iocb_rqst_fx00 *piocb_rqst;
51 if (sp->type == SRB_FXIOCB_BCMD) {
52 piocb_rqst = (struct qla_mt_iocb_rqst_fx00 *)
53 &bsg_request->rqst_data.h_vendor.vendor_cmd[1];
55 if (piocb_rqst->flags & SRB_FXDISC_REQ_DMA_VALID)
56 dma_unmap_sg(&ha->pdev->dev,
57 bsg_job->request_payload.sg_list,
58 bsg_job->request_payload.sg_cnt, DMA_TO_DEVICE);
60 if (piocb_rqst->flags & SRB_FXDISC_RESP_DMA_VALID)
61 dma_unmap_sg(&ha->pdev->dev,
62 bsg_job->reply_payload.sg_list,
63 bsg_job->reply_payload.sg_cnt, DMA_FROM_DEVICE);
66 if (sp->remap.remapped) {
67 dma_pool_free(ha->purex_dma_pool, sp->remap.rsp.buf,
69 dma_pool_free(ha->purex_dma_pool, sp->remap.req.buf,
72 dma_unmap_sg(&ha->pdev->dev, bsg_job->request_payload.sg_list,
73 bsg_job->request_payload.sg_cnt, DMA_TO_DEVICE);
75 dma_unmap_sg(&ha->pdev->dev, bsg_job->reply_payload.sg_list,
76 bsg_job->reply_payload.sg_cnt, DMA_FROM_DEVICE);
80 if (sp->type == SRB_CT_CMD ||
81 sp->type == SRB_FXIOCB_BCMD ||
82 sp->type == SRB_ELS_CMD_HST) {
83 INIT_WORK(&sp->fcport->free_work, qla2xxx_free_fcport_work);
84 queue_work(ha->wq, &sp->fcport->free_work);
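/*
 * Sanity-check an FCP priority configuration block read from flash:
 * reject an erased region (all 0xFF), verify the "HQOS" signature and
 * require at least one entry with a valid tag.
 */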
91 qla24xx_fcp_prio_cfg_valid(scsi_qla_host_t *vha,
92 struct qla_fcp_prio_cfg *pri_cfg, uint8_t flag)
94 int i, ret, num_valid;
96 struct qla_fcp_prio_entry *pri_entry;
97 uint32_t *bcode_val_ptr, bcode_val;
101 bcode = (uint8_t *)pri_cfg;
102 bcode_val_ptr = (uint32_t *)pri_cfg;
103 bcode_val = (uint32_t)(*bcode_val_ptr);
105 if (bcode_val == 0xFFFFFFFF) {
106 /* No FCP Priority config data in flash */
107 ql_dbg(ql_dbg_user, vha, 0x7051,
108 "No FCP Priority config data.\n");
112 if (memcmp(bcode, "HQOS", 4)) {
113 /* Invalid FCP priority data header */
114 ql_dbg(ql_dbg_user, vha, 0x7052,
115 "Invalid FCP Priority data header. bcode=0x%x.\n",
122 pri_entry = &pri_cfg->entry[0];
123 for (i = 0; i < pri_cfg->num_entries; i++) {
124 if (pri_entry->flags & FCP_PRIO_ENTRY_TAG_VALID)
129 if (num_valid == 0) {
130 /* No valid FCP priority data entries */
131 ql_dbg(ql_dbg_user, vha, 0x7053,
132 "No valid FCP Priority data entries.\n");
135 /* FCP priority data is valid */
136 ql_dbg(ql_dbg_user, vha, 0x7054,
137 "Valid FCP priority data. num entries = %d.\n",
145 qla24xx_proc_fcp_prio_cfg_cmd(struct bsg_job *bsg_job)
147 struct Scsi_Host *host = fc_bsg_to_shost(bsg_job);
148 struct fc_bsg_request *bsg_request = bsg_job->request;
149 struct fc_bsg_reply *bsg_reply = bsg_job->reply;
150 scsi_qla_host_t *vha = shost_priv(host);
151 struct qla_hw_data *ha = vha->hw;
156 if (!(IS_QLA24XX_TYPE(ha) || IS_QLA25XX(ha) || IS_P3P_TYPE(ha))) {
158 goto exit_fcp_prio_cfg;
161 /* Get the sub command */
162 oper = bsg_request->rqst_data.h_vendor.vendor_cmd[1];
164 /* Only set-config is allowed if config memory is not allocated */
165 if (!ha->fcp_prio_cfg && (oper != QLFC_FCP_PRIO_SET_CONFIG)) {
167 goto exit_fcp_prio_cfg;
170 case QLFC_FCP_PRIO_DISABLE:
171 if (ha->flags.fcp_prio_enabled) {
172 ha->flags.fcp_prio_enabled = 0;
173 ha->fcp_prio_cfg->attributes &=
174 ~FCP_PRIO_ATTR_ENABLE;
175 qla24xx_update_all_fcp_prio(vha);
176 bsg_reply->result = DID_OK;
179 bsg_reply->result = (DID_ERROR << 16);
180 goto exit_fcp_prio_cfg;
184 case QLFC_FCP_PRIO_ENABLE:
185 if (!ha->flags.fcp_prio_enabled) {
186 if (ha->fcp_prio_cfg) {
187 ha->flags.fcp_prio_enabled = 1;
188 ha->fcp_prio_cfg->attributes |=
189 FCP_PRIO_ATTR_ENABLE;
190 qla24xx_update_all_fcp_prio(vha);
191 bsg_reply->result = DID_OK;
194 bsg_reply->result = (DID_ERROR << 16);
195 goto exit_fcp_prio_cfg;
200 case QLFC_FCP_PRIO_GET_CONFIG:
201 len = bsg_job->reply_payload.payload_len;
202 if (!len || len > FCP_PRIO_CFG_SIZE) {
204 bsg_reply->result = (DID_ERROR << 16);
205 goto exit_fcp_prio_cfg;
208 bsg_reply->result = DID_OK;
209 bsg_reply->reply_payload_rcv_len =
211 bsg_job->reply_payload.sg_list,
212 bsg_job->reply_payload.sg_cnt, ha->fcp_prio_cfg,
217 case QLFC_FCP_PRIO_SET_CONFIG:
218 len = bsg_job->request_payload.payload_len;
219 if (!len || len > FCP_PRIO_CFG_SIZE) {
220 bsg_reply->result = (DID_ERROR << 16);
222 goto exit_fcp_prio_cfg;
225 if (!ha->fcp_prio_cfg) {
226 ha->fcp_prio_cfg = vmalloc(FCP_PRIO_CFG_SIZE);
227 if (!ha->fcp_prio_cfg) {
228 ql_log(ql_log_warn, vha, 0x7050,
229 "Unable to allocate memory for fcp prio "
230 "config data (%x).\n", FCP_PRIO_CFG_SIZE);
231 bsg_reply->result = (DID_ERROR << 16);
233 goto exit_fcp_prio_cfg;
237 memset(ha->fcp_prio_cfg, 0, FCP_PRIO_CFG_SIZE);
238 sg_copy_to_buffer(bsg_job->request_payload.sg_list,
239 bsg_job->request_payload.sg_cnt, ha->fcp_prio_cfg,
242 /* validate fcp priority data */
244 if (!qla24xx_fcp_prio_cfg_valid(vha, ha->fcp_prio_cfg, 1)) {
245 bsg_reply->result = (DID_ERROR << 16);
247 /* If the buffer was invalid, the
248 * fcp_prio_cfg data is of no use
250 vfree(ha->fcp_prio_cfg);
251 ha->fcp_prio_cfg = NULL;
252 goto exit_fcp_prio_cfg;
255 ha->flags.fcp_prio_enabled = 0;
256 if (ha->fcp_prio_cfg->attributes & FCP_PRIO_ATTR_ENABLE)
257 ha->flags.fcp_prio_enabled = 1;
258 qla24xx_update_all_fcp_prio(vha);
259 bsg_reply->result = DID_OK;
267 bsg_job_done(bsg_job, bsg_reply->result,
268 bsg_reply->reply_payload_rcv_len);
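/*
 * Build and issue an ELS pass-through command. Rport-based requests
 * reuse the existing fcport; host-based requests allocate a temporary
 * fcport initialized from the destination port ID in the request.
 */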
273 qla2x00_process_els(struct bsg_job *bsg_job)
275 struct fc_bsg_request *bsg_request = bsg_job->request;
276 struct fc_rport *rport;
277 fc_port_t *fcport = NULL;
278 struct Scsi_Host *host;
279 scsi_qla_host_t *vha;
280 struct qla_hw_data *ha;
283 int req_sg_cnt, rsp_sg_cnt;
284 int rval = (DID_ERROR << 16);
285 uint32_t els_cmd = 0;
286 int qla_port_allocated = 0;
288 if (bsg_request->msgcode == FC_BSG_RPT_ELS) {
289 rport = fc_bsg_to_rport(bsg_job);
294 fcport = *(fc_port_t **) rport->dd_data;
295 host = rport_to_shost(rport);
296 vha = shost_priv(host);
298 type = "FC_BSG_RPT_ELS";
300 host = fc_bsg_to_shost(bsg_job);
301 vha = shost_priv(host);
303 type = "FC_BSG_HST_ELS_NOLOGIN";
304 els_cmd = bsg_request->rqst_data.h_els.command_code;
305 if (els_cmd == ELS_AUTH_ELS)
306 return qla_edif_process_els(vha, bsg_job);
309 if (!vha->flags.online) {
310 ql_log(ql_log_warn, vha, 0x7005, "Host not online.\n");
315 /* Pass-through is supported only for ISP 4Gb or higher */
316 if (!IS_FWI2_CAPABLE(ha)) {
317 ql_dbg(ql_dbg_user, vha, 0x7001,
318 "ELS passthru not supported for ISP23xx based adapters.\n");
323 /* Multiple SGs are not supported for ELS requests */
324 if (bsg_job->request_payload.sg_cnt > 1 ||
325 bsg_job->reply_payload.sg_cnt > 1) {
326 ql_dbg(ql_dbg_user, vha, 0x7002,
327 "Multiple SG's are not supported for ELS requests, "
328 "request_sg_cnt=%x reply_sg_cnt=%x.\n",
329 bsg_job->request_payload.sg_cnt,
330 bsg_job->reply_payload.sg_cnt);
335 /* ELS request for rport */
336 if (bsg_request->msgcode == FC_BSG_RPT_ELS) {
337 /* Make sure the rport is logged in;
338 * if not, perform fabric login
340 if (atomic_read(&fcport->state) != FCS_ONLINE) {
341 ql_dbg(ql_dbg_user, vha, 0x7003,
342 "Port %06X is not online for ELS passthru.\n",
348 /* Allocate a dummy fcport structure, since the functions that
349 * prepare the IOCB and mailbox command retrieve port-specific
350 * information from the fcport structure. For host-based ELS
351 * commands no fcport structure is allocated beforehand
353 fcport = qla2x00_alloc_fcport(vha, GFP_KERNEL);
359 qla_port_allocated = 1;
360 /* Initialize all required fields of fcport */
362 fcport->d_id.b.al_pa =
363 bsg_request->rqst_data.h_els.port_id[0];
364 fcport->d_id.b.area =
365 bsg_request->rqst_data.h_els.port_id[1];
366 fcport->d_id.b.domain =
367 bsg_request->rqst_data.h_els.port_id[2];
369 (fcport->d_id.b.al_pa == 0xFD) ?
370 NPH_FABRIC_CONTROLLER : NPH_F_PORT;
374 dma_map_sg(&ha->pdev->dev, bsg_job->request_payload.sg_list,
375 bsg_job->request_payload.sg_cnt, DMA_TO_DEVICE);
377 dma_unmap_sg(&ha->pdev->dev, bsg_job->request_payload.sg_list,
378 bsg_job->request_payload.sg_cnt, DMA_TO_DEVICE);
380 goto done_free_fcport;
383 rsp_sg_cnt = dma_map_sg(&ha->pdev->dev, bsg_job->reply_payload.sg_list,
384 bsg_job->reply_payload.sg_cnt, DMA_FROM_DEVICE);
386 dma_unmap_sg(&ha->pdev->dev, bsg_job->reply_payload.sg_list,
387 bsg_job->reply_payload.sg_cnt, DMA_FROM_DEVICE);
389 goto done_free_fcport;
392 if ((req_sg_cnt != bsg_job->request_payload.sg_cnt) ||
393 (rsp_sg_cnt != bsg_job->reply_payload.sg_cnt)) {
394 ql_log(ql_log_warn, vha, 0x7008,
395 "dma mapping resulted in different sg counts, "
396 "request_sg_cnt: %x dma_request_sg_cnt:%x reply_sg_cnt:%x "
397 "dma_reply_sg_cnt:%x.\n", bsg_job->request_payload.sg_cnt,
398 req_sg_cnt, bsg_job->reply_payload.sg_cnt, rsp_sg_cnt);
403 /* Alloc SRB structure */
404 sp = qla2x00_get_sp(vha, fcport, GFP_KERNEL);
411 (bsg_request->msgcode == FC_BSG_RPT_ELS ?
412 SRB_ELS_CMD_RPT : SRB_ELS_CMD_HST);
414 (bsg_request->msgcode == FC_BSG_RPT_ELS ?
415 "bsg_els_rpt" : "bsg_els_hst");
416 sp->u.bsg_job = bsg_job;
417 sp->free = qla2x00_bsg_sp_free;
418 sp->done = qla2x00_bsg_job_done;
420 ql_dbg(ql_dbg_user, vha, 0x700a,
421 "bsg rqst type: %s els type: %x - loop-id=%x "
422 "portid=%-2x%02x%02x.\n", type,
423 bsg_request->rqst_data.h_els.command_code, fcport->loop_id,
424 fcport->d_id.b.domain, fcport->d_id.b.area, fcport->d_id.b.al_pa);
426 rval = qla2x00_start_sp(sp);
427 if (rval != QLA_SUCCESS) {
428 ql_log(ql_log_warn, vha, 0x700e,
429 "qla2x00_start_sp failed = %d\n", rval);
437 dma_unmap_sg(&ha->pdev->dev, bsg_job->request_payload.sg_list,
438 bsg_job->request_payload.sg_cnt, DMA_TO_DEVICE);
439 dma_unmap_sg(&ha->pdev->dev, bsg_job->reply_payload.sg_list,
440 bsg_job->reply_payload.sg_cnt, DMA_FROM_DEVICE);
441 goto done_free_fcport;
444 if (qla_port_allocated)
445 qla2x00_free_fcport(fcport);
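/*
 * The CT command IOCB carries two data segment descriptors and each
 * continuation IOCB carries five more, hence the (dsds - 2) / 5
 * adjustment below once more than two DSDs are needed.
 */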
450 static inline uint16_t
451 qla24xx_calc_ct_iocbs(uint16_t dsds)
457 iocbs += (dsds - 2) / 5;
465 qla2x00_process_ct(struct bsg_job *bsg_job)
468 struct fc_bsg_request *bsg_request = bsg_job->request;
469 struct Scsi_Host *host = fc_bsg_to_shost(bsg_job);
470 scsi_qla_host_t *vha = shost_priv(host);
471 struct qla_hw_data *ha = vha->hw;
472 int rval = (DID_ERROR << 16);
473 int req_sg_cnt, rsp_sg_cnt;
475 struct fc_port *fcport;
476 char *type = "FC_BSG_HST_CT";
479 dma_map_sg(&ha->pdev->dev, bsg_job->request_payload.sg_list,
480 bsg_job->request_payload.sg_cnt, DMA_TO_DEVICE);
482 ql_log(ql_log_warn, vha, 0x700f,
483 "dma_map_sg return %d for request\n", req_sg_cnt);
488 rsp_sg_cnt = dma_map_sg(&ha->pdev->dev, bsg_job->reply_payload.sg_list,
489 bsg_job->reply_payload.sg_cnt, DMA_FROM_DEVICE);
491 ql_log(ql_log_warn, vha, 0x7010,
492 "dma_map_sg return %d for reply\n", rsp_sg_cnt);
497 if ((req_sg_cnt != bsg_job->request_payload.sg_cnt) ||
498 (rsp_sg_cnt != bsg_job->reply_payload.sg_cnt)) {
499 ql_log(ql_log_warn, vha, 0x7011,
500 "request_sg_cnt: %x dma_request_sg_cnt: %x reply_sg_cnt:%x "
501 "dma_reply_sg_cnt: %x\n", bsg_job->request_payload.sg_cnt,
502 req_sg_cnt, bsg_job->reply_payload.sg_cnt, rsp_sg_cnt);
507 if (!vha->flags.online) {
508 ql_log(ql_log_warn, vha, 0x7012,
509 "Host is not online.\n");
515 (bsg_request->rqst_data.h_ct.preamble_word1 & 0xFF000000)
522 loop_id = vha->mgmt_svr_loop_id;
525 ql_dbg(ql_dbg_user, vha, 0x7013,
526 "Unknown loop id: %x.\n", loop_id);
531 /* Allocate a dummy fcport structure, since the functions preparing
532 * the IOCB and mailbox command retrieve port-specific information
533 * from the fcport structure. For host-based CT commands there is
534 * no fcport structure allocated beforehand
536 fcport = qla2x00_alloc_fcport(vha, GFP_KERNEL);
538 ql_log(ql_log_warn, vha, 0x7014,
539 "Failed to allocate fcport.\n");
544 /* Initialize all required fields of fcport */
546 fcport->d_id.b.al_pa = bsg_request->rqst_data.h_ct.port_id[0];
547 fcport->d_id.b.area = bsg_request->rqst_data.h_ct.port_id[1];
548 fcport->d_id.b.domain = bsg_request->rqst_data.h_ct.port_id[2];
549 fcport->loop_id = loop_id;
551 /* Alloc SRB structure */
552 sp = qla2x00_get_sp(vha, fcport, GFP_KERNEL);
554 ql_log(ql_log_warn, vha, 0x7015,
555 "qla2x00_get_sp failed.\n");
557 goto done_free_fcport;
560 sp->type = SRB_CT_CMD;
562 sp->iocbs = qla24xx_calc_ct_iocbs(req_sg_cnt + rsp_sg_cnt);
563 sp->u.bsg_job = bsg_job;
564 sp->free = qla2x00_bsg_sp_free;
565 sp->done = qla2x00_bsg_job_done;
567 ql_dbg(ql_dbg_user, vha, 0x7016,
568 "bsg rqst type: %s else type: %x - "
569 "loop-id=%x portid=%02x%02x%02x.\n", type,
570 (bsg_request->rqst_data.h_ct.preamble_word2 >> 16),
571 fcport->loop_id, fcport->d_id.b.domain, fcport->d_id.b.area,
572 fcport->d_id.b.al_pa);
574 rval = qla2x00_start_sp(sp);
575 if (rval != QLA_SUCCESS) {
576 ql_log(ql_log_warn, vha, 0x7017,
577 "qla2x00_start_sp failed=%d.\n", rval);
580 goto done_free_fcport;
585 qla2x00_free_fcport(fcport);
587 dma_unmap_sg(&ha->pdev->dev, bsg_job->request_payload.sg_list,
588 bsg_job->request_payload.sg_cnt, DMA_TO_DEVICE);
589 dma_unmap_sg(&ha->pdev->dev, bsg_job->reply_payload.sg_list,
590 bsg_job->reply_payload.sg_cnt, DMA_FROM_DEVICE);
595 /* Disable loopback mode */
597 qla81xx_reset_loopback_mode(scsi_qla_host_t *vha, uint16_t *config,
602 uint16_t new_config[4];
603 struct qla_hw_data *ha = vha->hw;
605 if (!IS_QLA81XX(ha) && !IS_QLA8031(ha) && !IS_QLA8044(ha))
606 goto done_reset_internal;
608 memset(new_config, 0, sizeof(new_config));
609 if ((config[0] & INTERNAL_LOOPBACK_MASK) >> 1 ==
610 ENABLE_INTERNAL_LOOPBACK ||
611 (config[0] & INTERNAL_LOOPBACK_MASK) >> 1 ==
612 ENABLE_EXTERNAL_LOOPBACK) {
613 new_config[0] = config[0] & ~INTERNAL_LOOPBACK_MASK;
614 ql_dbg(ql_dbg_user, vha, 0x70bf, "new_config[0]=%02x\n",
615 (new_config[0] & INTERNAL_LOOPBACK_MASK));
616 memcpy(&new_config[1], &config[1], sizeof(uint16_t) * 3);
618 ha->notify_dcbx_comp = wait;
619 ha->notify_lb_portup_comp = wait2;
621 ret = qla81xx_set_port_config(vha, new_config);
622 if (ret != QLA_SUCCESS) {
623 ql_log(ql_log_warn, vha, 0x7025,
624 "Set port config failed.\n");
625 ha->notify_dcbx_comp = 0;
626 ha->notify_lb_portup_comp = 0;
628 goto done_reset_internal;
631 /* Wait for DCBX complete event */
632 if (wait && !wait_for_completion_timeout(&ha->dcbx_comp,
633 (DCBX_COMP_TIMEOUT * HZ))) {
634 ql_dbg(ql_dbg_user, vha, 0x7026,
635 "DCBX completion not received.\n");
636 ha->notify_dcbx_comp = 0;
637 ha->notify_lb_portup_comp = 0;
639 goto done_reset_internal;
641 ql_dbg(ql_dbg_user, vha, 0x7027,
642 "DCBX completion received.\n");
645 !wait_for_completion_timeout(&ha->lb_portup_comp,
646 (LB_PORTUP_COMP_TIMEOUT * HZ))) {
647 ql_dbg(ql_dbg_user, vha, 0x70c5,
648 "Port up completion not received.\n");
649 ha->notify_lb_portup_comp = 0;
651 goto done_reset_internal;
653 ql_dbg(ql_dbg_user, vha, 0x70c6,
654 "Port up completion received.\n");
656 ha->notify_dcbx_comp = 0;
657 ha->notify_lb_portup_comp = 0;
664 * Set the port configuration to enable the internal or external loopback
665 * depending on the loopback mode.
668 qla81xx_set_loopback_mode(scsi_qla_host_t *vha, uint16_t *config,
669 uint16_t *new_config, uint16_t mode)
673 unsigned long rem_tmo = 0, current_tmo = 0;
674 struct qla_hw_data *ha = vha->hw;
676 if (!IS_QLA81XX(ha) && !IS_QLA8031(ha) && !IS_QLA8044(ha))
677 goto done_set_internal;
679 if (mode == INTERNAL_LOOPBACK)
680 new_config[0] = config[0] | (ENABLE_INTERNAL_LOOPBACK << 1);
681 else if (mode == EXTERNAL_LOOPBACK)
682 new_config[0] = config[0] | (ENABLE_EXTERNAL_LOOPBACK << 1);
683 ql_dbg(ql_dbg_user, vha, 0x70be,
684 "new_config[0]=%02x\n", (new_config[0] & INTERNAL_LOOPBACK_MASK));
686 memcpy(&new_config[1], &config[1], sizeof(uint16_t) * 3);
688 ha->notify_dcbx_comp = 1;
689 ret = qla81xx_set_port_config(vha, new_config);
690 if (ret != QLA_SUCCESS) {
691 ql_log(ql_log_warn, vha, 0x7021,
692 "set port config failed.\n");
693 ha->notify_dcbx_comp = 0;
695 goto done_set_internal;
698 /* Wait for DCBX complete event */
699 current_tmo = DCBX_COMP_TIMEOUT * HZ;
701 rem_tmo = wait_for_completion_timeout(&ha->dcbx_comp,
703 if (!ha->idc_extend_tmo || rem_tmo) {
704 ha->idc_extend_tmo = 0;
707 current_tmo = ha->idc_extend_tmo * HZ;
708 ha->idc_extend_tmo = 0;
712 ql_dbg(ql_dbg_user, vha, 0x7022,
713 "DCBX completion not received.\n");
714 ret = qla81xx_reset_loopback_mode(vha, new_config, 0, 0);
716 * If the reset of the loopback mode doesn't work, take an FCoE
717 * dump and reset the chip.
720 qla2xxx_dump_fw(vha);
721 set_bit(ISP_ABORT_NEEDED, &vha->dpc_flags);
725 if (ha->flags.idc_compl_status) {
726 ql_dbg(ql_dbg_user, vha, 0x70c3,
727 "Bad status in IDC Completion AEN\n");
729 ha->flags.idc_compl_status = 0;
731 ql_dbg(ql_dbg_user, vha, 0x7023,
732 "DCBX completion received.\n");
735 ha->notify_dcbx_comp = 0;
736 ha->idc_extend_tmo = 0;
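/*
 * Loopback/echo diagnostic handler: the request payload is bounced
 * through a coherent DMA buffer using either the echo test (switch
 * topologies) or the internal/external loopback test, and the received
 * data plus mailbox status registers are returned in the reply.
 */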
743 qla2x00_process_loopback(struct bsg_job *bsg_job)
745 struct fc_bsg_request *bsg_request = bsg_job->request;
746 struct fc_bsg_reply *bsg_reply = bsg_job->reply;
747 struct Scsi_Host *host = fc_bsg_to_shost(bsg_job);
748 scsi_qla_host_t *vha = shost_priv(host);
749 struct qla_hw_data *ha = vha->hw;
751 uint8_t command_sent;
753 struct msg_echo_lb elreq;
754 uint16_t response[MAILBOX_REGISTER_COUNT];
755 uint16_t config[4], new_config[4];
757 void *req_data = NULL;
758 dma_addr_t req_data_dma;
759 uint32_t req_data_len;
760 uint8_t *rsp_data = NULL;
761 dma_addr_t rsp_data_dma;
762 uint32_t rsp_data_len;
764 if (!vha->flags.online) {
765 ql_log(ql_log_warn, vha, 0x7019, "Host is not online.\n");
769 memset(&elreq, 0, sizeof(elreq));
771 elreq.req_sg_cnt = dma_map_sg(&ha->pdev->dev,
772 bsg_job->request_payload.sg_list, bsg_job->request_payload.sg_cnt,
775 if (!elreq.req_sg_cnt) {
776 ql_log(ql_log_warn, vha, 0x701a,
777 "dma_map_sg returned %d for request.\n", elreq.req_sg_cnt);
781 elreq.rsp_sg_cnt = dma_map_sg(&ha->pdev->dev,
782 bsg_job->reply_payload.sg_list, bsg_job->reply_payload.sg_cnt,
785 if (!elreq.rsp_sg_cnt) {
786 ql_log(ql_log_warn, vha, 0x701b,
787 "dma_map_sg returned %d for reply.\n", elreq.rsp_sg_cnt);
789 goto done_unmap_req_sg;
792 if ((elreq.req_sg_cnt != bsg_job->request_payload.sg_cnt) ||
793 (elreq.rsp_sg_cnt != bsg_job->reply_payload.sg_cnt)) {
794 ql_log(ql_log_warn, vha, 0x701c,
795 "dma mapping resulted in different sg counts, "
796 "request_sg_cnt: %x dma_request_sg_cnt: %x "
797 "reply_sg_cnt: %x dma_reply_sg_cnt: %x.\n",
798 bsg_job->request_payload.sg_cnt, elreq.req_sg_cnt,
799 bsg_job->reply_payload.sg_cnt, elreq.rsp_sg_cnt);
803 req_data_len = rsp_data_len = bsg_job->request_payload.payload_len;
804 req_data = dma_alloc_coherent(&ha->pdev->dev, req_data_len,
805 &req_data_dma, GFP_KERNEL);
807 ql_log(ql_log_warn, vha, 0x701d,
808 "dma alloc failed for req_data.\n");
813 rsp_data = dma_alloc_coherent(&ha->pdev->dev, rsp_data_len,
814 &rsp_data_dma, GFP_KERNEL);
816 ql_log(ql_log_warn, vha, 0x7004,
817 "dma alloc failed for rsp_data.\n");
819 goto done_free_dma_req;
822 /* Copy the request buffer into req_data now */
823 sg_copy_to_buffer(bsg_job->request_payload.sg_list,
824 bsg_job->request_payload.sg_cnt, req_data, req_data_len);
826 elreq.send_dma = req_data_dma;
827 elreq.rcv_dma = rsp_data_dma;
828 elreq.transfer_size = req_data_len;
830 elreq.options = bsg_request->rqst_data.h_vendor.vendor_cmd[1];
831 elreq.iteration_count =
832 bsg_request->rqst_data.h_vendor.vendor_cmd[2];
834 if (atomic_read(&vha->loop_state) == LOOP_READY &&
835 ((ha->current_topology == ISP_CFG_F && (elreq.options & 7) >= 2) ||
836 ((IS_QLA81XX(ha) || IS_QLA8031(ha) || IS_QLA8044(ha)) &&
837 get_unaligned_le32(req_data) == ELS_OPCODE_BYTE &&
838 req_data_len == MAX_ELS_FRAME_PAYLOAD &&
839 elreq.options == EXTERNAL_LOOPBACK))) {
840 type = "FC_BSG_HST_VENDOR_ECHO_DIAG";
841 ql_dbg(ql_dbg_user, vha, 0x701e,
842 "BSG request type: %s.\n", type);
843 command_sent = INT_DEF_LB_ECHO_CMD;
844 rval = qla2x00_echo_test(vha, &elreq, response);
846 if (IS_QLA81XX(ha) || IS_QLA8031(ha) || IS_QLA8044(ha)) {
847 memset(config, 0, sizeof(config));
848 memset(new_config, 0, sizeof(new_config));
850 if (qla81xx_get_port_config(vha, config)) {
851 ql_log(ql_log_warn, vha, 0x701f,
852 "Get port config failed.\n");
854 goto done_free_dma_rsp;
857 if ((config[0] & INTERNAL_LOOPBACK_MASK) != 0) {
858 ql_dbg(ql_dbg_user, vha, 0x70c4,
859 "Loopback operation already in "
862 goto done_free_dma_rsp;
865 ql_dbg(ql_dbg_user, vha, 0x70c0,
866 "elreq.options=%04x\n", elreq.options);
868 if (elreq.options == EXTERNAL_LOOPBACK)
869 if (IS_QLA8031(ha) || IS_QLA8044(ha))
870 rval = qla81xx_set_loopback_mode(vha,
871 config, new_config, elreq.options);
873 rval = qla81xx_reset_loopback_mode(vha,
876 rval = qla81xx_set_loopback_mode(vha, config,
877 new_config, elreq.options);
881 goto done_free_dma_rsp;
884 type = "FC_BSG_HST_VENDOR_LOOPBACK";
885 ql_dbg(ql_dbg_user, vha, 0x7028,
886 "BSG request type: %s.\n", type);
888 command_sent = INT_DEF_LB_LOOPBACK_CMD;
889 rval = qla2x00_loopback_test(vha, &elreq, response);
891 if (response[0] == MBS_COMMAND_ERROR &&
892 response[1] == MBS_LB_RESET) {
893 ql_log(ql_log_warn, vha, 0x7029,
894 "MBX command error, Aborting ISP.\n");
895 set_bit(ISP_ABORT_NEEDED, &vha->dpc_flags);
896 qla2xxx_wake_dpc(vha);
897 qla2x00_wait_for_chip_reset(vha);
898 /* Also reset the MPI */
899 if (IS_QLA81XX(ha)) {
900 if (qla81xx_restart_mpi_firmware(vha) !=
902 ql_log(ql_log_warn, vha, 0x702a,
903 "MPI reset failed.\n");
908 goto done_free_dma_rsp;
914 /* Revert to the original port config.
915 * Also clear internal loopback
917 ret = qla81xx_reset_loopback_mode(vha,
921 * If the reset of the loopback mode
922 * doesn't work, take an FCoE dump and then
925 qla2xxx_dump_fw(vha);
926 set_bit(ISP_ABORT_NEEDED,
933 type = "FC_BSG_HST_VENDOR_LOOPBACK";
934 ql_dbg(ql_dbg_user, vha, 0x702b,
935 "BSG request type: %s.\n", type);
936 command_sent = INT_DEF_LB_LOOPBACK_CMD;
937 rval = qla2x00_loopback_test(vha, &elreq, response);
942 ql_log(ql_log_warn, vha, 0x702c,
943 "Vendor request %s failed.\n", type);
946 bsg_reply->result = (DID_ERROR << 16);
947 bsg_reply->reply_payload_rcv_len = 0;
949 ql_dbg(ql_dbg_user, vha, 0x702d,
950 "Vendor request %s completed.\n", type);
951 bsg_reply->result = (DID_OK << 16);
952 sg_copy_from_buffer(bsg_job->reply_payload.sg_list,
953 bsg_job->reply_payload.sg_cnt, rsp_data,
957 bsg_job->reply_len = sizeof(struct fc_bsg_reply) +
958 sizeof(response) + sizeof(uint8_t);
959 fw_sts_ptr = bsg_job->reply + sizeof(struct fc_bsg_reply);
960 memcpy(bsg_job->reply + sizeof(struct fc_bsg_reply), response,
962 fw_sts_ptr += sizeof(response);
963 *fw_sts_ptr = command_sent;
966 dma_free_coherent(&ha->pdev->dev, rsp_data_len,
967 rsp_data, rsp_data_dma);
969 dma_free_coherent(&ha->pdev->dev, req_data_len,
970 req_data, req_data_dma);
972 dma_unmap_sg(&ha->pdev->dev,
973 bsg_job->reply_payload.sg_list,
974 bsg_job->reply_payload.sg_cnt, DMA_FROM_DEVICE);
976 dma_unmap_sg(&ha->pdev->dev,
977 bsg_job->request_payload.sg_list,
978 bsg_job->request_payload.sg_cnt, DMA_TO_DEVICE);
980 bsg_job_done(bsg_job, bsg_reply->result,
981 bsg_reply->reply_payload_rcv_len);
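/*
 * Reset the ISP84xx chip on behalf of the application; the
 * A84_ISSUE_RESET_DIAG_FW flag selects the diagnostic-firmware
 * variant of the reset.
 */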
986 qla84xx_reset(struct bsg_job *bsg_job)
988 struct fc_bsg_request *bsg_request = bsg_job->request;
989 struct Scsi_Host *host = fc_bsg_to_shost(bsg_job);
990 struct fc_bsg_reply *bsg_reply = bsg_job->reply;
991 scsi_qla_host_t *vha = shost_priv(host);
992 struct qla_hw_data *ha = vha->hw;
996 if (!IS_QLA84XX(ha)) {
997 ql_dbg(ql_dbg_user, vha, 0x702f, "Not 84xx, exiting.\n");
1001 flag = bsg_request->rqst_data.h_vendor.vendor_cmd[1];
1003 rval = qla84xx_reset_chip(vha, flag == A84_ISSUE_RESET_DIAG_FW);
1006 ql_log(ql_log_warn, vha, 0x7030,
1007 "Vendor request 84xx reset failed.\n");
1008 rval = (DID_ERROR << 16);
1011 ql_dbg(ql_dbg_user, vha, 0x7031,
1012 "Vendor request 84xx reset completed.\n");
1013 bsg_reply->result = DID_OK;
1014 bsg_job_done(bsg_job, bsg_reply->result,
1015 bsg_reply->reply_payload_rcv_len);
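/*
 * Download a firmware image to the ISP84xx: the payload is staged in
 * a coherent buffer and handed to the chip through a VERIFY_CHIP
 * IOCB, optionally flagged as a diagnostic-firmware update.
 */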
1022 qla84xx_updatefw(struct bsg_job *bsg_job)
1024 struct fc_bsg_request *bsg_request = bsg_job->request;
1025 struct fc_bsg_reply *bsg_reply = bsg_job->reply;
1026 struct Scsi_Host *host = fc_bsg_to_shost(bsg_job);
1027 scsi_qla_host_t *vha = shost_priv(host);
1028 struct qla_hw_data *ha = vha->hw;
1029 struct verify_chip_entry_84xx *mn = NULL;
1030 dma_addr_t mn_dma, fw_dma;
1031 void *fw_buf = NULL;
1039 if (!IS_QLA84XX(ha)) {
1040 ql_dbg(ql_dbg_user, vha, 0x7032,
1041 "Not 84xx, exiting.\n");
1045 sg_cnt = dma_map_sg(&ha->pdev->dev, bsg_job->request_payload.sg_list,
1046 bsg_job->request_payload.sg_cnt, DMA_TO_DEVICE);
1048 ql_log(ql_log_warn, vha, 0x7033,
1049 "dma_map_sg returned %d for request.\n", sg_cnt);
1053 if (sg_cnt != bsg_job->request_payload.sg_cnt) {
1054 ql_log(ql_log_warn, vha, 0x7034,
1055 "DMA mapping resulted in different sg counts, "
1056 "request_sg_cnt: %x dma_request_sg_cnt: %x.\n",
1057 bsg_job->request_payload.sg_cnt, sg_cnt);
1062 data_len = bsg_job->request_payload.payload_len;
1063 fw_buf = dma_alloc_coherent(&ha->pdev->dev, data_len,
1064 &fw_dma, GFP_KERNEL);
1066 ql_log(ql_log_warn, vha, 0x7035,
1067 "DMA alloc failed for fw_buf.\n");
1072 sg_copy_to_buffer(bsg_job->request_payload.sg_list,
1073 bsg_job->request_payload.sg_cnt, fw_buf, data_len);
1075 mn = dma_pool_zalloc(ha->s_dma_pool, GFP_KERNEL, &mn_dma);
1077 ql_log(ql_log_warn, vha, 0x7036,
1078 "DMA alloc failed for fw buffer.\n");
1080 goto done_free_fw_buf;
1083 flag = bsg_request->rqst_data.h_vendor.vendor_cmd[1];
1084 fw_ver = get_unaligned_le32((uint32_t *)fw_buf + 2);
1086 mn->entry_type = VERIFY_CHIP_IOCB_TYPE;
1087 mn->entry_count = 1;
1089 options = VCO_FORCE_UPDATE | VCO_END_OF_DATA;
1090 if (flag == A84_ISSUE_UPDATE_DIAGFW_CMD)
1091 options |= VCO_DIAG_FW;
1093 mn->options = cpu_to_le16(options);
1094 mn->fw_ver = cpu_to_le32(fw_ver);
1095 mn->fw_size = cpu_to_le32(data_len);
1096 mn->fw_seq_size = cpu_to_le32(data_len);
1097 put_unaligned_le64(fw_dma, &mn->dsd.address);
1098 mn->dsd.length = cpu_to_le32(data_len);
1099 mn->data_seg_cnt = cpu_to_le16(1);
1101 rval = qla2x00_issue_iocb_timeout(vha, mn, mn_dma, 0, 120);
1104 ql_log(ql_log_warn, vha, 0x7037,
1105 "Vendor request 84xx updatefw failed.\n");
1107 rval = (DID_ERROR << 16);
1109 ql_dbg(ql_dbg_user, vha, 0x7038,
1110 "Vendor request 84xx updatefw completed.\n");
1112 bsg_job->reply_len = sizeof(struct fc_bsg_reply);
1113 bsg_reply->result = DID_OK;
1116 dma_pool_free(ha->s_dma_pool, mn, mn_dma);
1119 dma_free_coherent(&ha->pdev->dev, data_len, fw_buf, fw_dma);
1122 dma_unmap_sg(&ha->pdev->dev, bsg_job->request_payload.sg_list,
1123 bsg_job->request_payload.sg_cnt, DMA_TO_DEVICE);
1126 bsg_job_done(bsg_job, bsg_reply->result,
1127 bsg_reply->reply_payload_rcv_len);
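/*
 * ISP84xx management pass-through: wraps the READ_MEM, WRITE_MEM,
 * GET_INFO and CHNG_CONFIG sub-commands in an ACCESS_CHIP IOCB,
 * staging the payload in a coherent buffer mapped in whichever
 * direction the sub-command requires.
 */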
1132 qla84xx_mgmt_cmd(struct bsg_job *bsg_job)
1134 struct fc_bsg_request *bsg_request = bsg_job->request;
1135 struct fc_bsg_reply *bsg_reply = bsg_job->reply;
1136 struct Scsi_Host *host = fc_bsg_to_shost(bsg_job);
1137 scsi_qla_host_t *vha = shost_priv(host);
1138 struct qla_hw_data *ha = vha->hw;
1139 struct access_chip_84xx *mn = NULL;
1140 dma_addr_t mn_dma, mgmt_dma;
1141 void *mgmt_b = NULL;
1143 struct qla_bsg_a84_mgmt *ql84_mgmt;
1145 uint32_t data_len = 0;
1146 uint32_t dma_direction = DMA_NONE;
1148 if (!IS_QLA84XX(ha)) {
1149 ql_log(ql_log_warn, vha, 0x703a,
1150 "Not 84xx, exiting.\n");
1154 mn = dma_pool_zalloc(ha->s_dma_pool, GFP_KERNEL, &mn_dma);
1156 ql_log(ql_log_warn, vha, 0x703c,
1157 "DMA alloc failed for fw buffer.\n");
1161 mn->entry_type = ACCESS_CHIP_IOCB_TYPE;
1162 mn->entry_count = 1;
1163 ql84_mgmt = (void *)bsg_request + sizeof(struct fc_bsg_request);
1164 switch (ql84_mgmt->mgmt.cmd) {
1165 case QLA84_MGMT_READ_MEM:
1166 case QLA84_MGMT_GET_INFO:
1167 sg_cnt = dma_map_sg(&ha->pdev->dev,
1168 bsg_job->reply_payload.sg_list,
1169 bsg_job->reply_payload.sg_cnt, DMA_FROM_DEVICE);
1171 ql_log(ql_log_warn, vha, 0x703d,
1172 "dma_map_sg returned %d for reply.\n", sg_cnt);
1177 dma_direction = DMA_FROM_DEVICE;
1179 if (sg_cnt != bsg_job->reply_payload.sg_cnt) {
1180 ql_log(ql_log_warn, vha, 0x703e,
1181 "DMA mapping resulted in different sg counts, "
1182 "reply_sg_cnt: %x dma_reply_sg_cnt: %x.\n",
1183 bsg_job->reply_payload.sg_cnt, sg_cnt);
1188 data_len = bsg_job->reply_payload.payload_len;
1190 mgmt_b = dma_alloc_coherent(&ha->pdev->dev, data_len,
1191 &mgmt_dma, GFP_KERNEL);
1193 ql_log(ql_log_warn, vha, 0x703f,
1194 "DMA alloc failed for mgmt_b.\n");
1199 if (ql84_mgmt->mgmt.cmd == QLA84_MGMT_READ_MEM) {
1200 mn->options = cpu_to_le16(ACO_DUMP_MEMORY);
1203 ql84_mgmt->mgmt.mgmtp.u.mem.start_addr);
1205 } else if (ql84_mgmt->mgmt.cmd == QLA84_MGMT_GET_INFO) {
1206 mn->options = cpu_to_le16(ACO_REQUEST_INFO);
1208 cpu_to_le32(ql84_mgmt->mgmt.mgmtp.u.info.type);
1212 ql84_mgmt->mgmt.mgmtp.u.info.context);
1216 case QLA84_MGMT_WRITE_MEM:
1217 sg_cnt = dma_map_sg(&ha->pdev->dev,
1218 bsg_job->request_payload.sg_list,
1219 bsg_job->request_payload.sg_cnt, DMA_TO_DEVICE);
1222 ql_log(ql_log_warn, vha, 0x7040,
1223 "dma_map_sg returned %d.\n", sg_cnt);
1228 dma_direction = DMA_TO_DEVICE;
1230 if (sg_cnt != bsg_job->request_payload.sg_cnt) {
1231 ql_log(ql_log_warn, vha, 0x7041,
1232 "DMA mapping resulted in different sg counts, "
1233 "request_sg_cnt: %x dma_request_sg_cnt: %x.\n",
1234 bsg_job->request_payload.sg_cnt, sg_cnt);
1239 data_len = bsg_job->request_payload.payload_len;
1240 mgmt_b = dma_alloc_coherent(&ha->pdev->dev, data_len,
1241 &mgmt_dma, GFP_KERNEL);
1243 ql_log(ql_log_warn, vha, 0x7042,
1244 "DMA alloc failed for mgmt_b.\n");
1249 sg_copy_to_buffer(bsg_job->request_payload.sg_list,
1250 bsg_job->request_payload.sg_cnt, mgmt_b, data_len);
1252 mn->options = cpu_to_le16(ACO_LOAD_MEMORY);
1254 cpu_to_le32(ql84_mgmt->mgmt.mgmtp.u.mem.start_addr);
1257 case QLA84_MGMT_CHNG_CONFIG:
1258 mn->options = cpu_to_le16(ACO_CHANGE_CONFIG_PARAM);
1260 cpu_to_le32(ql84_mgmt->mgmt.mgmtp.u.config.id);
1263 cpu_to_le32(ql84_mgmt->mgmt.mgmtp.u.config.param0);
1266 cpu_to_le32(ql84_mgmt->mgmt.mgmtp.u.config.param1);
1274 if (ql84_mgmt->mgmt.cmd != QLA84_MGMT_CHNG_CONFIG) {
1275 mn->total_byte_cnt = cpu_to_le32(ql84_mgmt->mgmt.len);
1276 mn->dseg_count = cpu_to_le16(1);
1277 put_unaligned_le64(mgmt_dma, &mn->dsd.address);
1278 mn->dsd.length = cpu_to_le32(ql84_mgmt->mgmt.len);
1281 rval = qla2x00_issue_iocb(vha, mn, mn_dma, 0);
1284 ql_log(ql_log_warn, vha, 0x7043,
1285 "Vendor request 84xx mgmt failed.\n");
1287 rval = (DID_ERROR << 16);
1290 ql_dbg(ql_dbg_user, vha, 0x7044,
1291 "Vendor request 84xx mgmt completed.\n");
1293 bsg_job->reply_len = sizeof(struct fc_bsg_reply);
1294 bsg_reply->result = DID_OK;
1296 if ((ql84_mgmt->mgmt.cmd == QLA84_MGMT_READ_MEM) ||
1297 (ql84_mgmt->mgmt.cmd == QLA84_MGMT_GET_INFO)) {
1298 bsg_reply->reply_payload_rcv_len =
1299 bsg_job->reply_payload.payload_len;
1301 sg_copy_from_buffer(bsg_job->reply_payload.sg_list,
1302 bsg_job->reply_payload.sg_cnt, mgmt_b,
1309 dma_free_coherent(&ha->pdev->dev, data_len, mgmt_b, mgmt_dma);
1311 if (dma_direction == DMA_TO_DEVICE)
1312 dma_unmap_sg(&ha->pdev->dev, bsg_job->request_payload.sg_list,
1313 bsg_job->request_payload.sg_cnt, DMA_TO_DEVICE);
1314 else if (dma_direction == DMA_FROM_DEVICE)
1315 dma_unmap_sg(&ha->pdev->dev, bsg_job->reply_payload.sg_list,
1316 bsg_job->reply_payload.sg_cnt, DMA_FROM_DEVICE);
1319 dma_pool_free(ha->s_dma_pool, mn, mn_dma);
1322 bsg_job_done(bsg_job, bsg_reply->result,
1323 bsg_reply->reply_payload_rcv_len);
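/*
 * iiDMA speed control: look up the target fcport by WWPN, then set
 * (mode != 0) or query its firmware port speed via mailbox commands.
 */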
1328 qla24xx_iidma(struct bsg_job *bsg_job)
1330 struct fc_bsg_request *bsg_request = bsg_job->request;
1331 struct fc_bsg_reply *bsg_reply = bsg_job->reply;
1332 struct Scsi_Host *host = fc_bsg_to_shost(bsg_job);
1333 scsi_qla_host_t *vha = shost_priv(host);
1335 struct qla_port_param *port_param = NULL;
1336 fc_port_t *fcport = NULL;
1338 uint16_t mb[MAILBOX_REGISTER_COUNT];
1339 uint8_t *rsp_ptr = NULL;
1341 if (!IS_IIDMA_CAPABLE(vha->hw)) {
1342 ql_log(ql_log_info, vha, 0x7046, "iiDMA not supported.\n");
1346 port_param = (void *)bsg_request + sizeof(struct fc_bsg_request);
1347 if (port_param->fc_scsi_addr.dest_type != EXT_DEF_TYPE_WWPN) {
1348 ql_log(ql_log_warn, vha, 0x7048,
1349 "Invalid destination type.\n");
1353 list_for_each_entry(fcport, &vha->vp_fcports, list) {
1354 if (fcport->port_type != FCT_TARGET)
1357 if (memcmp(port_param->fc_scsi_addr.dest_addr.wwpn,
1358 fcport->port_name, sizeof(fcport->port_name)))
1366 ql_log(ql_log_warn, vha, 0x7049,
1367 "Failed to find port.\n");
1371 if (atomic_read(&fcport->state) != FCS_ONLINE) {
1372 ql_log(ql_log_warn, vha, 0x704a,
1373 "Port is not online.\n");
1377 if (fcport->flags & FCF_LOGIN_NEEDED) {
1378 ql_log(ql_log_warn, vha, 0x704b,
1379 "Remote port not logged in flags = 0x%x.\n", fcport->flags);
1383 if (port_param->mode)
1384 rval = qla2x00_set_idma_speed(vha, fcport->loop_id,
1385 port_param->speed, mb);
1387 rval = qla2x00_get_idma_speed(vha, fcport->loop_id,
1388 &port_param->speed, mb);
1391 ql_log(ql_log_warn, vha, 0x704c,
1392 "iiDMA cmd failed for %8phN -- "
1393 "%04x %x %04x %04x.\n", fcport->port_name,
1394 rval, fcport->fp_speed, mb[0], mb[1]);
1395 rval = (DID_ERROR << 16);
1397 if (!port_param->mode) {
1398 bsg_job->reply_len = sizeof(struct fc_bsg_reply) +
1399 sizeof(struct qla_port_param);
1401 rsp_ptr = ((uint8_t *)bsg_reply) +
1402 sizeof(struct fc_bsg_reply);
1404 memcpy(rsp_ptr, port_param,
1405 sizeof(struct qla_port_param));
1408 bsg_reply->result = DID_OK;
1409 bsg_job_done(bsg_job, bsg_reply->result,
1410 bsg_reply->reply_payload_rcv_len);
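/*
 * Common setup for option-ROM reads and updates: validate the
 * requested flash region against the adapter's option-ROM layout,
 * size the transfer and allocate the staging buffer, leaving
 * optrom_state at QLA_SREADING or QLA_SWRITING.
 */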
1417 qla2x00_optrom_setup(struct bsg_job *bsg_job, scsi_qla_host_t *vha,
1420 struct fc_bsg_request *bsg_request = bsg_job->request;
1423 struct qla_hw_data *ha = vha->hw;
1425 if (unlikely(pci_channel_offline(ha->pdev)))
1428 start = bsg_request->rqst_data.h_vendor.vendor_cmd[1];
1429 if (start > ha->optrom_size) {
1430 ql_log(ql_log_warn, vha, 0x7055,
1431 "start %d > optrom_size %d.\n", start, ha->optrom_size);
1435 if (ha->optrom_state != QLA_SWAITING) {
1436 ql_log(ql_log_info, vha, 0x7056,
1437 "optrom_state %d.\n", ha->optrom_state);
1441 ha->optrom_region_start = start;
1442 ql_dbg(ql_dbg_user, vha, 0x7057, "is_update=%d.\n", is_update);
1444 if (ha->optrom_size == OPTROM_SIZE_2300 && start == 0)
1446 else if (start == (ha->flt_region_boot * 4) ||
1447 start == (ha->flt_region_fw * 4))
1449 else if (IS_QLA24XX_TYPE(ha) || IS_QLA25XX(ha) ||
1450 IS_CNA_CAPABLE(ha) || IS_QLA2031(ha) || IS_QLA27XX(ha) ||
1454 ql_log(ql_log_warn, vha, 0x7058,
1455 "Invalid start region 0x%x/0x%x.\n", start,
1456 bsg_job->request_payload.payload_len);
1460 ha->optrom_region_size = start +
1461 bsg_job->request_payload.payload_len > ha->optrom_size ?
1462 ha->optrom_size - start :
1463 bsg_job->request_payload.payload_len;
1464 ha->optrom_state = QLA_SWRITING;
1466 ha->optrom_region_size = start +
1467 bsg_job->reply_payload.payload_len > ha->optrom_size ?
1468 ha->optrom_size - start :
1469 bsg_job->reply_payload.payload_len;
1470 ha->optrom_state = QLA_SREADING;
1473 ha->optrom_buffer = vzalloc(ha->optrom_region_size);
1474 if (!ha->optrom_buffer) {
1475 ql_log(ql_log_warn, vha, 0x7059,
1476 "Read: Unable to allocate memory for optrom retrieval "
1477 "(%x)\n", ha->optrom_region_size);
1479 ha->optrom_state = QLA_SWAITING;
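/*
 * Read the selected option-ROM region into the staging buffer and
 * return it in the reply payload.
 */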
1487 qla2x00_read_optrom(struct bsg_job *bsg_job)
1489 struct fc_bsg_reply *bsg_reply = bsg_job->reply;
1490 struct Scsi_Host *host = fc_bsg_to_shost(bsg_job);
1491 scsi_qla_host_t *vha = shost_priv(host);
1492 struct qla_hw_data *ha = vha->hw;
1495 if (ha->flags.nic_core_reset_hdlr_active)
1498 mutex_lock(&ha->optrom_mutex);
1499 rval = qla2x00_optrom_setup(bsg_job, vha, 0);
1501 mutex_unlock(&ha->optrom_mutex);
1505 ha->isp_ops->read_optrom(vha, ha->optrom_buffer,
1506 ha->optrom_region_start, ha->optrom_region_size);
1508 sg_copy_from_buffer(bsg_job->reply_payload.sg_list,
1509 bsg_job->reply_payload.sg_cnt, ha->optrom_buffer,
1510 ha->optrom_region_size);
1512 bsg_reply->reply_payload_rcv_len = ha->optrom_region_size;
1513 bsg_reply->result = DID_OK;
1514 vfree(ha->optrom_buffer);
1515 ha->optrom_buffer = NULL;
1516 ha->optrom_state = QLA_SWAITING;
1517 mutex_unlock(&ha->optrom_mutex);
1518 bsg_job_done(bsg_job, bsg_reply->result,
1519 bsg_reply->reply_payload_rcv_len);
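/*
 * Stage the caller-supplied image in the option-ROM buffer and write
 * it to flash through isp_ops->write_optrom().
 */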
1524 qla2x00_update_optrom(struct bsg_job *bsg_job)
1526 struct fc_bsg_reply *bsg_reply = bsg_job->reply;
1527 struct Scsi_Host *host = fc_bsg_to_shost(bsg_job);
1528 scsi_qla_host_t *vha = shost_priv(host);
1529 struct qla_hw_data *ha = vha->hw;
1532 mutex_lock(&ha->optrom_mutex);
1533 rval = qla2x00_optrom_setup(bsg_job, vha, 1);
1535 mutex_unlock(&ha->optrom_mutex);
1539 /* Set isp82xx_no_md_cap so that a minidump is not captured */
1540 ha->flags.isp82xx_no_md_cap = 1;
1542 sg_copy_to_buffer(bsg_job->request_payload.sg_list,
1543 bsg_job->request_payload.sg_cnt, ha->optrom_buffer,
1544 ha->optrom_region_size);
1546 rval = ha->isp_ops->write_optrom(vha, ha->optrom_buffer,
1547 ha->optrom_region_start, ha->optrom_region_size);
1550 bsg_reply->result = -EINVAL;
1553 bsg_reply->result = DID_OK;
1555 vfree(ha->optrom_buffer);
1556 ha->optrom_buffer = NULL;
1557 ha->optrom_state = QLA_SWAITING;
1558 mutex_unlock(&ha->optrom_mutex);
1559 bsg_job_done(bsg_job, bsg_reply->result,
1560 bsg_reply->reply_payload_rcv_len);
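/*
 * Write the field information of each qla_image_version entry
 * supplied by the caller to the device via qla2x00_write_sfp().
 */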
1565 qla2x00_update_fru_versions(struct bsg_job *bsg_job)
1567 struct fc_bsg_reply *bsg_reply = bsg_job->reply;
1568 struct Scsi_Host *host = fc_bsg_to_shost(bsg_job);
1569 scsi_qla_host_t *vha = shost_priv(host);
1570 struct qla_hw_data *ha = vha->hw;
1572 uint8_t bsg[DMA_POOL_SIZE];
1573 struct qla_image_version_list *list = (void *)bsg;
1574 struct qla_image_version *image;
1577 void *sfp = dma_pool_alloc(ha->s_dma_pool, GFP_KERNEL, &sfp_dma);
1580 bsg_reply->reply_data.vendor_reply.vendor_rsp[0] =
1581 EXT_STATUS_NO_MEMORY;
1585 sg_copy_to_buffer(bsg_job->request_payload.sg_list,
1586 bsg_job->request_payload.sg_cnt, list, sizeof(bsg));
1588 image = list->version;
1589 count = list->count;
1591 memcpy(sfp, &image->field_info, sizeof(image->field_info));
1592 rval = qla2x00_write_sfp(vha, sfp_dma, sfp,
1593 image->field_address.device, image->field_address.offset,
1594 sizeof(image->field_info), image->field_address.option);
1596 bsg_reply->reply_data.vendor_reply.vendor_rsp[0] =
1603 bsg_reply->reply_data.vendor_reply.vendor_rsp[0] = 0;
1606 dma_pool_free(ha->s_dma_pool, sfp, sfp_dma);
1609 bsg_job->reply_len = sizeof(struct fc_bsg_reply);
1610 bsg_reply->result = DID_OK << 16;
1611 bsg_job_done(bsg_job, bsg_reply->result,
1612 bsg_reply->reply_payload_rcv_len);
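/*
 * Read a single FRU status register through the SFP/FRU access
 * mailbox command and return it in the qla_status_reg block.
 */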
1618 qla2x00_read_fru_status(struct bsg_job *bsg_job)
1620 struct fc_bsg_reply *bsg_reply = bsg_job->reply;
1621 struct Scsi_Host *host = fc_bsg_to_shost(bsg_job);
1622 scsi_qla_host_t *vha = shost_priv(host);
1623 struct qla_hw_data *ha = vha->hw;
1625 uint8_t bsg[DMA_POOL_SIZE];
1626 struct qla_status_reg *sr = (void *)bsg;
1628 uint8_t *sfp = dma_pool_alloc(ha->s_dma_pool, GFP_KERNEL, &sfp_dma);
1631 bsg_reply->reply_data.vendor_reply.vendor_rsp[0] =
1632 EXT_STATUS_NO_MEMORY;
1636 sg_copy_to_buffer(bsg_job->request_payload.sg_list,
1637 bsg_job->request_payload.sg_cnt, sr, sizeof(*sr));
1639 rval = qla2x00_read_sfp(vha, sfp_dma, sfp,
1640 sr->field_address.device, sr->field_address.offset,
1641 sizeof(sr->status_reg), sr->field_address.option);
1642 sr->status_reg = *sfp;
1645 bsg_reply->reply_data.vendor_reply.vendor_rsp[0] =
1650 sg_copy_from_buffer(bsg_job->reply_payload.sg_list,
1651 bsg_job->reply_payload.sg_cnt, sr, sizeof(*sr));
1653 bsg_reply->reply_data.vendor_reply.vendor_rsp[0] = 0;
1656 dma_pool_free(ha->s_dma_pool, sfp, sfp_dma);
1659 bsg_job->reply_len = sizeof(struct fc_bsg_reply);
1660 bsg_reply->reply_payload_rcv_len = sizeof(*sr);
1661 bsg_reply->result = DID_OK << 16;
1662 bsg_job_done(bsg_job, bsg_reply->result,
1663 bsg_reply->reply_payload_rcv_len);
1669 qla2x00_write_fru_status(struct bsg_job *bsg_job)
1671 struct fc_bsg_reply *bsg_reply = bsg_job->reply;
1672 struct Scsi_Host *host = fc_bsg_to_shost(bsg_job);
1673 scsi_qla_host_t *vha = shost_priv(host);
1674 struct qla_hw_data *ha = vha->hw;
1676 uint8_t bsg[DMA_POOL_SIZE];
1677 struct qla_status_reg *sr = (void *)bsg;
1679 uint8_t *sfp = dma_pool_alloc(ha->s_dma_pool, GFP_KERNEL, &sfp_dma);
1682 bsg_reply->reply_data.vendor_reply.vendor_rsp[0] =
1683 EXT_STATUS_NO_MEMORY;
1687 sg_copy_to_buffer(bsg_job->request_payload.sg_list,
1688 bsg_job->request_payload.sg_cnt, sr, sizeof(*sr));
1690 *sfp = sr->status_reg;
1691 rval = qla2x00_write_sfp(vha, sfp_dma, sfp,
1692 sr->field_address.device, sr->field_address.offset,
1693 sizeof(sr->status_reg), sr->field_address.option);
1696 bsg_reply->reply_data.vendor_reply.vendor_rsp[0] =
1701 bsg_reply->reply_data.vendor_reply.vendor_rsp[0] = 0;
1704 dma_pool_free(ha->s_dma_pool, sfp, sfp_dma);
1707 bsg_job->reply_len = sizeof(struct fc_bsg_reply);
1708 bsg_reply->result = DID_OK << 16;
1709 bsg_job_done(bsg_job, bsg_reply->result,
1710 bsg_reply->reply_payload_rcv_len);
1716 qla2x00_write_i2c(struct bsg_job *bsg_job)
1718 struct fc_bsg_reply *bsg_reply = bsg_job->reply;
1719 struct Scsi_Host *host = fc_bsg_to_shost(bsg_job);
1720 scsi_qla_host_t *vha = shost_priv(host);
1721 struct qla_hw_data *ha = vha->hw;
1723 uint8_t bsg[DMA_POOL_SIZE];
1724 struct qla_i2c_access *i2c = (void *)bsg;
1726 uint8_t *sfp = dma_pool_alloc(ha->s_dma_pool, GFP_KERNEL, &sfp_dma);
1729 bsg_reply->reply_data.vendor_reply.vendor_rsp[0] =
1730 EXT_STATUS_NO_MEMORY;
1734 sg_copy_to_buffer(bsg_job->request_payload.sg_list,
1735 bsg_job->request_payload.sg_cnt, i2c, sizeof(*i2c));
1737 memcpy(sfp, i2c->buffer, i2c->length);
1738 rval = qla2x00_write_sfp(vha, sfp_dma, sfp,
1739 i2c->device, i2c->offset, i2c->length, i2c->option);
1742 bsg_reply->reply_data.vendor_reply.vendor_rsp[0] =
1747 bsg_reply->reply_data.vendor_reply.vendor_rsp[0] = 0;
1750 dma_pool_free(ha->s_dma_pool, sfp, sfp_dma);
1753 bsg_job->reply_len = sizeof(struct fc_bsg_reply);
1754 bsg_reply->result = DID_OK << 16;
1755 bsg_job_done(bsg_job, bsg_reply->result,
1756 bsg_reply->reply_payload_rcv_len);
1762 qla2x00_read_i2c(struct bsg_job *bsg_job)
1764 struct fc_bsg_reply *bsg_reply = bsg_job->reply;
1765 struct Scsi_Host *host = fc_bsg_to_shost(bsg_job);
1766 scsi_qla_host_t *vha = shost_priv(host);
1767 struct qla_hw_data *ha = vha->hw;
1769 uint8_t bsg[DMA_POOL_SIZE];
1770 struct qla_i2c_access *i2c = (void *)bsg;
1772 uint8_t *sfp = dma_pool_alloc(ha->s_dma_pool, GFP_KERNEL, &sfp_dma);
1775 bsg_reply->reply_data.vendor_reply.vendor_rsp[0] =
1776 EXT_STATUS_NO_MEMORY;
1780 sg_copy_to_buffer(bsg_job->request_payload.sg_list,
1781 bsg_job->request_payload.sg_cnt, i2c, sizeof(*i2c));
1783 rval = qla2x00_read_sfp(vha, sfp_dma, sfp,
1784 i2c->device, i2c->offset, i2c->length, i2c->option);
1787 bsg_reply->reply_data.vendor_reply.vendor_rsp[0] =
1792 memcpy(i2c->buffer, sfp, i2c->length);
1793 sg_copy_from_buffer(bsg_job->reply_payload.sg_list,
1794 bsg_job->reply_payload.sg_cnt, i2c, sizeof(*i2c));
1796 bsg_reply->reply_data.vendor_reply.vendor_rsp[0] = 0;
1799 dma_pool_free(ha->s_dma_pool, sfp, sfp_dma);
1802 bsg_job->reply_len = sizeof(struct fc_bsg_reply);
1803 bsg_reply->reply_payload_rcv_len = sizeof(*i2c);
1804 bsg_reply->result = DID_OK << 16;
1805 bsg_job_done(bsg_job, bsg_reply->result,
1806 bsg_reply->reply_payload_rcv_len);
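/*
 * Bidirectional pass-through diagnostic: after a fabric self-login to
 * obtain a loop ID, both payloads are DMA-mapped and handed to
 * qla2x00_start_bidir(); status is reported through the vendor
 * response word rather than the midlayer result.
 */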
1812 qla24xx_process_bidir_cmd(struct bsg_job *bsg_job)
1814 struct fc_bsg_reply *bsg_reply = bsg_job->reply;
1815 struct Scsi_Host *host = fc_bsg_to_shost(bsg_job);
1816 scsi_qla_host_t *vha = shost_priv(host);
1817 struct qla_hw_data *ha = vha->hw;
1818 uint32_t rval = EXT_STATUS_OK;
1819 uint16_t req_sg_cnt = 0;
1820 uint16_t rsp_sg_cnt = 0;
1821 uint16_t nextlid = 0;
1824 uint32_t req_data_len;
1825 uint32_t rsp_data_len;
1827 /* Check the type of the adapter */
1828 if (!IS_BIDI_CAPABLE(ha)) {
1829 ql_log(ql_log_warn, vha, 0x70a0,
1830 "This adapter is not supported\n");
1831 rval = EXT_STATUS_NOT_SUPPORTED;
1835 if (test_bit(ISP_ABORT_NEEDED, &vha->dpc_flags) ||
1836 test_bit(ABORT_ISP_ACTIVE, &vha->dpc_flags) ||
1837 test_bit(ISP_ABORT_RETRY, &vha->dpc_flags)) {
1838 rval = EXT_STATUS_BUSY;
1842 /* Check if host is online */
1843 if (!vha->flags.online) {
1844 ql_log(ql_log_warn, vha, 0x70a1,
1845 "Host is not online\n");
1846 rval = EXT_STATUS_DEVICE_OFFLINE;
1850 /* Check if cable is plugged in or not */
1851 if (vha->device_flags & DFLG_NO_CABLE) {
1852 ql_log(ql_log_warn, vha, 0x70a2,
1853 "Cable is unplugged...\n");
1854 rval = EXT_STATUS_INVALID_CFG;
1858 /* Check if the switch is connected or not */
1859 if (ha->current_topology != ISP_CFG_F) {
1860 ql_log(ql_log_warn, vha, 0x70a3,
1861 "Host is not connected to the switch\n");
1862 rval = EXT_STATUS_INVALID_CFG;
1866 /* Check if operating mode is P2P */
1867 if (ha->operating_mode != P2P) {
1868 ql_log(ql_log_warn, vha, 0x70a4,
1869 "Host operating mode is not P2p\n");
1870 rval = EXT_STATUS_INVALID_CFG;
1874 mutex_lock(&ha->selflogin_lock);
1875 if (vha->self_login_loop_id == 0) {
1876 /* Initialize all required fields of fcport */
1877 vha->bidir_fcport.vha = vha;
1878 vha->bidir_fcport.d_id.b.al_pa = vha->d_id.b.al_pa;
1879 vha->bidir_fcport.d_id.b.area = vha->d_id.b.area;
1880 vha->bidir_fcport.d_id.b.domain = vha->d_id.b.domain;
1881 vha->bidir_fcport.loop_id = vha->loop_id;
1883 if (qla2x00_fabric_login(vha, &(vha->bidir_fcport), &nextlid)) {
1884 ql_log(ql_log_warn, vha, 0x70a7,
1885 "Failed to login port %06X for bidirectional IOCB\n",
1886 vha->bidir_fcport.d_id.b24);
1887 mutex_unlock(&ha->selflogin_lock);
1888 rval = EXT_STATUS_MAILBOX;
1891 vha->self_login_loop_id = nextlid - 1;
1894 /* Assign the self login loop id to fcport */
1895 mutex_unlock(&ha->selflogin_lock);
1897 vha->bidir_fcport.loop_id = vha->self_login_loop_id;
1899 req_sg_cnt = dma_map_sg(&ha->pdev->dev,
1900 bsg_job->request_payload.sg_list,
1901 bsg_job->request_payload.sg_cnt,
1905 rval = EXT_STATUS_NO_MEMORY;
1909 rsp_sg_cnt = dma_map_sg(&ha->pdev->dev,
1910 bsg_job->reply_payload.sg_list, bsg_job->reply_payload.sg_cnt,
1914 rval = EXT_STATUS_NO_MEMORY;
1915 goto done_unmap_req_sg;
1918 if ((req_sg_cnt != bsg_job->request_payload.sg_cnt) ||
1919 (rsp_sg_cnt != bsg_job->reply_payload.sg_cnt)) {
1920 ql_dbg(ql_dbg_user, vha, 0x70a9,
1921 "Dma mapping resulted in different sg counts "
1922 "[request_sg_cnt: %x dma_request_sg_cnt: %x reply_sg_cnt: "
1923 "%x dma_reply_sg_cnt: %x]\n",
1924 bsg_job->request_payload.sg_cnt, req_sg_cnt,
1925 bsg_job->reply_payload.sg_cnt, rsp_sg_cnt);
1926 rval = EXT_STATUS_NO_MEMORY;
1930 req_data_len = bsg_job->request_payload.payload_len;
1931 rsp_data_len = bsg_job->reply_payload.payload_len;
1933 if (req_data_len != rsp_data_len) {
1934 rval = EXT_STATUS_BUSY;
1935 ql_log(ql_log_warn, vha, 0x70aa,
1936 "req_data_len != rsp_data_len\n");
1940 /* Alloc SRB structure */
1941 sp = qla2x00_get_sp(vha, &(vha->bidir_fcport), GFP_KERNEL);
1943 ql_dbg(ql_dbg_user, vha, 0x70ac,
1944 "Alloc SRB structure failed\n");
1945 rval = EXT_STATUS_NO_MEMORY;
1949 /* Populate srb->ctx with bidir ctx */
1950 sp->u.bsg_job = bsg_job;
1951 sp->free = qla2x00_bsg_sp_free;
1952 sp->type = SRB_BIDI_CMD;
1953 sp->done = qla2x00_bsg_job_done;
1955 /* Add the read and write sg count */
1956 tot_dsds = rsp_sg_cnt + req_sg_cnt;
1958 rval = qla2x00_start_bidir(sp, vha, tot_dsds);
1959 if (rval != EXT_STATUS_OK)
1961 /* the bsg request will be completed in the interrupt handler */
1965 mempool_free(sp, ha->srb_mempool);
1967 dma_unmap_sg(&ha->pdev->dev,
1968 bsg_job->reply_payload.sg_list,
1969 bsg_job->reply_payload.sg_cnt, DMA_FROM_DEVICE);
1971 dma_unmap_sg(&ha->pdev->dev,
1972 bsg_job->request_payload.sg_list,
1973 bsg_job->request_payload.sg_cnt, DMA_TO_DEVICE);
1976 /* Return an error in the vendor-specific response
1977 * and complete the bsg request
1979 bsg_reply->reply_data.vendor_reply.vendor_rsp[0] = rval;
1980 bsg_job->reply_len = sizeof(struct fc_bsg_reply);
1981 bsg_reply->reply_payload_rcv_len = 0;
1982 bsg_reply->result = (DID_OK) << 16;
1983 bsg_job_done(bsg_job, bsg_reply->result,
1984 bsg_reply->reply_payload_rcv_len);
1985 /* Always return success, vendor rsp carries correct status */
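/*
 * ISPFx00 management pass-through: the vendor command embeds a
 * qla_mt_iocb_rqst_fx00 whose flags indicate which payload directions
 * need DMA mapping before the request is issued as an SRB_FXIOCB_BCMD.
 */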
1990 qlafx00_mgmt_cmd(struct bsg_job *bsg_job)
1992 struct fc_bsg_request *bsg_request = bsg_job->request;
1993 struct Scsi_Host *host = fc_bsg_to_shost(bsg_job);
1994 scsi_qla_host_t *vha = shost_priv(host);
1995 struct qla_hw_data *ha = vha->hw;
1996 int rval = (DID_ERROR << 16);
1997 struct qla_mt_iocb_rqst_fx00 *piocb_rqst;
1999 int req_sg_cnt = 0, rsp_sg_cnt = 0;
2000 struct fc_port *fcport;
2001 char *type = "FC_BSG_HST_FX_MGMT";
2003 /* Copy the IOCB specific information */
2004 piocb_rqst = (struct qla_mt_iocb_rqst_fx00 *)
2005 &bsg_request->rqst_data.h_vendor.vendor_cmd[1];
2007 /* Dump the vendor information */
2008 ql_dump_buffer(ql_dbg_user + ql_dbg_verbose, vha, 0x70cf,
2009 piocb_rqst, sizeof(*piocb_rqst));
2011 if (!vha->flags.online) {
2012 ql_log(ql_log_warn, vha, 0x70d0,
2013 "Host is not online.\n");
2018 if (piocb_rqst->flags & SRB_FXDISC_REQ_DMA_VALID) {
2019 req_sg_cnt = dma_map_sg(&ha->pdev->dev,
2020 bsg_job->request_payload.sg_list,
2021 bsg_job->request_payload.sg_cnt, DMA_TO_DEVICE);
2023 ql_log(ql_log_warn, vha, 0x70c7,
2024 "dma_map_sg return %d for request\n", req_sg_cnt);
2030 if (piocb_rqst->flags & SRB_FXDISC_RESP_DMA_VALID) {
2031 rsp_sg_cnt = dma_map_sg(&ha->pdev->dev,
2032 bsg_job->reply_payload.sg_list,
2033 bsg_job->reply_payload.sg_cnt, DMA_FROM_DEVICE);
2035 ql_log(ql_log_warn, vha, 0x70c8,
2036 "dma_map_sg return %d for reply\n", rsp_sg_cnt);
2038 goto done_unmap_req_sg;
2042 ql_dbg(ql_dbg_user, vha, 0x70c9,
2043 "request_sg_cnt: %x dma_request_sg_cnt: %x reply_sg_cnt:%x "
2044 "dma_reply_sg_cnt: %x\n", bsg_job->request_payload.sg_cnt,
2045 req_sg_cnt, bsg_job->reply_payload.sg_cnt, rsp_sg_cnt);
2047 /* Allocate a dummy fcport structure, since the functions preparing
2048 * the IOCB and mailbox command retrieve port-specific information
2049 * from the fcport structure. For host-based commands there is no
2050 * fcport structure allocated beforehand
2052 fcport = qla2x00_alloc_fcport(vha, GFP_KERNEL);
2054 ql_log(ql_log_warn, vha, 0x70ca,
2055 "Failed to allocate fcport.\n");
2057 goto done_unmap_rsp_sg;
2060 /* Alloc SRB structure */
2061 sp = qla2x00_get_sp(vha, fcport, GFP_KERNEL);
2063 ql_log(ql_log_warn, vha, 0x70cb,
2064 "qla2x00_get_sp failed.\n");
2066 goto done_free_fcport;
2069 /* Initialize all required fields of fcport */
2071 fcport->loop_id = le32_to_cpu(piocb_rqst->dataword);
2073 sp->type = SRB_FXIOCB_BCMD;
2074 sp->name = "bsg_fx_mgmt";
2075 sp->iocbs = qla24xx_calc_ct_iocbs(req_sg_cnt + rsp_sg_cnt);
2076 sp->u.bsg_job = bsg_job;
2077 sp->free = qla2x00_bsg_sp_free;
2078 sp->done = qla2x00_bsg_job_done;
2080 ql_dbg(ql_dbg_user, vha, 0x70cc,
2081 "bsg rqst type: %s fx_mgmt_type: %x id=%x\n",
2082 type, piocb_rqst->func_type, fcport->loop_id);
2084 rval = qla2x00_start_sp(sp);
2085 if (rval != QLA_SUCCESS) {
2086 ql_log(ql_log_warn, vha, 0x70cd,
2087 "qla2x00_start_sp failed=%d.\n", rval);
2088 mempool_free(sp, ha->srb_mempool);
2090 goto done_free_fcport;
2095 qla2x00_free_fcport(fcport);
2098 if (piocb_rqst->flags & SRB_FXDISC_RESP_DMA_VALID)
2099 dma_unmap_sg(&ha->pdev->dev,
2100 bsg_job->reply_payload.sg_list,
2101 bsg_job->reply_payload.sg_cnt, DMA_FROM_DEVICE);
2103 if (piocb_rqst->flags & SRB_FXDISC_REQ_DMA_VALID)
2104 dma_unmap_sg(&ha->pdev->dev,
2105 bsg_job->request_payload.sg_list,
2106 bsg_job->request_payload.sg_cnt, DMA_TO_DEVICE);
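/*
 * SerDes register access for ISP26xx parts: read or write a single
 * SerDes word as selected by the qla_serdes_reg block in the request
 * payload.
 */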
2113 qla26xx_serdes_op(struct bsg_job *bsg_job)
2115 struct fc_bsg_reply *bsg_reply = bsg_job->reply;
2116 struct Scsi_Host *host = fc_bsg_to_shost(bsg_job);
2117 scsi_qla_host_t *vha = shost_priv(host);
2119 struct qla_serdes_reg sr;
2121 memset(&sr, 0, sizeof(sr));
2123 sg_copy_to_buffer(bsg_job->request_payload.sg_list,
2124 bsg_job->request_payload.sg_cnt, &sr, sizeof(sr));
2127 case INT_SC_SERDES_WRITE_REG:
2128 rval = qla2x00_write_serdes_word(vha, sr.addr, sr.val);
2129 bsg_reply->reply_payload_rcv_len = 0;
2131 case INT_SC_SERDES_READ_REG:
2132 rval = qla2x00_read_serdes_word(vha, sr.addr, &sr.val);
2133 sg_copy_from_buffer(bsg_job->reply_payload.sg_list,
2134 bsg_job->reply_payload.sg_cnt, &sr, sizeof(sr));
2135 bsg_reply->reply_payload_rcv_len = sizeof(sr);
2138 ql_dbg(ql_dbg_user, vha, 0x708c,
2139 "Unknown serdes cmd %x.\n", sr.cmd);
2144 bsg_reply->reply_data.vendor_reply.vendor_rsp[0] =
2145 rval ? EXT_STATUS_MAILBOX : 0;
2147 bsg_job->reply_len = sizeof(struct fc_bsg_reply);
2148 bsg_reply->result = DID_OK << 16;
2149 bsg_job_done(bsg_job, bsg_reply->result,
2150 bsg_reply->reply_payload_rcv_len);
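/*
 * ISP8044 variant of the SerDes register accessor, using the extended
 * qla_serdes_reg_ex command block.
 */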
2155 qla8044_serdes_op(struct bsg_job *bsg_job)
2157 struct fc_bsg_reply *bsg_reply = bsg_job->reply;
2158 struct Scsi_Host *host = fc_bsg_to_shost(bsg_job);
2159 scsi_qla_host_t *vha = shost_priv(host);
2161 struct qla_serdes_reg_ex sr;
2163 memset(&sr, 0, sizeof(sr));
2165 sg_copy_to_buffer(bsg_job->request_payload.sg_list,
2166 bsg_job->request_payload.sg_cnt, &sr, sizeof(sr));
2169 case INT_SC_SERDES_WRITE_REG:
2170 rval = qla8044_write_serdes_word(vha, sr.addr, sr.val);
2171 bsg_reply->reply_payload_rcv_len = 0;
2173 case INT_SC_SERDES_READ_REG:
2174 rval = qla8044_read_serdes_word(vha, sr.addr, &sr.val);
2175 sg_copy_from_buffer(bsg_job->reply_payload.sg_list,
2176 bsg_job->reply_payload.sg_cnt, &sr, sizeof(sr));
2177 bsg_reply->reply_payload_rcv_len = sizeof(sr);
2180 ql_dbg(ql_dbg_user, vha, 0x7020,
2181 "Unknown serdes cmd %x.\n", sr.cmd);
2186 bsg_reply->reply_data.vendor_reply.vendor_rsp[0] =
2187 rval ? EXT_STATUS_MAILBOX : 0;
2189 bsg_job->reply_len = sizeof(struct fc_bsg_reply);
2190 bsg_reply->result = DID_OK << 16;
2191 bsg_job_done(bsg_job, bsg_reply->result,
2192 bsg_reply->reply_payload_rcv_len);
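/*
 * Report the flash-update capability word, assembled from the
 * firmware attribute registers, in the reply payload.
 */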
2197 qla27xx_get_flash_upd_cap(struct bsg_job *bsg_job)
2199 struct fc_bsg_reply *bsg_reply = bsg_job->reply;
2200 struct Scsi_Host *host = fc_bsg_to_shost(bsg_job);
2201 scsi_qla_host_t *vha = shost_priv(host);
2202 struct qla_hw_data *ha = vha->hw;
2203 struct qla_flash_update_caps cap;
2205 if (!(IS_QLA27XX(ha)) && !IS_QLA28XX(ha))
2208 memset(&cap, 0, sizeof(cap));
2209 cap.capabilities = (uint64_t)ha->fw_attributes_ext[1] << 48 |
2210 (uint64_t)ha->fw_attributes_ext[0] << 32 |
2211 (uint64_t)ha->fw_attributes_h << 16 |
2212 (uint64_t)ha->fw_attributes;
2214 sg_copy_from_buffer(bsg_job->reply_payload.sg_list,
2215 bsg_job->reply_payload.sg_cnt, &cap, sizeof(cap));
2216 bsg_reply->reply_payload_rcv_len = sizeof(cap);
2218 bsg_reply->reply_data.vendor_reply.vendor_rsp[0] =
2221 bsg_job->reply_len = sizeof(struct fc_bsg_reply);
2222 bsg_reply->result = DID_OK << 16;
2223 bsg_job_done(bsg_job, bsg_reply->result,
2224 bsg_reply->reply_payload_rcv_len);
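/*
 * Validate a requested flash-update capability set: it must match the
 * running firmware's attributes and carry an outage duration of at
 * least MAX_LOOP_TIMEOUT before it is accepted.
 */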
2229 qla27xx_set_flash_upd_cap(struct bsg_job *bsg_job)
2231 struct fc_bsg_reply *bsg_reply = bsg_job->reply;
2232 struct Scsi_Host *host = fc_bsg_to_shost(bsg_job);
2233 scsi_qla_host_t *vha = shost_priv(host);
2234 struct qla_hw_data *ha = vha->hw;
2235 uint64_t online_fw_attr = 0;
2236 struct qla_flash_update_caps cap;
2238 if (!IS_QLA27XX(ha) && !IS_QLA28XX(ha))
2241 memset(&cap, 0, sizeof(cap));
2242 sg_copy_to_buffer(bsg_job->request_payload.sg_list,
2243 bsg_job->request_payload.sg_cnt, &cap, sizeof(cap));
2245 online_fw_attr = (uint64_t)ha->fw_attributes_ext[1] << 48 |
2246 (uint64_t)ha->fw_attributes_ext[0] << 32 |
2247 (uint64_t)ha->fw_attributes_h << 16 |
2248 (uint64_t)ha->fw_attributes;
2250 if (online_fw_attr != cap.capabilities) {
2251 bsg_reply->reply_data.vendor_reply.vendor_rsp[0] =
2252 EXT_STATUS_INVALID_PARAM;
2256 if (cap.outage_duration < MAX_LOOP_TIMEOUT) {
2257 bsg_reply->reply_data.vendor_reply.vendor_rsp[0] =
2258 EXT_STATUS_INVALID_PARAM;
2262 bsg_reply->reply_payload_rcv_len = 0;
2264 bsg_reply->reply_data.vendor_reply.vendor_rsp[0] =
2267 bsg_job->reply_len = sizeof(struct fc_bsg_reply);
2268 bsg_reply->result = DID_OK << 16;
2269 bsg_job_done(bsg_job, bsg_reply->result,
2270 bsg_reply->reply_payload_rcv_len);
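/*
 * Report buffer-to-buffer credit recovery (BBCR) state: when enabled,
 * the online/offline state and the negotiated and configured BBSCN
 * values are decoded from vha->bbcr.
 */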
static int
qla27xx_get_bbcr_data(struct bsg_job *bsg_job)
{
	struct fc_bsg_reply *bsg_reply = bsg_job->reply;
	struct Scsi_Host *host = fc_bsg_to_shost(bsg_job);
	scsi_qla_host_t *vha = shost_priv(host);
	struct qla_hw_data *ha = vha->hw;
	struct qla_bbcr_data bbcr;
	uint16_t loop_id, topo, sw_cap;
	uint8_t domain, area, al_pa, state;
	int rval;

	if (!IS_QLA27XX(ha) && !IS_QLA28XX(ha))
		return -EPERM;

	memset(&bbcr, 0, sizeof(bbcr));

	if (vha->flags.bbcr_enable)
		bbcr.status = QLA_BBCR_STATUS_ENABLED;
	else
		bbcr.status = QLA_BBCR_STATUS_DISABLED;

	if (bbcr.status == QLA_BBCR_STATUS_ENABLED) {
		rval = qla2x00_get_adapter_id(vha, &loop_id, &al_pa,
		    &area, &domain, &topo, &sw_cap);
		if (rval != QLA_SUCCESS) {
			bbcr.status = QLA_BBCR_STATUS_UNKNOWN;
			bbcr.state = QLA_BBCR_STATE_OFFLINE;
			bbcr.mbx1 = loop_id;
			goto done;
		}

		state = (vha->bbcr >> 12) & 0x1;

		if (state) {
			bbcr.state = QLA_BBCR_STATE_OFFLINE;
			bbcr.offline_reason_code = QLA_BBCR_REASON_LOGIN_REJECT;
		} else {
			bbcr.state = QLA_BBCR_STATE_ONLINE;
			bbcr.negotiated_bbscn = (vha->bbcr >> 8) & 0xf;
		}

		bbcr.configured_bbscn = vha->bbcr & 0xf;
	}

done:
	sg_copy_from_buffer(bsg_job->reply_payload.sg_list,
	    bsg_job->reply_payload.sg_cnt, &bbcr, sizeof(bbcr));
	bsg_reply->reply_payload_rcv_len = sizeof(bbcr);

	bsg_reply->reply_data.vendor_reply.vendor_rsp[0] = EXT_STATUS_OK;

	bsg_job->reply_len = sizeof(struct fc_bsg_reply);
	bsg_reply->result = DID_OK << 16;
	bsg_job_done(bsg_job, bsg_reply->result,
	    bsg_reply->reply_payload_rcv_len);
	return 0;
}
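/*
 * QL_VND_GET_PRIV_STATS[_EX]: fetch ISP link statistics into a
 * DMA-coherent buffer and copy them back to the reply payload.
 */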
static int
qla2x00_get_priv_stats(struct bsg_job *bsg_job)
{
	struct fc_bsg_request *bsg_request = bsg_job->request;
	struct fc_bsg_reply *bsg_reply = bsg_job->reply;
	struct Scsi_Host *host = fc_bsg_to_shost(bsg_job);
	scsi_qla_host_t *vha = shost_priv(host);
	struct qla_hw_data *ha = vha->hw;
	struct scsi_qla_host *base_vha = pci_get_drvdata(ha->pdev);
	struct link_statistics *stats = NULL;
	dma_addr_t stats_dma;
	int rval;
	uint32_t *cmd = bsg_request->rqst_data.h_vendor.vendor_cmd;
	uint options = cmd[0] == QL_VND_GET_PRIV_STATS_EX ? cmd[1] : 0;

	if (test_bit(UNLOADING, &vha->dpc_flags))
		return -ENODEV;

	if (unlikely(pci_channel_offline(ha->pdev)))
		return -ENODEV;

	if (qla2x00_reset_active(vha))
		return -EBUSY;

	if (!IS_FWI2_CAPABLE(ha))
		return -EPERM;

	stats = dma_alloc_coherent(&ha->pdev->dev, sizeof(*stats), &stats_dma,
	    GFP_KERNEL);
	if (!stats) {
		ql_log(ql_log_warn, vha, 0x70e2,
		    "Failed to allocate memory for stats.\n");
		return -ENOMEM;
	}

	rval = qla24xx_get_isp_stats(base_vha, stats, stats_dma, options);

	if (rval == QLA_SUCCESS) {
		ql_dump_buffer(ql_dbg_user + ql_dbg_verbose, vha, 0x70e5,
		    stats, sizeof(*stats));
		sg_copy_from_buffer(bsg_job->reply_payload.sg_list,
		    bsg_job->reply_payload.sg_cnt, stats, sizeof(*stats));
	}

	bsg_reply->reply_payload_rcv_len = sizeof(*stats);
	bsg_reply->reply_data.vendor_reply.vendor_rsp[0] =
	    rval ? EXT_STATUS_MAILBOX : EXT_STATUS_OK;

	bsg_job->reply_len = sizeof(*bsg_reply);
	bsg_reply->result = DID_OK << 16;
	bsg_job_done(bsg_job, bsg_reply->result,
	    bsg_reply->reply_payload_rcv_len);

	dma_free_coherent(&ha->pdev->dev, sizeof(*stats),
	    stats, stats_dma);

	return 0;
}
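/*
 * QL_VND_DPORT_DIAGNOSTICS: run D_Port diagnostics with the options
 * supplied in the request payload and return the result buffer.
 */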
static int
qla2x00_do_dport_diagnostics(struct bsg_job *bsg_job)
{
	struct fc_bsg_reply *bsg_reply = bsg_job->reply;
	struct Scsi_Host *host = fc_bsg_to_shost(bsg_job);
	scsi_qla_host_t *vha = shost_priv(host);
	int rval;
	struct qla_dport_diag *dd;

	if (!IS_QLA83XX(vha->hw) && !IS_QLA27XX(vha->hw) &&
	    !IS_QLA28XX(vha->hw))
		return -EPERM;

	dd = kmalloc(sizeof(*dd), GFP_KERNEL);
	if (!dd) {
		ql_log(ql_log_warn, vha, 0x70db,
		    "Failed to allocate memory for dport.\n");
		return -ENOMEM;
	}

	sg_copy_to_buffer(bsg_job->request_payload.sg_list,
	    bsg_job->request_payload.sg_cnt, dd, sizeof(*dd));

	rval = qla26xx_dport_diagnostics(
	    vha, dd->buf, sizeof(dd->buf), dd->options);
	if (rval == QLA_SUCCESS) {
		sg_copy_from_buffer(bsg_job->reply_payload.sg_list,
		    bsg_job->reply_payload.sg_cnt, dd, sizeof(*dd));
	}

	bsg_reply->reply_payload_rcv_len = sizeof(*dd);
	bsg_reply->reply_data.vendor_reply.vendor_rsp[0] =
	    rval ? EXT_STATUS_MAILBOX : EXT_STATUS_OK;

	bsg_job->reply_len = sizeof(*bsg_reply);
	bsg_reply->result = DID_OK << 16;
	bsg_job_done(bsg_job, bsg_reply->result,
	    bsg_reply->reply_payload_rcv_len);

	kfree(dd);

	return 0;
}
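/*
 * QL_VND_SS_GET_FLASH_IMAGE_STATUS: report which flash regions
 * (firmware and, on ISP28xx, the auxiliary images) are active.
 */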
static int
qla2x00_get_flash_image_status(struct bsg_job *bsg_job)
{
	scsi_qla_host_t *vha = shost_priv(fc_bsg_to_shost(bsg_job));
	struct fc_bsg_reply *bsg_reply = bsg_job->reply;
	struct qla_hw_data *ha = vha->hw;
	struct qla_active_regions regions = { };
	struct active_regions active_regions = { };

	qla27xx_get_active_image(vha, &active_regions);
	regions.global_image = active_regions.global;

	if (IS_QLA28XX(ha)) {
		qla28xx_get_aux_images(vha, &active_regions);
		regions.board_config = active_regions.aux.board_config;
		regions.vpd_nvram = active_regions.aux.vpd_nvram;
		regions.npiv_config_0_1 = active_regions.aux.npiv_config_0_1;
		regions.npiv_config_2_3 = active_regions.aux.npiv_config_2_3;
	}

	ql_dbg(ql_dbg_user, vha, 0x70e1,
	    "%s(%lu): FW=%u BCFG=%u VPDNVR=%u NPIV01=%u NPIV02=%u\n",
	    __func__, vha->host_no, regions.global_image,
	    regions.board_config, regions.vpd_nvram,
	    regions.npiv_config_0_1, regions.npiv_config_2_3);

	sg_copy_from_buffer(bsg_job->reply_payload.sg_list,
	    bsg_job->reply_payload.sg_cnt, &regions, sizeof(regions));

	bsg_reply->reply_data.vendor_reply.vendor_rsp[0] = EXT_STATUS_OK;
	bsg_reply->reply_payload_rcv_len = sizeof(regions);
	bsg_reply->result = DID_OK << 16;
	bsg_job->reply_len = sizeof(struct fc_bsg_reply);
	bsg_job_done(bsg_job, bsg_reply->result,
	    bsg_reply->reply_payload_rcv_len);

	return 0;
}
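/*
 * QL_VND_MANAGE_HOST_STATS: start, stop or reset the host statistics
 * collection selected by stat_type.
 */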
static int
qla2x00_manage_host_stats(struct bsg_job *bsg_job)
{
	scsi_qla_host_t *vha = shost_priv(fc_bsg_to_shost(bsg_job));
	struct fc_bsg_reply *bsg_reply = bsg_job->reply;
	struct ql_vnd_mng_host_stats_param *req_data;
	struct ql_vnd_mng_host_stats_resp rsp_data;
	u32 req_data_len;
	int ret = 0;

	if (!vha->flags.online) {
		ql_log(ql_log_warn, vha, 0x0000, "Host is not online.\n");
		return -EIO;
	}

	req_data_len = bsg_job->request_payload.payload_len;
	if (req_data_len != sizeof(struct ql_vnd_mng_host_stats_param)) {
		ql_log(ql_log_warn, vha, 0x0000, "req_data_len invalid.\n");
		return -EIO;
	}

	req_data = kzalloc(sizeof(*req_data), GFP_KERNEL);
	if (!req_data) {
		ql_log(ql_log_warn, vha, 0x0000, "req_data memory allocation failure.\n");
		return -ENOMEM;
	}

	/* Copy the request buffer in req_data */
	sg_copy_to_buffer(bsg_job->request_payload.sg_list,
	    bsg_job->request_payload.sg_cnt, req_data, req_data_len);

	switch (req_data->action) {
	case QLA_STOP:
		ret = qla2xxx_stop_stats(vha->host, req_data->stat_type);
		break;
	case QLA_START:
		ret = qla2xxx_start_stats(vha->host, req_data->stat_type);
		break;
	case QLA_CLEAR:
		ret = qla2xxx_reset_stats(vha->host, req_data->stat_type);
		break;
	default:
		ql_log(ql_log_warn, vha, 0x0000, "Invalid action.\n");
		ret = -EIO;
		break;
	}

	kfree(req_data);

	/* Prepare response */
	rsp_data.status = ret;
	bsg_job->reply_payload.payload_len = sizeof(struct ql_vnd_mng_host_stats_resp);

	bsg_reply->reply_data.vendor_reply.vendor_rsp[0] = EXT_STATUS_OK;
	bsg_reply->reply_payload_rcv_len =
	    sg_copy_from_buffer(bsg_job->reply_payload.sg_list,
		bsg_job->reply_payload.sg_cnt, &rsp_data,
		sizeof(struct ql_vnd_mng_host_stats_resp));

	bsg_reply->result = DID_OK;
	bsg_job_done(bsg_job, bsg_reply->result,
	    bsg_reply->reply_payload_rcv_len);

	return ret;
}
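/*
 * QL_VND_GET_HOST_STATS: size the response from the requested stat
 * bits (plus one entry per target when link-down counts are asked
 * for), then copy the gathered initiator statistics to the reply.
 */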
static int
qla2x00_get_host_stats(struct bsg_job *bsg_job)
{
	scsi_qla_host_t *vha = shost_priv(fc_bsg_to_shost(bsg_job));
	struct fc_bsg_reply *bsg_reply = bsg_job->reply;
	struct ql_vnd_stats_param *req_data;
	struct ql_vnd_host_stats_resp rsp_data;
	u32 req_data_len;
	int ret = 0;
	u64 ini_entry_count = 0;
	u64 entry_count = 0;
	u64 tgt_num = 0;
	u64 tmp_stat_type = 0;
	u64 response_len = 0;
	void *data;

	req_data_len = bsg_job->request_payload.payload_len;

	if (req_data_len != sizeof(struct ql_vnd_stats_param)) {
		ql_log(ql_log_warn, vha, 0x0000, "req_data_len invalid.\n");
		return -EIO;
	}

	req_data = kzalloc(sizeof(*req_data), GFP_KERNEL);
	if (!req_data) {
		ql_log(ql_log_warn, vha, 0x0000, "req_data memory allocation failure.\n");
		return -ENOMEM;
	}

	/* Copy the request buffer in req_data */
	sg_copy_to_buffer(bsg_job->request_payload.sg_list,
	    bsg_job->request_payload.sg_cnt, req_data, req_data_len);

	/* Copy stat type to work on it */
	tmp_stat_type = req_data->stat_type;

	if (tmp_stat_type & QLA2XX_TGT_SHT_LNK_DOWN) {
		/* Num of tgts connected to this host */
		tgt_num = qla2x00_get_num_tgts(vha);
		/* unset BIT_17 */
		tmp_stat_type &= ~(1 << 17);
	}

	/* Total ini stats */
	ini_entry_count = qla2x00_count_set_bits(tmp_stat_type);

	/* Total number of entries */
	entry_count = ini_entry_count + tgt_num;

	response_len = sizeof(struct ql_vnd_host_stats_resp) +
	    (sizeof(struct ql_vnd_stat_entry) * entry_count);

	if (response_len > bsg_job->reply_payload.payload_len) {
		rsp_data.status = EXT_STATUS_BUFFER_TOO_SMALL;
		bsg_reply->reply_data.vendor_reply.vendor_rsp[0] = EXT_STATUS_BUFFER_TOO_SMALL;
		bsg_job->reply_payload.payload_len = sizeof(struct ql_vnd_mng_host_stats_resp);

		bsg_reply->reply_payload_rcv_len =
		    sg_copy_from_buffer(bsg_job->reply_payload.sg_list,
			bsg_job->reply_payload.sg_cnt, &rsp_data,
			sizeof(struct ql_vnd_mng_host_stats_resp));

		bsg_reply->result = DID_OK;
		bsg_job_done(bsg_job, bsg_reply->result,
		    bsg_reply->reply_payload_rcv_len);
		goto host_stat_out;
	}

	data = kzalloc(response_len, GFP_KERNEL);
	if (!data) {
		ret = -ENOMEM;
		goto host_stat_out;
	}

	ret = qla2xxx_get_ini_stats(fc_bsg_to_shost(bsg_job), req_data->stat_type,
	    data, response_len);

	rsp_data.status = EXT_STATUS_OK;
	bsg_reply->reply_data.vendor_reply.vendor_rsp[0] = EXT_STATUS_OK;

	bsg_reply->reply_payload_rcv_len = sg_copy_from_buffer(bsg_job->reply_payload.sg_list,
	    bsg_job->reply_payload.sg_cnt,
	    data, response_len);
	bsg_reply->result = DID_OK;
	bsg_job_done(bsg_job, bsg_reply->result,
	    bsg_reply->reply_payload_rcv_len);

	kfree(data);
host_stat_out:
	kfree(req_data);
	return ret;
}
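/* Map a target number from the application to its fc_rport. */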
static struct fc_rport *
qla2xxx_find_rport(scsi_qla_host_t *vha, uint32_t tgt_num)
{
	fc_port_t *fcport = NULL;

	list_for_each_entry(fcport, &vha->vp_fcports, list) {
		if (fcport->rport->number == tgt_num)
			return fcport->rport;
	}
	return NULL;
}
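/*
 * QL_VND_GET_TGT_STATS: return the statistics entry for a single
 * target, identified by the tgt_id in the request payload.
 */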
static int
qla2x00_get_tgt_stats(struct bsg_job *bsg_job)
{
	scsi_qla_host_t *vha = shost_priv(fc_bsg_to_shost(bsg_job));
	struct fc_bsg_reply *bsg_reply = bsg_job->reply;
	struct ql_vnd_tgt_stats_param *req_data;
	u32 req_data_len;
	int ret = 0;
	u64 response_len = 0;
	struct ql_vnd_tgt_stats_resp *data = NULL;
	struct fc_rport *rport = NULL;

	if (!vha->flags.online) {
		ql_log(ql_log_warn, vha, 0x0000, "Host is not online.\n");
		return -EIO;
	}

	req_data_len = bsg_job->request_payload.payload_len;

	if (req_data_len != sizeof(struct ql_vnd_stat_entry)) {
		ql_log(ql_log_warn, vha, 0x0000, "req_data_len invalid.\n");
		return -EIO;
	}

	req_data = kzalloc(sizeof(*req_data), GFP_KERNEL);
	if (!req_data) {
		ql_log(ql_log_warn, vha, 0x0000, "req_data memory allocation failure.\n");
		return -ENOMEM;
	}

	/* Copy the request buffer in req_data */
	sg_copy_to_buffer(bsg_job->request_payload.sg_list,
	    bsg_job->request_payload.sg_cnt,
	    req_data, req_data_len);

	response_len = sizeof(struct ql_vnd_tgt_stats_resp) +
	    sizeof(struct ql_vnd_stat_entry);

	/* structure + size for one entry */
	data = kzalloc(response_len, GFP_KERNEL);
	if (!data) {
		kfree(req_data);
		return -ENOMEM;
	}

	if (response_len > bsg_job->reply_payload.payload_len) {
		data->status = EXT_STATUS_BUFFER_TOO_SMALL;
		bsg_reply->reply_data.vendor_reply.vendor_rsp[0] = EXT_STATUS_BUFFER_TOO_SMALL;
		bsg_job->reply_payload.payload_len = sizeof(struct ql_vnd_mng_host_stats_resp);

		bsg_reply->reply_payload_rcv_len =
		    sg_copy_from_buffer(bsg_job->reply_payload.sg_list,
			bsg_job->reply_payload.sg_cnt, data,
			sizeof(struct ql_vnd_tgt_stats_resp));

		bsg_reply->result = DID_OK;
		bsg_job_done(bsg_job, bsg_reply->result,
		    bsg_reply->reply_payload_rcv_len);
		goto tgt_stat_out;
	}

	rport = qla2xxx_find_rport(vha, req_data->tgt_id);
	if (!rport) {
		ql_log(ql_log_warn, vha, 0x0000, "target %d not found.\n", req_data->tgt_id);
		ret = EXT_STATUS_INVALID_PARAM;
		data->status = EXT_STATUS_INVALID_PARAM;
		goto reply;
	}

	ret = qla2xxx_get_tgt_stats(fc_bsg_to_shost(bsg_job), req_data->stat_type,
	    rport, (void *)data, response_len);

	bsg_reply->reply_data.vendor_reply.vendor_rsp[0] = EXT_STATUS_OK;
reply:
	bsg_reply->reply_payload_rcv_len =
	    sg_copy_from_buffer(bsg_job->reply_payload.sg_list,
		bsg_job->reply_payload.sg_cnt, data,
		response_len);
	bsg_reply->result = DID_OK;
	bsg_job_done(bsg_job, bsg_reply->result,
	    bsg_reply->reply_payload_rcv_len);

tgt_stat_out:
	kfree(data);
	kfree(req_data);

	return ret;
}
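/*
 * QL_VND_MANAGE_HOST_PORT: enable or disable the host port on behalf
 * of the application.
 */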
static int
qla2x00_manage_host_port(struct bsg_job *bsg_job)
{
	scsi_qla_host_t *vha = shost_priv(fc_bsg_to_shost(bsg_job));
	struct fc_bsg_reply *bsg_reply = bsg_job->reply;
	struct ql_vnd_mng_host_port_param *req_data;
	struct ql_vnd_mng_host_port_resp rsp_data;
	u32 req_data_len;
	int ret = 0;

	req_data_len = bsg_job->request_payload.payload_len;

	if (req_data_len != sizeof(struct ql_vnd_mng_host_port_param)) {
		ql_log(ql_log_warn, vha, 0x0000, "req_data_len invalid.\n");
		return -EIO;
	}

	req_data = kzalloc(sizeof(*req_data), GFP_KERNEL);
	if (!req_data) {
		ql_log(ql_log_warn, vha, 0x0000, "req_data memory allocation failure.\n");
		return -ENOMEM;
	}

	/* Copy the request buffer in req_data */
	sg_copy_to_buffer(bsg_job->request_payload.sg_list,
	    bsg_job->request_payload.sg_cnt, req_data, req_data_len);

	switch (req_data->action) {
	case QLA_ENABLE:
		ret = qla2xxx_enable_port(vha->host);
		break;
	case QLA_DISABLE:
		ret = qla2xxx_disable_port(vha->host);
		break;
	default:
		ql_log(ql_log_warn, vha, 0x0000, "Invalid action.\n");
		ret = -EIO;
		break;
	}

	kfree(req_data);

	/* Prepare response */
	rsp_data.status = ret;
	bsg_reply->reply_data.vendor_reply.vendor_rsp[0] = EXT_STATUS_OK;
	bsg_job->reply_payload.payload_len = sizeof(struct ql_vnd_mng_host_port_resp);

	bsg_reply->reply_payload_rcv_len =
	    sg_copy_from_buffer(bsg_job->reply_payload.sg_list,
		bsg_job->reply_payload.sg_cnt, &rsp_data,
		sizeof(struct ql_vnd_mng_host_port_resp));
	bsg_reply->result = DID_OK;
	bsg_job_done(bsg_job, bsg_reply->result,
	    bsg_reply->reply_payload_rcv_len);
	return ret;
}
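/* Dispatch an FC_BSG_HST_VENDOR request to its vendor-command handler. */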
static int
qla2x00_process_vendor_specific(struct scsi_qla_host *vha, struct bsg_job *bsg_job)
{
	struct fc_bsg_request *bsg_request = bsg_job->request;

	ql_dbg(ql_dbg_edif, vha, 0x911b, "%s FC_BSG_HST_VENDOR cmd[0]=0x%x\n",
	    __func__, bsg_request->rqst_data.h_vendor.vendor_cmd[0]);

	switch (bsg_request->rqst_data.h_vendor.vendor_cmd[0]) {
	case QL_VND_LOOPBACK:
		return qla2x00_process_loopback(bsg_job);

	case QL_VND_A84_RESET:
		return qla84xx_reset(bsg_job);

	case QL_VND_A84_UPDATE_FW:
		return qla84xx_updatefw(bsg_job);

	case QL_VND_A84_MGMT_CMD:
		return qla84xx_mgmt_cmd(bsg_job);

	case QL_VND_IIDMA:
		return qla24xx_iidma(bsg_job);

	case QL_VND_FCP_PRIO_CFG_CMD:
		return qla24xx_proc_fcp_prio_cfg_cmd(bsg_job);

	case QL_VND_READ_FLASH:
		return qla2x00_read_optrom(bsg_job);

	case QL_VND_UPDATE_FLASH:
		return qla2x00_update_optrom(bsg_job);

	case QL_VND_SET_FRU_VERSION:
		return qla2x00_update_fru_versions(bsg_job);

	case QL_VND_READ_FRU_STATUS:
		return qla2x00_read_fru_status(bsg_job);

	case QL_VND_WRITE_FRU_STATUS:
		return qla2x00_write_fru_status(bsg_job);

	case QL_VND_WRITE_I2C:
		return qla2x00_write_i2c(bsg_job);

	case QL_VND_READ_I2C:
		return qla2x00_read_i2c(bsg_job);

	case QL_VND_DIAG_IO_CMD:
		return qla24xx_process_bidir_cmd(bsg_job);

	case QL_VND_FX00_MGMT_CMD:
		return qlafx00_mgmt_cmd(bsg_job);

	case QL_VND_SERDES_OP:
		return qla26xx_serdes_op(bsg_job);

	case QL_VND_SERDES_OP_EX:
		return qla8044_serdes_op(bsg_job);

	case QL_VND_GET_FLASH_UPDATE_CAPS:
		return qla27xx_get_flash_upd_cap(bsg_job);

	case QL_VND_SET_FLASH_UPDATE_CAPS:
		return qla27xx_set_flash_upd_cap(bsg_job);

	case QL_VND_GET_BBCR_DATA:
		return qla27xx_get_bbcr_data(bsg_job);

	case QL_VND_GET_PRIV_STATS:
	case QL_VND_GET_PRIV_STATS_EX:
		return qla2x00_get_priv_stats(bsg_job);

	case QL_VND_DPORT_DIAGNOSTICS:
		return qla2x00_do_dport_diagnostics(bsg_job);

	case QL_VND_EDIF_MGMT:
		return qla_edif_app_mgmt(bsg_job);

	case QL_VND_SS_GET_FLASH_IMAGE_STATUS:
		return qla2x00_get_flash_image_status(bsg_job);

	case QL_VND_MANAGE_HOST_STATS:
		return qla2x00_manage_host_stats(bsg_job);

	case QL_VND_GET_HOST_STATS:
		return qla2x00_get_host_stats(bsg_job);

	case QL_VND_GET_TGT_STATS:
		return qla2x00_get_tgt_stats(bsg_job);

	case QL_VND_MANAGE_HOST_PORT:
		return qla2x00_manage_host_port(bsg_job);

	default:
		return -ENOSYS;
	}
}
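/*
 * Entry point for all BSG requests. Rejects requests while the chip is
 * down, isolated or being removed, then dispatches by message code.
 */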
int
qla24xx_bsg_request(struct bsg_job *bsg_job)
{
	struct fc_bsg_request *bsg_request = bsg_job->request;
	struct fc_bsg_reply *bsg_reply = bsg_job->reply;
	int ret = -EINVAL;
	struct fc_rport *rport;
	struct Scsi_Host *host;
	scsi_qla_host_t *vha;

	/* In case no data transferred. */
	bsg_reply->reply_payload_rcv_len = 0;

	if (bsg_request->msgcode == FC_BSG_RPT_ELS) {
		rport = fc_bsg_to_rport(bsg_job);
		if (!rport)
			return ret;
		host = rport_to_shost(rport);
		vha = shost_priv(host);
	} else {
		host = fc_bsg_to_shost(bsg_job);
		vha = shost_priv(host);
	}

	/* Disable port will bring down the chip, allow enable command */
	if (bsg_request->rqst_data.h_vendor.vendor_cmd[0] == QL_VND_MANAGE_HOST_PORT ||
	    bsg_request->rqst_data.h_vendor.vendor_cmd[0] == QL_VND_GET_HOST_STATS)
		goto skip_chip_chk;

	if (vha->hw->flags.port_isolated) {
		bsg_reply->result = DID_ERROR;
		/* operation not permitted */
		return -EPERM;
	}

	if (qla2x00_chip_is_down(vha)) {
		ql_dbg(ql_dbg_user, vha, 0x709f,
		    "BSG: ISP abort active/needed -- cmd=%d.\n",
		    bsg_request->msgcode);
		SET_DID_STATUS(bsg_reply->result, DID_ERROR);
		return -EBUSY;
	}

	if (test_bit(PFLG_DRIVER_REMOVING, &vha->pci_flags)) {
		SET_DID_STATUS(bsg_reply->result, DID_ERROR);
		return -EIO;
	}

skip_chip_chk:
	ql_dbg(ql_dbg_user + ql_dbg_verbose, vha, 0x7000,
	    "Entered %s msgcode=0x%x. bsg ptr %px\n",
	    __func__, bsg_request->msgcode, bsg_job);

	switch (bsg_request->msgcode) {
	case FC_BSG_RPT_ELS:
	case FC_BSG_HST_ELS_NOLOGIN:
		ret = qla2x00_process_els(bsg_job);
		break;
	case FC_BSG_HST_CT:
		ret = qla2x00_process_ct(bsg_job);
		break;
	case FC_BSG_HST_VENDOR:
		ret = qla2x00_process_vendor_specific(vha, bsg_job);
		break;
	case FC_BSG_HST_ADD_RPORT:
	case FC_BSG_HST_DEL_RPORT:
	case FC_BSG_RPT_CT:
	default:
		ql_log(ql_log_warn, vha, 0x705a, "Unsupported BSG request.\n");
		break;
	}

	ql_dbg(ql_dbg_user + ql_dbg_verbose, vha, 0x7000,
	    "%s done with return %x\n", __func__, ret);

	return ret;
}
static bool qla_bsg_found(struct qla_qpair *qpair, struct bsg_job *bsg_job)
{
	bool found, do_bsg_done;
	struct fc_bsg_reply *bsg_reply = bsg_job->reply;
	scsi_qla_host_t *vha = shost_priv(fc_bsg_to_shost(bsg_job));
	struct qla_hw_data *ha = vha->hw;
	srb_t *sp = NULL;
	int cnt;
	unsigned long flags;
	struct req_que *req;
	int rval;
	DECLARE_COMPLETION_ONSTACK(comp);
	uint32_t ratov_j;

	found = do_bsg_done = false;

	spin_lock_irqsave(qpair->qp_lock_ptr, flags);
	req = qpair->req;

	for (cnt = 1; cnt < req->num_outstanding_cmds; cnt++) {
		sp = req->outstanding_cmds[cnt];
		if (sp &&
		    (sp->type == SRB_CT_CMD ||
		     sp->type == SRB_ELS_CMD_HST ||
		     sp->type == SRB_ELS_CMD_HST_NOLOGIN) &&
		    sp->u.bsg_job == bsg_job) {
			found = true;
			sp->comp = &comp;
			break;
		}
	}
	spin_unlock_irqrestore(qpair->qp_lock_ptr, flags);

	if (!found)
		return false;

	if (ha->flags.eeh_busy) {
		/* skip over abort.  EEH handling will return the bsg. Wait for it */
		ql_dbg(ql_dbg_user, vha, 0x802c,
		    "eeh encounter. bsg %p sp=%p handle=%x \n",
		    bsg_job, sp, sp->handle);
	} else {
		rval = ha->isp_ops->abort_command(sp);
		ql_dbg(ql_dbg_user, vha, 0x802c,
		    "Aborting bsg %p sp=%p handle=%x rval=%x\n",
		    bsg_job, sp, sp->handle, rval);

		switch (rval) {
		case QLA_SUCCESS:
			/* Wait for the command completion. */
			ratov_j = ha->r_a_tov / 10 * 4 * 1000;
			ratov_j = msecs_to_jiffies(ratov_j);

			if (!wait_for_completion_timeout(&comp, ratov_j)) {
				ql_log(ql_log_info, vha, 0x7089,
				    "bsg abort timeout.  bsg=%p sp=%p handle %#x .\n",
				    bsg_job, sp, sp->handle);

				do_bsg_done = true;
			} else {
				/* fw had returned the bsg */
				ql_dbg(ql_dbg_user, vha, 0x708a,
				    "bsg abort success. bsg %p sp=%p handle=%#x\n",
				    bsg_job, sp, sp->handle);
				do_bsg_done = false;
			}
			break;
		default:
			ql_log(ql_log_info, vha, 0x704f,
			    "bsg abort fail.  bsg=%p sp=%p rval=%x.\n",
			    bsg_job, sp, rval);

			do_bsg_done = true;
			break;
		}
	}

	if (!do_bsg_done)
		return true;

	spin_lock_irqsave(qpair->qp_lock_ptr, flags);
	/*
	 * recheck to make sure it's still the same bsg_job due to
	 * qp_lock_ptr was released earlier.
	 */
	if (req->outstanding_cmds[cnt] &&
	    req->outstanding_cmds[cnt]->u.bsg_job != bsg_job) {
		/* fw had returned the bsg */
		spin_unlock_irqrestore(qpair->qp_lock_ptr, flags);
		return true;
	}
	req->outstanding_cmds[cnt] = NULL;
	spin_unlock_irqrestore(qpair->qp_lock_ptr, flags);

	/* ref: INIT */
	sp->comp = NULL;
	kref_put(&sp->cmd_kref, qla2x00_sp_release);
	bsg_reply->result = -ENXIO;
	bsg_reply->reply_payload_rcv_len = 0;

	ql_dbg(ql_dbg_user, vha, 0x7051,
	    "%s bsg_job_done : bsg %p result %#x sp %p.\n",
	    __func__, bsg_job, bsg_reply->result, sp);

	bsg_job_done(bsg_job, bsg_reply->result, bsg_reply->reply_payload_rcv_len);

	return true;
}
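/*
 * BSG timeout handler: locate the timed-out job on the base queue pair
 * or any other qpair and abort it.
 */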
int
qla24xx_bsg_timeout(struct bsg_job *bsg_job)
{
	struct fc_bsg_request *bsg_request = bsg_job->request;
	scsi_qla_host_t *vha = shost_priv(fc_bsg_to_shost(bsg_job));
	struct qla_hw_data *ha = vha->hw;
	int i;
	struct qla_qpair *qpair;

	ql_log(ql_log_info, vha, 0x708b,
	    "%s CMD timeout. bsg ptr %p msgcode %x vendor cmd %x\n",
	    __func__, bsg_job, bsg_request->msgcode,
	    bsg_request->rqst_data.h_vendor.vendor_cmd[0]);

	if (qla2x00_isp_reg_stat(ha)) {
		ql_log(ql_log_info, vha, 0x9007,
		    "PCI/Register disconnect.\n");
		qla_pci_set_eeh_busy(vha);
	}

	if (qla_bsg_found(ha->base_qpair, bsg_job))
		goto done;

	/* find the bsg job from the active list of commands */
	for (i = 0; i < ha->max_qpairs; i++) {
		qpair = vha->hw->queue_pair_map[i];
		if (!qpair)
			continue;
		if (qla_bsg_found(qpair, bsg_job))
			goto done;
	}

	ql_log(ql_log_info, vha, 0x708b, "SRB not found to abort.\n");

done:
	return 0;
}