drivers/scsi/qla2xxx/qla_mbx.c
1 /*
2 * QLogic Fibre Channel HBA Driver
3 * Copyright (c) 2003-2014 QLogic Corporation
4 *
5 * See LICENSE.qla2xxx for copyright and licensing details.
6 */
7 #include "qla_def.h"
8 #include "qla_target.h"
9
10 #include <linux/delay.h>
11 #include <linux/gfp.h>
12
13 static struct mb_cmd_name {
14 uint16_t cmd;
15 const char *str;
16 } mb_str[] = {
17 {MBC_GET_PORT_DATABASE, "GPDB"},
18 {MBC_GET_ID_LIST, "GIDList"},
19 {MBC_GET_LINK_PRIV_STATS, "Stats"},
20 {MBC_GET_RESOURCE_COUNTS, "ResCnt"},
21 };
22
23 static const char *mb_to_str(uint16_t cmd)
24 {
25 int i;
26 struct mb_cmd_name *e;
27
28 for (i = 0; i < ARRAY_SIZE(mb_str); i++) {
29 e = mb_str + i;
30 if (cmd == e->cmd)
31 return e->str;
32 }
33 return "unknown";
34 }
35
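/*
 * Mailbox commands that may still be issued while an ISP abort is
 * pending (used below to exempt ROM/firmware bring-up commands from
 * the early-timeout check in qla2x00_mailbox_command()).
 */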
36 static struct rom_cmd {
37 uint16_t cmd;
38 } rom_cmds[] = {
39 { MBC_LOAD_RAM },
40 { MBC_EXECUTE_FIRMWARE },
41 { MBC_READ_RAM_WORD },
42 { MBC_MAILBOX_REGISTER_TEST },
43 { MBC_VERIFY_CHECKSUM },
44 { MBC_GET_FIRMWARE_VERSION },
45 { MBC_LOAD_RISC_RAM },
46 { MBC_DUMP_RISC_RAM },
47 { MBC_LOAD_RISC_RAM_EXTENDED },
48 { MBC_DUMP_RISC_RAM_EXTENDED },
49 { MBC_WRITE_RAM_WORD_EXTENDED },
50 { MBC_READ_RAM_EXTENDED },
51 { MBC_GET_RESOURCE_COUNTS },
52 { MBC_SET_FIRMWARE_OPTION },
53 { MBC_MID_INITIALIZE_FIRMWARE },
54 { MBC_GET_FIRMWARE_STATE },
55 { MBC_GET_MEM_OFFLOAD_CNTRL_STAT },
56 { MBC_GET_RETRY_COUNT },
57 { MBC_TRACE_CONTROL },
58 { MBC_INITIALIZE_MULTIQ },
59 { MBC_IOCB_COMMAND_A64 },
60 { MBC_GET_ADAPTER_LOOP_ID },
61 { MBC_READ_SFP },
62 { MBC_GET_RNID_PARAMS },
63 { MBC_GET_SET_ZIO_THRESHOLD },
64 };
65
66 static int is_rom_cmd(uint16_t cmd)
67 {
68 int i;
69 struct rom_cmd *wc;
70
71 for (i = 0; i < ARRAY_SIZE(rom_cmds); i++) {
72 wc = rom_cmds + i;
73 if (wc->cmd == cmd)
74 return 1;
75 }
76
77 return 0;
78 }
79
80 /*
81 * qla2x00_mailbox_command
82 * Issue a mailbox command and wait for its completion.
83 *
84 * Input:
85 * ha = adapter block pointer.
86 * mcp = driver internal mbx struct pointer.
87 *
88 * Output:
89 * mb[MAX_MAILBOX_REGISTER_COUNT] = returned mailbox data.
90 *
91 * Returns:
92 * 0 : QLA_SUCCESS (command completed successfully)
93 * 1 : QLA_FUNCTION_FAILED (error encountered)
94 * 6 : QLA_FUNCTION_TIMEOUT (timeout condition encountered)
95 *
96 * Context:
97 * Kernel context.
98 */
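/*
 * Illustrative caller sketch (not part of this file): mailbox users fill
 * an mbx_cmd_t on the stack, set the out_mb/in_mb bitmasks of registers
 * to write and read (MBX_n selects mailbox register n), and call this
 * routine, e.g.:
 *
 *	mbx_cmd_t mc = {};
 *
 *	mc.mb[0] = MBC_GET_FIRMWARE_STATE;	// opcode always in mb[0]
 *	mc.out_mb = MBX_0;			// registers written to FW
 *	mc.in_mb = MBX_1|MBX_0;			// registers read back
 *	mc.tov = MBX_TOV_SECONDS;
 *	mc.flags = 0;
 *	rval = qla2x00_mailbox_command(vha, &mc);
 */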
99 static int
100 qla2x00_mailbox_command(scsi_qla_host_t *vha, mbx_cmd_t *mcp)
101 {
102 int rval, i;
103 unsigned long flags = 0;
104 device_reg_t *reg;
105 uint8_t abort_active;
106 uint8_t io_lock_on;
107 uint16_t command = 0;
108 uint16_t *iptr;
109 uint16_t __iomem *optr;
110 uint32_t cnt;
111 uint32_t mboxes;
112 unsigned long wait_time;
113 struct qla_hw_data *ha = vha->hw;
114 scsi_qla_host_t *base_vha = pci_get_drvdata(ha->pdev);
115 u32 chip_reset;
116
117
118 ql_dbg(ql_dbg_mbx, vha, 0x1000, "Entered %s.\n", __func__);
119
120 if (ha->pdev->error_state == pci_channel_io_perm_failure) {
121 ql_log(ql_log_warn, vha, 0x1001,
122 "PCI channel failed permanently, exiting.\n");
123 return QLA_FUNCTION_TIMEOUT;
124 }
125
126 if (vha->device_flags & DFLG_DEV_FAILED) {
127 ql_log(ql_log_warn, vha, 0x1002,
128 "Device in failed state, exiting.\n");
129 return QLA_FUNCTION_TIMEOUT;
130 }
131
132 /* if PCI error, then avoid mbx processing.*/
133 if (test_bit(PFLG_DISCONNECTED, &base_vha->dpc_flags) &&
134 test_bit(UNLOADING, &base_vha->dpc_flags)) {
135 ql_log(ql_log_warn, vha, 0xd04e,
136 "PCI error, exiting.\n");
137 return QLA_FUNCTION_TIMEOUT;
138 }
139
140 reg = ha->iobase;
141 io_lock_on = base_vha->flags.init_done;
142
143 rval = QLA_SUCCESS;
144 abort_active = test_bit(ABORT_ISP_ACTIVE, &base_vha->dpc_flags);
145 chip_reset = ha->chip_reset;
146
147 if (ha->flags.pci_channel_io_perm_failure) {
148 ql_log(ql_log_warn, vha, 0x1003,
149 "Perm failure on EEH timeout MBX, exiting.\n");
150 return QLA_FUNCTION_TIMEOUT;
151 }
152
153 if (IS_P3P_TYPE(ha) && ha->flags.isp82xx_fw_hung) {
154 /* Setting Link-Down error */
155 mcp->mb[0] = MBS_LINK_DOWN_ERROR;
156 ql_log(ql_log_warn, vha, 0x1004,
157 "FW hung = %d.\n", ha->flags.isp82xx_fw_hung);
158 return QLA_FUNCTION_TIMEOUT;
159 }
160
161 /* check if ISP abort is active and return cmd with timeout */
162 if ((test_bit(ABORT_ISP_ACTIVE, &base_vha->dpc_flags) ||
163 test_bit(ISP_ABORT_RETRY, &base_vha->dpc_flags) ||
164 test_bit(ISP_ABORT_NEEDED, &base_vha->dpc_flags)) &&
165 !is_rom_cmd(mcp->mb[0])) {
166 ql_log(ql_log_info, vha, 0x1005,
167 "Cmd 0x%x aborted with timeout since ISP Abort is pending\n",
168 mcp->mb[0]);
169 return QLA_FUNCTION_TIMEOUT;
170 }
171
172 atomic_inc(&ha->num_pend_mbx_stage1);
173 /*
174 * Wait for active mailbox commands to finish by waiting at most tov
175 * seconds. This is to serialize actual issuing of mailbox cmds during
176 * non ISP abort time.
177 */
178 if (!wait_for_completion_timeout(&ha->mbx_cmd_comp, mcp->tov * HZ)) {
179 /* Timeout occurred. Return error. */
180 ql_log(ql_log_warn, vha, 0xd035,
181 "Cmd access timeout, cmd=0x%x, Exiting.\n",
182 mcp->mb[0]);
183 atomic_dec(&ha->num_pend_mbx_stage1);
184 return QLA_FUNCTION_TIMEOUT;
185 }
186 atomic_dec(&ha->num_pend_mbx_stage1);
187 if (ha->flags.purge_mbox || chip_reset != ha->chip_reset) {
188 rval = QLA_ABORTED;
189 goto premature_exit;
190 }
191
192
193 /* Save mailbox command for debug */
194 ha->mcp = mcp;
195
196 ql_dbg(ql_dbg_mbx, vha, 0x1006,
197 "Prepare to issue mbox cmd=0x%x.\n", mcp->mb[0]);
198
199 spin_lock_irqsave(&ha->hardware_lock, flags);
200
201 if (ha->flags.purge_mbox || chip_reset != ha->chip_reset ||
202 ha->flags.mbox_busy) {
203 rval = QLA_ABORTED;
204 spin_unlock_irqrestore(&ha->hardware_lock, flags);
205 goto premature_exit;
206 }
207 ha->flags.mbox_busy = 1;
208
209 /* Load mailbox registers. */
210 if (IS_P3P_TYPE(ha))
211 optr = (uint16_t __iomem *)&reg->isp82.mailbox_in[0];
212 else if (IS_FWI2_CAPABLE(ha) && !(IS_P3P_TYPE(ha)))
213 optr = (uint16_t __iomem *)&reg->isp24.mailbox0;
214 else
215 optr = (uint16_t __iomem *)MAILBOX_REG(ha, &reg->isp, 0);
216
217 iptr = mcp->mb;
218 command = mcp->mb[0];
219 mboxes = mcp->out_mb;
220
221 ql_dbg(ql_dbg_mbx, vha, 0x1111,
222 "Mailbox registers (OUT):\n");
223 for (cnt = 0; cnt < ha->mbx_count; cnt++) {
224 if (IS_QLA2200(ha) && cnt == 8)
225 optr =
226 (uint16_t __iomem *)MAILBOX_REG(ha, &reg->isp, 8);
227 if (mboxes & BIT_0) {
228 ql_dbg(ql_dbg_mbx, vha, 0x1112,
229 "mbox[%d]<-0x%04x\n", cnt, *iptr);
230 WRT_REG_WORD(optr, *iptr);
231 }
232
233 mboxes >>= 1;
234 optr++;
235 iptr++;
236 }
237
238 ql_dbg(ql_dbg_mbx + ql_dbg_buffer, vha, 0x1117,
239 "I/O Address = %p.\n", optr);
240
241 /* Issue set host interrupt command to send cmd out. */
242 ha->flags.mbox_int = 0;
243 clear_bit(MBX_INTERRUPT, &ha->mbx_cmd_flags);
244
245 /* Unlock mbx registers and wait for interrupt */
246 ql_dbg(ql_dbg_mbx, vha, 0x100f,
247 "Going to unlock irq & waiting for interrupts. "
248 "jiffies=%lx.\n", jiffies);
249
250 /* Wait for mbx cmd completion until timeout */
251 atomic_inc(&ha->num_pend_mbx_stage2);
252 if ((!abort_active && io_lock_on) || IS_NOPOLLING_TYPE(ha)) {
253 set_bit(MBX_INTR_WAIT, &ha->mbx_cmd_flags);
254
255 if (IS_P3P_TYPE(ha))
256 WRT_REG_DWORD(&reg->isp82.hint, HINT_MBX_INT_PENDING);
257 else if (IS_FWI2_CAPABLE(ha))
258 WRT_REG_DWORD(&reg->isp24.hccr, HCCRX_SET_HOST_INT);
259 else
260 WRT_REG_WORD(&reg->isp.hccr, HCCR_SET_HOST_INT);
261 spin_unlock_irqrestore(&ha->hardware_lock, flags);
262
263 wait_time = jiffies;
264 atomic_inc(&ha->num_pend_mbx_stage3);
265 if (!wait_for_completion_timeout(&ha->mbx_intr_comp,
266 mcp->tov * HZ)) {
267 if (chip_reset != ha->chip_reset) {
268 spin_lock_irqsave(&ha->hardware_lock, flags);
269 ha->flags.mbox_busy = 0;
270 spin_unlock_irqrestore(&ha->hardware_lock,
271 flags);
272 atomic_dec(&ha->num_pend_mbx_stage2);
273 atomic_dec(&ha->num_pend_mbx_stage3);
274 rval = QLA_ABORTED;
275 goto premature_exit;
276 }
277 ql_dbg(ql_dbg_mbx, vha, 0x117a,
278 "cmd=%x Timeout.\n", command);
279 spin_lock_irqsave(&ha->hardware_lock, flags);
280 clear_bit(MBX_INTR_WAIT, &ha->mbx_cmd_flags);
281 spin_unlock_irqrestore(&ha->hardware_lock, flags);
282
283 } else if (ha->flags.purge_mbox ||
284 chip_reset != ha->chip_reset) {
285 spin_lock_irqsave(&ha->hardware_lock, flags);
286 ha->flags.mbox_busy = 0;
287 spin_unlock_irqrestore(&ha->hardware_lock, flags);
288 atomic_dec(&ha->num_pend_mbx_stage2);
289 atomic_dec(&ha->num_pend_mbx_stage3);
290 rval = QLA_ABORTED;
291 goto premature_exit;
292 }
293 atomic_dec(&ha->num_pend_mbx_stage3);
294
295 if (time_after(jiffies, wait_time + 5 * HZ))
296 ql_log(ql_log_warn, vha, 0x1015, "cmd=0x%x, waited %d msecs\n",
297 command, jiffies_to_msecs(jiffies - wait_time));
298 } else {
299 ql_dbg(ql_dbg_mbx, vha, 0x1011,
300 "Cmd=%x Polling Mode.\n", command);
301
302 if (IS_P3P_TYPE(ha)) {
303 if (RD_REG_DWORD(&reg->isp82.hint) &
304 HINT_MBX_INT_PENDING) {
305 ha->flags.mbox_busy = 0;
306 spin_unlock_irqrestore(&ha->hardware_lock,
307 flags);
308 atomic_dec(&ha->num_pend_mbx_stage2);
309 ql_dbg(ql_dbg_mbx, vha, 0x1012,
310 "Pending mailbox timeout, exiting.\n");
311 rval = QLA_FUNCTION_TIMEOUT;
312 goto premature_exit;
313 }
314 WRT_REG_DWORD(&reg->isp82.hint, HINT_MBX_INT_PENDING);
315 } else if (IS_FWI2_CAPABLE(ha))
316 WRT_REG_DWORD(&reg->isp24.hccr, HCCRX_SET_HOST_INT);
317 else
318 WRT_REG_WORD(&reg->isp.hccr, HCCR_SET_HOST_INT);
319 spin_unlock_irqrestore(&ha->hardware_lock, flags);
320
321 wait_time = jiffies + mcp->tov * HZ; /* wait at most tov secs */
322 while (!ha->flags.mbox_int) {
323 if (ha->flags.purge_mbox ||
324 chip_reset != ha->chip_reset) {
325 spin_lock_irqsave(&ha->hardware_lock, flags);
326 ha->flags.mbox_busy = 0;
327 spin_unlock_irqrestore(&ha->hardware_lock,
328 flags);
329 atomic_dec(&ha->num_pend_mbx_stage2);
330 rval = QLA_ABORTED;
331 goto premature_exit;
332 }
333
334 if (time_after(jiffies, wait_time))
335 break;
336
337 /*
338 * Check if it's UNLOADING, because we cannot poll in
339 * this case, or else a NULL pointer dereference
340 * is triggered.
341 */
342 if (unlikely(test_bit(UNLOADING, &base_vha->dpc_flags)))
343 return QLA_FUNCTION_TIMEOUT;
344
345 /* Check for pending interrupts. */
346 qla2x00_poll(ha->rsp_q_map[0]);
347
348 if (!ha->flags.mbox_int &&
349 !(IS_QLA2200(ha) &&
350 command == MBC_LOAD_RISC_RAM_EXTENDED))
351 msleep(10);
352 } /* while */
353 ql_dbg(ql_dbg_mbx, vha, 0x1013,
354 "Waited %d sec.\n",
355 (uint)((jiffies - (wait_time - (mcp->tov * HZ)))/HZ));
356 }
357 atomic_dec(&ha->num_pend_mbx_stage2);
358
359 /* Check whether we timed out */
360 if (ha->flags.mbox_int) {
361 uint16_t *iptr2;
362
363 ql_dbg(ql_dbg_mbx, vha, 0x1014,
364 "Cmd=%x completed.\n", command);
365
366 /* Got interrupt. Clear the flag. */
367 ha->flags.mbox_int = 0;
368 clear_bit(MBX_INTERRUPT, &ha->mbx_cmd_flags);
369
370 if (IS_P3P_TYPE(ha) && ha->flags.isp82xx_fw_hung) {
371 spin_lock_irqsave(&ha->hardware_lock, flags);
372 ha->flags.mbox_busy = 0;
373 spin_unlock_irqrestore(&ha->hardware_lock, flags);
374
375 /* Setting Link-Down error */
376 mcp->mb[0] = MBS_LINK_DOWN_ERROR;
377 ha->mcp = NULL;
378 rval = QLA_FUNCTION_FAILED;
379 ql_log(ql_log_warn, vha, 0xd048,
380 "FW hung = %d.\n", ha->flags.isp82xx_fw_hung);
381 goto premature_exit;
382 }
383
384 if (ha->mailbox_out[0] != MBS_COMMAND_COMPLETE) {
385 ql_dbg(ql_dbg_mbx, vha, 0x11ff,
386 "mb_out[0] = %#x <> %#x\n", ha->mailbox_out[0],
387 MBS_COMMAND_COMPLETE);
388 rval = QLA_FUNCTION_FAILED;
389 }
390
391 /* Load return mailbox registers. */
392 iptr2 = mcp->mb;
393 iptr = (uint16_t *)&ha->mailbox_out[0];
394 mboxes = mcp->in_mb;
395
396 ql_dbg(ql_dbg_mbx, vha, 0x1113,
397 "Mailbox registers (IN):\n");
398 for (cnt = 0; cnt < ha->mbx_count; cnt++) {
399 if (mboxes & BIT_0) {
400 *iptr2 = *iptr;
401 ql_dbg(ql_dbg_mbx, vha, 0x1114,
402 "mbox[%d]->0x%04x\n", cnt, *iptr2);
403 }
404
405 mboxes >>= 1;
406 iptr2++;
407 iptr++;
408 }
409 } else {
410
411 uint16_t mb[8];
412 uint32_t ictrl, host_status, hccr;
413 uint16_t w;
414
415 if (IS_FWI2_CAPABLE(ha)) {
416 mb[0] = RD_REG_WORD(&reg->isp24.mailbox0);
417 mb[1] = RD_REG_WORD(&reg->isp24.mailbox1);
418 mb[2] = RD_REG_WORD(&reg->isp24.mailbox2);
419 mb[3] = RD_REG_WORD(&reg->isp24.mailbox3);
420 mb[7] = RD_REG_WORD(&reg->isp24.mailbox7);
421 ictrl = RD_REG_DWORD(&reg->isp24.ictrl);
422 host_status = RD_REG_DWORD(&reg->isp24.host_status);
423 hccr = RD_REG_DWORD(&reg->isp24.hccr);
424
425 ql_log(ql_log_warn, vha, 0xd04c,
426 "MBX Command timeout for cmd %x, iocontrol=%x jiffies=%lx "
427 "mb[0-3]=[0x%x 0x%x 0x%x 0x%x] mb7 0x%x host_status 0x%x hccr 0x%x\n",
428 command, ictrl, jiffies, mb[0], mb[1], mb[2], mb[3],
429 mb[7], host_status, hccr);
430
431 } else {
432 mb[0] = RD_MAILBOX_REG(ha, &reg->isp, 0);
433 ictrl = RD_REG_WORD(&reg->isp.ictrl);
434 ql_dbg(ql_dbg_mbx + ql_dbg_buffer, vha, 0x1119,
435 "MBX Command timeout for cmd %x, iocontrol=%x jiffies=%lx "
436 "mb[0]=0x%x\n", command, ictrl, jiffies, mb[0]);
437 }
438 ql_dump_regs(ql_dbg_mbx + ql_dbg_buffer, vha, 0x1019);
439
440 /* Capture FW dump only if the PCI device is active */
441 if (!pci_channel_offline(vha->hw->pdev)) {
442 pci_read_config_word(ha->pdev, PCI_VENDOR_ID, &w);
443 if (w == 0xffff || ictrl == 0xffffffff ||
444 (chip_reset != ha->chip_reset)) {
445 /* This is a special case: if the driver is being
446 * unloaded and the PCI device has gone into a bad
447 * state due to a PCI error condition, only the PCI
448 * ERR flag will be set. Exit prematurely in that
449 * case.
450 */
451 spin_lock_irqsave(&ha->hardware_lock, flags);
452 ha->flags.mbox_busy = 0;
453 spin_unlock_irqrestore(&ha->hardware_lock,
454 flags);
455 rval = QLA_FUNCTION_TIMEOUT;
456 goto premature_exit;
457 }
458
459 /* Attempt to capture a firmware dump for further
460 * analysis of the current firmware state. We do not
461 * need to do this if we are intentionally generating
462 * a dump.
463 */
464 if (mcp->mb[0] != MBC_GEN_SYSTEM_ERROR)
465 ha->isp_ops->fw_dump(vha, 0);
466 rval = QLA_FUNCTION_TIMEOUT;
467 }
468 }
469 spin_lock_irqsave(&ha->hardware_lock, flags);
470 ha->flags.mbox_busy = 0;
471 spin_unlock_irqrestore(&ha->hardware_lock, flags);
472
473 /* Clean up */
474 ha->mcp = NULL;
475
476 if ((abort_active || !io_lock_on) && !IS_NOPOLLING_TYPE(ha)) {
477 ql_dbg(ql_dbg_mbx, vha, 0x101a,
478 "Checking for additional resp interrupt.\n");
479
480 /* polling mode for non isp_abort commands. */
481 qla2x00_poll(ha->rsp_q_map[0]);
482 }
483
484 if (rval == QLA_FUNCTION_TIMEOUT &&
485 mcp->mb[0] != MBC_GEN_SYSTEM_ERROR) {
486 if (!io_lock_on || (mcp->flags & IOCTL_CMD) ||
487 ha->flags.eeh_busy) {
488 /* Not in DPC context; schedule the abort for the DPC thread. */
489 ql_dbg(ql_dbg_mbx, vha, 0x101b,
490 "Timeout, schedule isp_abort_needed.\n");
491
492 if (!test_bit(ISP_ABORT_NEEDED, &vha->dpc_flags) &&
493 !test_bit(ABORT_ISP_ACTIVE, &vha->dpc_flags) &&
494 !test_bit(ISP_ABORT_RETRY, &vha->dpc_flags)) {
495 if (IS_QLA82XX(ha)) {
496 ql_dbg(ql_dbg_mbx, vha, 0x112a,
497 "disabling pause transmit on port "
498 "0 & 1.\n");
499 qla82xx_wr_32(ha,
500 QLA82XX_CRB_NIU + 0x98,
501 CRB_NIU_XG_PAUSE_CTL_P0|
502 CRB_NIU_XG_PAUSE_CTL_P1);
503 }
504 ql_log(ql_log_info, base_vha, 0x101c,
505 "Mailbox cmd timeout occurred, cmd=0x%x, "
506 "mb[0]=0x%x, eeh_busy=0x%x. Scheduling ISP "
507 "abort.\n", command, mcp->mb[0],
508 ha->flags.eeh_busy);
509 set_bit(ISP_ABORT_NEEDED, &vha->dpc_flags);
510 qla2xxx_wake_dpc(vha);
511 }
512 } else if (current == ha->dpc_thread) {
513 /* call abort directly since we are in the DPC thread */
514 ql_dbg(ql_dbg_mbx, vha, 0x101d,
515 "Timeout, calling abort_isp.\n");
516
517 if (!test_bit(ISP_ABORT_NEEDED, &vha->dpc_flags) &&
518 !test_bit(ABORT_ISP_ACTIVE, &vha->dpc_flags) &&
519 !test_bit(ISP_ABORT_RETRY, &vha->dpc_flags)) {
520 if (IS_QLA82XX(ha)) {
521 ql_dbg(ql_dbg_mbx, vha, 0x112b,
522 "disabling pause transmit on port "
523 "0 & 1.\n");
524 qla82xx_wr_32(ha,
525 QLA82XX_CRB_NIU + 0x98,
526 CRB_NIU_XG_PAUSE_CTL_P0|
527 CRB_NIU_XG_PAUSE_CTL_P1);
528 }
529 ql_log(ql_log_info, base_vha, 0x101e,
530 "Mailbox cmd timeout occurred, cmd=0x%x, "
531 "mb[0]=0x%x. Scheduling ISP abort ",
532 command, mcp->mb[0]);
533 set_bit(ABORT_ISP_ACTIVE, &vha->dpc_flags);
534 clear_bit(ISP_ABORT_NEEDED, &vha->dpc_flags);
535 /* Allow next mbx cmd to come in. */
536 complete(&ha->mbx_cmd_comp);
537 if (ha->isp_ops->abort_isp(vha)) {
538 /* Failed. retry later. */
539 set_bit(ISP_ABORT_NEEDED,
540 &vha->dpc_flags);
541 }
542 clear_bit(ABORT_ISP_ACTIVE, &vha->dpc_flags);
543 ql_dbg(ql_dbg_mbx, vha, 0x101f,
544 "Finished abort_isp.\n");
545 goto mbx_done;
546 }
547 }
548 }
549
550 premature_exit:
551 /* Allow next mbx cmd to come in. */
552 complete(&ha->mbx_cmd_comp);
553
554 mbx_done:
555 if (rval == QLA_ABORTED) {
556 ql_log(ql_log_info, vha, 0xd035,
557 "Chip Reset in progress. Purging Mbox cmd=0x%x.\n",
558 mcp->mb[0]);
559 } else if (rval) {
560 if (ql2xextended_error_logging & (ql_dbg_disc|ql_dbg_mbx)) {
561 pr_warn("%s [%s]-%04x:%ld: **** Failed=%x", QL_MSGHDR,
562 dev_name(&ha->pdev->dev), 0x1020+0x800,
563 vha->host_no, rval);
564 mboxes = mcp->in_mb;
565 cnt = 4;
566 for (i = 0; i < ha->mbx_count && cnt; i++, mboxes >>= 1)
567 if (mboxes & BIT_0) {
568 printk(" mb[%u]=%x", i, mcp->mb[i]);
569 cnt--;
570 }
571 pr_warn(" cmd=%x ****\n", command);
572 }
573 if (IS_FWI2_CAPABLE(ha) && !(IS_P3P_TYPE(ha))) {
574 ql_dbg(ql_dbg_mbx, vha, 0x1198,
575 "host_status=%#x intr_ctrl=%#x intr_status=%#x\n",
576 RD_REG_DWORD(&reg->isp24.host_status),
577 RD_REG_DWORD(&reg->isp24.ictrl),
578 RD_REG_DWORD(&reg->isp24.istatus));
579 } else {
580 ql_dbg(ql_dbg_mbx, vha, 0x1206,
581 "ctrl_status=%#x ictrl=%#x istatus=%#x\n",
582 RD_REG_WORD(&reg->isp.ctrl_status),
583 RD_REG_WORD(&reg->isp.ictrl),
584 RD_REG_WORD(&reg->isp.istatus));
585 }
586 } else {
587 ql_dbg(ql_dbg_mbx, base_vha, 0x1021, "Done %s.\n", __func__);
588 }
589
590 return rval;
591 }
592
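/*
 * qla2x00_load_ram
 *	Load a firmware segment into RISC RAM using the load RISC RAM
 *	(or extended load RISC RAM) mailbox command.
 *
 * Input:
 *	vha = adapter block pointer.
 *	req_dma = DMA address of the buffer holding the segment.
 *	risc_addr = destination RISC RAM address.
 *	risc_code_size = length of the firmware segment.
 *
 * Returns:
 *	qla2x00 local function return status code.
 *
 * Context:
 *	Kernel context.
 */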
593 int
594 qla2x00_load_ram(scsi_qla_host_t *vha, dma_addr_t req_dma, uint32_t risc_addr,
595 uint32_t risc_code_size)
596 {
597 int rval;
598 struct qla_hw_data *ha = vha->hw;
599 mbx_cmd_t mc;
600 mbx_cmd_t *mcp = &mc;
601
602 ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x1022,
603 "Entered %s.\n", __func__);
604
605 if (MSW(risc_addr) || IS_FWI2_CAPABLE(ha)) {
606 mcp->mb[0] = MBC_LOAD_RISC_RAM_EXTENDED;
607 mcp->mb[8] = MSW(risc_addr);
608 mcp->out_mb = MBX_8|MBX_0;
609 } else {
610 mcp->mb[0] = MBC_LOAD_RISC_RAM;
611 mcp->out_mb = MBX_0;
612 }
613 mcp->mb[1] = LSW(risc_addr);
614 mcp->mb[2] = MSW(req_dma);
615 mcp->mb[3] = LSW(req_dma);
616 mcp->mb[6] = MSW(MSD(req_dma));
617 mcp->mb[7] = LSW(MSD(req_dma));
618 mcp->out_mb |= MBX_7|MBX_6|MBX_3|MBX_2|MBX_1;
619 if (IS_FWI2_CAPABLE(ha)) {
620 mcp->mb[4] = MSW(risc_code_size);
621 mcp->mb[5] = LSW(risc_code_size);
622 mcp->out_mb |= MBX_5|MBX_4;
623 } else {
624 mcp->mb[4] = LSW(risc_code_size);
625 mcp->out_mb |= MBX_4;
626 }
627
628 mcp->in_mb = MBX_1|MBX_0;
629 mcp->tov = MBX_TOV_SECONDS;
630 mcp->flags = 0;
631 rval = qla2x00_mailbox_command(vha, mcp);
632
633 if (rval != QLA_SUCCESS) {
634 ql_dbg(ql_dbg_mbx, vha, 0x1023,
635 "Failed=%x mb[0]=%x mb[1]=%x.\n",
636 rval, mcp->mb[0], mcp->mb[1]);
637 } else {
638 ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x1024,
639 "Done %s.\n", __func__);
640 }
641
642 return rval;
643 }
644
645 #define NVME_ENABLE_FLAG BIT_3
646
647 /*
648 * qla2x00_execute_fw
649 * Start adapter firmware.
650 *
651 * Input:
652 * ha = adapter block pointer.
653 * TARGET_QUEUE_LOCK must be released.
654 * ADAPTER_STATE_LOCK must be released.
655 *
656 * Returns:
657 * qla2x00 local function return status code.
658 *
659 * Context:
660 * Kernel context.
661 */
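/*
 * Note: on FWI2-capable (ISP24xx and later) adapters this routine also
 * passes feature selections to the firmware through mb[4]/mb[11]:
 * long-range/BPM options, FC-NVMe enable, minimum supported link speed,
 * extended logins and exchange offload, as assembled in the code below.
 */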
662 int
663 qla2x00_execute_fw(scsi_qla_host_t *vha, uint32_t risc_addr)
664 {
665 int rval;
666 struct qla_hw_data *ha = vha->hw;
667 mbx_cmd_t mc;
668 mbx_cmd_t *mcp = &mc;
669 u8 semaphore = 0;
670 #define EXE_FW_FORCE_SEMAPHORE BIT_7
671 u8 retry = 3;
672
673 ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x1025,
674 "Entered %s.\n", __func__);
675
676 again:
677 mcp->mb[0] = MBC_EXECUTE_FIRMWARE;
678 mcp->out_mb = MBX_0;
679 mcp->in_mb = MBX_0;
680 if (IS_FWI2_CAPABLE(ha)) {
681 mcp->mb[1] = MSW(risc_addr);
682 mcp->mb[2] = LSW(risc_addr);
683 mcp->mb[3] = 0;
684 mcp->mb[4] = 0;
685 mcp->mb[11] = 0;
686
687 /* Enable BPM? */
688 if (ha->flags.lr_detected) {
689 mcp->mb[4] = BIT_0;
690 if (IS_BPM_RANGE_CAPABLE(ha))
691 mcp->mb[4] |=
692 ha->lr_distance << LR_DIST_FW_POS;
693 }
694
695 if (ql2xnvmeenable && (IS_QLA27XX(ha) || IS_QLA28XX(ha)))
696 mcp->mb[4] |= NVME_ENABLE_FLAG;
697
698 if (IS_QLA83XX(ha) || IS_QLA27XX(ha) || IS_QLA28XX(ha)) {
699 struct nvram_81xx *nv = ha->nvram;
700 /* set minimum speed if specified in nvram */
701 if (nv->min_supported_speed >= 2 &&
702 nv->min_supported_speed <= 5) {
703 mcp->mb[4] |= BIT_4;
704 mcp->mb[11] |= nv->min_supported_speed & 0xF;
705 mcp->out_mb |= MBX_11;
706 mcp->in_mb |= BIT_5;
707 vha->min_supported_speed =
708 nv->min_supported_speed;
709 }
710 }
711
712 if (ha->flags.exlogins_enabled)
713 mcp->mb[4] |= ENABLE_EXTENDED_LOGIN;
714
715 if (ha->flags.exchoffld_enabled)
716 mcp->mb[4] |= ENABLE_EXCHANGE_OFFLD;
717
718 if (semaphore)
719 mcp->mb[11] |= EXE_FW_FORCE_SEMAPHORE;
720
721 mcp->out_mb |= MBX_4 | MBX_3 | MBX_2 | MBX_1 | MBX_11;
722 mcp->in_mb |= MBX_3 | MBX_2 | MBX_1;
723 } else {
724 mcp->mb[1] = LSW(risc_addr);
725 mcp->out_mb |= MBX_1;
726 if (IS_QLA2322(ha) || IS_QLA6322(ha)) {
727 mcp->mb[2] = 0;
728 mcp->out_mb |= MBX_2;
729 }
730 }
731
732 mcp->tov = MBX_TOV_SECONDS;
733 mcp->flags = 0;
734 rval = qla2x00_mailbox_command(vha, mcp);
735
736 if (rval != QLA_SUCCESS) {
737 if (IS_QLA28XX(ha) && rval == QLA_COMMAND_ERROR &&
738 mcp->mb[1] == 0x27 && retry) {
739 semaphore = 1;
740 retry--;
741 ql_dbg(ql_dbg_async, vha, 0x1026,
742 "Exe FW: force semaphore.\n");
743 goto again;
744 }
745
746 ql_dbg(ql_dbg_mbx, vha, 0x1026,
747 "Failed=%x mb[0]=%x.\n", rval, mcp->mb[0]);
748 return rval;
749 }
750
751 if (!IS_FWI2_CAPABLE(ha))
752 goto done;
753
754 ha->fw_ability_mask = mcp->mb[3] << 16 | mcp->mb[2];
755 ql_dbg(ql_dbg_mbx, vha, 0x119a,
756 "fw_ability_mask=%x.\n", ha->fw_ability_mask);
757 ql_dbg(ql_dbg_mbx, vha, 0x1027, "exchanges=%x.\n", mcp->mb[1]);
758 if (IS_QLA27XX(ha) || IS_QLA28XX(ha)) {
759 ha->max_supported_speed = mcp->mb[2] & (BIT_0|BIT_1);
760 ql_dbg(ql_dbg_mbx, vha, 0x119b, "max_supported_speed=%s.\n",
761 ha->max_supported_speed == 0 ? "16Gps" :
762 ha->max_supported_speed == 1 ? "32Gps" :
763 ha->max_supported_speed == 2 ? "64Gps" : "unknown");
764 if (vha->min_supported_speed) {
765 ha->min_supported_speed = mcp->mb[5] &
766 (BIT_0 | BIT_1 | BIT_2);
767 ql_dbg(ql_dbg_mbx, vha, 0x119c,
768 "min_supported_speed=%s.\n",
769 ha->min_supported_speed == 6 ? "64Gps" :
770 ha->min_supported_speed == 5 ? "32Gps" :
771 ha->min_supported_speed == 4 ? "16Gps" :
772 ha->min_supported_speed == 3 ? "8Gps" :
773 ha->min_supported_speed == 2 ? "4Gps" : "unknown");
774 }
775 }
776
777 done:
778 ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x1028,
779 "Done %s.\n", __func__);
780
781 return rval;
782 }
783
784 /*
785 * qla_get_exlogin_status
786 * Get extended login status
787 * uses the memory offload control/status Mailbox
788 *
789 * Input:
790 * ha: adapter state pointer.
791 * buf_sz, ex_logins_cnt: pointers for the returned values.
792 *
793 * Returns:
794 * qla2x00 local function status
795 *
796 * Context:
797 * Kernel context.
798 */
799 #define FETCH_XLOGINS_STAT 0x8
800 int
801 qla_get_exlogin_status(scsi_qla_host_t *vha, uint16_t *buf_sz,
802 uint16_t *ex_logins_cnt)
803 {
804 int rval;
805 mbx_cmd_t mc;
806 mbx_cmd_t *mcp = &mc;
807
808 ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x118f,
809 "Entered %s\n", __func__);
810
811 memset(mcp->mb, 0 , sizeof(mcp->mb));
812 mcp->mb[0] = MBC_GET_MEM_OFFLOAD_CNTRL_STAT;
813 mcp->mb[1] = FETCH_XLOGINS_STAT;
814 mcp->out_mb = MBX_1|MBX_0;
815 mcp->in_mb = MBX_10|MBX_4|MBX_0;
816 mcp->tov = MBX_TOV_SECONDS;
817 mcp->flags = 0;
818
819 rval = qla2x00_mailbox_command(vha, mcp);
820 if (rval != QLA_SUCCESS) {
821 ql_dbg(ql_dbg_mbx, vha, 0x1115, "Failed=%x.\n", rval);
822 } else {
823 *buf_sz = mcp->mb[4];
824 *ex_logins_cnt = mcp->mb[10];
825
826 ql_log(ql_log_info, vha, 0x1190,
827 "buffer size 0x%x, exchange login count=%d\n",
828 mcp->mb[4], mcp->mb[10]);
829
830 ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x1116,
831 "Done %s.\n", __func__);
832 }
833
834 return rval;
835 }
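/*
 * Illustrative sequence (assumed, simplified driver flow; buffer names
 * are placeholders): query the required size, allocate a DMA buffer and
 * hand it to the firmware before the init control block is sent:
 *
 *	qla_get_exlogin_status(vha, &buf_sz, &cnt);
 *	// allocate roughly buf_sz * cnt bytes into exlogin_buf/exlogin_buf_dma
 *	qla_set_exlogin_mem_cfg(vha, exlogin_buf_dma);
 */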
836
837 /*
838 * qla_set_exlogin_mem_cfg
839 * set extended login memory configuration
840 * Mbx needs to be issued before init_cb is set
841 *
842 * Input:
843 * ha: adapter state pointer.
844 * buffer: buffer pointer
845 * phys_addr: physical address of buffer
846 * size: size of buffer
847 * TARGET_QUEUE_LOCK must be released
848 * ADAPTER_STATE_LOCK must be released
849 *
850 * Returns:
851 * qla2x00 local function status code.
852 *
853 * Context:
854 * Kernel context.
855 */
856 #define CONFIG_XLOGINS_MEM 0x3
857 int
858 qla_set_exlogin_mem_cfg(scsi_qla_host_t *vha, dma_addr_t phys_addr)
859 {
860 int rval;
861 mbx_cmd_t mc;
862 mbx_cmd_t *mcp = &mc;
863 struct qla_hw_data *ha = vha->hw;
864
865 ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x111a,
866 "Entered %s.\n", __func__);
867
868 memset(mcp->mb, 0 , sizeof(mcp->mb));
869 mcp->mb[0] = MBC_GET_MEM_OFFLOAD_CNTRL_STAT;
870 mcp->mb[1] = CONFIG_XLOGINS_MEM;
871 mcp->mb[2] = MSW(phys_addr);
872 mcp->mb[3] = LSW(phys_addr);
873 mcp->mb[6] = MSW(MSD(phys_addr));
874 mcp->mb[7] = LSW(MSD(phys_addr));
875 mcp->mb[8] = MSW(ha->exlogin_size);
876 mcp->mb[9] = LSW(ha->exlogin_size);
877 mcp->out_mb = MBX_9|MBX_8|MBX_7|MBX_6|MBX_3|MBX_2|MBX_1|MBX_0;
878 mcp->in_mb = MBX_11|MBX_0;
879 mcp->tov = MBX_TOV_SECONDS;
880 mcp->flags = 0;
881 rval = qla2x00_mailbox_command(vha, mcp);
882 if (rval != QLA_SUCCESS) {
883 /*EMPTY*/
884 ql_dbg(ql_dbg_mbx, vha, 0x111b, "Failed=%x.\n", rval);
885 } else {
886 ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x118c,
887 "Done %s.\n", __func__);
888 }
889
890 return rval;
891 }
892
893 /*
894 * qla_get_exchoffld_status
895 * Get exchange offload status
896 * uses the memory offload control/status Mailbox
897 *
898 * Input:
899 * ha: adapter state pointer.
900 * buf_sz, ex_logins_cnt: pointers for the returned values.
901 *
902 * Returns:
903 * qla2x00 local function status
904 *
905 * Context:
906 * Kernel context.
907 */
908 #define FETCH_XCHOFFLD_STAT 0x2
909 int
910 qla_get_exchoffld_status(scsi_qla_host_t *vha, uint16_t *buf_sz,
911 uint16_t *ex_logins_cnt)
912 {
913 int rval;
914 mbx_cmd_t mc;
915 mbx_cmd_t *mcp = &mc;
916
917 ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x1019,
918 "Entered %s\n", __func__);
919
920 memset(mcp->mb, 0 , sizeof(mcp->mb));
921 mcp->mb[0] = MBC_GET_MEM_OFFLOAD_CNTRL_STAT;
922 mcp->mb[1] = FETCH_XCHOFFLD_STAT;
923 mcp->out_mb = MBX_1|MBX_0;
924 mcp->in_mb = MBX_10|MBX_4|MBX_0;
925 mcp->tov = MBX_TOV_SECONDS;
926 mcp->flags = 0;
927
928 rval = qla2x00_mailbox_command(vha, mcp);
929 if (rval != QLA_SUCCESS) {
930 ql_dbg(ql_dbg_mbx, vha, 0x1155, "Failed=%x.\n", rval);
931 } else {
932 *buf_sz = mcp->mb[4];
933 *ex_logins_cnt = mcp->mb[10];
934
935 ql_log(ql_log_info, vha, 0x118e,
936 "buffer size 0x%x, exchange offload count=%d\n",
937 mcp->mb[4], mcp->mb[10]);
938
939 ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x1156,
940 "Done %s.\n", __func__);
941 }
942
943 return rval;
944 }
945
946 /*
947 * qla_set_exchoffld_mem_cfg
948 * Set exchange offload memory configuration
949 * Mbx needs to be issued before init_cb is set
950 *
951 * Input:
952 * ha: adapter state pointer.
953 * buffer: buffer pointer
954 * phys_addr: physical address of buffer
955 * size: size of buffer
956 * TARGET_QUEUE_LOCK must be released
957 * ADAPTER_STATE_LOCK must be released
958 *
959 * Returns:
960 * qla2x00 local function status code.
961 *
962 * Context:
963 * Kernel context.
964 */
965 #define CONFIG_XCHOFFLD_MEM 0x3
966 int
967 qla_set_exchoffld_mem_cfg(scsi_qla_host_t *vha)
968 {
969 int rval;
970 mbx_cmd_t mc;
971 mbx_cmd_t *mcp = &mc;
972 struct qla_hw_data *ha = vha->hw;
973
974 ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x1157,
975 "Entered %s.\n", __func__);
976
977 memset(mcp->mb, 0 , sizeof(mcp->mb));
978 mcp->mb[0] = MBC_GET_MEM_OFFLOAD_CNTRL_STAT;
979 mcp->mb[1] = CONFIG_XCHOFFLD_MEM;
980 mcp->mb[2] = MSW(ha->exchoffld_buf_dma);
981 mcp->mb[3] = LSW(ha->exchoffld_buf_dma);
982 mcp->mb[6] = MSW(MSD(ha->exchoffld_buf_dma));
983 mcp->mb[7] = LSW(MSD(ha->exchoffld_buf_dma));
984 mcp->mb[8] = MSW(ha->exchoffld_size);
985 mcp->mb[9] = LSW(ha->exchoffld_size);
986 mcp->out_mb = MBX_9|MBX_8|MBX_7|MBX_6|MBX_3|MBX_2|MBX_1|MBX_0;
987 mcp->in_mb = MBX_11|MBX_0;
988 mcp->tov = MBX_TOV_SECONDS;
989 mcp->flags = 0;
990 rval = qla2x00_mailbox_command(vha, mcp);
991 if (rval != QLA_SUCCESS) {
992 /*EMPTY*/
993 ql_dbg(ql_dbg_mbx, vha, 0x1158, "Failed=%x.\n", rval);
994 } else {
995 ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x1192,
996 "Done %s.\n", __func__);
997 }
998
999 return rval;
1000 }
1001
1002 /*
1003 * qla2x00_get_fw_version
1004 * Get firmware version.
1005 *
1006 * Input:
1007 * ha: adapter state pointer.
1008 * (The major, minor and subminor numbers are returned in
1009 * the qla_hw_data fields fw_major_version, fw_minor_version
1010 * and fw_subminor_version rather than through pointers.)
1011 *
1012 * Returns:
1013 * qla2x00 local function return status code.
1014 *
1015 * Context:
1016 * Kernel context.
1017 */
1018 int
1019 qla2x00_get_fw_version(scsi_qla_host_t *vha)
1020 {
1021 int rval;
1022 mbx_cmd_t mc;
1023 mbx_cmd_t *mcp = &mc;
1024 struct qla_hw_data *ha = vha->hw;
1025
1026 ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x1029,
1027 "Entered %s.\n", __func__);
1028
1029 mcp->mb[0] = MBC_GET_FIRMWARE_VERSION;
1030 mcp->out_mb = MBX_0;
1031 mcp->in_mb = MBX_6|MBX_5|MBX_4|MBX_3|MBX_2|MBX_1|MBX_0;
1032 if (IS_QLA81XX(vha->hw) || IS_QLA8031(ha) || IS_QLA8044(ha))
1033 mcp->in_mb |= MBX_13|MBX_12|MBX_11|MBX_10|MBX_9|MBX_8;
1034 if (IS_FWI2_CAPABLE(ha))
1035 mcp->in_mb |= MBX_17|MBX_16|MBX_15;
1036 if (IS_QLA27XX(ha) || IS_QLA28XX(ha))
1037 mcp->in_mb |=
1038 MBX_25|MBX_24|MBX_23|MBX_22|MBX_21|MBX_20|MBX_19|MBX_18|
1039 MBX_14|MBX_13|MBX_11|MBX_10|MBX_9|MBX_8|MBX_7;
1040
1041 mcp->flags = 0;
1042 mcp->tov = MBX_TOV_SECONDS;
1043 rval = qla2x00_mailbox_command(vha, mcp);
1044 if (rval != QLA_SUCCESS)
1045 goto failed;
1046
1047 /* Return mailbox data. */
1048 ha->fw_major_version = mcp->mb[1];
1049 ha->fw_minor_version = mcp->mb[2];
1050 ha->fw_subminor_version = mcp->mb[3];
1051 ha->fw_attributes = mcp->mb[6];
1052 if (IS_QLA2100(vha->hw) || IS_QLA2200(vha->hw))
1053 ha->fw_memory_size = 0x1FFFF; /* Defaults to 128KB. */
1054 else
1055 ha->fw_memory_size = (mcp->mb[5] << 16) | mcp->mb[4];
1056
1057 if (IS_QLA81XX(vha->hw) || IS_QLA8031(vha->hw) || IS_QLA8044(ha)) {
1058 ha->mpi_version[0] = mcp->mb[10] & 0xff;
1059 ha->mpi_version[1] = mcp->mb[11] >> 8;
1060 ha->mpi_version[2] = mcp->mb[11] & 0xff;
1061 ha->mpi_capabilities = (mcp->mb[12] << 16) | mcp->mb[13];
1062 ha->phy_version[0] = mcp->mb[8] & 0xff;
1063 ha->phy_version[1] = mcp->mb[9] >> 8;
1064 ha->phy_version[2] = mcp->mb[9] & 0xff;
1065 }
1066
1067 if (IS_FWI2_CAPABLE(ha)) {
1068 ha->fw_attributes_h = mcp->mb[15];
1069 ha->fw_attributes_ext[0] = mcp->mb[16];
1070 ha->fw_attributes_ext[1] = mcp->mb[17];
1071 ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x1139,
1072 "%s: FW_attributes Upper: 0x%x, Lower: 0x%x.\n",
1073 __func__, mcp->mb[15], mcp->mb[6]);
1074 ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x112f,
1075 "%s: Ext_FwAttributes Upper: 0x%x, Lower: 0x%x.\n",
1076 __func__, mcp->mb[17], mcp->mb[16]);
1077
1078 if (ha->fw_attributes_h & 0x4)
1079 ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x118d,
1080 "%s: Firmware supports Extended Login 0x%x\n",
1081 __func__, ha->fw_attributes_h);
1082
1083 if (ha->fw_attributes_h & 0x8)
1084 ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x1191,
1085 "%s: Firmware supports Exchange Offload 0x%x\n",
1086 __func__, ha->fw_attributes_h);
1087
1088 /*
1089 * FW supports nvme and driver load parameter requested nvme.
1090 * BIT 26 of fw_attributes indicates NVMe support.
1091 */
1092 if ((ha->fw_attributes_h &
1093 (FW_ATTR_H_NVME | FW_ATTR_H_NVME_UPDATED)) &&
1094 ql2xnvmeenable) {
1095 if (ha->fw_attributes_h & FW_ATTR_H_NVME_FBURST)
1096 vha->flags.nvme_first_burst = 1;
1097
1098 vha->flags.nvme_enabled = 1;
1099 ql_log(ql_log_info, vha, 0xd302,
1100 "%s: FC-NVMe is Enabled (0x%x)\n",
1101 __func__, ha->fw_attributes_h);
1102 }
1103 }
1104
1105 if (IS_QLA27XX(ha) || IS_QLA28XX(ha)) {
1106 ha->serdes_version[0] = mcp->mb[7] & 0xff;
1107 ha->serdes_version[1] = mcp->mb[8] >> 8;
1108 ha->serdes_version[2] = mcp->mb[8] & 0xff;
1109 ha->mpi_version[0] = mcp->mb[10] & 0xff;
1110 ha->mpi_version[1] = mcp->mb[11] >> 8;
1111 ha->mpi_version[2] = mcp->mb[11] & 0xff;
1112 ha->pep_version[0] = mcp->mb[13] & 0xff;
1113 ha->pep_version[1] = mcp->mb[14] >> 8;
1114 ha->pep_version[2] = mcp->mb[14] & 0xff;
1115 ha->fw_shared_ram_start = (mcp->mb[19] << 16) | mcp->mb[18];
1116 ha->fw_shared_ram_end = (mcp->mb[21] << 16) | mcp->mb[20];
1117 ha->fw_ddr_ram_start = (mcp->mb[23] << 16) | mcp->mb[22];
1118 ha->fw_ddr_ram_end = (mcp->mb[25] << 16) | mcp->mb[24];
1119 if (IS_QLA28XX(ha)) {
1120 if (mcp->mb[16] & BIT_10)
1121 ha->flags.secure_fw = 1;
1122
1123 ql_log(ql_log_info, vha, 0xffff,
1124 "Secure Flash Update in FW: %s\n",
1125 (ha->flags.secure_fw) ? "Supported" :
1126 "Not Supported");
1127 }
1128 }
1129
1130 failed:
1131 if (rval != QLA_SUCCESS) {
1132 /*EMPTY*/
1133 ql_dbg(ql_dbg_mbx, vha, 0x102a, "Failed=%x.\n", rval);
1134 } else {
1135 /*EMPTY*/
1136 ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x102b,
1137 "Done %s.\n", __func__);
1138 }
1139 return rval;
1140 }
1141
1142 /*
1143 * qla2x00_get_fw_options
1144 * Get firmware options.
1145 *
1146 * Input:
1147 * ha = adapter block pointer.
1148 * fwopt = pointer for firmware options.
1149 *
1150 * Returns:
1151 * qla2x00 local function return status code.
1152 *
1153 * Context:
1154 * Kernel context.
1155 */
1156 int
1157 qla2x00_get_fw_options(scsi_qla_host_t *vha, uint16_t *fwopts)
1158 {
1159 int rval;
1160 mbx_cmd_t mc;
1161 mbx_cmd_t *mcp = &mc;
1162
1163 ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x102c,
1164 "Entered %s.\n", __func__);
1165
1166 mcp->mb[0] = MBC_GET_FIRMWARE_OPTION;
1167 mcp->out_mb = MBX_0;
1168 mcp->in_mb = MBX_3|MBX_2|MBX_1|MBX_0;
1169 mcp->tov = MBX_TOV_SECONDS;
1170 mcp->flags = 0;
1171 rval = qla2x00_mailbox_command(vha, mcp);
1172
1173 if (rval != QLA_SUCCESS) {
1174 /*EMPTY*/
1175 ql_dbg(ql_dbg_mbx, vha, 0x102d, "Failed=%x.\n", rval);
1176 } else {
1177 fwopts[0] = mcp->mb[0];
1178 fwopts[1] = mcp->mb[1];
1179 fwopts[2] = mcp->mb[2];
1180 fwopts[3] = mcp->mb[3];
1181
1182 ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x102e,
1183 "Done %s.\n", __func__);
1184 }
1185
1186 return rval;
1187 }
1188
1189
1190 /*
1191 * qla2x00_set_fw_options
1192 * Set firmware options.
1193 *
1194 * Input:
1195 * ha = adapter block pointer.
1196 * fwopt = pointer for firmware options.
1197 *
1198 * Returns:
1199 * qla2x00 local function return status code.
1200 *
1201 * Context:
1202 * Kernel context.
1203 */
1204 int
1205 qla2x00_set_fw_options(scsi_qla_host_t *vha, uint16_t *fwopts)
1206 {
1207 int rval;
1208 mbx_cmd_t mc;
1209 mbx_cmd_t *mcp = &mc;
1210
1211 ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x102f,
1212 "Entered %s.\n", __func__);
1213
1214 mcp->mb[0] = MBC_SET_FIRMWARE_OPTION;
1215 mcp->mb[1] = fwopts[1];
1216 mcp->mb[2] = fwopts[2];
1217 mcp->mb[3] = fwopts[3];
1218 mcp->out_mb = MBX_3|MBX_2|MBX_1|MBX_0;
1219 mcp->in_mb = MBX_0;
1220 if (IS_FWI2_CAPABLE(vha->hw)) {
1221 mcp->in_mb |= MBX_1;
1222 mcp->mb[10] = fwopts[10];
1223 mcp->out_mb |= MBX_10;
1224 } else {
1225 mcp->mb[10] = fwopts[10];
1226 mcp->mb[11] = fwopts[11];
1227 mcp->mb[12] = 0; /* Undocumented, but used */
1228 mcp->out_mb |= MBX_12|MBX_11|MBX_10;
1229 }
1230 mcp->tov = MBX_TOV_SECONDS;
1231 mcp->flags = 0;
1232 rval = qla2x00_mailbox_command(vha, mcp);
1233
1234 fwopts[0] = mcp->mb[0];
1235
1236 if (rval != QLA_SUCCESS) {
1237 /*EMPTY*/
1238 ql_dbg(ql_dbg_mbx, vha, 0x1030,
1239 "Failed=%x (%x/%x).\n", rval, mcp->mb[0], mcp->mb[1]);
1240 } else {
1241 /*EMPTY*/
1242 ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x1031,
1243 "Done %s.\n", __func__);
1244 }
1245
1246 return rval;
1247 }
1248
1249 /*
1250 * qla2x00_mbx_reg_test
1251 * Mailbox register wrap test.
1252 *
1253 * Input:
1254 * ha = adapter block pointer.
1255 * TARGET_QUEUE_LOCK must be released.
1256 * ADAPTER_STATE_LOCK must be released.
1257 *
1258 * Returns:
1259 * qla2x00 local function return status code.
1260 *
1261 * Context:
1262 * Kernel context.
1263 */
1264 int
1265 qla2x00_mbx_reg_test(scsi_qla_host_t *vha)
1266 {
1267 int rval;
1268 mbx_cmd_t mc;
1269 mbx_cmd_t *mcp = &mc;
1270
1271 ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x1032,
1272 "Entered %s.\n", __func__);
1273
1274 mcp->mb[0] = MBC_MAILBOX_REGISTER_TEST;
1275 mcp->mb[1] = 0xAAAA;
1276 mcp->mb[2] = 0x5555;
1277 mcp->mb[3] = 0xAA55;
1278 mcp->mb[4] = 0x55AA;
1279 mcp->mb[5] = 0xA5A5;
1280 mcp->mb[6] = 0x5A5A;
1281 mcp->mb[7] = 0x2525;
1282 mcp->out_mb = MBX_7|MBX_6|MBX_5|MBX_4|MBX_3|MBX_2|MBX_1|MBX_0;
1283 mcp->in_mb = MBX_7|MBX_6|MBX_5|MBX_4|MBX_3|MBX_2|MBX_1|MBX_0;
1284 mcp->tov = MBX_TOV_SECONDS;
1285 mcp->flags = 0;
1286 rval = qla2x00_mailbox_command(vha, mcp);
1287
1288 if (rval == QLA_SUCCESS) {
1289 if (mcp->mb[1] != 0xAAAA || mcp->mb[2] != 0x5555 ||
1290 mcp->mb[3] != 0xAA55 || mcp->mb[4] != 0x55AA)
1291 rval = QLA_FUNCTION_FAILED;
1292 if (mcp->mb[5] != 0xA5A5 || mcp->mb[6] != 0x5A5A ||
1293 mcp->mb[7] != 0x2525)
1294 rval = QLA_FUNCTION_FAILED;
1295 }
1296
1297 if (rval != QLA_SUCCESS) {
1298 /*EMPTY*/
1299 ql_dbg(ql_dbg_mbx, vha, 0x1033, "Failed=%x.\n", rval);
1300 } else {
1301 /*EMPTY*/
1302 ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x1034,
1303 "Done %s.\n", __func__);
1304 }
1305
1306 return rval;
1307 }
1308
1309 /*
1310 * qla2x00_verify_checksum
1311 * Verify firmware checksum.
1312 *
1313 * Input:
1314 * ha = adapter block pointer.
1315 * TARGET_QUEUE_LOCK must be released.
1316 * ADAPTER_STATE_LOCK must be released.
1317 *
1318 * Returns:
1319 * qla2x00 local function return status code.
1320 *
1321 * Context:
1322 * Kernel context.
1323 */
1324 int
1325 qla2x00_verify_checksum(scsi_qla_host_t *vha, uint32_t risc_addr)
1326 {
1327 int rval;
1328 mbx_cmd_t mc;
1329 mbx_cmd_t *mcp = &mc;
1330
1331 ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x1035,
1332 "Entered %s.\n", __func__);
1333
1334 mcp->mb[0] = MBC_VERIFY_CHECKSUM;
1335 mcp->out_mb = MBX_0;
1336 mcp->in_mb = MBX_0;
1337 if (IS_FWI2_CAPABLE(vha->hw)) {
1338 mcp->mb[1] = MSW(risc_addr);
1339 mcp->mb[2] = LSW(risc_addr);
1340 mcp->out_mb |= MBX_2|MBX_1;
1341 mcp->in_mb |= MBX_2|MBX_1;
1342 } else {
1343 mcp->mb[1] = LSW(risc_addr);
1344 mcp->out_mb |= MBX_1;
1345 mcp->in_mb |= MBX_1;
1346 }
1347
1348 mcp->tov = MBX_TOV_SECONDS;
1349 mcp->flags = 0;
1350 rval = qla2x00_mailbox_command(vha, mcp);
1351
1352 if (rval != QLA_SUCCESS) {
1353 ql_dbg(ql_dbg_mbx, vha, 0x1036,
1354 "Failed=%x chm sum=%x.\n", rval, IS_FWI2_CAPABLE(vha->hw) ?
1355 (mcp->mb[2] << 16) | mcp->mb[1] : mcp->mb[1]);
1356 } else {
1357 ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x1037,
1358 "Done %s.\n", __func__);
1359 }
1360
1361 return rval;
1362 }
1363
1364 /*
1365 * qla2x00_issue_iocb
1366 * Issue IOCB using mailbox command
1367 *
1368 * Input:
1369 * ha = adapter state pointer.
1370 * buffer = buffer pointer.
1371 * phys_addr = physical address of buffer.
1372 * size = size of buffer.
1373 * TARGET_QUEUE_LOCK must be released.
1374 * ADAPTER_STATE_LOCK must be released.
1375 *
1376 * Returns:
1377 * qla2x00 local function return status code.
1378 *
1379 * Context:
1380 * Kernel context.
1381 */
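/*
 * Illustrative caller sketch (not part of this file): the IOCB is built
 * in a DMA-coherent buffer and the completion status is written back in
 * place, so a caller might do:
 *
 *	pkt = dma_alloc_coherent(&ha->pdev->dev, size, &pkt_dma, GFP_KERNEL);
 *	// ... build the IOCB in pkt ...
 *	rval = qla2x00_issue_iocb(vha, pkt, pkt_dma, size);
 *	// on success, ((sts_entry_t *)pkt)->entry_status holds the result
 */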
1382 int
1383 qla2x00_issue_iocb_timeout(scsi_qla_host_t *vha, void *buffer,
1384 dma_addr_t phys_addr, size_t size, uint32_t tov)
1385 {
1386 int rval;
1387 mbx_cmd_t mc;
1388 mbx_cmd_t *mcp = &mc;
1389
1390 if (!vha->hw->flags.fw_started)
1391 return QLA_INVALID_COMMAND;
1392
1393 ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x1038,
1394 "Entered %s.\n", __func__);
1395
1396 mcp->mb[0] = MBC_IOCB_COMMAND_A64;
1397 mcp->mb[1] = 0;
1398 mcp->mb[2] = MSW(LSD(phys_addr));
1399 mcp->mb[3] = LSW(LSD(phys_addr));
1400 mcp->mb[6] = MSW(MSD(phys_addr));
1401 mcp->mb[7] = LSW(MSD(phys_addr));
1402 mcp->out_mb = MBX_7|MBX_6|MBX_3|MBX_2|MBX_1|MBX_0;
1403 mcp->in_mb = MBX_1|MBX_0;
1404 mcp->tov = tov;
1405 mcp->flags = 0;
1406 rval = qla2x00_mailbox_command(vha, mcp);
1407
1408 if (rval != QLA_SUCCESS) {
1409 /*EMPTY*/
1410 ql_dbg(ql_dbg_mbx, vha, 0x1039, "Failed=%x.\n", rval);
1411 } else {
1412 sts_entry_t *sts_entry = buffer;
1413
1414 /* Mask reserved bits. */
1415 sts_entry->entry_status &=
1416 IS_FWI2_CAPABLE(vha->hw) ? RF_MASK_24XX : RF_MASK;
1417 ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x103a,
1418 "Done %s (status=%x).\n", __func__,
1419 sts_entry->entry_status);
1420 }
1421
1422 return rval;
1423 }
1424
1425 int
1426 qla2x00_issue_iocb(scsi_qla_host_t *vha, void *buffer, dma_addr_t phys_addr,
1427 size_t size)
1428 {
1429 return qla2x00_issue_iocb_timeout(vha, buffer, phys_addr, size,
1430 MBX_TOV_SECONDS);
1431 }
1432
1433 /*
1434 * qla2x00_abort_command
1435 * Abort command aborts a specified IOCB.
1436 *
1437 * Input:
1438 * ha = adapter block pointer.
1439 * sp = SRB structure pointer.
1440 *
1441 * Returns:
1442 * qla2x00 local function return status code.
1443 *
1444 * Context:
1445 * Kernel context.
1446 */
1447 int
1448 qla2x00_abort_command(srb_t *sp)
1449 {
1450 unsigned long flags = 0;
1451 int rval;
1452 uint32_t handle = 0;
1453 mbx_cmd_t mc;
1454 mbx_cmd_t *mcp = &mc;
1455 fc_port_t *fcport = sp->fcport;
1456 scsi_qla_host_t *vha = fcport->vha;
1457 struct qla_hw_data *ha = vha->hw;
1458 struct req_que *req;
1459 struct scsi_cmnd *cmd = GET_CMD_SP(sp);
1460
1461 ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x103b,
1462 "Entered %s.\n", __func__);
1463
1464 if (sp->qpair)
1465 req = sp->qpair->req;
1466 else
1467 req = vha->req;
1468
1469 spin_lock_irqsave(&ha->hardware_lock, flags);
1470 for (handle = 1; handle < req->num_outstanding_cmds; handle++) {
1471 if (req->outstanding_cmds[handle] == sp)
1472 break;
1473 }
1474 spin_unlock_irqrestore(&ha->hardware_lock, flags);
1475
1476 if (handle == req->num_outstanding_cmds) {
1477 /* command not found */
1478 return QLA_FUNCTION_FAILED;
1479 }
1480
1481 mcp->mb[0] = MBC_ABORT_COMMAND;
1482 if (HAS_EXTENDED_IDS(ha))
1483 mcp->mb[1] = fcport->loop_id;
1484 else
1485 mcp->mb[1] = fcport->loop_id << 8;
1486 mcp->mb[2] = (uint16_t)handle;
1487 mcp->mb[3] = (uint16_t)(handle >> 16);
1488 mcp->mb[6] = (uint16_t)cmd->device->lun;
1489 mcp->out_mb = MBX_6|MBX_3|MBX_2|MBX_1|MBX_0;
1490 mcp->in_mb = MBX_0;
1491 mcp->tov = MBX_TOV_SECONDS;
1492 mcp->flags = 0;
1493 rval = qla2x00_mailbox_command(vha, mcp);
1494
1495 if (rval != QLA_SUCCESS) {
1496 ql_dbg(ql_dbg_mbx, vha, 0x103c, "Failed=%x.\n", rval);
1497 } else {
1498 ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x103d,
1499 "Done %s.\n", __func__);
1500 }
1501
1502 return rval;
1503 }
1504
1505 int
1506 qla2x00_abort_target(struct fc_port *fcport, uint64_t l, int tag)
1507 {
1508 int rval, rval2;
1509 mbx_cmd_t mc;
1510 mbx_cmd_t *mcp = &mc;
1511 scsi_qla_host_t *vha;
1512
1513 vha = fcport->vha;
1514
1515 ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x103e,
1516 "Entered %s.\n", __func__);
1517
1518 mcp->mb[0] = MBC_ABORT_TARGET;
1519 mcp->out_mb = MBX_9|MBX_2|MBX_1|MBX_0;
1520 if (HAS_EXTENDED_IDS(vha->hw)) {
1521 mcp->mb[1] = fcport->loop_id;
1522 mcp->mb[10] = 0;
1523 mcp->out_mb |= MBX_10;
1524 } else {
1525 mcp->mb[1] = fcport->loop_id << 8;
1526 }
1527 mcp->mb[2] = vha->hw->loop_reset_delay;
1528 mcp->mb[9] = vha->vp_idx;
1529
1530 mcp->in_mb = MBX_0;
1531 mcp->tov = MBX_TOV_SECONDS;
1532 mcp->flags = 0;
1533 rval = qla2x00_mailbox_command(vha, mcp);
1534 if (rval != QLA_SUCCESS) {
1535 ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x103f,
1536 "Failed=%x.\n", rval);
1537 }
1538
1539 /* Issue marker IOCB. */
1540 rval2 = qla2x00_marker(vha, vha->hw->base_qpair, fcport->loop_id, 0,
1541 MK_SYNC_ID);
1542 if (rval2 != QLA_SUCCESS) {
1543 ql_dbg(ql_dbg_mbx, vha, 0x1040,
1544 "Failed to issue marker IOCB (%x).\n", rval2);
1545 } else {
1546 ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x1041,
1547 "Done %s.\n", __func__);
1548 }
1549
1550 return rval;
1551 }
1552
1553 int
1554 qla2x00_lun_reset(struct fc_port *fcport, uint64_t l, int tag)
1555 {
1556 int rval, rval2;
1557 mbx_cmd_t mc;
1558 mbx_cmd_t *mcp = &mc;
1559 scsi_qla_host_t *vha;
1560
1561 vha = fcport->vha;
1562
1563 ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x1042,
1564 "Entered %s.\n", __func__);
1565
1566 mcp->mb[0] = MBC_LUN_RESET;
1567 mcp->out_mb = MBX_9|MBX_3|MBX_2|MBX_1|MBX_0;
1568 if (HAS_EXTENDED_IDS(vha->hw))
1569 mcp->mb[1] = fcport->loop_id;
1570 else
1571 mcp->mb[1] = fcport->loop_id << 8;
1572 mcp->mb[2] = (u32)l;
1573 mcp->mb[3] = 0;
1574 mcp->mb[9] = vha->vp_idx;
1575
1576 mcp->in_mb = MBX_0;
1577 mcp->tov = MBX_TOV_SECONDS;
1578 mcp->flags = 0;
1579 rval = qla2x00_mailbox_command(vha, mcp);
1580 if (rval != QLA_SUCCESS) {
1581 ql_dbg(ql_dbg_mbx, vha, 0x1043, "Failed=%x.\n", rval);
1582 }
1583
1584 /* Issue marker IOCB. */
1585 rval2 = qla2x00_marker(vha, vha->hw->base_qpair, fcport->loop_id, l,
1586 MK_SYNC_ID_LUN);
1587 if (rval2 != QLA_SUCCESS) {
1588 ql_dbg(ql_dbg_mbx, vha, 0x1044,
1589 "Failed to issue marker IOCB (%x).\n", rval2);
1590 } else {
1591 ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x1045,
1592 "Done %s.\n", __func__);
1593 }
1594
1595 return rval;
1596 }
1597
1598 /*
1599 * qla2x00_get_adapter_id
1600 * Get adapter ID and topology.
1601 *
1602 * Input:
1603 * ha = adapter block pointer.
1604 * id = pointer for loop ID.
1605 * al_pa = pointer for AL_PA.
1606 * area = pointer for area.
1607 * domain = pointer for domain.
1608 * top = pointer for topology.
1609 * TARGET_QUEUE_LOCK must be released.
1610 * ADAPTER_STATE_LOCK must be released.
1611 *
1612 * Returns:
1613 * qla2x00 local function return status code.
1614 *
1615 * Context:
1616 * Kernel context.
1617 */
1618 int
1619 qla2x00_get_adapter_id(scsi_qla_host_t *vha, uint16_t *id, uint8_t *al_pa,
1620 uint8_t *area, uint8_t *domain, uint16_t *top, uint16_t *sw_cap)
1621 {
1622 int rval;
1623 mbx_cmd_t mc;
1624 mbx_cmd_t *mcp = &mc;
1625
1626 ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x1046,
1627 "Entered %s.\n", __func__);
1628
1629 mcp->mb[0] = MBC_GET_ADAPTER_LOOP_ID;
1630 mcp->mb[9] = vha->vp_idx;
1631 mcp->out_mb = MBX_9|MBX_0;
1632 mcp->in_mb = MBX_9|MBX_7|MBX_6|MBX_3|MBX_2|MBX_1|MBX_0;
1633 if (IS_CNA_CAPABLE(vha->hw))
1634 mcp->in_mb |= MBX_13|MBX_12|MBX_11|MBX_10;
1635 if (IS_FWI2_CAPABLE(vha->hw))
1636 mcp->in_mb |= MBX_19|MBX_18|MBX_17|MBX_16;
1637 if (IS_QLA27XX(vha->hw) || IS_QLA28XX(vha->hw))
1638 mcp->in_mb |= MBX_15;
1639 mcp->tov = MBX_TOV_SECONDS;
1640 mcp->flags = 0;
1641 rval = qla2x00_mailbox_command(vha, mcp);
1642 if (mcp->mb[0] == MBS_COMMAND_ERROR)
1643 rval = QLA_COMMAND_ERROR;
1644 else if (mcp->mb[0] == MBS_INVALID_COMMAND)
1645 rval = QLA_INVALID_COMMAND;
1646
1647 /* Return data. */
1648 *id = mcp->mb[1];
1649 *al_pa = LSB(mcp->mb[2]);
1650 *area = MSB(mcp->mb[2]);
1651 *domain = LSB(mcp->mb[3]);
1652 *top = mcp->mb[6];
1653 *sw_cap = mcp->mb[7];
1654
1655 if (rval != QLA_SUCCESS) {
1656 /*EMPTY*/
1657 ql_dbg(ql_dbg_mbx, vha, 0x1047, "Failed=%x.\n", rval);
1658 } else {
1659 ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x1048,
1660 "Done %s.\n", __func__);
1661
1662 if (IS_CNA_CAPABLE(vha->hw)) {
1663 vha->fcoe_vlan_id = mcp->mb[9] & 0xfff;
1664 vha->fcoe_fcf_idx = mcp->mb[10];
1665 vha->fcoe_vn_port_mac[5] = mcp->mb[11] >> 8;
1666 vha->fcoe_vn_port_mac[4] = mcp->mb[11] & 0xff;
1667 vha->fcoe_vn_port_mac[3] = mcp->mb[12] >> 8;
1668 vha->fcoe_vn_port_mac[2] = mcp->mb[12] & 0xff;
1669 vha->fcoe_vn_port_mac[1] = mcp->mb[13] >> 8;
1670 vha->fcoe_vn_port_mac[0] = mcp->mb[13] & 0xff;
1671 }
1672 /* If FA-WWN supported */
1673 if (IS_FAWWN_CAPABLE(vha->hw)) {
1674 if (mcp->mb[7] & BIT_14) {
1675 vha->port_name[0] = MSB(mcp->mb[16]);
1676 vha->port_name[1] = LSB(mcp->mb[16]);
1677 vha->port_name[2] = MSB(mcp->mb[17]);
1678 vha->port_name[3] = LSB(mcp->mb[17]);
1679 vha->port_name[4] = MSB(mcp->mb[18]);
1680 vha->port_name[5] = LSB(mcp->mb[18]);
1681 vha->port_name[6] = MSB(mcp->mb[19]);
1682 vha->port_name[7] = LSB(mcp->mb[19]);
1683 fc_host_port_name(vha->host) =
1684 wwn_to_u64(vha->port_name);
1685 ql_dbg(ql_dbg_mbx, vha, 0x10ca,
1686 "FA-WWN acquired %016llx\n",
1687 wwn_to_u64(vha->port_name));
1688 }
1689 }
1690
1691 if (IS_QLA27XX(vha->hw) || IS_QLA28XX(vha->hw))
1692 vha->bbcr = mcp->mb[15];
1693 }
1694
1695 return rval;
1696 }
1697
1698 /*
1699 * qla2x00_get_retry_cnt
1700 * Get current firmware login retry count and delay.
1701 *
1702 * Input:
1703 * ha = adapter block pointer.
1704 * retry_cnt = pointer to login retry count.
1705 * tov = pointer to login timeout value.
1706 *
1707 * Returns:
1708 * qla2x00 local function return status code.
1709 *
1710 * Context:
1711 * Kernel context.
1712 */
1713 int
1714 qla2x00_get_retry_cnt(scsi_qla_host_t *vha, uint8_t *retry_cnt, uint8_t *tov,
1715 uint16_t *r_a_tov)
1716 {
1717 int rval;
1718 uint16_t ratov;
1719 mbx_cmd_t mc;
1720 mbx_cmd_t *mcp = &mc;
1721
1722 ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x1049,
1723 "Entered %s.\n", __func__);
1724
1725 mcp->mb[0] = MBC_GET_RETRY_COUNT;
1726 mcp->out_mb = MBX_0;
1727 mcp->in_mb = MBX_3|MBX_2|MBX_1|MBX_0;
1728 mcp->tov = MBX_TOV_SECONDS;
1729 mcp->flags = 0;
1730 rval = qla2x00_mailbox_command(vha, mcp);
1731
1732 if (rval != QLA_SUCCESS) {
1733 /*EMPTY*/
1734 ql_dbg(ql_dbg_mbx, vha, 0x104a,
1735 "Failed=%x mb[0]=%x.\n", rval, mcp->mb[0]);
1736 } else {
1737 /* Convert returned data and check our values. */
1738 *r_a_tov = mcp->mb[3] / 2;
1739 ratov = (mcp->mb[3]/2) / 10; /* mb[3] value is in 100ms */
1740 if (mcp->mb[1] * ratov > (*retry_cnt) * (*tov)) {
1741 /* Update to the larger values */
1742 *retry_cnt = (uint8_t)mcp->mb[1];
1743 *tov = ratov;
1744 }
1745
1746 ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x104b,
1747 "Done %s mb3=%d ratov=%d.\n", __func__, mcp->mb[3], ratov);
1748 }
1749
1750 return rval;
1751 }
1752
1753 /*
1754 * qla2x00_init_firmware
1755 * Initialize adapter firmware.
1756 *
1757 * Input:
1758 * ha = adapter block pointer.
1759 * dptr = Initialization control block pointer.
1760 * size = size of initialization control block.
1761 * TARGET_QUEUE_LOCK must be released.
1762 * ADAPTER_STATE_LOCK must be released.
1763 *
1764 * Returns:
1765 * qla2x00 local function return status code.
1766 *
1767 * Context:
1768 * Kernel context.
1769 */
1770 int
1771 qla2x00_init_firmware(scsi_qla_host_t *vha, uint16_t size)
1772 {
1773 int rval;
1774 mbx_cmd_t mc;
1775 mbx_cmd_t *mcp = &mc;
1776 struct qla_hw_data *ha = vha->hw;
1777
1778 ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x104c,
1779 "Entered %s.\n", __func__);
1780
1781 if (IS_P3P_TYPE(ha) && ql2xdbwr)
1782 qla82xx_wr_32(ha, (uintptr_t __force)ha->nxdb_wr_ptr,
1783 (0x04 | (ha->portnum << 5) | (0 << 8) | (0 << 16)));
1784
1785 if (ha->flags.npiv_supported)
1786 mcp->mb[0] = MBC_MID_INITIALIZE_FIRMWARE;
1787 else
1788 mcp->mb[0] = MBC_INITIALIZE_FIRMWARE;
1789
1790 mcp->mb[1] = 0;
1791 mcp->mb[2] = MSW(ha->init_cb_dma);
1792 mcp->mb[3] = LSW(ha->init_cb_dma);
1793 mcp->mb[6] = MSW(MSD(ha->init_cb_dma));
1794 mcp->mb[7] = LSW(MSD(ha->init_cb_dma));
1795 mcp->out_mb = MBX_7|MBX_6|MBX_3|MBX_2|MBX_1|MBX_0;
1796 if (ha->ex_init_cb && ha->ex_init_cb->ex_version) {
1797 mcp->mb[1] = BIT_0;
1798 mcp->mb[10] = MSW(ha->ex_init_cb_dma);
1799 mcp->mb[11] = LSW(ha->ex_init_cb_dma);
1800 mcp->mb[12] = MSW(MSD(ha->ex_init_cb_dma));
1801 mcp->mb[13] = LSW(MSD(ha->ex_init_cb_dma));
1802 mcp->mb[14] = sizeof(*ha->ex_init_cb);
1803 mcp->out_mb |= MBX_14|MBX_13|MBX_12|MBX_11|MBX_10;
1804 }
1805 /* 1 and 2 should normally be captured. */
1806 mcp->in_mb = MBX_2|MBX_1|MBX_0;
1807 if (IS_QLA83XX(ha) || IS_QLA27XX(ha) || IS_QLA28XX(ha))
1808 /* mb3 is additional info about the installed SFP. */
1809 mcp->in_mb |= MBX_3;
1810 mcp->buf_size = size;
1811 mcp->flags = MBX_DMA_OUT;
1812 mcp->tov = MBX_TOV_SECONDS;
1813 rval = qla2x00_mailbox_command(vha, mcp);
1814
1815 if (rval != QLA_SUCCESS) {
1816 /*EMPTY*/
1817 ql_dbg(ql_dbg_mbx, vha, 0x104d,
1818 "Failed=%x mb[0]=%x, mb[1]=%x, mb[2]=%x, mb[3]=%x.\n",
1819 rval, mcp->mb[0], mcp->mb[1], mcp->mb[2], mcp->mb[3]);
1820 if (ha->init_cb) {
1821 ql_dbg(ql_dbg_mbx, vha, 0x104d, "init_cb:\n");
1822 ql_dump_buffer(ql_dbg_init + ql_dbg_verbose, vha,
1823 0x0104d, ha->init_cb, sizeof(*ha->init_cb));
1824 }
1825 if (ha->ex_init_cb && ha->ex_init_cb->ex_version) {
1826 ql_dbg(ql_dbg_mbx, vha, 0x104d, "ex_init_cb:\n");
1827 ql_dump_buffer(ql_dbg_init + ql_dbg_verbose, vha,
1828 0x0104d, ha->ex_init_cb, sizeof(*ha->ex_init_cb));
1829 }
1830 } else {
1831 if (IS_QLA27XX(ha) || IS_QLA28XX(ha)) {
1832 if (mcp->mb[2] == 6 || mcp->mb[3] == 2)
1833 ql_dbg(ql_dbg_mbx, vha, 0x119d,
1834 "Invalid SFP/Validation Failed\n");
1835 }
1836 ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x104e,
1837 "Done %s.\n", __func__);
1838 }
1839
1840 return rval;
1841 }
1842
1843
1844 /*
1845 * qla2x00_get_port_database
1846 * Issue normal/enhanced get port database mailbox command
1847 * and copy device name as necessary.
1848 *
1849 * Input:
1850 * ha = adapter state pointer.
1851 * dev = structure pointer.
1852 * opt = enhanced cmd option byte.
1853 *
1854 * Returns:
1855 * qla2x00 local function return status code.
1856 *
1857 * Context:
1858 * Kernel context.
1859 */
1860 int
1861 qla2x00_get_port_database(scsi_qla_host_t *vha, fc_port_t *fcport, uint8_t opt)
1862 {
1863 int rval;
1864 mbx_cmd_t mc;
1865 mbx_cmd_t *mcp = &mc;
1866 port_database_t *pd;
1867 struct port_database_24xx *pd24;
1868 dma_addr_t pd_dma;
1869 struct qla_hw_data *ha = vha->hw;
1870
1871 ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x104f,
1872 "Entered %s.\n", __func__);
1873
1874 pd24 = NULL;
1875 pd = dma_pool_zalloc(ha->s_dma_pool, GFP_KERNEL, &pd_dma);
1876 if (pd == NULL) {
1877 ql_log(ql_log_warn, vha, 0x1050,
1878 "Failed to allocate port database structure.\n");
1879 fcport->query = 0;
1880 return QLA_MEMORY_ALLOC_FAILED;
1881 }
1882
1883 mcp->mb[0] = MBC_GET_PORT_DATABASE;
1884 if (opt != 0 && !IS_FWI2_CAPABLE(ha))
1885 mcp->mb[0] = MBC_ENHANCED_GET_PORT_DATABASE;
1886 mcp->mb[2] = MSW(pd_dma);
1887 mcp->mb[3] = LSW(pd_dma);
1888 mcp->mb[6] = MSW(MSD(pd_dma));
1889 mcp->mb[7] = LSW(MSD(pd_dma));
1890 mcp->mb[9] = vha->vp_idx;
1891 mcp->out_mb = MBX_9|MBX_7|MBX_6|MBX_3|MBX_2|MBX_0;
1892 mcp->in_mb = MBX_0;
1893 if (IS_FWI2_CAPABLE(ha)) {
1894 mcp->mb[1] = fcport->loop_id;
1895 mcp->mb[10] = opt;
1896 mcp->out_mb |= MBX_10|MBX_1;
1897 mcp->in_mb |= MBX_1;
1898 } else if (HAS_EXTENDED_IDS(ha)) {
1899 mcp->mb[1] = fcport->loop_id;
1900 mcp->mb[10] = opt;
1901 mcp->out_mb |= MBX_10|MBX_1;
1902 } else {
1903 mcp->mb[1] = fcport->loop_id << 8 | opt;
1904 mcp->out_mb |= MBX_1;
1905 }
1906 mcp->buf_size = IS_FWI2_CAPABLE(ha) ?
1907 PORT_DATABASE_24XX_SIZE : PORT_DATABASE_SIZE;
1908 mcp->flags = MBX_DMA_IN;
1909 mcp->tov = (ha->login_timeout * 2) + (ha->login_timeout / 2);
1910 rval = qla2x00_mailbox_command(vha, mcp);
1911 if (rval != QLA_SUCCESS)
1912 goto gpd_error_out;
1913
1914 if (IS_FWI2_CAPABLE(ha)) {
1915 uint64_t zero = 0;
1916 u8 current_login_state, last_login_state;
1917
1918 pd24 = (struct port_database_24xx *) pd;
1919
1920 /* Check for logged in state. */
1921 if (NVME_TARGET(ha, fcport)) {
1922 current_login_state = pd24->current_login_state >> 4;
1923 last_login_state = pd24->last_login_state >> 4;
1924 } else {
1925 current_login_state = pd24->current_login_state & 0xf;
1926 last_login_state = pd24->last_login_state & 0xf;
1927 }
1928 fcport->current_login_state = pd24->current_login_state;
1929 fcport->last_login_state = pd24->last_login_state;
1930
1931 /* Check for logged in state. */
1932 if (current_login_state != PDS_PRLI_COMPLETE &&
1933 last_login_state != PDS_PRLI_COMPLETE) {
1934 ql_dbg(ql_dbg_mbx, vha, 0x119a,
1935 "Unable to verify login-state (%x/%x) for loop_id %x.\n",
1936 current_login_state, last_login_state,
1937 fcport->loop_id);
1938 rval = QLA_FUNCTION_FAILED;
1939
1940 if (!fcport->query)
1941 goto gpd_error_out;
1942 }
1943
1944 if (fcport->loop_id == FC_NO_LOOP_ID ||
1945 (memcmp(fcport->port_name, (uint8_t *)&zero, 8) &&
1946 memcmp(fcport->port_name, pd24->port_name, 8))) {
1947 /* We lost the device mid way. */
1948 rval = QLA_NOT_LOGGED_IN;
1949 goto gpd_error_out;
1950 }
1951
1952 /* Names are little-endian. */
1953 memcpy(fcport->node_name, pd24->node_name, WWN_SIZE);
1954 memcpy(fcport->port_name, pd24->port_name, WWN_SIZE);
1955
1956 /* Get port_id of device. */
1957 fcport->d_id.b.domain = pd24->port_id[0];
1958 fcport->d_id.b.area = pd24->port_id[1];
1959 fcport->d_id.b.al_pa = pd24->port_id[2];
1960 fcport->d_id.b.rsvd_1 = 0;
1961
1962 /* If not target must be initiator or unknown type. */
1963 if ((pd24->prli_svc_param_word_3[0] & BIT_4) == 0)
1964 fcport->port_type = FCT_INITIATOR;
1965 else
1966 fcport->port_type = FCT_TARGET;
1967
1968 /* Passback COS information. */
1969 fcport->supported_classes = (pd24->flags & PDF_CLASS_2) ?
1970 FC_COS_CLASS2 : FC_COS_CLASS3;
1971
1972 if (pd24->prli_svc_param_word_3[0] & BIT_7)
1973 fcport->flags |= FCF_CONF_COMP_SUPPORTED;
1974 } else {
1975 uint64_t zero = 0;
1976
1977 /* Check for logged in state. */
1978 if (pd->master_state != PD_STATE_PORT_LOGGED_IN &&
1979 pd->slave_state != PD_STATE_PORT_LOGGED_IN) {
1980 ql_dbg(ql_dbg_mbx, vha, 0x100a,
1981 "Unable to verify login-state (%x/%x) - "
1982 "portid=%02x%02x%02x.\n", pd->master_state,
1983 pd->slave_state, fcport->d_id.b.domain,
1984 fcport->d_id.b.area, fcport->d_id.b.al_pa);
1985 rval = QLA_FUNCTION_FAILED;
1986 goto gpd_error_out;
1987 }
1988
1989 if (fcport->loop_id == FC_NO_LOOP_ID ||
1990 (memcmp(fcport->port_name, (uint8_t *)&zero, 8) &&
1991 memcmp(fcport->port_name, pd->port_name, 8))) {
1992 /* We lost the device mid way. */
1993 rval = QLA_NOT_LOGGED_IN;
1994 goto gpd_error_out;
1995 }
1996
1997 /* Names are little-endian. */
1998 memcpy(fcport->node_name, pd->node_name, WWN_SIZE);
1999 memcpy(fcport->port_name, pd->port_name, WWN_SIZE);
2000
2001 /* Get port_id of device. */
2002 fcport->d_id.b.domain = pd->port_id[0];
2003 fcport->d_id.b.area = pd->port_id[3];
2004 fcport->d_id.b.al_pa = pd->port_id[2];
2005 fcport->d_id.b.rsvd_1 = 0;
2006
2007 /* If not target must be initiator or unknown type. */
2008 if ((pd->prli_svc_param_word_3[0] & BIT_4) == 0)
2009 fcport->port_type = FCT_INITIATOR;
2010 else
2011 fcport->port_type = FCT_TARGET;
2012
2013 /* Passback COS information. */
2014 fcport->supported_classes = (pd->options & BIT_4) ?
2015 FC_COS_CLASS2 : FC_COS_CLASS3;
2016 }
2017
2018 gpd_error_out:
2019 dma_pool_free(ha->s_dma_pool, pd, pd_dma);
2020 fcport->query = 0;
2021
2022 if (rval != QLA_SUCCESS) {
2023 ql_dbg(ql_dbg_mbx, vha, 0x1052,
2024 "Failed=%x mb[0]=%x mb[1]=%x.\n", rval,
2025 mcp->mb[0], mcp->mb[1]);
2026 } else {
2027 ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x1053,
2028 "Done %s.\n", __func__);
2029 }
2030
2031 return rval;
2032 }
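
/*
 * Illustrative sketch (not part of the driver): a typical caller of
 * qla2x00_get_port_database() treats QLA_NOT_LOGGED_IN as "device was
 * lost mid way" and asks the DPC thread to relogin.  Every symbol used
 * below already appears elsewhere in this file.
 */
static inline int qla_gpdb_check_example(scsi_qla_host_t *vha,
    fc_port_t *fcport)
{
	int rval = qla2x00_get_port_database(vha, fcport, 0);

	if (rval == QLA_NOT_LOGGED_IN) {
		/* Port dropped off; request a relogin from DPC. */
		set_bit(RELOGIN_NEEDED, &vha->dpc_flags);
		qla2xxx_wake_dpc(vha);
	}

	return rval;
}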
2033
2034 int
2035 qla24xx_get_port_database(scsi_qla_host_t *vha, u16 nport_handle,
2036 struct port_database_24xx *pdb)
2037 {
2038 mbx_cmd_t mc;
2039 mbx_cmd_t *mcp = &mc;
2040 dma_addr_t pdb_dma;
2041 int rval;
2042
2043 ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x1115,
2044 "Entered %s.\n", __func__);
2045
2046 memset(pdb, 0, sizeof(*pdb));
2047
2048 pdb_dma = dma_map_single(&vha->hw->pdev->dev, pdb,
2049 sizeof(*pdb), DMA_FROM_DEVICE);
2050 if (dma_mapping_error(&vha->hw->pdev->dev, pdb_dma)) {
2051 ql_log(ql_log_warn, vha, 0x1116, "Failed to map dma buffer.\n");
2052 return QLA_MEMORY_ALLOC_FAILED;
2053 }
2054
2055 mcp->mb[0] = MBC_GET_PORT_DATABASE;
2056 mcp->mb[1] = nport_handle;
2057 mcp->mb[2] = MSW(LSD(pdb_dma));
2058 mcp->mb[3] = LSW(LSD(pdb_dma));
2059 mcp->mb[6] = MSW(MSD(pdb_dma));
2060 mcp->mb[7] = LSW(MSD(pdb_dma));
2061 mcp->mb[9] = 0;
2062 mcp->mb[10] = 0;
2063 mcp->out_mb = MBX_10|MBX_9|MBX_7|MBX_6|MBX_3|MBX_2|MBX_1|MBX_0;
2064 mcp->in_mb = MBX_1|MBX_0;
2065 mcp->buf_size = sizeof(*pdb);
2066 mcp->flags = MBX_DMA_IN;
2067 mcp->tov = vha->hw->login_timeout * 2;
2068 rval = qla2x00_mailbox_command(vha, mcp);
2069
2070 if (rval != QLA_SUCCESS) {
2071 ql_dbg(ql_dbg_mbx, vha, 0x111a,
2072 "Failed=%x mb[0]=%x mb[1]=%x.\n",
2073 rval, mcp->mb[0], mcp->mb[1]);
2074 } else {
2075 ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x111b,
2076 "Done %s.\n", __func__);
2077 }
2078
2079 dma_unmap_single(&vha->hw->pdev->dev, pdb_dma,
2080 sizeof(*pdb), DMA_FROM_DEVICE);
2081
2082 return rval;
2083 }
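
/*
 * Illustrative sketch (not part of the driver): the caller must supply
 * a buffer that can be DMA-mapped (e.g. kzalloc'ed, not on the stack),
 * because the helper above maps it with dma_map_single().  kzalloc()
 * and kfree() are assumed to be available through the driver headers.
 */
static inline int qla_gpdb_by_handle_example(scsi_qla_host_t *vha,
    u16 nport_handle)
{
	struct port_database_24xx *pdb;
	int rval;

	pdb = kzalloc(sizeof(*pdb), GFP_KERNEL);
	if (!pdb)
		return QLA_MEMORY_ALLOC_FAILED;

	rval = qla24xx_get_port_database(vha, nport_handle, pdb);
	kfree(pdb);

	return rval;
}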
2084
2085 /*
2086 * qla2x00_get_firmware_state
2087 * Get adapter firmware state.
2088 *
2089 * Input:
2090 * ha = adapter block pointer.
2091 * states = pointer for returned firmware state words.
2092 * TARGET_QUEUE_LOCK must be released.
2093 * ADAPTER_STATE_LOCK must be released.
2094 *
2095 * Returns:
2096 * qla2x00 local function return status code.
2097 *
2098 * Context:
2099 * Kernel context.
2100 */
2101 int
2102 qla2x00_get_firmware_state(scsi_qla_host_t *vha, uint16_t *states)
2103 {
2104 int rval;
2105 mbx_cmd_t mc;
2106 mbx_cmd_t *mcp = &mc;
2107 struct qla_hw_data *ha = vha->hw;
2108
2109 ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x1054,
2110 "Entered %s.\n", __func__);
2111
2112 mcp->mb[0] = MBC_GET_FIRMWARE_STATE;
2113 mcp->out_mb = MBX_0;
2114 if (IS_FWI2_CAPABLE(vha->hw))
2115 mcp->in_mb = MBX_6|MBX_5|MBX_4|MBX_3|MBX_2|MBX_1|MBX_0;
2116 else
2117 mcp->in_mb = MBX_1|MBX_0;
2118 mcp->tov = MBX_TOV_SECONDS;
2119 mcp->flags = 0;
2120 rval = qla2x00_mailbox_command(vha, mcp);
2121
2122 /* Return firmware states. */
2123 states[0] = mcp->mb[1];
2124 if (IS_FWI2_CAPABLE(vha->hw)) {
2125 states[1] = mcp->mb[2];
2126 states[2] = mcp->mb[3]; /* SFP info */
2127 states[3] = mcp->mb[4];
2128 states[4] = mcp->mb[5];
2129 states[5] = mcp->mb[6]; /* DPORT status */
2130 }
2131
2132 if (rval != QLA_SUCCESS) {
2133 /*EMPTY*/
2134 ql_dbg(ql_dbg_mbx, vha, 0x1055, "Failed=%x.\n", rval);
2135 } else {
2136 if (IS_QLA27XX(ha) || IS_QLA28XX(ha)) {
2137 if (mcp->mb[2] == 6 || mcp->mb[3] == 2)
2138 ql_dbg(ql_dbg_mbx, vha, 0x119e,
2139 "Invalid SFP/Validation Failed\n");
2140 }
2141 ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x1056,
2142 "Done %s.\n", __func__);
2143 }
2144
2145 return rval;
2146 }
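
/*
 * Illustrative sketch (not part of the driver): decoding the state
 * words returned above.  states[0] is the overall firmware state;
 * FSTATE_READY is assumed to be the "firmware ready" value from
 * qla_def.h.
 */
static inline int qla_fw_ready_example(scsi_qla_host_t *vha)
{
	uint16_t state[6] = { 0 };
	int rval = qla2x00_get_firmware_state(vha, state);

	if (rval != QLA_SUCCESS)
		return rval;

	return state[0] == FSTATE_READY ? QLA_SUCCESS : QLA_FUNCTION_FAILED;
}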
2147
2148 /*
2149 * qla2x00_get_port_name
2150 * Issue get port name mailbox command.
2151 * Returned name is in big endian format.
2152 *
2153 * Input:
2154 * ha = adapter block pointer.
2155 * loop_id = loop ID of device.
2156 * name = pointer for name.
2157 * TARGET_QUEUE_LOCK must be released.
2158 * ADAPTER_STATE_LOCK must be released.
2159 *
2160 * Returns:
2161 * qla2x00 local function return status code.
2162 *
2163 * Context:
2164 * Kernel context.
2165 */
2166 int
2167 qla2x00_get_port_name(scsi_qla_host_t *vha, uint16_t loop_id, uint8_t *name,
2168 uint8_t opt)
2169 {
2170 int rval;
2171 mbx_cmd_t mc;
2172 mbx_cmd_t *mcp = &mc;
2173
2174 ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x1057,
2175 "Entered %s.\n", __func__);
2176
2177 mcp->mb[0] = MBC_GET_PORT_NAME;
2178 mcp->mb[9] = vha->vp_idx;
2179 mcp->out_mb = MBX_9|MBX_1|MBX_0;
2180 if (HAS_EXTENDED_IDS(vha->hw)) {
2181 mcp->mb[1] = loop_id;
2182 mcp->mb[10] = opt;
2183 mcp->out_mb |= MBX_10;
2184 } else {
2185 mcp->mb[1] = loop_id << 8 | opt;
2186 }
2187
2188 mcp->in_mb = MBX_7|MBX_6|MBX_3|MBX_2|MBX_1|MBX_0;
2189 mcp->tov = MBX_TOV_SECONDS;
2190 mcp->flags = 0;
2191 rval = qla2x00_mailbox_command(vha, mcp);
2192
2193 if (rval != QLA_SUCCESS) {
2194 /*EMPTY*/
2195 ql_dbg(ql_dbg_mbx, vha, 0x1058, "Failed=%x.\n", rval);
2196 } else {
2197 if (name != NULL) {
2198 /* This function returns name in big endian. */
2199 name[0] = MSB(mcp->mb[2]);
2200 name[1] = LSB(mcp->mb[2]);
2201 name[2] = MSB(mcp->mb[3]);
2202 name[3] = LSB(mcp->mb[3]);
2203 name[4] = MSB(mcp->mb[6]);
2204 name[5] = LSB(mcp->mb[6]);
2205 name[6] = MSB(mcp->mb[7]);
2206 name[7] = LSB(mcp->mb[7]);
2207 }
2208
2209 ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x1059,
2210 "Done %s.\n", __func__);
2211 }
2212
2213 return rval;
2214 }
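
/*
 * Illustrative sketch (not part of the driver): because the name
 * returned above is big endian, it can be compared numerically with
 * wwn_to_u64(), which is already used further down in this file.
 * Passing opt = 0 is assumed to request the port name.
 */
static inline u64 qla_port_name_u64_example(scsi_qla_host_t *vha,
    uint16_t loop_id)
{
	uint8_t name[WWN_SIZE] = { 0 };

	if (qla2x00_get_port_name(vha, loop_id, name, 0) != QLA_SUCCESS)
		return 0;

	return wwn_to_u64(name);
}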
2215
2216 /*
2217 * qla24xx_link_initialize
2218 * Issue link initialization mailbox command.
2219 *
2220 * Input:
2221 * ha = adapter block pointer.
2222 * TARGET_QUEUE_LOCK must be released.
2223 * ADAPTER_STATE_LOCK must be released.
2224 *
2225 * Returns:
2226 * qla2x00 local function return status code.
2227 *
2228 * Context:
2229 * Kernel context.
2230 */
2231 int
2232 qla24xx_link_initialize(scsi_qla_host_t *vha)
2233 {
2234 int rval;
2235 mbx_cmd_t mc;
2236 mbx_cmd_t *mcp = &mc;
2237
2238 ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x1152,
2239 "Entered %s.\n", __func__);
2240
2241 if (!IS_FWI2_CAPABLE(vha->hw) || IS_CNA_CAPABLE(vha->hw))
2242 return QLA_FUNCTION_FAILED;
2243
2244 mcp->mb[0] = MBC_LINK_INITIALIZATION;
2245 mcp->mb[1] = BIT_4;
2246 if (vha->hw->operating_mode == LOOP)
2247 mcp->mb[1] |= BIT_6;
2248 else
2249 mcp->mb[1] |= BIT_5;
2250 mcp->mb[2] = 0;
2251 mcp->mb[3] = 0;
2252 mcp->out_mb = MBX_3|MBX_2|MBX_1|MBX_0;
2253 mcp->in_mb = MBX_0;
2254 mcp->tov = MBX_TOV_SECONDS;
2255 mcp->flags = 0;
2256 rval = qla2x00_mailbox_command(vha, mcp);
2257
2258 if (rval != QLA_SUCCESS) {
2259 ql_dbg(ql_dbg_mbx, vha, 0x1153, "Failed=%x.\n", rval);
2260 } else {
2261 ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x1154,
2262 "Done %s.\n", __func__);
2263 }
2264
2265 return rval;
2266 }
2267
2268 /*
2269 * qla2x00_lip_reset
2270 * Issue LIP reset mailbox command.
2271 *
2272 * Input:
2273 * ha = adapter block pointer.
2274 * TARGET_QUEUE_LOCK must be released.
2275 * ADAPTER_STATE_LOCK must be released.
2276 *
2277 * Returns:
2278 * qla2x00 local function return status code.
2279 *
2280 * Context:
2281 * Kernel context.
2282 */
2283 int
2284 qla2x00_lip_reset(scsi_qla_host_t *vha)
2285 {
2286 int rval;
2287 mbx_cmd_t mc;
2288 mbx_cmd_t *mcp = &mc;
2289
2290 ql_dbg(ql_dbg_disc, vha, 0x105a,
2291 "Entered %s.\n", __func__);
2292
2293 if (IS_CNA_CAPABLE(vha->hw)) {
2294 /* Logout across all FCFs. */
2295 mcp->mb[0] = MBC_LIP_FULL_LOGIN;
2296 mcp->mb[1] = BIT_1;
2297 mcp->mb[2] = 0;
2298 mcp->out_mb = MBX_2|MBX_1|MBX_0;
2299 } else if (IS_FWI2_CAPABLE(vha->hw)) {
2300 mcp->mb[0] = MBC_LIP_FULL_LOGIN;
2301 mcp->mb[1] = BIT_4;
2302 mcp->mb[2] = 0;
2303 mcp->mb[3] = vha->hw->loop_reset_delay;
2304 mcp->out_mb = MBX_3|MBX_2|MBX_1|MBX_0;
2305 } else {
2306 mcp->mb[0] = MBC_LIP_RESET;
2307 mcp->out_mb = MBX_3|MBX_2|MBX_1|MBX_0;
2308 if (HAS_EXTENDED_IDS(vha->hw)) {
2309 mcp->mb[1] = 0x00ff;
2310 mcp->mb[10] = 0;
2311 mcp->out_mb |= MBX_10;
2312 } else {
2313 mcp->mb[1] = 0xff00;
2314 }
2315 mcp->mb[2] = vha->hw->loop_reset_delay;
2316 mcp->mb[3] = 0;
2317 }
2318 mcp->in_mb = MBX_0;
2319 mcp->tov = MBX_TOV_SECONDS;
2320 mcp->flags = 0;
2321 rval = qla2x00_mailbox_command(vha, mcp);
2322
2323 if (rval != QLA_SUCCESS) {
2324 /*EMPTY*/
2325 ql_dbg(ql_dbg_mbx, vha, 0x105b, "Failed=%x.\n", rval);
2326 } else {
2327 /*EMPTY*/
2328 ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x105c,
2329 "Done %s.\n", __func__);
2330 }
2331
2332 return rval;
2333 }
2334
2335 /*
2336 * qla2x00_send_sns
2337 * Send SNS command.
2338 *
2339 * Input:
2340 * ha = adapter block pointer.
2341 * sns = pointer for command.
2342 * cmd_size = command size.
2343 * buf_size = response/command size.
2344 * TARGET_QUEUE_LOCK must be released.
2345 * ADAPTER_STATE_LOCK must be released.
2346 *
2347 * Returns:
2348 * qla2x00 local function return status code.
2349 *
2350 * Context:
2351 * Kernel context.
2352 */
2353 int
2354 qla2x00_send_sns(scsi_qla_host_t *vha, dma_addr_t sns_phys_address,
2355 uint16_t cmd_size, size_t buf_size)
2356 {
2357 int rval;
2358 mbx_cmd_t mc;
2359 mbx_cmd_t *mcp = &mc;
2360
2361 ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x105d,
2362 "Entered %s.\n", __func__);
2363
2364 ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x105e,
2365 "Retry cnt=%d ratov=%d.\n",
2366 vha->hw->retry_count, vha->hw->login_timeout);
2367
2368 mcp->mb[0] = MBC_SEND_SNS_COMMAND;
2369 mcp->mb[1] = cmd_size;
2370 mcp->mb[2] = MSW(sns_phys_address);
2371 mcp->mb[3] = LSW(sns_phys_address);
2372 mcp->mb[6] = MSW(MSD(sns_phys_address));
2373 mcp->mb[7] = LSW(MSD(sns_phys_address));
2374 mcp->out_mb = MBX_7|MBX_6|MBX_3|MBX_2|MBX_1|MBX_0;
2375 mcp->in_mb = MBX_1|MBX_0;
2376 mcp->buf_size = buf_size;
2377 mcp->flags = MBX_DMA_OUT|MBX_DMA_IN;
2378 mcp->tov = (vha->hw->login_timeout * 2) + (vha->hw->login_timeout / 2);
2379 rval = qla2x00_mailbox_command(vha, mcp);
2380
2381 if (rval != QLA_SUCCESS) {
2382 /*EMPTY*/
2383 ql_dbg(ql_dbg_mbx, vha, 0x105f,
2384 "Failed=%x mb[0]=%x mb[1]=%x.\n",
2385 rval, mcp->mb[0], mcp->mb[1]);
2386 } else {
2387 /*EMPTY*/
2388 ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x1060,
2389 "Done %s.\n", __func__);
2390 }
2391
2392 return rval;
2393 }
2394
2395 int
2396 qla24xx_login_fabric(scsi_qla_host_t *vha, uint16_t loop_id, uint8_t domain,
2397 uint8_t area, uint8_t al_pa, uint16_t *mb, uint8_t opt)
2398 {
2399 int rval;
2400
2401 struct logio_entry_24xx *lg;
2402 dma_addr_t lg_dma;
2403 uint32_t iop[2];
2404 struct qla_hw_data *ha = vha->hw;
2405 struct req_que *req;
2406
2407 ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x1061,
2408 "Entered %s.\n", __func__);
2409
2410 if (vha->vp_idx && vha->qpair)
2411 req = vha->qpair->req;
2412 else
2413 req = ha->req_q_map[0];
2414
2415 lg = dma_pool_zalloc(ha->s_dma_pool, GFP_KERNEL, &lg_dma);
2416 if (lg == NULL) {
2417 ql_log(ql_log_warn, vha, 0x1062,
2418 "Failed to allocate login IOCB.\n");
2419 return QLA_MEMORY_ALLOC_FAILED;
2420 }
2421
2422 lg->entry_type = LOGINOUT_PORT_IOCB_TYPE;
2423 lg->entry_count = 1;
2424 lg->handle = make_handle(req->id, lg->handle);
2425 lg->nport_handle = cpu_to_le16(loop_id);
2426 lg->control_flags = cpu_to_le16(LCF_COMMAND_PLOGI);
2427 if (opt & BIT_0)
2428 lg->control_flags |= cpu_to_le16(LCF_COND_PLOGI);
2429 if (opt & BIT_1)
2430 lg->control_flags |= cpu_to_le16(LCF_SKIP_PRLI);
2431 lg->port_id[0] = al_pa;
2432 lg->port_id[1] = area;
2433 lg->port_id[2] = domain;
2434 lg->vp_index = vha->vp_idx;
2435 rval = qla2x00_issue_iocb_timeout(vha, lg, lg_dma, 0,
2436 (ha->r_a_tov / 10 * 2) + 2);
2437 if (rval != QLA_SUCCESS) {
2438 ql_dbg(ql_dbg_mbx, vha, 0x1063,
2439 "Failed to issue login IOCB (%x).\n", rval);
2440 } else if (lg->entry_status != 0) {
2441 ql_dbg(ql_dbg_mbx, vha, 0x1064,
2442 "Failed to complete IOCB -- error status (%x).\n",
2443 lg->entry_status);
2444 rval = QLA_FUNCTION_FAILED;
2445 } else if (lg->comp_status != cpu_to_le16(CS_COMPLETE)) {
2446 iop[0] = le32_to_cpu(lg->io_parameter[0]);
2447 iop[1] = le32_to_cpu(lg->io_parameter[1]);
2448
2449 ql_dbg(ql_dbg_mbx, vha, 0x1065,
2450 "Failed to complete IOCB -- completion status (%x) "
2451 "ioparam=%x/%x.\n", le16_to_cpu(lg->comp_status),
2452 iop[0], iop[1]);
2453
2454 switch (iop[0]) {
2455 case LSC_SCODE_PORTID_USED:
2456 mb[0] = MBS_PORT_ID_USED;
2457 mb[1] = LSW(iop[1]);
2458 break;
2459 case LSC_SCODE_NPORT_USED:
2460 mb[0] = MBS_LOOP_ID_USED;
2461 break;
2462 case LSC_SCODE_NOLINK:
2463 case LSC_SCODE_NOIOCB:
2464 case LSC_SCODE_NOXCB:
2465 case LSC_SCODE_CMD_FAILED:
2466 case LSC_SCODE_NOFABRIC:
2467 case LSC_SCODE_FW_NOT_READY:
2468 case LSC_SCODE_NOT_LOGGED_IN:
2469 case LSC_SCODE_NOPCB:
2470 case LSC_SCODE_ELS_REJECT:
2471 case LSC_SCODE_CMD_PARAM_ERR:
2472 case LSC_SCODE_NONPORT:
2473 case LSC_SCODE_LOGGED_IN:
2474 case LSC_SCODE_NOFLOGI_ACC:
2475 default:
2476 mb[0] = MBS_COMMAND_ERROR;
2477 break;
2478 }
2479 } else {
2480 ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x1066,
2481 "Done %s.\n", __func__);
2482
2483 iop[0] = le32_to_cpu(lg->io_parameter[0]);
2484
2485 mb[0] = MBS_COMMAND_COMPLETE;
2486 mb[1] = 0;
2487 if (iop[0] & BIT_4) {
2488 if (iop[0] & BIT_8)
2489 mb[1] |= BIT_1;
2490 } else
2491 mb[1] = BIT_0;
2492
2493 /* Passback COS information. */
2494 mb[10] = 0;
2495 if (lg->io_parameter[7] || lg->io_parameter[8])
2496 mb[10] |= BIT_0; /* Class 2. */
2497 if (lg->io_parameter[9] || lg->io_parameter[10])
2498 mb[10] |= BIT_1; /* Class 3. */
2499 if (lg->io_parameter[0] & cpu_to_le32(BIT_7))
2500 mb[10] |= BIT_7; /* Confirmed Completion Allowed */
2503 }
2504
2505 dma_pool_free(ha->s_dma_pool, lg, lg_dma);
2506
2507 return rval;
2508 }
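
/*
 * Illustrative sketch (not part of the driver): interpreting the mb[]
 * array filled in by qla24xx_login_fabric() above.  mb[0] carries a
 * mailbox-style completion status and mb[10] the class-of-service
 * bits (BIT_0 = class 2, BIT_1 = class 3), as set in the success path
 * of that function.
 */
static inline int qla_fabric_login_example(scsi_qla_host_t *vha,
    fc_port_t *fcport)
{
	uint16_t mb[16] = { 0 };	/* large enough for mb[10] */
	int rval;

	rval = qla24xx_login_fabric(vha, fcport->loop_id,
	    fcport->d_id.b.domain, fcport->d_id.b.area,
	    fcport->d_id.b.al_pa, mb, 0);
	if (rval != QLA_SUCCESS || mb[0] != MBS_COMMAND_COMPLETE)
		return QLA_FUNCTION_FAILED;

	fcport->supported_classes = (mb[10] & BIT_0) ?
	    FC_COS_CLASS2 : FC_COS_CLASS3;

	return QLA_SUCCESS;
}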
2509
2510 /*
2511 * qla2x00_login_fabric
2512 * Issue login fabric port mailbox command.
2513 *
2514 * Input:
2515 * ha = adapter block pointer.
2516 * loop_id = device loop ID.
2517 * domain = device domain.
2518 * area = device area.
2519 * al_pa = device AL_PA.
2520 * status = pointer for return status.
2521 * opt = command options.
2522 * TARGET_QUEUE_LOCK must be released.
2523 * ADAPTER_STATE_LOCK must be released.
2524 *
2525 * Returns:
2526 * qla2x00 local function return status code.
2527 *
2528 * Context:
2529 * Kernel context.
2530 */
2531 int
2532 qla2x00_login_fabric(scsi_qla_host_t *vha, uint16_t loop_id, uint8_t domain,
2533 uint8_t area, uint8_t al_pa, uint16_t *mb, uint8_t opt)
2534 {
2535 int rval;
2536 mbx_cmd_t mc;
2537 mbx_cmd_t *mcp = &mc;
2538 struct qla_hw_data *ha = vha->hw;
2539
2540 ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x1067,
2541 "Entered %s.\n", __func__);
2542
2543 mcp->mb[0] = MBC_LOGIN_FABRIC_PORT;
2544 mcp->out_mb = MBX_3|MBX_2|MBX_1|MBX_0;
2545 if (HAS_EXTENDED_IDS(ha)) {
2546 mcp->mb[1] = loop_id;
2547 mcp->mb[10] = opt;
2548 mcp->out_mb |= MBX_10;
2549 } else {
2550 mcp->mb[1] = (loop_id << 8) | opt;
2551 }
2552 mcp->mb[2] = domain;
2553 mcp->mb[3] = area << 8 | al_pa;
2554
2555 mcp->in_mb = MBX_7|MBX_6|MBX_2|MBX_1|MBX_0;
2556 mcp->tov = (ha->login_timeout * 2) + (ha->login_timeout / 2);
2557 mcp->flags = 0;
2558 rval = qla2x00_mailbox_command(vha, mcp);
2559
2560 /* Return mailbox statuses. */
2561 if (mb != NULL) {
2562 mb[0] = mcp->mb[0];
2563 mb[1] = mcp->mb[1];
2564 mb[2] = mcp->mb[2];
2565 mb[6] = mcp->mb[6];
2566 mb[7] = mcp->mb[7];
2567 /* COS retrieved from Get-Port-Database mailbox command. */
2568 mb[10] = 0;
2569 }
2570
2571 if (rval != QLA_SUCCESS) {
2572 /* RLU tmp code: the main mailbox_command function should be changed
2573 * to return ok even when the mailbox completion value is not
2574 * SUCCESS. Until then, the caller is responsible for interpreting
2575 * the return values of this mailbox command, so that we do not
2576 * have to change too much of the existing code.
2577 */
2578 if (mcp->mb[0] == 0x4001 || mcp->mb[0] == 0x4002 ||
2579 mcp->mb[0] == 0x4003 || mcp->mb[0] == 0x4005 ||
2580 mcp->mb[0] == 0x4006)
2581 rval = QLA_SUCCESS;
2582
2583 /*EMPTY*/
2584 ql_dbg(ql_dbg_mbx, vha, 0x1068,
2585 "Failed=%x mb[0]=%x mb[1]=%x mb[2]=%x.\n",
2586 rval, mcp->mb[0], mcp->mb[1], mcp->mb[2]);
2587 } else {
2588 /*EMPTY*/
2589 ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x1069,
2590 "Done %s.\n", __func__);
2591 }
2592
2593 return rval;
2594 }
2595
2596 /*
2597 * qla2x00_login_local_device
2598 * Issue login loop port mailbox command.
2599 *
2600 * Input:
2601 * ha = adapter block pointer.
2602 * loop_id = device loop ID.
2603 * opt = command options.
2604 *
2605 * Returns:
2606 * Return status code.
2607 *
2608 * Context:
2609 * Kernel context.
2610 *
2611 */
2612 int
2613 qla2x00_login_local_device(scsi_qla_host_t *vha, fc_port_t *fcport,
2614 uint16_t *mb_ret, uint8_t opt)
2615 {
2616 int rval;
2617 mbx_cmd_t mc;
2618 mbx_cmd_t *mcp = &mc;
2619 struct qla_hw_data *ha = vha->hw;
2620
2621 ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x106a,
2622 "Entered %s.\n", __func__);
2623
2624 if (IS_FWI2_CAPABLE(ha))
2625 return qla24xx_login_fabric(vha, fcport->loop_id,
2626 fcport->d_id.b.domain, fcport->d_id.b.area,
2627 fcport->d_id.b.al_pa, mb_ret, opt);
2628
2629 mcp->mb[0] = MBC_LOGIN_LOOP_PORT;
2630 if (HAS_EXTENDED_IDS(ha))
2631 mcp->mb[1] = fcport->loop_id;
2632 else
2633 mcp->mb[1] = fcport->loop_id << 8;
2634 mcp->mb[2] = opt;
2635 mcp->out_mb = MBX_2|MBX_1|MBX_0;
2636 mcp->in_mb = MBX_7|MBX_6|MBX_1|MBX_0;
2637 mcp->tov = (ha->login_timeout * 2) + (ha->login_timeout / 2);
2638 mcp->flags = 0;
2639 rval = qla2x00_mailbox_command(vha, mcp);
2640
2641 /* Return mailbox statuses. */
2642 if (mb_ret != NULL) {
2643 mb_ret[0] = mcp->mb[0];
2644 mb_ret[1] = mcp->mb[1];
2645 mb_ret[6] = mcp->mb[6];
2646 mb_ret[7] = mcp->mb[7];
2647 }
2648
2649 if (rval != QLA_SUCCESS) {
2650 /* AV tmp code: the main mailbox_command function should be changed
2651 * to return ok even when the mailbox completion value is not
2652 * SUCCESS. Until then, the caller is responsible for interpreting
2653 * the return values of this mailbox command, so that we do not
2654 * have to change too much of the existing code.
2655 */
2656 if (mcp->mb[0] == 0x4005 || mcp->mb[0] == 0x4006)
2657 rval = QLA_SUCCESS;
2658
2659 ql_dbg(ql_dbg_mbx, vha, 0x106b,
2660 "Failed=%x mb[0]=%x mb[1]=%x mb[6]=%x mb[7]=%x.\n",
2661 rval, mcp->mb[0], mcp->mb[1], mcp->mb[6], mcp->mb[7]);
2662 } else {
2663 /*EMPTY*/
2664 ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x106c,
2665 "Done %s.\n", __func__);
2666 }
2667
2668 return (rval);
2669 }
2670
2671 int
2672 qla24xx_fabric_logout(scsi_qla_host_t *vha, uint16_t loop_id, uint8_t domain,
2673 uint8_t area, uint8_t al_pa)
2674 {
2675 int rval;
2676 struct logio_entry_24xx *lg;
2677 dma_addr_t lg_dma;
2678 struct qla_hw_data *ha = vha->hw;
2679 struct req_que *req;
2680
2681 ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x106d,
2682 "Entered %s.\n", __func__);
2683
2684 lg = dma_pool_zalloc(ha->s_dma_pool, GFP_KERNEL, &lg_dma);
2685 if (lg == NULL) {
2686 ql_log(ql_log_warn, vha, 0x106e,
2687 "Failed to allocate logout IOCB.\n");
2688 return QLA_MEMORY_ALLOC_FAILED;
2689 }
2690
2691 req = vha->req;
2692 lg->entry_type = LOGINOUT_PORT_IOCB_TYPE;
2693 lg->entry_count = 1;
2694 lg->handle = make_handle(req->id, lg->handle);
2695 lg->nport_handle = cpu_to_le16(loop_id);
2696 lg->control_flags =
2697 cpu_to_le16(LCF_COMMAND_LOGO|LCF_IMPL_LOGO|
2698 LCF_FREE_NPORT);
2699 lg->port_id[0] = al_pa;
2700 lg->port_id[1] = area;
2701 lg->port_id[2] = domain;
2702 lg->vp_index = vha->vp_idx;
2703 rval = qla2x00_issue_iocb_timeout(vha, lg, lg_dma, 0,
2704 (ha->r_a_tov / 10 * 2) + 2);
2705 if (rval != QLA_SUCCESS) {
2706 ql_dbg(ql_dbg_mbx, vha, 0x106f,
2707 "Failed to issue logout IOCB (%x).\n", rval);
2708 } else if (lg->entry_status != 0) {
2709 ql_dbg(ql_dbg_mbx, vha, 0x1070,
2710 "Failed to complete IOCB -- error status (%x).\n",
2711 lg->entry_status);
2712 rval = QLA_FUNCTION_FAILED;
2713 } else if (lg->comp_status != cpu_to_le16(CS_COMPLETE)) {
2714 ql_dbg(ql_dbg_mbx, vha, 0x1071,
2715 "Failed to complete IOCB -- completion status (%x) "
2716 "ioparam=%x/%x.\n", le16_to_cpu(lg->comp_status),
2717 le32_to_cpu(lg->io_parameter[0]),
2718 le32_to_cpu(lg->io_parameter[1]));
2719 } else {
2720 /*EMPTY*/
2721 ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x1072,
2722 "Done %s.\n", __func__);
2723 }
2724
2725 dma_pool_free(ha->s_dma_pool, lg, lg_dma);
2726
2727 return rval;
2728 }
2729
2730 /*
2731 * qla2x00_fabric_logout
2732 * Issue logout fabric port mailbox command.
2733 *
2734 * Input:
2735 * ha = adapter block pointer.
2736 * loop_id = device loop ID.
2737 * TARGET_QUEUE_LOCK must be released.
2738 * ADAPTER_STATE_LOCK must be released.
2739 *
2740 * Returns:
2741 * qla2x00 local function return status code.
2742 *
2743 * Context:
2744 * Kernel context.
2745 */
2746 int
2747 qla2x00_fabric_logout(scsi_qla_host_t *vha, uint16_t loop_id, uint8_t domain,
2748 uint8_t area, uint8_t al_pa)
2749 {
2750 int rval;
2751 mbx_cmd_t mc;
2752 mbx_cmd_t *mcp = &mc;
2753
2754 ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x1073,
2755 "Entered %s.\n", __func__);
2756
2757 mcp->mb[0] = MBC_LOGOUT_FABRIC_PORT;
2758 mcp->out_mb = MBX_1|MBX_0;
2759 if (HAS_EXTENDED_IDS(vha->hw)) {
2760 mcp->mb[1] = loop_id;
2761 mcp->mb[10] = 0;
2762 mcp->out_mb |= MBX_10;
2763 } else {
2764 mcp->mb[1] = loop_id << 8;
2765 }
2766
2767 mcp->in_mb = MBX_1|MBX_0;
2768 mcp->tov = MBX_TOV_SECONDS;
2769 mcp->flags = 0;
2770 rval = qla2x00_mailbox_command(vha, mcp);
2771
2772 if (rval != QLA_SUCCESS) {
2773 /*EMPTY*/
2774 ql_dbg(ql_dbg_mbx, vha, 0x1074,
2775 "Failed=%x mb[1]=%x.\n", rval, mcp->mb[1]);
2776 } else {
2777 /*EMPTY*/
2778 ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x1075,
2779 "Done %s.\n", __func__);
2780 }
2781
2782 return rval;
2783 }
2784
2785 /*
2786 * qla2x00_full_login_lip
2787 * Issue full login LIP mailbox command.
2788 *
2789 * Input:
2790 * ha = adapter block pointer.
2791 * TARGET_QUEUE_LOCK must be released.
2792 * ADAPTER_STATE_LOCK must be released.
2793 *
2794 * Returns:
2795 * qla2x00 local function return status code.
2796 *
2797 * Context:
2798 * Kernel context.
2799 */
2800 int
2801 qla2x00_full_login_lip(scsi_qla_host_t *vha)
2802 {
2803 int rval;
2804 mbx_cmd_t mc;
2805 mbx_cmd_t *mcp = &mc;
2806
2807 ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x1076,
2808 "Entered %s.\n", __func__);
2809
2810 mcp->mb[0] = MBC_LIP_FULL_LOGIN;
2811 mcp->mb[1] = IS_FWI2_CAPABLE(vha->hw) ? BIT_4 : 0;
2812 mcp->mb[2] = 0;
2813 mcp->mb[3] = 0;
2814 mcp->out_mb = MBX_3|MBX_2|MBX_1|MBX_0;
2815 mcp->in_mb = MBX_0;
2816 mcp->tov = MBX_TOV_SECONDS;
2817 mcp->flags = 0;
2818 rval = qla2x00_mailbox_command(vha, mcp);
2819
2820 if (rval != QLA_SUCCESS) {
2821 /*EMPTY*/
2822 ql_dbg(ql_dbg_mbx, vha, 0x1077, "Failed=%x.\n", rval);
2823 } else {
2824 /*EMPTY*/
2825 ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x1078,
2826 "Done %s.\n", __func__);
2827 }
2828
2829 return rval;
2830 }
2831
2832 /*
2833 * qla2x00_get_id_list
2834 *
2835 * Input:
2836 * ha = adapter block pointer.
2837 *
2838 * Returns:
2839 * qla2x00 local function return status code.
2840 *
2841 * Context:
2842 * Kernel context.
2843 */
2844 int
2845 qla2x00_get_id_list(scsi_qla_host_t *vha, void *id_list, dma_addr_t id_list_dma,
2846 uint16_t *entries)
2847 {
2848 int rval;
2849 mbx_cmd_t mc;
2850 mbx_cmd_t *mcp = &mc;
2851
2852 ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x1079,
2853 "Entered %s.\n", __func__);
2854
2855 if (id_list == NULL)
2856 return QLA_FUNCTION_FAILED;
2857
2858 mcp->mb[0] = MBC_GET_ID_LIST;
2859 mcp->out_mb = MBX_0;
2860 if (IS_FWI2_CAPABLE(vha->hw)) {
2861 mcp->mb[2] = MSW(id_list_dma);
2862 mcp->mb[3] = LSW(id_list_dma);
2863 mcp->mb[6] = MSW(MSD(id_list_dma));
2864 mcp->mb[7] = LSW(MSD(id_list_dma));
2865 mcp->mb[8] = 0;
2866 mcp->mb[9] = vha->vp_idx;
2867 mcp->out_mb |= MBX_9|MBX_8|MBX_7|MBX_6|MBX_3|MBX_2;
2868 } else {
2869 mcp->mb[1] = MSW(id_list_dma);
2870 mcp->mb[2] = LSW(id_list_dma);
2871 mcp->mb[3] = MSW(MSD(id_list_dma));
2872 mcp->mb[6] = LSW(MSD(id_list_dma));
2873 mcp->out_mb |= MBX_6|MBX_3|MBX_2|MBX_1;
2874 }
2875 mcp->in_mb = MBX_1|MBX_0;
2876 mcp->tov = MBX_TOV_SECONDS;
2877 mcp->flags = 0;
2878 rval = qla2x00_mailbox_command(vha, mcp);
2879
2880 if (rval != QLA_SUCCESS) {
2881 /*EMPTY*/
2882 ql_dbg(ql_dbg_mbx, vha, 0x107a, "Failed=%x.\n", rval);
2883 } else {
2884 *entries = mcp->mb[1];
2885 ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x107b,
2886 "Done %s.\n", __func__);
2887 }
2888
2889 return rval;
2890 }
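
/*
 * Illustrative sketch (not part of the driver): fetching the firmware
 * ID list into a DMA-coherent buffer.  qla2x00_gid_list_size() is
 * assumed from the driver's shared helpers; the layout of the entries
 * is not interpreted here.
 */
static inline int qla_get_id_list_example(scsi_qla_host_t *vha)
{
	struct qla_hw_data *ha = vha->hw;
	dma_addr_t gid_dma;
	void *gid_list;
	uint16_t entries = 0;
	int rval;

	gid_list = dma_alloc_coherent(&ha->pdev->dev,
	    qla2x00_gid_list_size(ha), &gid_dma, GFP_KERNEL);
	if (!gid_list)
		return QLA_MEMORY_ALLOC_FAILED;

	rval = qla2x00_get_id_list(vha, gid_list, gid_dma, &entries);

	dma_free_coherent(&ha->pdev->dev, qla2x00_gid_list_size(ha),
	    gid_list, gid_dma);

	return rval;
}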
2891
2892 /*
2893 * qla2x00_get_resource_cnts
2894 * Get current firmware resource counts.
2895 *
2896 * Input:
2897 * ha = adapter block pointer.
2898 *
2899 * Returns:
2900 * qla2x00 local function return status code.
2901 *
2902 * Context:
2903 * Kernel context.
2904 */
2905 int
2906 qla2x00_get_resource_cnts(scsi_qla_host_t *vha)
2907 {
2908 struct qla_hw_data *ha = vha->hw;
2909 int rval;
2910 mbx_cmd_t mc;
2911 mbx_cmd_t *mcp = &mc;
2912
2913 ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x107c,
2914 "Entered %s.\n", __func__);
2915
2916 mcp->mb[0] = MBC_GET_RESOURCE_COUNTS;
2917 mcp->out_mb = MBX_0;
2918 mcp->in_mb = MBX_11|MBX_10|MBX_7|MBX_6|MBX_3|MBX_2|MBX_1|MBX_0;
2919 if (IS_QLA81XX(ha) || IS_QLA83XX(ha) ||
2920 IS_QLA27XX(ha) || IS_QLA28XX(ha))
2921 mcp->in_mb |= MBX_12;
2922 mcp->tov = MBX_TOV_SECONDS;
2923 mcp->flags = 0;
2924 rval = qla2x00_mailbox_command(vha, mcp);
2925
2926 if (rval != QLA_SUCCESS) {
2927 /*EMPTY*/
2928 ql_dbg(ql_dbg_mbx, vha, 0x107d,
2929 "Failed mb[0]=%x.\n", mcp->mb[0]);
2930 } else {
2931 ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x107e,
2932 "Done %s mb1=%x mb2=%x mb3=%x mb6=%x mb7=%x mb10=%x "
2933 "mb11=%x mb12=%x.\n", __func__, mcp->mb[1], mcp->mb[2],
2934 mcp->mb[3], mcp->mb[6], mcp->mb[7], mcp->mb[10],
2935 mcp->mb[11], mcp->mb[12]);
2936
2937 ha->orig_fw_tgt_xcb_count = mcp->mb[1];
2938 ha->cur_fw_tgt_xcb_count = mcp->mb[2];
2939 ha->cur_fw_xcb_count = mcp->mb[3];
2940 ha->orig_fw_xcb_count = mcp->mb[6];
2941 ha->cur_fw_iocb_count = mcp->mb[7];
2942 ha->orig_fw_iocb_count = mcp->mb[10];
2943 if (ha->flags.npiv_supported)
2944 ha->max_npiv_vports = mcp->mb[11];
2945 if (IS_QLA81XX(ha) || IS_QLA83XX(ha) || IS_QLA27XX(ha) ||
2946 IS_QLA28XX(ha))
2947 ha->fw_max_fcf_count = mcp->mb[12];
2948 }
2949
2950 return (rval);
2951 }
2952
2953 /*
2954 * qla2x00_get_fcal_position_map
2955 * Get FCAL (LILP) position map using mailbox command
2956 *
2957 * Input:
2958 * ha = adapter state pointer.
2959 * pos_map = buffer pointer (can be NULL).
2960 *
2961 * Returns:
2962 * qla2x00 local function return status code.
2963 *
2964 * Context:
2965 * Kernel context.
2966 */
2967 int
2968 qla2x00_get_fcal_position_map(scsi_qla_host_t *vha, char *pos_map)
2969 {
2970 int rval;
2971 mbx_cmd_t mc;
2972 mbx_cmd_t *mcp = &mc;
2973 char *pmap;
2974 dma_addr_t pmap_dma;
2975 struct qla_hw_data *ha = vha->hw;
2976
2977 ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x107f,
2978 "Entered %s.\n", __func__);
2979
2980 pmap = dma_pool_zalloc(ha->s_dma_pool, GFP_KERNEL, &pmap_dma);
2981 if (pmap == NULL) {
2982 ql_log(ql_log_warn, vha, 0x1080,
2983 "Memory alloc failed.\n");
2984 return QLA_MEMORY_ALLOC_FAILED;
2985 }
2986
2987 mcp->mb[0] = MBC_GET_FC_AL_POSITION_MAP;
2988 mcp->mb[2] = MSW(pmap_dma);
2989 mcp->mb[3] = LSW(pmap_dma);
2990 mcp->mb[6] = MSW(MSD(pmap_dma));
2991 mcp->mb[7] = LSW(MSD(pmap_dma));
2992 mcp->out_mb = MBX_7|MBX_6|MBX_3|MBX_2|MBX_0;
2993 mcp->in_mb = MBX_1|MBX_0;
2994 mcp->buf_size = FCAL_MAP_SIZE;
2995 mcp->flags = MBX_DMA_IN;
2996 mcp->tov = (ha->login_timeout * 2) + (ha->login_timeout / 2);
2997 rval = qla2x00_mailbox_command(vha, mcp);
2998
2999 if (rval == QLA_SUCCESS) {
3000 ql_dbg(ql_dbg_mbx + ql_dbg_buffer, vha, 0x1081,
3001 "mb0/mb1=%x/%X FC/AL position map size (%x).\n",
3002 mcp->mb[0], mcp->mb[1], (unsigned)pmap[0]);
3003 ql_dump_buffer(ql_dbg_mbx + ql_dbg_buffer, vha, 0x111d,
3004 pmap, pmap[0] + 1);
3005
3006 if (pos_map)
3007 memcpy(pos_map, pmap, FCAL_MAP_SIZE);
3008 }
3009 dma_pool_free(ha->s_dma_pool, pmap, pmap_dma);
3010
3011 if (rval != QLA_SUCCESS) {
3012 ql_dbg(ql_dbg_mbx, vha, 0x1082, "Failed=%x.\n", rval);
3013 } else {
3014 ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x1083,
3015 "Done %s.\n", __func__);
3016 }
3017
3018 return rval;
3019 }
3020
3021 /*
3022 * qla2x00_get_link_status
3023 *
3024 * Input:
3025 * ha = adapter block pointer.
3026 * loop_id = device loop ID.
3027 * ret_buf = pointer to link status return buffer.
3028 *
3029 * Returns:
3030 * 0 = success.
3031 * BIT_0 = mem alloc error.
3032 * BIT_1 = mailbox error.
3033 */
3034 int
3035 qla2x00_get_link_status(scsi_qla_host_t *vha, uint16_t loop_id,
3036 struct link_statistics *stats, dma_addr_t stats_dma)
3037 {
3038 int rval;
3039 mbx_cmd_t mc;
3040 mbx_cmd_t *mcp = &mc;
3041 uint32_t *iter = (void *)stats;
3042 ushort dwords = offsetof(typeof(*stats), link_up_cnt)/sizeof(*iter);
3043 struct qla_hw_data *ha = vha->hw;
3044
3045 ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x1084,
3046 "Entered %s.\n", __func__);
3047
3048 mcp->mb[0] = MBC_GET_LINK_STATUS;
3049 mcp->mb[2] = MSW(LSD(stats_dma));
3050 mcp->mb[3] = LSW(LSD(stats_dma));
3051 mcp->mb[6] = MSW(MSD(stats_dma));
3052 mcp->mb[7] = LSW(MSD(stats_dma));
3053 mcp->out_mb = MBX_7|MBX_6|MBX_3|MBX_2|MBX_0;
3054 mcp->in_mb = MBX_0;
3055 if (IS_FWI2_CAPABLE(ha)) {
3056 mcp->mb[1] = loop_id;
3057 mcp->mb[4] = 0;
3058 mcp->mb[10] = 0;
3059 mcp->out_mb |= MBX_10|MBX_4|MBX_1;
3060 mcp->in_mb |= MBX_1;
3061 } else if (HAS_EXTENDED_IDS(ha)) {
3062 mcp->mb[1] = loop_id;
3063 mcp->mb[10] = 0;
3064 mcp->out_mb |= MBX_10|MBX_1;
3065 } else {
3066 mcp->mb[1] = loop_id << 8;
3067 mcp->out_mb |= MBX_1;
3068 }
3069 mcp->tov = MBX_TOV_SECONDS;
3070 mcp->flags = IOCTL_CMD;
3071 rval = qla2x00_mailbox_command(vha, mcp);
3072
3073 if (rval == QLA_SUCCESS) {
3074 if (mcp->mb[0] != MBS_COMMAND_COMPLETE) {
3075 ql_dbg(ql_dbg_mbx, vha, 0x1085,
3076 "Failed=%x mb[0]=%x.\n", rval, mcp->mb[0]);
3077 rval = QLA_FUNCTION_FAILED;
3078 } else {
3079 /* Re-endianize - firmware data is le32. */
3080 ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x1086,
3081 "Done %s.\n", __func__);
3082 for ( ; dwords--; iter++)
3083 le32_to_cpus(iter);
3084 }
3085 } else {
3086 /* Failed. */
3087 ql_dbg(ql_dbg_mbx, vha, 0x1087, "Failed=%x.\n", rval);
3088 }
3089
3090 return rval;
3091 }
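
/*
 * Illustrative sketch (not part of the driver): the caller owns the
 * DMA mapping of the statistics buffer handed to the helper above, so
 * a coherent allocation is the simplest way to use it.
 */
static inline int qla_link_stats_example(scsi_qla_host_t *vha,
    uint16_t loop_id)
{
	struct qla_hw_data *ha = vha->hw;
	struct link_statistics *stats;
	dma_addr_t stats_dma;
	int rval;

	stats = dma_alloc_coherent(&ha->pdev->dev, sizeof(*stats),
	    &stats_dma, GFP_KERNEL);
	if (!stats)
		return QLA_MEMORY_ALLOC_FAILED;

	rval = qla2x00_get_link_status(vha, loop_id, stats, stats_dma);

	dma_free_coherent(&ha->pdev->dev, sizeof(*stats), stats, stats_dma);

	return rval;
}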
3092
3093 int
3094 qla24xx_get_isp_stats(scsi_qla_host_t *vha, struct link_statistics *stats,
3095 dma_addr_t stats_dma, uint16_t options)
3096 {
3097 int rval;
3098 mbx_cmd_t mc;
3099 mbx_cmd_t *mcp = &mc;
3100 uint32_t *iter = (void *)stats;
3101 ushort dwords = sizeof(*stats)/sizeof(*iter);
3102
3103 ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x1088,
3104 "Entered %s.\n", __func__);
3105
3106 memset(&mc, 0, sizeof(mc));
3107 mc.mb[0] = MBC_GET_LINK_PRIV_STATS;
3108 mc.mb[2] = MSW(LSD(stats_dma));
3109 mc.mb[3] = LSW(LSD(stats_dma));
3110 mc.mb[6] = MSW(MSD(stats_dma));
3111 mc.mb[7] = LSW(MSD(stats_dma));
3112 mc.mb[8] = dwords;
3113 mc.mb[9] = cpu_to_le16(vha->vp_idx);
3114 mc.mb[10] = cpu_to_le16(options);
3115
3116 rval = qla24xx_send_mb_cmd(vha, &mc);
3117
3118 if (rval == QLA_SUCCESS) {
3119 if (mcp->mb[0] != MBS_COMMAND_COMPLETE) {
3120 ql_dbg(ql_dbg_mbx, vha, 0x1089,
3121 "Failed mb[0]=%x.\n", mcp->mb[0]);
3122 rval = QLA_FUNCTION_FAILED;
3123 } else {
3124 ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x108a,
3125 "Done %s.\n", __func__);
3126 /* Re-endianize - firmware data is le32. */
3127 for ( ; dwords--; iter++)
3128 le32_to_cpus(iter);
3129 }
3130 } else {
3131 /* Failed. */
3132 ql_dbg(ql_dbg_mbx, vha, 0x108b, "Failed=%x.\n", rval);
3133 }
3134
3135 return rval;
3136 }
3137
3138 int
3139 qla24xx_abort_command(srb_t *sp)
3140 {
3141 int rval;
3142 unsigned long flags = 0;
3143
3144 struct abort_entry_24xx *abt;
3145 dma_addr_t abt_dma;
3146 uint32_t handle;
3147 fc_port_t *fcport = sp->fcport;
3148 struct scsi_qla_host *vha = fcport->vha;
3149 struct qla_hw_data *ha = vha->hw;
3150 struct req_que *req = vha->req;
3151 struct qla_qpair *qpair = sp->qpair;
3152
3153 ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x108c,
3154 "Entered %s.\n", __func__);
3155
3156 if (vha->flags.qpairs_available && sp->qpair)
3157 req = sp->qpair->req;
3158 else
3159 return QLA_FUNCTION_FAILED;
3160
3161 if (ql2xasynctmfenable)
3162 return qla24xx_async_abort_command(sp);
3163
3164 spin_lock_irqsave(qpair->qp_lock_ptr, flags);
3165 for (handle = 1; handle < req->num_outstanding_cmds; handle++) {
3166 if (req->outstanding_cmds[handle] == sp)
3167 break;
3168 }
3169 spin_unlock_irqrestore(qpair->qp_lock_ptr, flags);
3170 if (handle == req->num_outstanding_cmds) {
3171 /* Command not found. */
3172 return QLA_FUNCTION_FAILED;
3173 }
3174
3175 abt = dma_pool_zalloc(ha->s_dma_pool, GFP_KERNEL, &abt_dma);
3176 if (abt == NULL) {
3177 ql_log(ql_log_warn, vha, 0x108d,
3178 "Failed to allocate abort IOCB.\n");
3179 return QLA_MEMORY_ALLOC_FAILED;
3180 }
3181
3182 abt->entry_type = ABORT_IOCB_TYPE;
3183 abt->entry_count = 1;
3184 abt->handle = make_handle(req->id, abt->handle);
3185 abt->nport_handle = cpu_to_le16(fcport->loop_id);
3186 abt->handle_to_abort = make_handle(req->id, handle);
3187 abt->port_id[0] = fcport->d_id.b.al_pa;
3188 abt->port_id[1] = fcport->d_id.b.area;
3189 abt->port_id[2] = fcport->d_id.b.domain;
3190 abt->vp_index = fcport->vha->vp_idx;
3191
3192 abt->req_que_no = cpu_to_le16(req->id);
3193
3194 rval = qla2x00_issue_iocb(vha, abt, abt_dma, 0);
3195 if (rval != QLA_SUCCESS) {
3196 ql_dbg(ql_dbg_mbx, vha, 0x108e,
3197 "Failed to issue IOCB (%x).\n", rval);
3198 } else if (abt->entry_status != 0) {
3199 ql_dbg(ql_dbg_mbx, vha, 0x108f,
3200 "Failed to complete IOCB -- error status (%x).\n",
3201 abt->entry_status);
3202 rval = QLA_FUNCTION_FAILED;
3203 } else if (abt->nport_handle != cpu_to_le16(0)) {
3204 ql_dbg(ql_dbg_mbx, vha, 0x1090,
3205 "Failed to complete IOCB -- completion status (%x).\n",
3206 le16_to_cpu(abt->nport_handle));
3207 if (abt->nport_handle == cpu_to_le16(CS_IOCB_ERROR))
3208 rval = QLA_FUNCTION_PARAMETER_ERROR;
3209 else
3210 rval = QLA_FUNCTION_FAILED;
3211 } else {
3212 ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x1091,
3213 "Done %s.\n", __func__);
3214 }
3215
3216 dma_pool_free(ha->s_dma_pool, abt, abt_dma);
3217
3218 return rval;
3219 }
3220
3221 struct tsk_mgmt_cmd {
3222 union {
3223 struct tsk_mgmt_entry tsk;
3224 struct sts_entry_24xx sts;
3225 } p;
3226 };
3227
3228 static int
3229 __qla24xx_issue_tmf(char *name, uint32_t type, struct fc_port *fcport,
3230 uint64_t l, int tag)
3231 {
3232 int rval, rval2;
3233 struct tsk_mgmt_cmd *tsk;
3234 struct sts_entry_24xx *sts;
3235 dma_addr_t tsk_dma;
3236 scsi_qla_host_t *vha;
3237 struct qla_hw_data *ha;
3238 struct req_que *req;
3239 struct qla_qpair *qpair;
3240
3241 vha = fcport->vha;
3242 ha = vha->hw;
3243 req = vha->req;
3244
3245 ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x1092,
3246 "Entered %s.\n", __func__);
3247
3248 if (vha->vp_idx && vha->qpair) {
3249 /* NPIV port */
3250 qpair = vha->qpair;
3251 req = qpair->req;
3252 }
3253
3254 tsk = dma_pool_zalloc(ha->s_dma_pool, GFP_KERNEL, &tsk_dma);
3255 if (tsk == NULL) {
3256 ql_log(ql_log_warn, vha, 0x1093,
3257 "Failed to allocate task management IOCB.\n");
3258 return QLA_MEMORY_ALLOC_FAILED;
3259 }
3260
3261 tsk->p.tsk.entry_type = TSK_MGMT_IOCB_TYPE;
3262 tsk->p.tsk.entry_count = 1;
3263 tsk->p.tsk.handle = make_handle(req->id, tsk->p.tsk.handle);
3264 tsk->p.tsk.nport_handle = cpu_to_le16(fcport->loop_id);
3265 tsk->p.tsk.timeout = cpu_to_le16(ha->r_a_tov / 10 * 2);
3266 tsk->p.tsk.control_flags = cpu_to_le32(type);
3267 tsk->p.tsk.port_id[0] = fcport->d_id.b.al_pa;
3268 tsk->p.tsk.port_id[1] = fcport->d_id.b.area;
3269 tsk->p.tsk.port_id[2] = fcport->d_id.b.domain;
3270 tsk->p.tsk.vp_index = fcport->vha->vp_idx;
3271 if (type == TCF_LUN_RESET) {
3272 int_to_scsilun(l, &tsk->p.tsk.lun);
3273 host_to_fcp_swap((uint8_t *)&tsk->p.tsk.lun,
3274 sizeof(tsk->p.tsk.lun));
3275 }
3276
3277 sts = &tsk->p.sts;
3278 rval = qla2x00_issue_iocb(vha, tsk, tsk_dma, 0);
3279 if (rval != QLA_SUCCESS) {
3280 ql_dbg(ql_dbg_mbx, vha, 0x1094,
3281 "Failed to issue %s reset IOCB (%x).\n", name, rval);
3282 } else if (sts->entry_status != 0) {
3283 ql_dbg(ql_dbg_mbx, vha, 0x1095,
3284 "Failed to complete IOCB -- error status (%x).\n",
3285 sts->entry_status);
3286 rval = QLA_FUNCTION_FAILED;
3287 } else if (sts->comp_status != cpu_to_le16(CS_COMPLETE)) {
3288 ql_dbg(ql_dbg_mbx, vha, 0x1096,
3289 "Failed to complete IOCB -- completion status (%x).\n",
3290 le16_to_cpu(sts->comp_status));
3291 rval = QLA_FUNCTION_FAILED;
3292 } else if (le16_to_cpu(sts->scsi_status) &
3293 SS_RESPONSE_INFO_LEN_VALID) {
3294 if (le32_to_cpu(sts->rsp_data_len) < 4) {
3295 ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x1097,
3296 "Ignoring inconsistent data length -- not enough "
3297 "response info (%d).\n",
3298 le32_to_cpu(sts->rsp_data_len));
3299 } else if (sts->data[3]) {
3300 ql_dbg(ql_dbg_mbx, vha, 0x1098,
3301 "Failed to complete IOCB -- response (%x).\n",
3302 sts->data[3]);
3303 rval = QLA_FUNCTION_FAILED;
3304 }
3305 }
3306
3307 /* Issue marker IOCB. */
3308 rval2 = qla2x00_marker(vha, ha->base_qpair, fcport->loop_id, l,
3309 type == TCF_LUN_RESET ? MK_SYNC_ID_LUN : MK_SYNC_ID);
3310 if (rval2 != QLA_SUCCESS) {
3311 ql_dbg(ql_dbg_mbx, vha, 0x1099,
3312 "Failed to issue marker IOCB (%x).\n", rval2);
3313 } else {
3314 ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x109a,
3315 "Done %s.\n", __func__);
3316 }
3317
3318 dma_pool_free(ha->s_dma_pool, tsk, tsk_dma);
3319
3320 return rval;
3321 }
3322
3323 int
3324 qla24xx_abort_target(struct fc_port *fcport, uint64_t l, int tag)
3325 {
3326 struct qla_hw_data *ha = fcport->vha->hw;
3327
3328 if ((ql2xasynctmfenable) && IS_FWI2_CAPABLE(ha))
3329 return qla2x00_async_tm_cmd(fcport, TCF_TARGET_RESET, l, tag);
3330
3331 return __qla24xx_issue_tmf("Target", TCF_TARGET_RESET, fcport, l, tag);
3332 }
3333
3334 int
3335 qla24xx_lun_reset(struct fc_port *fcport, uint64_t l, int tag)
3336 {
3337 struct qla_hw_data *ha = fcport->vha->hw;
3338
3339 if ((ql2xasynctmfenable) && IS_FWI2_CAPABLE(ha))
3340 return qla2x00_async_tm_cmd(fcport, TCF_LUN_RESET, l, tag);
3341
3342 return __qla24xx_issue_tmf("Lun", TCF_LUN_RESET, fcport, l, tag);
3343 }
3344
3345 int
3346 qla2x00_system_error(scsi_qla_host_t *vha)
3347 {
3348 int rval;
3349 mbx_cmd_t mc;
3350 mbx_cmd_t *mcp = &mc;
3351 struct qla_hw_data *ha = vha->hw;
3352
3353 if (!IS_QLA23XX(ha) && !IS_FWI2_CAPABLE(ha))
3354 return QLA_FUNCTION_FAILED;
3355
3356 ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x109b,
3357 "Entered %s.\n", __func__);
3358
3359 mcp->mb[0] = MBC_GEN_SYSTEM_ERROR;
3360 mcp->out_mb = MBX_0;
3361 mcp->in_mb = MBX_0;
3362 mcp->tov = 5;
3363 mcp->flags = 0;
3364 rval = qla2x00_mailbox_command(vha, mcp);
3365
3366 if (rval != QLA_SUCCESS) {
3367 ql_dbg(ql_dbg_mbx, vha, 0x109c, "Failed=%x.\n", rval);
3368 } else {
3369 ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x109d,
3370 "Done %s.\n", __func__);
3371 }
3372
3373 return rval;
3374 }
3375
3376 int
3377 qla2x00_write_serdes_word(scsi_qla_host_t *vha, uint16_t addr, uint16_t data)
3378 {
3379 int rval;
3380 mbx_cmd_t mc;
3381 mbx_cmd_t *mcp = &mc;
3382
3383 if (!IS_QLA25XX(vha->hw) && !IS_QLA2031(vha->hw) &&
3384 !IS_QLA27XX(vha->hw) && !IS_QLA28XX(vha->hw))
3385 return QLA_FUNCTION_FAILED;
3386
3387 ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x1182,
3388 "Entered %s.\n", __func__);
3389
3390 mcp->mb[0] = MBC_WRITE_SERDES;
3391 mcp->mb[1] = addr;
3392 if (IS_QLA2031(vha->hw))
3393 mcp->mb[2] = data & 0xff;
3394 else
3395 mcp->mb[2] = data;
3396
3397 mcp->mb[3] = 0;
3398 mcp->out_mb = MBX_3|MBX_2|MBX_1|MBX_0;
3399 mcp->in_mb = MBX_0;
3400 mcp->tov = MBX_TOV_SECONDS;
3401 mcp->flags = 0;
3402 rval = qla2x00_mailbox_command(vha, mcp);
3403
3404 if (rval != QLA_SUCCESS) {
3405 ql_dbg(ql_dbg_mbx, vha, 0x1183,
3406 "Failed=%x mb[0]=%x.\n", rval, mcp->mb[0]);
3407 } else {
3408 ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x1184,
3409 "Done %s.\n", __func__);
3410 }
3411
3412 return rval;
3413 }
3414
3415 int
3416 qla2x00_read_serdes_word(scsi_qla_host_t *vha, uint16_t addr, uint16_t *data)
3417 {
3418 int rval;
3419 mbx_cmd_t mc;
3420 mbx_cmd_t *mcp = &mc;
3421
3422 if (!IS_QLA25XX(vha->hw) && !IS_QLA2031(vha->hw) &&
3423 !IS_QLA27XX(vha->hw) && !IS_QLA28XX(vha->hw))
3424 return QLA_FUNCTION_FAILED;
3425
3426 ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x1185,
3427 "Entered %s.\n", __func__);
3428
3429 mcp->mb[0] = MBC_READ_SERDES;
3430 mcp->mb[1] = addr;
3431 mcp->mb[3] = 0;
3432 mcp->out_mb = MBX_3|MBX_1|MBX_0;
3433 mcp->in_mb = MBX_1|MBX_0;
3434 mcp->tov = MBX_TOV_SECONDS;
3435 mcp->flags = 0;
3436 rval = qla2x00_mailbox_command(vha, mcp);
3437
3438 if (IS_QLA2031(vha->hw))
3439 *data = mcp->mb[1] & 0xff;
3440 else
3441 *data = mcp->mb[1];
3442
3443 if (rval != QLA_SUCCESS) {
3444 ql_dbg(ql_dbg_mbx, vha, 0x1186,
3445 "Failed=%x mb[0]=%x.\n", rval, mcp->mb[0]);
3446 } else {
3447 ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x1187,
3448 "Done %s.\n", __func__);
3449 }
3450
3451 return rval;
3452 }
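
/*
 * Illustrative sketch (not part of the driver): a read-modify-write of
 * a SERDES register using the two helpers above (25xx/2031/27xx/28xx
 * parts only, as both helpers already check).
 */
static inline int qla_serdes_rmw_example(scsi_qla_host_t *vha,
    uint16_t addr, uint16_t set_bits)
{
	uint16_t val = 0;
	int rval = qla2x00_read_serdes_word(vha, addr, &val);

	if (rval != QLA_SUCCESS)
		return rval;

	return qla2x00_write_serdes_word(vha, addr, val | set_bits);
}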
3453
3454 int
3455 qla8044_write_serdes_word(scsi_qla_host_t *vha, uint32_t addr, uint32_t data)
3456 {
3457 int rval;
3458 mbx_cmd_t mc;
3459 mbx_cmd_t *mcp = &mc;
3460
3461 if (!IS_QLA8044(vha->hw))
3462 return QLA_FUNCTION_FAILED;
3463
3464 ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x11a0,
3465 "Entered %s.\n", __func__);
3466
3467 mcp->mb[0] = MBC_SET_GET_ETH_SERDES_REG;
3468 mcp->mb[1] = HCS_WRITE_SERDES;
3469 mcp->mb[3] = LSW(addr);
3470 mcp->mb[4] = MSW(addr);
3471 mcp->mb[5] = LSW(data);
3472 mcp->mb[6] = MSW(data);
3473 mcp->out_mb = MBX_6|MBX_5|MBX_4|MBX_3|MBX_1|MBX_0;
3474 mcp->in_mb = MBX_0;
3475 mcp->tov = MBX_TOV_SECONDS;
3476 mcp->flags = 0;
3477 rval = qla2x00_mailbox_command(vha, mcp);
3478
3479 if (rval != QLA_SUCCESS) {
3480 ql_dbg(ql_dbg_mbx, vha, 0x11a1,
3481 "Failed=%x mb[0]=%x.\n", rval, mcp->mb[0]);
3482 } else {
3483 ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x1188,
3484 "Done %s.\n", __func__);
3485 }
3486
3487 return rval;
3488 }
3489
3490 int
3491 qla8044_read_serdes_word(scsi_qla_host_t *vha, uint32_t addr, uint32_t *data)
3492 {
3493 int rval;
3494 mbx_cmd_t mc;
3495 mbx_cmd_t *mcp = &mc;
3496
3497 if (!IS_QLA8044(vha->hw))
3498 return QLA_FUNCTION_FAILED;
3499
3500 ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x1189,
3501 "Entered %s.\n", __func__);
3502
3503 mcp->mb[0] = MBC_SET_GET_ETH_SERDES_REG;
3504 mcp->mb[1] = HCS_READ_SERDES;
3505 mcp->mb[3] = LSW(addr);
3506 mcp->mb[4] = MSW(addr);
3507 mcp->out_mb = MBX_4|MBX_3|MBX_1|MBX_0;
3508 mcp->in_mb = MBX_2|MBX_1|MBX_0;
3509 mcp->tov = MBX_TOV_SECONDS;
3510 mcp->flags = 0;
3511 rval = qla2x00_mailbox_command(vha, mcp);
3512
3513 *data = mcp->mb[2] << 16 | mcp->mb[1];
3514
3515 if (rval != QLA_SUCCESS) {
3516 ql_dbg(ql_dbg_mbx, vha, 0x118a,
3517 "Failed=%x mb[0]=%x.\n", rval, mcp->mb[0]);
3518 } else {
3519 ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x118b,
3520 "Done %s.\n", __func__);
3521 }
3522
3523 return rval;
3524 }
3525
3526 /**
3527 * qla2x00_set_serdes_params() - Set serial link (SERDES) parameters.
3528 * @vha: HA context
3529 * @sw_em_1g: 1 Gb/s serial link options
3530 * @sw_em_2g: 2 Gb/s serial link options
3531 * @sw_em_4g: 4 Gb/s serial link options
3532 *
3533 * Returns qla2x00 local function return status code.
3534 */
3535 int
3536 qla2x00_set_serdes_params(scsi_qla_host_t *vha, uint16_t sw_em_1g,
3537 uint16_t sw_em_2g, uint16_t sw_em_4g)
3538 {
3539 int rval;
3540 mbx_cmd_t mc;
3541 mbx_cmd_t *mcp = &mc;
3542
3543 ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x109e,
3544 "Entered %s.\n", __func__);
3545
3546 mcp->mb[0] = MBC_SERDES_PARAMS;
3547 mcp->mb[1] = BIT_0;
3548 mcp->mb[2] = sw_em_1g | BIT_15;
3549 mcp->mb[3] = sw_em_2g | BIT_15;
3550 mcp->mb[4] = sw_em_4g | BIT_15;
3551 mcp->out_mb = MBX_4|MBX_3|MBX_2|MBX_1|MBX_0;
3552 mcp->in_mb = MBX_0;
3553 mcp->tov = MBX_TOV_SECONDS;
3554 mcp->flags = 0;
3555 rval = qla2x00_mailbox_command(vha, mcp);
3556
3557 if (rval != QLA_SUCCESS) {
3558 /*EMPTY*/
3559 ql_dbg(ql_dbg_mbx, vha, 0x109f,
3560 "Failed=%x mb[0]=%x.\n", rval, mcp->mb[0]);
3561 } else {
3562 /*EMPTY*/
3563 ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x10a0,
3564 "Done %s.\n", __func__);
3565 }
3566
3567 return rval;
3568 }
3569
3570 int
3571 qla2x00_stop_firmware(scsi_qla_host_t *vha)
3572 {
3573 int rval;
3574 mbx_cmd_t mc;
3575 mbx_cmd_t *mcp = &mc;
3576
3577 if (!IS_FWI2_CAPABLE(vha->hw))
3578 return QLA_FUNCTION_FAILED;
3579
3580 ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x10a1,
3581 "Entered %s.\n", __func__);
3582
3583 mcp->mb[0] = MBC_STOP_FIRMWARE;
3584 mcp->mb[1] = 0;
3585 mcp->out_mb = MBX_1|MBX_0;
3586 mcp->in_mb = MBX_0;
3587 mcp->tov = 5;
3588 mcp->flags = 0;
3589 rval = qla2x00_mailbox_command(vha, mcp);
3590
3591 if (rval != QLA_SUCCESS) {
3592 ql_dbg(ql_dbg_mbx, vha, 0x10a2, "Failed=%x.\n", rval);
3593 if (mcp->mb[0] == MBS_INVALID_COMMAND)
3594 rval = QLA_INVALID_COMMAND;
3595 } else {
3596 ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x10a3,
3597 "Done %s.\n", __func__);
3598 }
3599
3600 return rval;
3601 }
3602
3603 int
3604 qla2x00_enable_eft_trace(scsi_qla_host_t *vha, dma_addr_t eft_dma,
3605 uint16_t buffers)
3606 {
3607 int rval;
3608 mbx_cmd_t mc;
3609 mbx_cmd_t *mcp = &mc;
3610
3611 ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x10a4,
3612 "Entered %s.\n", __func__);
3613
3614 if (!IS_FWI2_CAPABLE(vha->hw))
3615 return QLA_FUNCTION_FAILED;
3616
3617 if (unlikely(pci_channel_offline(vha->hw->pdev)))
3618 return QLA_FUNCTION_FAILED;
3619
3620 mcp->mb[0] = MBC_TRACE_CONTROL;
3621 mcp->mb[1] = TC_EFT_ENABLE;
3622 mcp->mb[2] = LSW(eft_dma);
3623 mcp->mb[3] = MSW(eft_dma);
3624 mcp->mb[4] = LSW(MSD(eft_dma));
3625 mcp->mb[5] = MSW(MSD(eft_dma));
3626 mcp->mb[6] = buffers;
3627 mcp->mb[7] = TC_AEN_DISABLE;
3628 mcp->out_mb = MBX_7|MBX_6|MBX_5|MBX_4|MBX_3|MBX_2|MBX_1|MBX_0;
3629 mcp->in_mb = MBX_1|MBX_0;
3630 mcp->tov = MBX_TOV_SECONDS;
3631 mcp->flags = 0;
3632 rval = qla2x00_mailbox_command(vha, mcp);
3633 if (rval != QLA_SUCCESS) {
3634 ql_dbg(ql_dbg_mbx, vha, 0x10a5,
3635 "Failed=%x mb[0]=%x mb[1]=%x.\n",
3636 rval, mcp->mb[0], mcp->mb[1]);
3637 } else {
3638 ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x10a6,
3639 "Done %s.\n", __func__);
3640 }
3641
3642 return rval;
3643 }
3644
3645 int
3646 qla2x00_disable_eft_trace(scsi_qla_host_t *vha)
3647 {
3648 int rval;
3649 mbx_cmd_t mc;
3650 mbx_cmd_t *mcp = &mc;
3651
3652 ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x10a7,
3653 "Entered %s.\n", __func__);
3654
3655 if (!IS_FWI2_CAPABLE(vha->hw))
3656 return QLA_FUNCTION_FAILED;
3657
3658 if (unlikely(pci_channel_offline(vha->hw->pdev)))
3659 return QLA_FUNCTION_FAILED;
3660
3661 mcp->mb[0] = MBC_TRACE_CONTROL;
3662 mcp->mb[1] = TC_EFT_DISABLE;
3663 mcp->out_mb = MBX_1|MBX_0;
3664 mcp->in_mb = MBX_1|MBX_0;
3665 mcp->tov = MBX_TOV_SECONDS;
3666 mcp->flags = 0;
3667 rval = qla2x00_mailbox_command(vha, mcp);
3668 if (rval != QLA_SUCCESS) {
3669 ql_dbg(ql_dbg_mbx, vha, 0x10a8,
3670 "Failed=%x mb[0]=%x mb[1]=%x.\n",
3671 rval, mcp->mb[0], mcp->mb[1]);
3672 } else {
3673 ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x10a9,
3674 "Done %s.\n", __func__);
3675 }
3676
3677 return rval;
3678 }
3679
3680 int
3681 qla2x00_enable_fce_trace(scsi_qla_host_t *vha, dma_addr_t fce_dma,
3682 uint16_t buffers, uint16_t *mb, uint32_t *dwords)
3683 {
3684 int rval;
3685 mbx_cmd_t mc;
3686 mbx_cmd_t *mcp = &mc;
3687
3688 ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x10aa,
3689 "Entered %s.\n", __func__);
3690
3691 if (!IS_QLA25XX(vha->hw) && !IS_QLA81XX(vha->hw) &&
3692 !IS_QLA83XX(vha->hw) && !IS_QLA27XX(vha->hw) &&
3693 !IS_QLA28XX(vha->hw))
3694 return QLA_FUNCTION_FAILED;
3695
3696 if (unlikely(pci_channel_offline(vha->hw->pdev)))
3697 return QLA_FUNCTION_FAILED;
3698
3699 mcp->mb[0] = MBC_TRACE_CONTROL;
3700 mcp->mb[1] = TC_FCE_ENABLE;
3701 mcp->mb[2] = LSW(fce_dma);
3702 mcp->mb[3] = MSW(fce_dma);
3703 mcp->mb[4] = LSW(MSD(fce_dma));
3704 mcp->mb[5] = MSW(MSD(fce_dma));
3705 mcp->mb[6] = buffers;
3706 mcp->mb[7] = TC_AEN_DISABLE;
3707 mcp->mb[8] = 0;
3708 mcp->mb[9] = TC_FCE_DEFAULT_RX_SIZE;
3709 mcp->mb[10] = TC_FCE_DEFAULT_TX_SIZE;
3710 mcp->out_mb = MBX_10|MBX_9|MBX_8|MBX_7|MBX_6|MBX_5|MBX_4|MBX_3|MBX_2|
3711 MBX_1|MBX_0;
3712 mcp->in_mb = MBX_6|MBX_5|MBX_4|MBX_3|MBX_2|MBX_1|MBX_0;
3713 mcp->tov = MBX_TOV_SECONDS;
3714 mcp->flags = 0;
3715 rval = qla2x00_mailbox_command(vha, mcp);
3716 if (rval != QLA_SUCCESS) {
3717 ql_dbg(ql_dbg_mbx, vha, 0x10ab,
3718 "Failed=%x mb[0]=%x mb[1]=%x.\n",
3719 rval, mcp->mb[0], mcp->mb[1]);
3720 } else {
3721 ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x10ac,
3722 "Done %s.\n", __func__);
3723
3724 if (mb)
3725 memcpy(mb, mcp->mb, 8 * sizeof(*mb));
3726 if (dwords)
3727 *dwords = buffers;
3728 }
3729
3730 return rval;
3731 }
3732
3733 int
3734 qla2x00_disable_fce_trace(scsi_qla_host_t *vha, uint64_t *wr, uint64_t *rd)
3735 {
3736 int rval;
3737 mbx_cmd_t mc;
3738 mbx_cmd_t *mcp = &mc;
3739
3740 ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x10ad,
3741 "Entered %s.\n", __func__);
3742
3743 if (!IS_FWI2_CAPABLE(vha->hw))
3744 return QLA_FUNCTION_FAILED;
3745
3746 if (unlikely(pci_channel_offline(vha->hw->pdev)))
3747 return QLA_FUNCTION_FAILED;
3748
3749 mcp->mb[0] = MBC_TRACE_CONTROL;
3750 mcp->mb[1] = TC_FCE_DISABLE;
3751 mcp->mb[2] = TC_FCE_DISABLE_TRACE;
3752 mcp->out_mb = MBX_2|MBX_1|MBX_0;
3753 mcp->in_mb = MBX_9|MBX_8|MBX_7|MBX_6|MBX_5|MBX_4|MBX_3|MBX_2|
3754 MBX_1|MBX_0;
3755 mcp->tov = MBX_TOV_SECONDS;
3756 mcp->flags = 0;
3757 rval = qla2x00_mailbox_command(vha, mcp);
3758 if (rval != QLA_SUCCESS) {
3759 ql_dbg(ql_dbg_mbx, vha, 0x10ae,
3760 "Failed=%x mb[0]=%x mb[1]=%x.\n",
3761 rval, mcp->mb[0], mcp->mb[1]);
3762 } else {
3763 ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x10af,
3764 "Done %s.\n", __func__);
3765
3766 if (wr)
3767 *wr = (uint64_t) mcp->mb[5] << 48 |
3768 (uint64_t) mcp->mb[4] << 32 |
3769 (uint64_t) mcp->mb[3] << 16 |
3770 (uint64_t) mcp->mb[2];
3771 if (rd)
3772 *rd = (uint64_t) mcp->mb[9] << 48 |
3773 (uint64_t) mcp->mb[8] << 32 |
3774 (uint64_t) mcp->mb[7] << 16 |
3775 (uint64_t) mcp->mb[6];
3776 }
3777
3778 return rval;
3779 }
3780
3781 int
3782 qla2x00_get_idma_speed(scsi_qla_host_t *vha, uint16_t loop_id,
3783 uint16_t *port_speed, uint16_t *mb)
3784 {
3785 int rval;
3786 mbx_cmd_t mc;
3787 mbx_cmd_t *mcp = &mc;
3788
3789 ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x10b0,
3790 "Entered %s.\n", __func__);
3791
3792 if (!IS_IIDMA_CAPABLE(vha->hw))
3793 return QLA_FUNCTION_FAILED;
3794
3795 mcp->mb[0] = MBC_PORT_PARAMS;
3796 mcp->mb[1] = loop_id;
3797 mcp->mb[2] = mcp->mb[3] = 0;
3798 mcp->mb[9] = vha->vp_idx;
3799 mcp->out_mb = MBX_9|MBX_3|MBX_2|MBX_1|MBX_0;
3800 mcp->in_mb = MBX_3|MBX_1|MBX_0;
3801 mcp->tov = MBX_TOV_SECONDS;
3802 mcp->flags = 0;
3803 rval = qla2x00_mailbox_command(vha, mcp);
3804
3805 /* Return mailbox statuses. */
3806 if (mb) {
3807 mb[0] = mcp->mb[0];
3808 mb[1] = mcp->mb[1];
3809 mb[3] = mcp->mb[3];
3810 }
3811
3812 if (rval != QLA_SUCCESS) {
3813 ql_dbg(ql_dbg_mbx, vha, 0x10b1, "Failed=%x.\n", rval);
3814 } else {
3815 ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x10b2,
3816 "Done %s.\n", __func__);
3817 if (port_speed)
3818 *port_speed = mcp->mb[3];
3819 }
3820
3821 return rval;
3822 }
3823
3824 int
3825 qla2x00_set_idma_speed(scsi_qla_host_t *vha, uint16_t loop_id,
3826 uint16_t port_speed, uint16_t *mb)
3827 {
3828 int rval;
3829 mbx_cmd_t mc;
3830 mbx_cmd_t *mcp = &mc;
3831
3832 ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x10b3,
3833 "Entered %s.\n", __func__);
3834
3835 if (!IS_IIDMA_CAPABLE(vha->hw))
3836 return QLA_FUNCTION_FAILED;
3837
3838 mcp->mb[0] = MBC_PORT_PARAMS;
3839 mcp->mb[1] = loop_id;
3840 mcp->mb[2] = BIT_0;
3841 mcp->mb[3] = port_speed & 0x3F;
3842 mcp->mb[9] = vha->vp_idx;
3843 mcp->out_mb = MBX_9|MBX_3|MBX_2|MBX_1|MBX_0;
3844 mcp->in_mb = MBX_3|MBX_1|MBX_0;
3845 mcp->tov = MBX_TOV_SECONDS;
3846 mcp->flags = 0;
3847 rval = qla2x00_mailbox_command(vha, mcp);
3848
3849 /* Return mailbox statuses. */
3850 if (mb) {
3851 mb[0] = mcp->mb[0];
3852 mb[1] = mcp->mb[1];
3853 mb[3] = mcp->mb[3];
3854 }
3855
3856 if (rval != QLA_SUCCESS) {
3857 ql_dbg(ql_dbg_mbx, vha, 0x10b4,
3858 "Failed=%x.\n", rval);
3859 } else {
3860 ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x10b5,
3861 "Done %s.\n", __func__);
3862 }
3863
3864 return rval;
3865 }
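
/*
 * Illustrative sketch (not part of the driver): reading the current
 * iIDMA speed and programming it back unchanged with the get/set pair
 * above.  Both helpers accept a NULL mb pointer.
 */
static inline int qla_iidma_refresh_example(scsi_qla_host_t *vha,
    uint16_t loop_id)
{
	uint16_t speed = 0;
	int rval = qla2x00_get_idma_speed(vha, loop_id, &speed, NULL);

	if (rval != QLA_SUCCESS)
		return rval;

	return qla2x00_set_idma_speed(vha, loop_id, speed, NULL);
}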
3866
3867 void
3868 qla24xx_report_id_acquisition(scsi_qla_host_t *vha,
3869 struct vp_rpt_id_entry_24xx *rptid_entry)
3870 {
3871 struct qla_hw_data *ha = vha->hw;
3872 scsi_qla_host_t *vp = NULL;
3873 unsigned long flags;
3874 int found;
3875 port_id_t id;
3876 struct fc_port *fcport;
3877
3878 ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x10b6,
3879 "Entered %s.\n", __func__);
3880
3881 if (rptid_entry->entry_status != 0)
3882 return;
3883
3884 id.b.domain = rptid_entry->port_id[2];
3885 id.b.area = rptid_entry->port_id[1];
3886 id.b.al_pa = rptid_entry->port_id[0];
3887 id.b.rsvd_1 = 0;
3888 ha->flags.n2n_ae = 0;
3889
3890 if (rptid_entry->format == 0) {
3891 /* loop */
3892 ql_dbg(ql_dbg_async, vha, 0x10b7,
3893 "Format 0 : Number of VPs setup %d, number of "
3894 "VPs acquired %d.\n", rptid_entry->vp_setup,
3895 rptid_entry->vp_acquired);
3896 ql_dbg(ql_dbg_async, vha, 0x10b8,
3897 "Primary port id %02x%02x%02x.\n",
3898 rptid_entry->port_id[2], rptid_entry->port_id[1],
3899 rptid_entry->port_id[0]);
3900 ha->current_topology = ISP_CFG_NL;
3901 qlt_update_host_map(vha, id);
3902
3903 } else if (rptid_entry->format == 1) {
3904 /* fabric */
3905 ql_dbg(ql_dbg_async, vha, 0x10b9,
3906 "Format 1: VP[%d] enabled - status %d - with "
3907 "port id %02x%02x%02x.\n", rptid_entry->vp_idx,
3908 rptid_entry->vp_status,
3909 rptid_entry->port_id[2], rptid_entry->port_id[1],
3910 rptid_entry->port_id[0]);
3911 ql_dbg(ql_dbg_async, vha, 0x5075,
3912 "Format 1: Remote WWPN %8phC.\n",
3913 rptid_entry->u.f1.port_name);
3914
3915 ql_dbg(ql_dbg_async, vha, 0x5075,
3916 "Format 1: WWPN %8phC.\n",
3917 vha->port_name);
3918
3919 switch (rptid_entry->u.f1.flags & TOPO_MASK) {
3920 case TOPO_N2N:
3921 ha->current_topology = ISP_CFG_N;
3922 spin_lock_irqsave(&vha->hw->tgt.sess_lock, flags);
3923 list_for_each_entry(fcport, &vha->vp_fcports, list) {
3924 fcport->scan_state = QLA_FCPORT_SCAN;
3925 fcport->n2n_flag = 0;
3926 }
3927 id.b24 = 0;
3928 if (wwn_to_u64(vha->port_name) >
3929 wwn_to_u64(rptid_entry->u.f1.port_name)) {
3930 vha->d_id.b24 = 0;
3931 vha->d_id.b.al_pa = 1;
3932 ha->flags.n2n_bigger = 1;
3933
3934 id.b.al_pa = 2;
3935 ql_dbg(ql_dbg_async, vha, 0x5075,
3936 "Format 1: assign local id %x remote id %x\n",
3937 vha->d_id.b24, id.b24);
3938 } else {
3939 ql_dbg(ql_dbg_async, vha, 0x5075,
3940 "Format 1: Remote login - Waiting for WWPN %8phC.\n",
3941 rptid_entry->u.f1.port_name);
3942 ha->flags.n2n_bigger = 0;
3943 }
3944
3945 fcport = qla2x00_find_fcport_by_wwpn(vha,
3946 rptid_entry->u.f1.port_name, 1);
3947 spin_unlock_irqrestore(&vha->hw->tgt.sess_lock, flags);
3948
3949
3950 if (fcport) {
3951 fcport->plogi_nack_done_deadline = jiffies + HZ;
3952 fcport->dm_login_expire = jiffies + 2*HZ;
3953 fcport->scan_state = QLA_FCPORT_FOUND;
3954 fcport->n2n_flag = 1;
3955 fcport->keep_nport_handle = 1;
3956 fcport->fc4_type = FS_FC4TYPE_FCP;
3957 if (vha->flags.nvme_enabled)
3958 fcport->fc4_type |= FS_FC4TYPE_NVME;
3959
3960 if (wwn_to_u64(vha->port_name) >
3961 wwn_to_u64(fcport->port_name)) {
3962 fcport->d_id = id;
3963 }
3964
3965 switch (fcport->disc_state) {
3966 case DSC_DELETED:
3967 set_bit(RELOGIN_NEEDED,
3968 &vha->dpc_flags);
3969 break;
3970 case DSC_DELETE_PEND:
3971 break;
3972 default:
3973 qlt_schedule_sess_for_deletion(fcport);
3974 break;
3975 }
3976 } else {
3977 qla24xx_post_newsess_work(vha, &id,
3978 rptid_entry->u.f1.port_name,
3979 rptid_entry->u.f1.node_name,
3980 NULL,
3981 FS_FCP_IS_N2N);
3982 }
3983
3984 /* if our portname is higher than the remote's, initiate N2N login */
3985
3986 set_bit(N2N_LOGIN_NEEDED, &vha->dpc_flags);
3987 return;
3989 case TOPO_FL:
3990 ha->current_topology = ISP_CFG_FL;
3991 break;
3992 case TOPO_F:
3993 ha->current_topology = ISP_CFG_F;
3994 break;
3995 default:
3996 break;
3997 }
3998
3999 ha->flags.gpsc_supported = 1;
4000 ha->current_topology = ISP_CFG_F;
4001 /* buffer to buffer credit flag */
4002 vha->flags.bbcr_enable = (rptid_entry->u.f1.bbcr & 0xf) != 0;
4003
4004 if (rptid_entry->vp_idx == 0) {
4005 if (rptid_entry->vp_status == VP_STAT_COMPL) {
4006 /* FA-WWN is only for physical port */
4007 if (qla_ini_mode_enabled(vha) &&
4008 ha->flags.fawwpn_enabled &&
4009 (rptid_entry->u.f1.flags &
4010 BIT_6)) {
4011 memcpy(vha->port_name,
4012 rptid_entry->u.f1.port_name,
4013 WWN_SIZE);
4014 }
4015
4016 qlt_update_host_map(vha, id);
4017 }
4018
4019 set_bit(REGISTER_FC4_NEEDED, &vha->dpc_flags);
4020 set_bit(REGISTER_FDMI_NEEDED, &vha->dpc_flags);
4021 } else {
4022 if (rptid_entry->vp_status != VP_STAT_COMPL &&
4023 rptid_entry->vp_status != VP_STAT_ID_CHG) {
4024 ql_dbg(ql_dbg_mbx, vha, 0x10ba,
4025 "Could not acquire ID for VP[%d].\n",
4026 rptid_entry->vp_idx);
4027 return;
4028 }
4029
4030 found = 0;
4031 spin_lock_irqsave(&ha->vport_slock, flags);
4032 list_for_each_entry(vp, &ha->vp_list, list) {
4033 if (rptid_entry->vp_idx == vp->vp_idx) {
4034 found = 1;
4035 break;
4036 }
4037 }
4038 spin_unlock_irqrestore(&ha->vport_slock, flags);
4039
4040 if (!found)
4041 return;
4042
4043 qlt_update_host_map(vp, id);
4044
4045 /*
4046 * Cannot configure here as we are still sitting on the
4047 * response queue. Handle it in dpc context.
4048 */
4049 set_bit(VP_IDX_ACQUIRED, &vp->vp_flags);
4050 set_bit(REGISTER_FC4_NEEDED, &vp->dpc_flags);
4051 set_bit(REGISTER_FDMI_NEEDED, &vp->dpc_flags);
4052 }
4053 set_bit(VP_DPC_NEEDED, &vha->dpc_flags);
4054 qla2xxx_wake_dpc(vha);
4055 } else if (rptid_entry->format == 2) {
4056 ql_dbg(ql_dbg_async, vha, 0x505f,
4057 "RIDA: format 2/N2N Primary port id %02x%02x%02x.\n",
4058 rptid_entry->port_id[2], rptid_entry->port_id[1],
4059 rptid_entry->port_id[0]);
4060
4061 ql_dbg(ql_dbg_async, vha, 0x5075,
4062 "N2N: Remote WWPN %8phC.\n",
4063 rptid_entry->u.f2.port_name);
4064
4065 /* N2N (direct connect) */
4066 ha->current_topology = ISP_CFG_N;
4067 ha->flags.rida_fmt2 = 1;
4068 vha->d_id.b.domain = rptid_entry->port_id[2];
4069 vha->d_id.b.area = rptid_entry->port_id[1];
4070 vha->d_id.b.al_pa = rptid_entry->port_id[0];
4071
4072 ha->flags.n2n_ae = 1;
4073 spin_lock_irqsave(&ha->vport_slock, flags);
4074 qlt_update_vp_map(vha, SET_AL_PA);
4075 spin_unlock_irqrestore(&ha->vport_slock, flags);
4076
4077 list_for_each_entry(fcport, &vha->vp_fcports, list) {
4078 fcport->scan_state = QLA_FCPORT_SCAN;
4079 fcport->n2n_flag = 0;
4080 }
4081
4082 fcport = qla2x00_find_fcport_by_wwpn(vha,
4083 rptid_entry->u.f2.port_name, 1);
4084
4085 if (fcport) {
4086 fcport->login_retry = vha->hw->login_retry_count;
4087 fcport->plogi_nack_done_deadline = jiffies + HZ;
4088 fcport->scan_state = QLA_FCPORT_FOUND;
4089 fcport->keep_nport_handle = 1;
4090 fcport->n2n_flag = 1;
4091 fcport->d_id.b.domain =
4092 rptid_entry->u.f2.remote_nport_id[2];
4093 fcport->d_id.b.area =
4094 rptid_entry->u.f2.remote_nport_id[1];
4095 fcport->d_id.b.al_pa =
4096 rptid_entry->u.f2.remote_nport_id[0];
4097 }
4098 }
4099 }
4100
4101 /*
4102 * qla24xx_modify_vp_config
4103 * Change VP configuration for vha
4104 *
4105 * Input:
4106 * vha = adapter block pointer.
4107 *
4108 * Returns:
4109 * qla2xxx local function return status code.
4110 *
4111 * Context:
4112 * Kernel context.
4113 */
4114 int
4115 qla24xx_modify_vp_config(scsi_qla_host_t *vha)
4116 {
4117 int rval;
4118 struct vp_config_entry_24xx *vpmod;
4119 dma_addr_t vpmod_dma;
4120 struct qla_hw_data *ha = vha->hw;
4121 struct scsi_qla_host *base_vha = pci_get_drvdata(ha->pdev);
4122
4123 /* This can be called by the parent */
4124
4125 ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x10bb,
4126 "Entered %s.\n", __func__);
4127
4128 vpmod = dma_pool_zalloc(ha->s_dma_pool, GFP_KERNEL, &vpmod_dma);
4129 if (!vpmod) {
4130 ql_log(ql_log_warn, vha, 0x10bc,
4131 "Failed to allocate modify VP IOCB.\n");
4132 return QLA_MEMORY_ALLOC_FAILED;
4133 }
4134
4135 vpmod->entry_type = VP_CONFIG_IOCB_TYPE;
4136 vpmod->entry_count = 1;
4137 vpmod->command = VCT_COMMAND_MOD_ENABLE_VPS;
4138 vpmod->vp_count = 1;
4139 vpmod->vp_index1 = vha->vp_idx;
4140 vpmod->options_idx1 = BIT_3|BIT_4|BIT_5;
4141
4142 qlt_modify_vp_config(vha, vpmod);
4143
4144 memcpy(vpmod->node_name_idx1, vha->node_name, WWN_SIZE);
4145 memcpy(vpmod->port_name_idx1, vha->port_name, WWN_SIZE);
4146 vpmod->entry_count = 1;
4147
4148 rval = qla2x00_issue_iocb(base_vha, vpmod, vpmod_dma, 0);
4149 if (rval != QLA_SUCCESS) {
4150 ql_dbg(ql_dbg_mbx, vha, 0x10bd,
4151 "Failed to issue VP config IOCB (%x).\n", rval);
4152 } else if (vpmod->comp_status != 0) {
4153 ql_dbg(ql_dbg_mbx, vha, 0x10be,
4154 "Failed to complete IOCB -- error status (%x).\n",
4155 vpmod->comp_status);
4156 rval = QLA_FUNCTION_FAILED;
4157 } else if (vpmod->comp_status != cpu_to_le16(CS_COMPLETE)) {
4158 ql_dbg(ql_dbg_mbx, vha, 0x10bf,
4159 "Failed to complete IOCB -- completion status (%x).\n",
4160 le16_to_cpu(vpmod->comp_status));
4161 rval = QLA_FUNCTION_FAILED;
4162 } else {
4163 /* EMPTY */
4164 ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x10c0,
4165 "Done %s.\n", __func__);
4166 fc_vport_set_state(vha->fc_vport, FC_VPORT_INITIALIZING);
4167 }
4168 dma_pool_free(ha->s_dma_pool, vpmod, vpmod_dma);
4169
4170 return rval;
4171 }
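/*
 * Illustrative sketch (not part of the driver): a freshly created vport is
 * typically pushed through this modify-VP IOCB before fabric login is
 * attempted, so the firmware learns the vport's node and port names.  The
 * calling context and error handling below are hypothetical.
 *
 *	rval = qla24xx_modify_vp_config(vha);
 *	if (rval != QLA_SUCCESS)
 *		return rval;
 */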
4172
4173 /*
4174 * qla2x00_send_change_request
4175 * Register for or disable RSCN notifications from the fabric controller
4176 *
4177 * Input:
4178 * ha = adapter block pointer
4179 * format = registration format:
4180 * 0 - Reserved
4181 * 1 - Fabric detected registration
4182 * 2 - N_port detected registration
4183 * 3 - Full registration
4184 * FF - clear registration
4185 * vp_idx = Virtual port index
4186 *
4187 * Returns:
4188 * qla2x00 local function return status code.
4189 *
4190 * Context:
4191 * Kernel Context
4192 */
4193
4194 int
4195 qla2x00_send_change_request(scsi_qla_host_t *vha, uint16_t format,
4196 uint16_t vp_idx)
4197 {
4198 int rval;
4199 mbx_cmd_t mc;
4200 mbx_cmd_t *mcp = &mc;
4201
4202 ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x10c7,
4203 "Entered %s.\n", __func__);
4204
4205 mcp->mb[0] = MBC_SEND_CHANGE_REQUEST;
4206 mcp->mb[1] = format;
4207 mcp->mb[9] = vp_idx;
4208 mcp->out_mb = MBX_9|MBX_1|MBX_0;
4209 mcp->in_mb = MBX_0|MBX_1;
4210 mcp->tov = MBX_TOV_SECONDS;
4211 mcp->flags = 0;
4212 rval = qla2x00_mailbox_command(vha, mcp);
4213
4214 if (rval == QLA_SUCCESS) {
4215 if (mcp->mb[0] != MBS_COMMAND_COMPLETE) {
4216 rval = BIT_1;
4217 }
4218 } else
4219 rval = BIT_1;
4220
4221 return rval;
4222 }
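/*
 * Illustrative sketch (not part of the driver): an initiator normally asks
 * the fabric for full RSCN registration (format 3) on the physical port
 * (vp_idx 0) once the firmware is ready, and treats anything other than
 * QLA_SUCCESS as "no RSCN delivery".  This is a sketch, not the driver's
 * actual call site.
 *
 *	if (qla2x00_send_change_request(vha, 0x3, 0) != QLA_SUCCESS)
 *		rval = QLA_FUNCTION_FAILED;
 */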
4223
4224 int
4225 qla2x00_dump_ram(scsi_qla_host_t *vha, dma_addr_t req_dma, uint32_t addr,
4226 uint32_t size)
4227 {
4228 int rval;
4229 mbx_cmd_t mc;
4230 mbx_cmd_t *mcp = &mc;
4231
4232 ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x1009,
4233 "Entered %s.\n", __func__);
4234
4235 if (MSW(addr) || IS_FWI2_CAPABLE(vha->hw)) {
4236 mcp->mb[0] = MBC_DUMP_RISC_RAM_EXTENDED;
4237 mcp->mb[8] = MSW(addr);
4238 mcp->out_mb = MBX_8|MBX_0;
4239 } else {
4240 mcp->mb[0] = MBC_DUMP_RISC_RAM;
4241 mcp->out_mb = MBX_0;
4242 }
4243 mcp->mb[1] = LSW(addr);
4244 mcp->mb[2] = MSW(req_dma);
4245 mcp->mb[3] = LSW(req_dma);
4246 mcp->mb[6] = MSW(MSD(req_dma));
4247 mcp->mb[7] = LSW(MSD(req_dma));
4248 mcp->out_mb |= MBX_7|MBX_6|MBX_3|MBX_2|MBX_1;
4249 if (IS_FWI2_CAPABLE(vha->hw)) {
4250 mcp->mb[4] = MSW(size);
4251 mcp->mb[5] = LSW(size);
4252 mcp->out_mb |= MBX_5|MBX_4;
4253 } else {
4254 mcp->mb[4] = LSW(size);
4255 mcp->out_mb |= MBX_4;
4256 }
4257
4258 mcp->in_mb = MBX_0;
4259 mcp->tov = MBX_TOV_SECONDS;
4260 mcp->flags = 0;
4261 rval = qla2x00_mailbox_command(vha, mcp);
4262
4263 if (rval != QLA_SUCCESS) {
4264 ql_dbg(ql_dbg_mbx, vha, 0x1008,
4265 "Failed=%x mb[0]=%x.\n", rval, mcp->mb[0]);
4266 } else {
4267 ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x1007,
4268 "Done %s.\n", __func__);
4269 }
4270
4271 return rval;
4272 }
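/*
 * Illustrative sketch (not part of the driver): dumping a region of RISC
 * RAM needs a DMA-coherent buffer large enough for the requested transfer.
 * "buf_len", "risc_addr" and "words" below are hypothetical; "words" is
 * assumed to be the transfer length in the units the firmware expects.
 *
 *	buf = dma_alloc_coherent(&ha->pdev->dev, buf_len, &buf_dma, GFP_KERNEL);
 *	if (buf)
 *		rval = qla2x00_dump_ram(vha, buf_dma, risc_addr, words);
 */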
4273 /* 84XX Support **************************************************************/
4274
4275 struct cs84xx_mgmt_cmd {
4276 union {
4277 struct verify_chip_entry_84xx req;
4278 struct verify_chip_rsp_84xx rsp;
4279 } p;
4280 };
4281
4282 int
4283 qla84xx_verify_chip(struct scsi_qla_host *vha, uint16_t *status)
4284 {
4285 int rval, retry;
4286 struct cs84xx_mgmt_cmd *mn;
4287 dma_addr_t mn_dma;
4288 uint16_t options;
4289 unsigned long flags;
4290 struct qla_hw_data *ha = vha->hw;
4291
4292 ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x10c8,
4293 "Entered %s.\n", __func__);
4294
4295 mn = dma_pool_alloc(ha->s_dma_pool, GFP_KERNEL, &mn_dma);
4296 if (mn == NULL) {
4297 return QLA_MEMORY_ALLOC_FAILED;
4298 }
4299
4300 /* Force Update? */
4301 options = ha->cs84xx->fw_update ? VCO_FORCE_UPDATE : 0;
4302 /* Diagnostic firmware? */
4303 /* options |= MENLO_DIAG_FW; */
4304 /* We update the firmware with only one data sequence. */
4305 options |= VCO_END_OF_DATA;
4306
4307 do {
4308 retry = 0;
4309 memset(mn, 0, sizeof(*mn));
4310 mn->p.req.entry_type = VERIFY_CHIP_IOCB_TYPE;
4311 mn->p.req.entry_count = 1;
4312 mn->p.req.options = cpu_to_le16(options);
4313
4314 ql_dbg(ql_dbg_mbx + ql_dbg_buffer, vha, 0x111c,
4315 "Dump of Verify Request.\n");
4316 ql_dump_buffer(ql_dbg_mbx + ql_dbg_buffer, vha, 0x111e,
4317 mn, sizeof(*mn));
4318
4319 rval = qla2x00_issue_iocb_timeout(vha, mn, mn_dma, 0, 120);
4320 if (rval != QLA_SUCCESS) {
4321 ql_dbg(ql_dbg_mbx, vha, 0x10cb,
4322 "Failed to issue verify IOCB (%x).\n", rval);
4323 goto verify_done;
4324 }
4325
4326 ql_dbg(ql_dbg_mbx + ql_dbg_buffer, vha, 0x1110,
4327 "Dump of Verify Response.\n");
4328 ql_dump_buffer(ql_dbg_mbx + ql_dbg_buffer, vha, 0x1118,
4329 mn, sizeof(*mn));
4330
4331 status[0] = le16_to_cpu(mn->p.rsp.comp_status);
4332 status[1] = status[0] == CS_VCS_CHIP_FAILURE ?
4333 le16_to_cpu(mn->p.rsp.failure_code) : 0;
4334 ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x10ce,
4335 "cs=%x fc=%x.\n", status[0], status[1]);
4336
4337 if (status[0] != CS_COMPLETE) {
4338 rval = QLA_FUNCTION_FAILED;
4339 if (!(options & VCO_DONT_UPDATE_FW)) {
4340 ql_dbg(ql_dbg_mbx, vha, 0x10cf,
4341 "Firmware update failed. Retrying "
4342 "without update firmware.\n");
4343 options |= VCO_DONT_UPDATE_FW;
4344 options &= ~VCO_FORCE_UPDATE;
4345 retry = 1;
4346 }
4347 } else {
4348 ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x10d0,
4349 "Firmware updated to %x.\n",
4350 le32_to_cpu(mn->p.rsp.fw_ver));
4351
4352 /* NOTE: we only update OP firmware. */
4353 spin_lock_irqsave(&ha->cs84xx->access_lock, flags);
4354 ha->cs84xx->op_fw_version =
4355 le32_to_cpu(mn->p.rsp.fw_ver);
4356 spin_unlock_irqrestore(&ha->cs84xx->access_lock,
4357 flags);
4358 }
4359 } while (retry);
4360
4361 verify_done:
4362 dma_pool_free(ha->s_dma_pool, mn, mn_dma);
4363
4364 if (rval != QLA_SUCCESS) {
4365 ql_dbg(ql_dbg_mbx, vha, 0x10d1,
4366 "Failed=%x.\n", rval);
4367 } else {
4368 ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x10d2,
4369 "Done %s.\n", __func__);
4370 }
4371
4372 return rval;
4373 }
4374
4375 int
4376 qla25xx_init_req_que(struct scsi_qla_host *vha, struct req_que *req)
4377 {
4378 int rval;
4379 unsigned long flags;
4380 mbx_cmd_t mc;
4381 mbx_cmd_t *mcp = &mc;
4382 struct qla_hw_data *ha = vha->hw;
4383
4384 if (!ha->flags.fw_started)
4385 return QLA_SUCCESS;
4386
4387 ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x10d3,
4388 "Entered %s.\n", __func__);
4389
4390 if (IS_SHADOW_REG_CAPABLE(ha))
4391 req->options |= BIT_13;
4392
4393 mcp->mb[0] = MBC_INITIALIZE_MULTIQ;
4394 mcp->mb[1] = req->options;
4395 mcp->mb[2] = MSW(LSD(req->dma));
4396 mcp->mb[3] = LSW(LSD(req->dma));
4397 mcp->mb[6] = MSW(MSD(req->dma));
4398 mcp->mb[7] = LSW(MSD(req->dma));
4399 mcp->mb[5] = req->length;
4400 if (req->rsp)
4401 mcp->mb[10] = req->rsp->id;
4402 mcp->mb[12] = req->qos;
4403 mcp->mb[11] = req->vp_idx;
4404 mcp->mb[13] = req->rid;
4405 if (IS_QLA83XX(ha) || IS_QLA27XX(ha) || IS_QLA28XX(ha))
4406 mcp->mb[15] = 0;
4407
4408 mcp->mb[4] = req->id;
4409 /* que in ptr index */
4410 mcp->mb[8] = 0;
4411 /* que out ptr index */
4412 mcp->mb[9] = *req->out_ptr = 0;
4413 mcp->out_mb = MBX_14|MBX_13|MBX_12|MBX_11|MBX_10|MBX_9|MBX_8|MBX_7|
4414 MBX_6|MBX_5|MBX_4|MBX_3|MBX_2|MBX_1|MBX_0;
4415 mcp->in_mb = MBX_0;
4416 mcp->flags = MBX_DMA_OUT;
4417 mcp->tov = MBX_TOV_SECONDS * 2;
4418
4419 if (IS_QLA81XX(ha) || IS_QLA83XX(ha) || IS_QLA27XX(ha) ||
4420 IS_QLA28XX(ha))
4421 mcp->in_mb |= MBX_1;
4422 if (IS_QLA83XX(ha) || IS_QLA27XX(ha) || IS_QLA28XX(ha)) {
4423 mcp->out_mb |= MBX_15;
4424 /* debug q create issue in SR-IOV */
4425 mcp->in_mb |= MBX_9 | MBX_8 | MBX_7;
4426 }
4427
4428 spin_lock_irqsave(&ha->hardware_lock, flags);
4429 if (!(req->options & BIT_0)) {
4430 WRT_REG_DWORD(req->req_q_in, 0);
4431 if (!IS_QLA83XX(ha) && !IS_QLA27XX(ha) && !IS_QLA28XX(ha))
4432 WRT_REG_DWORD(req->req_q_out, 0);
4433 }
4434 spin_unlock_irqrestore(&ha->hardware_lock, flags);
4435
4436 rval = qla2x00_mailbox_command(vha, mcp);
4437 if (rval != QLA_SUCCESS) {
4438 ql_dbg(ql_dbg_mbx, vha, 0x10d4,
4439 "Failed=%x mb[0]=%x.\n", rval, mcp->mb[0]);
4440 } else {
4441 ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x10d5,
4442 "Done %s.\n", __func__);
4443 }
4444
4445 return rval;
4446 }
4447
4448 int
4449 qla25xx_init_rsp_que(struct scsi_qla_host *vha, struct rsp_que *rsp)
4450 {
4451 int rval;
4452 unsigned long flags;
4453 mbx_cmd_t mc;
4454 mbx_cmd_t *mcp = &mc;
4455 struct qla_hw_data *ha = vha->hw;
4456
4457 if (!ha->flags.fw_started)
4458 return QLA_SUCCESS;
4459
4460 ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x10d6,
4461 "Entered %s.\n", __func__);
4462
4463 if (IS_SHADOW_REG_CAPABLE(ha))
4464 rsp->options |= BIT_13;
4465
4466 mcp->mb[0] = MBC_INITIALIZE_MULTIQ;
4467 mcp->mb[1] = rsp->options;
4468 mcp->mb[2] = MSW(LSD(rsp->dma));
4469 mcp->mb[3] = LSW(LSD(rsp->dma));
4470 mcp->mb[6] = MSW(MSD(rsp->dma));
4471 mcp->mb[7] = LSW(MSD(rsp->dma));
4472 mcp->mb[5] = rsp->length;
4473 mcp->mb[14] = rsp->msix->entry;
4474 mcp->mb[13] = rsp->rid;
4475 if (IS_QLA83XX(ha) || IS_QLA27XX(ha) || IS_QLA28XX(ha))
4476 mcp->mb[15] = 0;
4477
4478 mcp->mb[4] = rsp->id;
4479 /* que in ptr index */
4480 mcp->mb[8] = *rsp->in_ptr = 0;
4481 /* que out ptr index */
4482 mcp->mb[9] = 0;
4483 mcp->out_mb = MBX_14|MBX_13|MBX_9|MBX_8|MBX_7
4484 |MBX_6|MBX_5|MBX_4|MBX_3|MBX_2|MBX_1|MBX_0;
4485 mcp->in_mb = MBX_0;
4486 mcp->flags = MBX_DMA_OUT;
4487 mcp->tov = MBX_TOV_SECONDS * 2;
4488
4489 if (IS_QLA81XX(ha)) {
4490 mcp->out_mb |= MBX_12|MBX_11|MBX_10;
4491 mcp->in_mb |= MBX_1;
4492 } else if (IS_QLA83XX(ha) || IS_QLA27XX(ha) || IS_QLA28XX(ha)) {
4493 mcp->out_mb |= MBX_15|MBX_12|MBX_11|MBX_10;
4494 mcp->in_mb |= MBX_1;
4495 /* debug q create issue in SR-IOV */
4496 mcp->in_mb |= MBX_9 | MBX_8 | MBX_7;
4497 }
4498
4499 spin_lock_irqsave(&ha->hardware_lock, flags);
4500 if (!(rsp->options & BIT_0)) {
4501 WRT_REG_DWORD(rsp->rsp_q_out, 0);
4502 if (!IS_QLA83XX(ha) && !IS_QLA27XX(ha) && !IS_QLA28XX(ha))
4503 WRT_REG_DWORD(rsp->rsp_q_in, 0);
4504 }
4505
4506 spin_unlock_irqrestore(&ha->hardware_lock, flags);
4507
4508 rval = qla2x00_mailbox_command(vha, mcp);
4509 if (rval != QLA_SUCCESS) {
4510 ql_dbg(ql_dbg_mbx, vha, 0x10d7,
4511 "Failed=%x mb[0]=%x.\n", rval, mcp->mb[0]);
4512 } else {
4513 ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x10d8,
4514 "Done %s.\n", __func__);
4515 }
4516
4517 return rval;
4518 }
4519
4520 int
4521 qla81xx_idc_ack(scsi_qla_host_t *vha, uint16_t *mb)
4522 {
4523 int rval;
4524 mbx_cmd_t mc;
4525 mbx_cmd_t *mcp = &mc;
4526
4527 ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x10d9,
4528 "Entered %s.\n", __func__);
4529
4530 mcp->mb[0] = MBC_IDC_ACK;
4531 memcpy(&mcp->mb[1], mb, QLA_IDC_ACK_REGS * sizeof(uint16_t));
4532 mcp->out_mb = MBX_7|MBX_6|MBX_5|MBX_4|MBX_3|MBX_2|MBX_1|MBX_0;
4533 mcp->in_mb = MBX_0;
4534 mcp->tov = MBX_TOV_SECONDS;
4535 mcp->flags = 0;
4536 rval = qla2x00_mailbox_command(vha, mcp);
4537
4538 if (rval != QLA_SUCCESS) {
4539 ql_dbg(ql_dbg_mbx, vha, 0x10da,
4540 "Failed=%x mb[0]=%x.\n", rval, mcp->mb[0]);
4541 } else {
4542 ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x10db,
4543 "Done %s.\n", __func__);
4544 }
4545
4546 return rval;
4547 }
4548
4549 int
4550 qla81xx_fac_get_sector_size(scsi_qla_host_t *vha, uint32_t *sector_size)
4551 {
4552 int rval;
4553 mbx_cmd_t mc;
4554 mbx_cmd_t *mcp = &mc;
4555
4556 ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x10dc,
4557 "Entered %s.\n", __func__);
4558
4559 if (!IS_QLA81XX(vha->hw) && !IS_QLA83XX(vha->hw) &&
4560 !IS_QLA27XX(vha->hw) && !IS_QLA28XX(vha->hw))
4561 return QLA_FUNCTION_FAILED;
4562
4563 mcp->mb[0] = MBC_FLASH_ACCESS_CTRL;
4564 mcp->mb[1] = FAC_OPT_CMD_GET_SECTOR_SIZE;
4565 mcp->out_mb = MBX_1|MBX_0;
4566 mcp->in_mb = MBX_1|MBX_0;
4567 mcp->tov = MBX_TOV_SECONDS;
4568 mcp->flags = 0;
4569 rval = qla2x00_mailbox_command(vha, mcp);
4570
4571 if (rval != QLA_SUCCESS) {
4572 ql_dbg(ql_dbg_mbx, vha, 0x10dd,
4573 "Failed=%x mb[0]=%x mb[1]=%x.\n",
4574 rval, mcp->mb[0], mcp->mb[1]);
4575 } else {
4576 ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x10de,
4577 "Done %s.\n", __func__);
4578 *sector_size = mcp->mb[1];
4579 }
4580
4581 return rval;
4582 }
4583
4584 int
4585 qla81xx_fac_do_write_enable(scsi_qla_host_t *vha, int enable)
4586 {
4587 int rval;
4588 mbx_cmd_t mc;
4589 mbx_cmd_t *mcp = &mc;
4590
4591 if (!IS_QLA81XX(vha->hw) && !IS_QLA83XX(vha->hw) &&
4592 !IS_QLA27XX(vha->hw) && !IS_QLA28XX(vha->hw))
4593 return QLA_FUNCTION_FAILED;
4594
4595 ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x10df,
4596 "Entered %s.\n", __func__);
4597
4598 mcp->mb[0] = MBC_FLASH_ACCESS_CTRL;
4599 mcp->mb[1] = enable ? FAC_OPT_CMD_WRITE_ENABLE :
4600 FAC_OPT_CMD_WRITE_PROTECT;
4601 mcp->out_mb = MBX_1|MBX_0;
4602 mcp->in_mb = MBX_1|MBX_0;
4603 mcp->tov = MBX_TOV_SECONDS;
4604 mcp->flags = 0;
4605 rval = qla2x00_mailbox_command(vha, mcp);
4606
4607 if (rval != QLA_SUCCESS) {
4608 ql_dbg(ql_dbg_mbx, vha, 0x10e0,
4609 "Failed=%x mb[0]=%x mb[1]=%x.\n",
4610 rval, mcp->mb[0], mcp->mb[1]);
4611 } else {
4612 ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x10e1,
4613 "Done %s.\n", __func__);
4614 }
4615
4616 return rval;
4617 }
4618
4619 int
4620 qla81xx_fac_erase_sector(scsi_qla_host_t *vha, uint32_t start, uint32_t finish)
4621 {
4622 int rval;
4623 mbx_cmd_t mc;
4624 mbx_cmd_t *mcp = &mc;
4625
4626 if (!IS_QLA81XX(vha->hw) && !IS_QLA83XX(vha->hw) &&
4627 !IS_QLA27XX(vha->hw) && !IS_QLA28XX(vha->hw))
4628 return QLA_FUNCTION_FAILED;
4629
4630 ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x10e2,
4631 "Entered %s.\n", __func__);
4632
4633 mcp->mb[0] = MBC_FLASH_ACCESS_CTRL;
4634 mcp->mb[1] = FAC_OPT_CMD_ERASE_SECTOR;
4635 mcp->mb[2] = LSW(start);
4636 mcp->mb[3] = MSW(start);
4637 mcp->mb[4] = LSW(finish);
4638 mcp->mb[5] = MSW(finish);
4639 mcp->out_mb = MBX_5|MBX_4|MBX_3|MBX_2|MBX_1|MBX_0;
4640 mcp->in_mb = MBX_2|MBX_1|MBX_0;
4641 mcp->tov = MBX_TOV_SECONDS;
4642 mcp->flags = 0;
4643 rval = qla2x00_mailbox_command(vha, mcp);
4644
4645 if (rval != QLA_SUCCESS) {
4646 ql_dbg(ql_dbg_mbx, vha, 0x10e3,
4647 "Failed=%x mb[0]=%x mb[1]=%x mb[2]=%x.\n",
4648 rval, mcp->mb[0], mcp->mb[1], mcp->mb[2]);
4649 } else {
4650 ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x10e4,
4651 "Done %s.\n", __func__);
4652 }
4653
4654 return rval;
4655 }
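/*
 * Illustrative sketch (not part of the driver): a flash sector erase on
 * FAC-capable adapters is usually bracketed by write-enable and
 * write-protect, with "start"/"finish" aligned to the sector size reported
 * by qla81xx_fac_get_sector_size().  Variable names are hypothetical.
 *
 *	if (qla81xx_fac_do_write_enable(vha, 1) == QLA_SUCCESS) {
 *		rval = qla81xx_fac_erase_sector(vha, start, finish);
 *		qla81xx_fac_do_write_enable(vha, 0);
 *	}
 */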
4656
4657 int
4658 qla81xx_fac_semaphore_access(scsi_qla_host_t *vha, int lock)
4659 {
4660 int rval = QLA_SUCCESS;
4661 mbx_cmd_t mc;
4662 mbx_cmd_t *mcp = &mc;
4663 struct qla_hw_data *ha = vha->hw;
4664
4665 if (!IS_QLA81XX(ha) && !IS_QLA83XX(ha) &&
4666 !IS_QLA27XX(ha) && !IS_QLA28XX(ha))
4667 return rval;
4668
4669 ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x10e2,
4670 "Entered %s.\n", __func__);
4671
4672 mcp->mb[0] = MBC_FLASH_ACCESS_CTRL;
4673 mcp->mb[1] = (lock ? FAC_OPT_CMD_LOCK_SEMAPHORE :
4674 FAC_OPT_CMD_UNLOCK_SEMAPHORE);
4675 mcp->out_mb = MBX_1|MBX_0;
4676 mcp->in_mb = MBX_1|MBX_0;
4677 mcp->tov = MBX_TOV_SECONDS;
4678 mcp->flags = 0;
4679 rval = qla2x00_mailbox_command(vha, mcp);
4680
4681 if (rval != QLA_SUCCESS) {
4682 ql_dbg(ql_dbg_mbx, vha, 0x10e3,
4683 "Failed=%x mb[0]=%x mb[1]=%x mb[2]=%x.\n",
4684 rval, mcp->mb[0], mcp->mb[1], mcp->mb[2]);
4685 } else {
4686 ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x10e4,
4687 "Done %s.\n", __func__);
4688 }
4689
4690 return rval;
4691 }
4692
4693 int
4694 qla81xx_restart_mpi_firmware(scsi_qla_host_t *vha)
4695 {
4696 int rval = 0;
4697 mbx_cmd_t mc;
4698 mbx_cmd_t *mcp = &mc;
4699
4700 ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x10e5,
4701 "Entered %s.\n", __func__);
4702
4703 mcp->mb[0] = MBC_RESTART_MPI_FW;
4704 mcp->out_mb = MBX_0;
4705 mcp->in_mb = MBX_0|MBX_1;
4706 mcp->tov = MBX_TOV_SECONDS;
4707 mcp->flags = 0;
4708 rval = qla2x00_mailbox_command(vha, mcp);
4709
4710 if (rval != QLA_SUCCESS) {
4711 ql_dbg(ql_dbg_mbx, vha, 0x10e6,
4712 "Failed=%x mb[0]=%x mb[1]=%x.\n",
4713 rval, mcp->mb[0], mcp->mb[1]);
4714 } else {
4715 ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x10e7,
4716 "Done %s.\n", __func__);
4717 }
4718
4719 return rval;
4720 }
4721
4722 int
4723 qla82xx_set_driver_version(scsi_qla_host_t *vha, char *version)
4724 {
4725 int rval;
4726 mbx_cmd_t mc;
4727 mbx_cmd_t *mcp = &mc;
4728 int i;
4729 int len;
4730 uint16_t *str;
4731 struct qla_hw_data *ha = vha->hw;
4732
4733 if (!IS_P3P_TYPE(ha))
4734 return QLA_FUNCTION_FAILED;
4735
4736 ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x117b,
4737 "Entered %s.\n", __func__);
4738
4739 str = (void *)version;
4740 len = strlen(version);
4741
4742 mcp->mb[0] = MBC_SET_RNID_PARAMS;
4743 mcp->mb[1] = RNID_TYPE_SET_VERSION << 8;
4744 mcp->out_mb = MBX_1|MBX_0;
4745 for (i = 4; i < 16 && len; i++, str++, len -= 2) {
4746 mcp->mb[i] = cpu_to_le16p(str);
4747 mcp->out_mb |= 1<<i;
4748 }
4749 for (; i < 16; i++) {
4750 mcp->mb[i] = 0;
4751 mcp->out_mb |= 1<<i;
4752 }
4753 mcp->in_mb = MBX_1|MBX_0;
4754 mcp->tov = MBX_TOV_SECONDS;
4755 mcp->flags = 0;
4756 rval = qla2x00_mailbox_command(vha, mcp);
4757
4758 if (rval != QLA_SUCCESS) {
4759 ql_dbg(ql_dbg_mbx, vha, 0x117c,
4760 "Failed=%x mb[0]=%x,%x.\n", rval, mcp->mb[0], mcp->mb[1]);
4761 } else {
4762 ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x117d,
4763 "Done %s.\n", __func__);
4764 }
4765
4766 return rval;
4767 }
4768
4769 int
4770 qla25xx_set_driver_version(scsi_qla_host_t *vha, char *version)
4771 {
4772 int rval;
4773 mbx_cmd_t mc;
4774 mbx_cmd_t *mcp = &mc;
4775 int len;
4776 uint16_t dwlen;
4777 uint8_t *str;
4778 dma_addr_t str_dma;
4779 struct qla_hw_data *ha = vha->hw;
4780
4781 if (!IS_FWI2_CAPABLE(ha) || IS_QLA24XX_TYPE(ha) || IS_QLA81XX(ha) ||
4782 IS_P3P_TYPE(ha))
4783 return QLA_FUNCTION_FAILED;
4784
4785 ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x117e,
4786 "Entered %s.\n", __func__);
4787
4788 str = dma_pool_alloc(ha->s_dma_pool, GFP_KERNEL, &str_dma);
4789 if (!str) {
4790 ql_log(ql_log_warn, vha, 0x117f,
4791 "Failed to allocate driver version param.\n");
4792 return QLA_MEMORY_ALLOC_FAILED;
4793 }
4794
4795 memcpy(str, "\x7\x3\x11\x0", 4);
4796 dwlen = str[0];
4797 len = dwlen * 4 - 4;
4798 memset(str + 4, 0, len);
4799 if (len > strlen(version))
4800 len = strlen(version);
4801 memcpy(str + 4, version, len);
4802
4803 mcp->mb[0] = MBC_SET_RNID_PARAMS;
4804 mcp->mb[1] = RNID_TYPE_SET_VERSION << 8 | dwlen;
4805 mcp->mb[2] = MSW(LSD(str_dma));
4806 mcp->mb[3] = LSW(LSD(str_dma));
4807 mcp->mb[6] = MSW(MSD(str_dma));
4808 mcp->mb[7] = LSW(MSD(str_dma));
4809 mcp->out_mb = MBX_7|MBX_6|MBX_3|MBX_2|MBX_1|MBX_0;
4810 mcp->in_mb = MBX_1|MBX_0;
4811 mcp->tov = MBX_TOV_SECONDS;
4812 mcp->flags = 0;
4813 rval = qla2x00_mailbox_command(vha, mcp);
4814
4815 if (rval != QLA_SUCCESS) {
4816 ql_dbg(ql_dbg_mbx, vha, 0x1180,
4817 "Failed=%x mb[0]=%x,%x.\n", rval, mcp->mb[0], mcp->mb[1]);
4818 } else {
4819 ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x1181,
4820 "Done %s.\n", __func__);
4821 }
4822
4823 dma_pool_free(ha->s_dma_pool, str, str_dma);
4824
4825 return rval;
4826 }
4827
4828 int
4829 qla24xx_get_port_login_templ(scsi_qla_host_t *vha, dma_addr_t buf_dma,
4830 void *buf, uint16_t bufsiz)
4831 {
4832 int rval, i;
4833 mbx_cmd_t mc;
4834 mbx_cmd_t *mcp = &mc;
4835 uint32_t *bp;
4836
4837 if (!IS_FWI2_CAPABLE(vha->hw))
4838 return QLA_FUNCTION_FAILED;
4839
4840 ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x1159,
4841 "Entered %s.\n", __func__);
4842
4843 mcp->mb[0] = MBC_GET_RNID_PARAMS;
4844 mcp->mb[1] = RNID_TYPE_PORT_LOGIN << 8;
4845 mcp->mb[2] = MSW(buf_dma);
4846 mcp->mb[3] = LSW(buf_dma);
4847 mcp->mb[6] = MSW(MSD(buf_dma));
4848 mcp->mb[7] = LSW(MSD(buf_dma));
4849 mcp->mb[8] = bufsiz/4;
4850 mcp->out_mb = MBX_8|MBX_7|MBX_6|MBX_5|MBX_4|MBX_3|MBX_2|MBX_1|MBX_0;
4851 mcp->in_mb = MBX_1|MBX_0;
4852 mcp->tov = MBX_TOV_SECONDS;
4853 mcp->flags = 0;
4854 rval = qla2x00_mailbox_command(vha, mcp);
4855
4856 if (rval != QLA_SUCCESS) {
4857 ql_dbg(ql_dbg_mbx, vha, 0x115a,
4858 "Failed=%x mb[0]=%x,%x.\n", rval, mcp->mb[0], mcp->mb[1]);
4859 } else {
4860 ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x115b,
4861 "Done %s.\n", __func__);
4862 bp = (uint32_t *) buf;
4863 for (i = 0; i < (bufsiz-4)/4; i++, bp++)
4864 *bp = le32_to_cpu(*bp);
4865 }
4866
4867 return rval;
4868 }
4869
4870 int
4871 qla25xx_set_els_cmds_supported(scsi_qla_host_t *vha)
4872 {
4873 int rval;
4874 mbx_cmd_t mc;
4875 mbx_cmd_t *mcp = &mc;
4876 uint8_t *els_cmd_map;
4877 dma_addr_t els_cmd_map_dma;
4878 uint cmd_opcode = ELS_COMMAND_RDP;
4879 uint index = cmd_opcode / 8;
4880 uint bit = cmd_opcode % 8;
4881 struct qla_hw_data *ha = vha->hw;
4882
4883 if (!IS_QLA25XX(ha) && !IS_QLA2031(ha) && !IS_QLA27XX(ha))
4884 return QLA_SUCCESS;
4885
4886 ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x1197,
4887 "Entered %s.\n", __func__);
4888
4889 els_cmd_map = dma_alloc_coherent(&ha->pdev->dev, ELS_CMD_MAP_SIZE,
4890 &els_cmd_map_dma, GFP_KERNEL);
4891 if (!els_cmd_map) {
4892 ql_log(ql_log_warn, vha, 0x7101,
4893 "Failed to allocate RDP els command param.\n");
4894 return QLA_MEMORY_ALLOC_FAILED;
4895 }
4896
4897 els_cmd_map[index] |= 1 << bit;
4898
4899 mcp->mb[0] = MBC_SET_RNID_PARAMS;
4900 mcp->mb[1] = RNID_TYPE_ELS_CMD << 8;
4901 mcp->mb[2] = MSW(LSD(els_cmd_map_dma));
4902 mcp->mb[3] = LSW(LSD(els_cmd_map_dma));
4903 mcp->mb[6] = MSW(MSD(els_cmd_map_dma));
4904 mcp->mb[7] = LSW(MSD(els_cmd_map_dma));
4905 mcp->out_mb = MBX_7|MBX_6|MBX_3|MBX_2|MBX_1|MBX_0;
4906 mcp->in_mb = MBX_1|MBX_0;
4907 mcp->tov = MBX_TOV_SECONDS;
4908 mcp->flags = MBX_DMA_OUT;
4909 mcp->buf_size = ELS_CMD_MAP_SIZE;
4910 rval = qla2x00_mailbox_command(vha, mcp);
4911
4912 if (rval != QLA_SUCCESS) {
4913 ql_dbg(ql_dbg_mbx, vha, 0x118d,
4914 "Failed=%x (%x,%x).\n", rval, mcp->mb[0], mcp->mb[1]);
4915 } else {
4916 ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x118c,
4917 "Done %s.\n", __func__);
4918 }
4919
4920 dma_free_coherent(&ha->pdev->dev, ELS_CMD_MAP_SIZE,
4921 els_cmd_map, els_cmd_map_dma);
4922
4923 return rval;
4924 }
4925
4926 int
4927 qla24xx_get_buffer_credits(scsi_qla_host_t *vha, struct buffer_credit_24xx *bbc,
4928 dma_addr_t bbc_dma)
4929 {
4930 mbx_cmd_t mc;
4931 mbx_cmd_t *mcp = &mc;
4932 int rval;
4933
4934 if (!IS_FWI2_CAPABLE(vha->hw))
4935 return QLA_FUNCTION_FAILED;
4936
4937 ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x118e,
4938 "Entered %s.\n", __func__);
4939
4940 mcp->mb[0] = MBC_GET_RNID_PARAMS;
4941 mcp->mb[1] = RNID_BUFFER_CREDITS << 8;
4942 mcp->mb[2] = MSW(LSD(bbc_dma));
4943 mcp->mb[3] = LSW(LSD(bbc_dma));
4944 mcp->mb[6] = MSW(MSD(bbc_dma));
4945 mcp->mb[7] = LSW(MSD(bbc_dma));
4946 mcp->mb[8] = sizeof(*bbc) / sizeof(*bbc->parameter);
4947 mcp->out_mb = MBX_8|MBX_7|MBX_6|MBX_3|MBX_2|MBX_1|MBX_0;
4948 mcp->in_mb = MBX_1|MBX_0;
4949 mcp->buf_size = sizeof(*bbc);
4950 mcp->flags = MBX_DMA_IN;
4951 mcp->tov = MBX_TOV_SECONDS;
4952 rval = qla2x00_mailbox_command(vha, mcp);
4953
4954 if (rval != QLA_SUCCESS) {
4955 ql_dbg(ql_dbg_mbx, vha, 0x118f,
4956 "Failed=%x mb[0]=%x,%x.\n", rval, mcp->mb[0], mcp->mb[1]);
4957 } else {
4958 ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x1190,
4959 "Done %s.\n", __func__);
4960 }
4961
4962 return rval;
4963 }
4964
4965 static int
4966 qla2x00_read_asic_temperature(scsi_qla_host_t *vha, uint16_t *temp)
4967 {
4968 int rval;
4969 mbx_cmd_t mc;
4970 mbx_cmd_t *mcp = &mc;
4971
4972 if (!IS_FWI2_CAPABLE(vha->hw))
4973 return QLA_FUNCTION_FAILED;
4974
4975 ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x1159,
4976 "Entered %s.\n", __func__);
4977
4978 mcp->mb[0] = MBC_GET_RNID_PARAMS;
4979 mcp->mb[1] = RNID_TYPE_ASIC_TEMP << 8;
4980 mcp->out_mb = MBX_1|MBX_0;
4981 mcp->in_mb = MBX_1|MBX_0;
4982 mcp->tov = MBX_TOV_SECONDS;
4983 mcp->flags = 0;
4984 rval = qla2x00_mailbox_command(vha, mcp);
4985 *temp = mcp->mb[1];
4986
4987 if (rval != QLA_SUCCESS) {
4988 ql_dbg(ql_dbg_mbx, vha, 0x115a,
4989 "Failed=%x mb[0]=%x,%x.\n", rval, mcp->mb[0], mcp->mb[1]);
4990 } else {
4991 ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x115b,
4992 "Done %s.\n", __func__);
4993 }
4994
4995 return rval;
4996 }
4997
4998 int
4999 qla2x00_read_sfp(scsi_qla_host_t *vha, dma_addr_t sfp_dma, uint8_t *sfp,
5000 uint16_t dev, uint16_t off, uint16_t len, uint16_t opt)
5001 {
5002 int rval;
5003 mbx_cmd_t mc;
5004 mbx_cmd_t *mcp = &mc;
5005 struct qla_hw_data *ha = vha->hw;
5006
5007 ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x10e8,
5008 "Entered %s.\n", __func__);
5009
5010 if (!IS_FWI2_CAPABLE(ha))
5011 return QLA_FUNCTION_FAILED;
5012
5013 if (len == 1)
5014 opt |= BIT_0;
5015
5016 mcp->mb[0] = MBC_READ_SFP;
5017 mcp->mb[1] = dev;
5018 mcp->mb[2] = MSW(LSD(sfp_dma));
5019 mcp->mb[3] = LSW(LSD(sfp_dma));
5020 mcp->mb[6] = MSW(MSD(sfp_dma));
5021 mcp->mb[7] = LSW(MSD(sfp_dma));
5022 mcp->mb[8] = len;
5023 mcp->mb[9] = off;
5024 mcp->mb[10] = opt;
5025 mcp->out_mb = MBX_10|MBX_9|MBX_8|MBX_7|MBX_6|MBX_3|MBX_2|MBX_1|MBX_0;
5026 mcp->in_mb = MBX_1|MBX_0;
5027 mcp->tov = MBX_TOV_SECONDS;
5028 mcp->flags = 0;
5029 rval = qla2x00_mailbox_command(vha, mcp);
5030
5031 if (opt & BIT_0)
5032 *sfp = mcp->mb[1];
5033
5034 if (rval != QLA_SUCCESS) {
5035 ql_dbg(ql_dbg_mbx, vha, 0x10e9,
5036 "Failed=%x mb[0]=%x.\n", rval, mcp->mb[0]);
5037 if (mcp->mb[0] == MBS_COMMAND_ERROR && mcp->mb[1] == 0x22) {
5038 /* sfp is not there */
5039 rval = QLA_INTERFACE_ERROR;
5040 }
5041 } else {
5042 ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x10ea,
5043 "Done %s.\n", __func__);
5044 }
5045
5046 return rval;
5047 }
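/*
 * Illustrative sketch (not part of the driver): for a single-byte read the
 * routine sets BIT_0 itself and returns the byte through *sfp (taken from
 * mb[1]), so the caller can pass a zero DMA address.  "dev" and "off" are
 * hypothetical register addresses; the option bits mirror the
 * thermal-sensor usage later in this file.
 *
 *	uint8_t byte = 0;
 *
 *	rval = qla2x00_read_sfp(vha, 0, &byte, dev, off, 1, BIT_13|BIT_0);
 */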
5048
5049 int
5050 qla2x00_write_sfp(scsi_qla_host_t *vha, dma_addr_t sfp_dma, uint8_t *sfp,
5051 uint16_t dev, uint16_t off, uint16_t len, uint16_t opt)
5052 {
5053 int rval;
5054 mbx_cmd_t mc;
5055 mbx_cmd_t *mcp = &mc;
5056 struct qla_hw_data *ha = vha->hw;
5057
5058 ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x10eb,
5059 "Entered %s.\n", __func__);
5060
5061 if (!IS_FWI2_CAPABLE(ha))
5062 return QLA_FUNCTION_FAILED;
5063
5064 if (len == 1)
5065 opt |= BIT_0;
5066
5067 if (opt & BIT_0)
5068 len = *sfp;
5069
5070 mcp->mb[0] = MBC_WRITE_SFP;
5071 mcp->mb[1] = dev;
5072 mcp->mb[2] = MSW(LSD(sfp_dma));
5073 mcp->mb[3] = LSW(LSD(sfp_dma));
5074 mcp->mb[6] = MSW(MSD(sfp_dma));
5075 mcp->mb[7] = LSW(MSD(sfp_dma));
5076 mcp->mb[8] = len;
5077 mcp->mb[9] = off;
5078 mcp->mb[10] = opt;
5079 mcp->out_mb = MBX_10|MBX_9|MBX_8|MBX_7|MBX_6|MBX_3|MBX_2|MBX_1|MBX_0;
5080 mcp->in_mb = MBX_1|MBX_0;
5081 mcp->tov = MBX_TOV_SECONDS;
5082 mcp->flags = 0;
5083 rval = qla2x00_mailbox_command(vha, mcp);
5084
5085 if (rval != QLA_SUCCESS) {
5086 ql_dbg(ql_dbg_mbx, vha, 0x10ec,
5087 "Failed=%x mb[0]=%x.\n", rval, mcp->mb[0]);
5088 } else {
5089 ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x10ed,
5090 "Done %s.\n", __func__);
5091 }
5092
5093 return rval;
5094 }
5095
5096 int
5097 qla2x00_get_xgmac_stats(scsi_qla_host_t *vha, dma_addr_t stats_dma,
5098 uint16_t size_in_bytes, uint16_t *actual_size)
5099 {
5100 int rval;
5101 mbx_cmd_t mc;
5102 mbx_cmd_t *mcp = &mc;
5103
5104 ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x10ee,
5105 "Entered %s.\n", __func__);
5106
5107 if (!IS_CNA_CAPABLE(vha->hw))
5108 return QLA_FUNCTION_FAILED;
5109
5110 mcp->mb[0] = MBC_GET_XGMAC_STATS;
5111 mcp->mb[2] = MSW(stats_dma);
5112 mcp->mb[3] = LSW(stats_dma);
5113 mcp->mb[6] = MSW(MSD(stats_dma));
5114 mcp->mb[7] = LSW(MSD(stats_dma));
5115 mcp->mb[8] = size_in_bytes >> 2;
5116 mcp->out_mb = MBX_8|MBX_7|MBX_6|MBX_3|MBX_2|MBX_0;
5117 mcp->in_mb = MBX_2|MBX_1|MBX_0;
5118 mcp->tov = MBX_TOV_SECONDS;
5119 mcp->flags = 0;
5120 rval = qla2x00_mailbox_command(vha, mcp);
5121
5122 if (rval != QLA_SUCCESS) {
5123 ql_dbg(ql_dbg_mbx, vha, 0x10ef,
5124 "Failed=%x mb[0]=%x mb[1]=%x mb[2]=%x.\n",
5125 rval, mcp->mb[0], mcp->mb[1], mcp->mb[2]);
5126 } else {
5127 ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x10f0,
5128 "Done %s.\n", __func__);
5129
5131 *actual_size = mcp->mb[2] << 2;
5132 }
5133
5134 return rval;
5135 }
5136
5137 int
5138 qla2x00_get_dcbx_params(scsi_qla_host_t *vha, dma_addr_t tlv_dma,
5139 uint16_t size)
5140 {
5141 int rval;
5142 mbx_cmd_t mc;
5143 mbx_cmd_t *mcp = &mc;
5144
5145 ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x10f1,
5146 "Entered %s.\n", __func__);
5147
5148 if (!IS_CNA_CAPABLE(vha->hw))
5149 return QLA_FUNCTION_FAILED;
5150
5151 mcp->mb[0] = MBC_GET_DCBX_PARAMS;
5152 mcp->mb[1] = 0;
5153 mcp->mb[2] = MSW(tlv_dma);
5154 mcp->mb[3] = LSW(tlv_dma);
5155 mcp->mb[6] = MSW(MSD(tlv_dma));
5156 mcp->mb[7] = LSW(MSD(tlv_dma));
5157 mcp->mb[8] = size;
5158 mcp->out_mb = MBX_8|MBX_7|MBX_6|MBX_3|MBX_2|MBX_1|MBX_0;
5159 mcp->in_mb = MBX_2|MBX_1|MBX_0;
5160 mcp->tov = MBX_TOV_SECONDS;
5161 mcp->flags = 0;
5162 rval = qla2x00_mailbox_command(vha, mcp);
5163
5164 if (rval != QLA_SUCCESS) {
5165 ql_dbg(ql_dbg_mbx, vha, 0x10f2,
5166 "Failed=%x mb[0]=%x mb[1]=%x mb[2]=%x.\n",
5167 rval, mcp->mb[0], mcp->mb[1], mcp->mb[2]);
5168 } else {
5169 ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x10f3,
5170 "Done %s.\n", __func__);
5171 }
5172
5173 return rval;
5174 }
5175
5176 int
5177 qla2x00_read_ram_word(scsi_qla_host_t *vha, uint32_t risc_addr, uint32_t *data)
5178 {
5179 int rval;
5180 mbx_cmd_t mc;
5181 mbx_cmd_t *mcp = &mc;
5182
5183 ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x10f4,
5184 "Entered %s.\n", __func__);
5185
5186 if (!IS_FWI2_CAPABLE(vha->hw))
5187 return QLA_FUNCTION_FAILED;
5188
5189 mcp->mb[0] = MBC_READ_RAM_EXTENDED;
5190 mcp->mb[1] = LSW(risc_addr);
5191 mcp->mb[8] = MSW(risc_addr);
5192 mcp->out_mb = MBX_8|MBX_1|MBX_0;
5193 mcp->in_mb = MBX_3|MBX_2|MBX_0;
5194 mcp->tov = 30;
5195 mcp->flags = 0;
5196 rval = qla2x00_mailbox_command(vha, mcp);
5197 if (rval != QLA_SUCCESS) {
5198 ql_dbg(ql_dbg_mbx, vha, 0x10f5,
5199 "Failed=%x mb[0]=%x.\n", rval, mcp->mb[0]);
5200 } else {
5201 ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x10f6,
5202 "Done %s.\n", __func__);
5203 *data = mcp->mb[3] << 16 | mcp->mb[2];
5204 }
5205
5206 return rval;
5207 }
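/*
 * Illustrative sketch (not part of the driver): reading one 32-bit word of
 * RISC RAM; the word comes back split across mb[3]/mb[2] and is
 * reassembled by the routine, so "word" is valid only on QLA_SUCCESS.
 * "risc_addr" below is hypothetical.
 *
 *	uint32_t word = 0;
 *
 *	rval = qla2x00_read_ram_word(vha, risc_addr, &word);
 */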
5208
5209 int
5210 qla2x00_loopback_test(scsi_qla_host_t *vha, struct msg_echo_lb *mreq,
5211 uint16_t *mresp)
5212 {
5213 int rval;
5214 mbx_cmd_t mc;
5215 mbx_cmd_t *mcp = &mc;
5216
5217 ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x10f7,
5218 "Entered %s.\n", __func__);
5219
5220 memset(mcp->mb, 0, sizeof(mcp->mb));
5221 mcp->mb[0] = MBC_DIAGNOSTIC_LOOP_BACK;
5222 mcp->mb[1] = mreq->options | BIT_6; /* BIT_6 specifies 64-bit addressing */
5223
5224 /* transfer count */
5225 mcp->mb[10] = LSW(mreq->transfer_size);
5226 mcp->mb[11] = MSW(mreq->transfer_size);
5227
5228 /* send data address */
5229 mcp->mb[14] = LSW(mreq->send_dma);
5230 mcp->mb[15] = MSW(mreq->send_dma);
5231 mcp->mb[20] = LSW(MSD(mreq->send_dma));
5232 mcp->mb[21] = MSW(MSD(mreq->send_dma));
5233
5234 /* receive data address */
5235 mcp->mb[16] = LSW(mreq->rcv_dma);
5236 mcp->mb[17] = MSW(mreq->rcv_dma);
5237 mcp->mb[6] = LSW(MSD(mreq->rcv_dma));
5238 mcp->mb[7] = MSW(MSD(mreq->rcv_dma));
5239
5240 /* Iteration count */
5241 mcp->mb[18] = LSW(mreq->iteration_count);
5242 mcp->mb[19] = MSW(mreq->iteration_count);
5243
5244 mcp->out_mb = MBX_21|MBX_20|MBX_19|MBX_18|MBX_17|MBX_16|MBX_15|
5245 MBX_14|MBX_13|MBX_12|MBX_11|MBX_10|MBX_7|MBX_6|MBX_1|MBX_0;
5246 if (IS_CNA_CAPABLE(vha->hw))
5247 mcp->out_mb |= MBX_2;
5248 mcp->in_mb = MBX_19|MBX_18|MBX_3|MBX_2|MBX_1|MBX_0;
5249
5250 mcp->buf_size = mreq->transfer_size;
5251 mcp->tov = MBX_TOV_SECONDS;
5252 mcp->flags = MBX_DMA_OUT|MBX_DMA_IN|IOCTL_CMD;
5253
5254 rval = qla2x00_mailbox_command(vha, mcp);
5255
5256 if (rval != QLA_SUCCESS) {
5257 ql_dbg(ql_dbg_mbx, vha, 0x10f8,
5258 "Failed=%x mb[0]=%x mb[1]=%x mb[2]=%x mb[3]=%x mb[18]=%x "
5259 "mb[19]=%x.\n", rval, mcp->mb[0], mcp->mb[1], mcp->mb[2],
5260 mcp->mb[3], mcp->mb[18], mcp->mb[19]);
5261 } else {
5262 ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x10f9,
5263 "Done %s.\n", __func__);
5264 }
5265
5266 /* Copy mailbox information */
5267 memcpy(mresp, mcp->mb, 64);
5268 return rval;
5269 }
5270
5271 int
5272 qla2x00_echo_test(scsi_qla_host_t *vha, struct msg_echo_lb *mreq,
5273 uint16_t *mresp)
5274 {
5275 int rval;
5276 mbx_cmd_t mc;
5277 mbx_cmd_t *mcp = &mc;
5278 struct qla_hw_data *ha = vha->hw;
5279
5280 ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x10fa,
5281 "Entered %s.\n", __func__);
5282
5283 memset(mcp->mb, 0, sizeof(mcp->mb));
5284 mcp->mb[0] = MBC_DIAGNOSTIC_ECHO;
5285 /* BIT_6 specifies 64bit address */
5286 mcp->mb[1] = mreq->options | BIT_15 | BIT_6;
5287 if (IS_CNA_CAPABLE(ha)) {
5288 mcp->mb[2] = vha->fcoe_fcf_idx;
5289 }
5290 mcp->mb[16] = LSW(mreq->rcv_dma);
5291 mcp->mb[17] = MSW(mreq->rcv_dma);
5292 mcp->mb[6] = LSW(MSD(mreq->rcv_dma));
5293 mcp->mb[7] = MSW(MSD(mreq->rcv_dma));
5294
5295 mcp->mb[10] = LSW(mreq->transfer_size);
5296
5297 mcp->mb[14] = LSW(mreq->send_dma);
5298 mcp->mb[15] = MSW(mreq->send_dma);
5299 mcp->mb[20] = LSW(MSD(mreq->send_dma));
5300 mcp->mb[21] = MSW(MSD(mreq->send_dma));
5301
5302 mcp->out_mb = MBX_21|MBX_20|MBX_17|MBX_16|MBX_15|
5303 MBX_14|MBX_10|MBX_7|MBX_6|MBX_1|MBX_0;
5304 if (IS_CNA_CAPABLE(ha))
5305 mcp->out_mb |= MBX_2;
5306
5307 mcp->in_mb = MBX_0;
5308 if (IS_CNA_CAPABLE(ha) || IS_QLA24XX_TYPE(ha) || IS_QLA25XX(ha) ||
5309 IS_QLA2031(ha) || IS_QLA27XX(ha) || IS_QLA28XX(ha))
5310 mcp->in_mb |= MBX_1;
5311 if (IS_CNA_CAPABLE(ha) || IS_QLA2031(ha) || IS_QLA27XX(ha) ||
5312 IS_QLA28XX(ha))
5313 mcp->in_mb |= MBX_3;
5314
5315 mcp->tov = MBX_TOV_SECONDS;
5316 mcp->flags = MBX_DMA_OUT|MBX_DMA_IN|IOCTL_CMD;
5317 mcp->buf_size = mreq->transfer_size;
5318
5319 rval = qla2x00_mailbox_command(vha, mcp);
5320
5321 if (rval != QLA_SUCCESS) {
5322 ql_dbg(ql_dbg_mbx, vha, 0x10fb,
5323 "Failed=%x mb[0]=%x mb[1]=%x.\n",
5324 rval, mcp->mb[0], mcp->mb[1]);
5325 } else {
5326 ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x10fc,
5327 "Done %s.\n", __func__);
5328 }
5329
5330 /* Copy mailbox information */
5331 memcpy(mresp, mcp->mb, 64);
5332 return rval;
5333 }
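/*
 * Illustrative sketch (not part of the driver): a diagnostic caller fills
 * struct msg_echo_lb with the DMA addresses of its send/receive buffers
 * and the transfer size, then inspects the 64 bytes of mailbox state
 * copied back into "response".  The buffer setup below is hypothetical.
 *
 *	struct msg_echo_lb elreq = {};
 *	uint16_t response[MAILBOX_REGISTER_COUNT];
 *
 *	elreq.send_dma = send_dma;
 *	elreq.rcv_dma = rcv_dma;
 *	elreq.transfer_size = xfer_len;
 *	rval = qla2x00_echo_test(vha, &elreq, response);
 */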
5334
5335 int
5336 qla84xx_reset_chip(scsi_qla_host_t *vha, uint16_t enable_diagnostic)
5337 {
5338 int rval;
5339 mbx_cmd_t mc;
5340 mbx_cmd_t *mcp = &mc;
5341
5342 ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x10fd,
5343 "Entered %s enable_diag=%d.\n", __func__, enable_diagnostic);
5344
5345 mcp->mb[0] = MBC_ISP84XX_RESET;
5346 mcp->mb[1] = enable_diagnostic;
5347 mcp->out_mb = MBX_1|MBX_0;
5348 mcp->in_mb = MBX_1|MBX_0;
5349 mcp->tov = MBX_TOV_SECONDS;
5350 mcp->flags = MBX_DMA_OUT|MBX_DMA_IN|IOCTL_CMD;
5351 rval = qla2x00_mailbox_command(vha, mcp);
5352
5353 if (rval != QLA_SUCCESS)
5354 ql_dbg(ql_dbg_mbx, vha, 0x10fe, "Failed=%x.\n", rval);
5355 else
5356 ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x10ff,
5357 "Done %s.\n", __func__);
5358
5359 return rval;
5360 }
5361
5362 int
5363 qla2x00_write_ram_word(scsi_qla_host_t *vha, uint32_t risc_addr, uint32_t data)
5364 {
5365 int rval;
5366 mbx_cmd_t mc;
5367 mbx_cmd_t *mcp = &mc;
5368
5369 ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x1100,
5370 "Entered %s.\n", __func__);
5371
5372 if (!IS_FWI2_CAPABLE(vha->hw))
5373 return QLA_FUNCTION_FAILED;
5374
5375 mcp->mb[0] = MBC_WRITE_RAM_WORD_EXTENDED;
5376 mcp->mb[1] = LSW(risc_addr);
5377 mcp->mb[2] = LSW(data);
5378 mcp->mb[3] = MSW(data);
5379 mcp->mb[8] = MSW(risc_addr);
5380 mcp->out_mb = MBX_8|MBX_3|MBX_2|MBX_1|MBX_0;
5381 mcp->in_mb = MBX_1|MBX_0;
5382 mcp->tov = 30;
5383 mcp->flags = 0;
5384 rval = qla2x00_mailbox_command(vha, mcp);
5385 if (rval != QLA_SUCCESS) {
5386 ql_dbg(ql_dbg_mbx, vha, 0x1101,
5387 "Failed=%x mb[0]=%x mb[1]=%x.\n",
5388 rval, mcp->mb[0], mcp->mb[1]);
5389 } else {
5390 ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x1102,
5391 "Done %s.\n", __func__);
5392 }
5393
5394 return rval;
5395 }
5396
5397 int
5398 qla81xx_write_mpi_register(scsi_qla_host_t *vha, uint16_t *mb)
5399 {
5400 int rval;
5401 uint32_t stat, timer;
5402 uint16_t mb0 = 0;
5403 struct qla_hw_data *ha = vha->hw;
5404 struct device_reg_24xx __iomem *reg = &ha->iobase->isp24;
5405
5406 rval = QLA_SUCCESS;
5407
5408 ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x1103,
5409 "Entered %s.\n", __func__);
5410
5411 clear_bit(MBX_INTERRUPT, &ha->mbx_cmd_flags);
5412
5413 /* Write the MBC data to the registers */
5414 WRT_REG_WORD(&reg->mailbox0, MBC_WRITE_MPI_REGISTER);
5415 WRT_REG_WORD(&reg->mailbox1, mb[0]);
5416 WRT_REG_WORD(&reg->mailbox2, mb[1]);
5417 WRT_REG_WORD(&reg->mailbox3, mb[2]);
5418 WRT_REG_WORD(&reg->mailbox4, mb[3]);
5419
5420 WRT_REG_DWORD(&reg->hccr, HCCRX_SET_HOST_INT);
5421
5422 /* Poll for MBC interrupt */
5423 for (timer = 6000000; timer; timer--) {
5424 /* Check for pending interrupts. */
5425 stat = RD_REG_DWORD(&reg->host_status);
5426 if (stat & HSRX_RISC_INT) {
5427 stat &= 0xff;
5428
5429 if (stat == 0x1 || stat == 0x2 ||
5430 stat == 0x10 || stat == 0x11) {
5431 set_bit(MBX_INTERRUPT,
5432 &ha->mbx_cmd_flags);
5433 mb0 = RD_REG_WORD(&reg->mailbox0);
5434 WRT_REG_DWORD(&reg->hccr,
5435 HCCRX_CLR_RISC_INT);
5436 RD_REG_DWORD(&reg->hccr);
5437 break;
5438 }
5439 }
5440 udelay(5);
5441 }
5442
5443 if (test_and_clear_bit(MBX_INTERRUPT, &ha->mbx_cmd_flags))
5444 rval = mb0 & MBS_MASK;
5445 else
5446 rval = QLA_FUNCTION_FAILED;
5447
5448 if (rval != QLA_SUCCESS) {
5449 ql_dbg(ql_dbg_mbx, vha, 0x1104,
5450 "Failed=%x mb[0]=%x.\n", rval, mb[0]);
5451 } else {
5452 ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x1105,
5453 "Done %s.\n", __func__);
5454 }
5455
5456 return rval;
5457 }
5458
5459 /* Set the specified data rate */
5460 int
5461 qla2x00_set_data_rate(scsi_qla_host_t *vha, uint16_t mode)
5462 {
5463 int rval;
5464 mbx_cmd_t mc;
5465 mbx_cmd_t *mcp = &mc;
5466 struct qla_hw_data *ha = vha->hw;
5467 uint16_t val;
5468
5469 ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x1106,
5470 "Entered %s speed:0x%x mode:0x%x.\n", __func__, ha->set_data_rate,
5471 mode);
5472
5473 if (!IS_FWI2_CAPABLE(ha))
5474 return QLA_FUNCTION_FAILED;
5475
5476 memset(mcp, 0, sizeof(*mcp));
5477 switch (ha->set_data_rate) {
5478 case PORT_SPEED_AUTO:
5479 case PORT_SPEED_4GB:
5480 case PORT_SPEED_8GB:
5481 case PORT_SPEED_16GB:
5482 case PORT_SPEED_32GB:
5483 val = ha->set_data_rate;
5484 break;
5485 default:
5486 ql_log(ql_log_warn, vha, 0x1199,
5487 "Unrecognized speed setting:%d. Setting Autoneg\n",
5488 ha->set_data_rate);
5489 val = ha->set_data_rate = PORT_SPEED_AUTO;
5490 break;
5491 }
5492
5493 mcp->mb[0] = MBC_DATA_RATE;
5494 mcp->mb[1] = mode;
5495 mcp->mb[2] = val;
5496
5497 mcp->out_mb = MBX_2|MBX_1|MBX_0;
5498 mcp->in_mb = MBX_2|MBX_1|MBX_0;
5499 if (IS_QLA83XX(ha) || IS_QLA27XX(ha) || IS_QLA28XX(ha))
5500 mcp->in_mb |= MBX_4|MBX_3;
5501 mcp->tov = MBX_TOV_SECONDS;
5502 mcp->flags = 0;
5503 rval = qla2x00_mailbox_command(vha, mcp);
5504 if (rval != QLA_SUCCESS) {
5505 ql_dbg(ql_dbg_mbx, vha, 0x1107,
5506 "Failed=%x mb[0]=%x.\n", rval, mcp->mb[0]);
5507 } else {
5508 if (mcp->mb[1] != 0x7)
5509 ql_dbg(ql_dbg_mbx, vha, 0x1179,
5510 "Speed set:0x%x\n", mcp->mb[1]);
5511
5512 ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x1108,
5513 "Done %s.\n", __func__);
5514 }
5515
5516 return rval;
5517 }
5518
5519 int
5520 qla2x00_get_data_rate(scsi_qla_host_t *vha)
5521 {
5522 int rval;
5523 mbx_cmd_t mc;
5524 mbx_cmd_t *mcp = &mc;
5525 struct qla_hw_data *ha = vha->hw;
5526
5527 ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x1106,
5528 "Entered %s.\n", __func__);
5529
5530 if (!IS_FWI2_CAPABLE(ha))
5531 return QLA_FUNCTION_FAILED;
5532
5533 mcp->mb[0] = MBC_DATA_RATE;
5534 mcp->mb[1] = QLA_GET_DATA_RATE;
5535 mcp->out_mb = MBX_1|MBX_0;
5536 mcp->in_mb = MBX_2|MBX_1|MBX_0;
5537 if (IS_QLA83XX(ha) || IS_QLA27XX(ha) || IS_QLA28XX(ha))
5538 mcp->in_mb |= MBX_3;
5539 mcp->tov = MBX_TOV_SECONDS;
5540 mcp->flags = 0;
5541 rval = qla2x00_mailbox_command(vha, mcp);
5542 if (rval != QLA_SUCCESS) {
5543 ql_dbg(ql_dbg_mbx, vha, 0x1107,
5544 "Failed=%x mb[0]=%x.\n", rval, mcp->mb[0]);
5545 } else {
5546 if (mcp->mb[1] != 0x7)
5547 ha->link_data_rate = mcp->mb[1];
5548
5549 if (IS_QLA83XX(ha) || IS_QLA27XX(ha) || IS_QLA28XX(ha)) {
5550 if (mcp->mb[4] & BIT_0)
5551 ql_log(ql_log_info, vha, 0x11a2,
5552 "FEC=enabled (data rate).\n");
5553 }
5554
5555 ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x1108,
5556 "Done %s.\n", __func__);
5559 }
5560
5561 return rval;
5562 }
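/*
 * Illustrative sketch (not part of the driver): the DPC/link-up path can
 * simply refresh the cached speed; on success the negotiated rate is left
 * in ha->link_data_rate for sysfs to report later, and on failure the
 * previously cached value is left untouched.
 *
 *	rval = qla2x00_get_data_rate(vha);
 */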
5563
5564 int
5565 qla81xx_get_port_config(scsi_qla_host_t *vha, uint16_t *mb)
5566 {
5567 int rval;
5568 mbx_cmd_t mc;
5569 mbx_cmd_t *mcp = &mc;
5570 struct qla_hw_data *ha = vha->hw;
5571
5572 ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x1109,
5573 "Entered %s.\n", __func__);
5574
5575 if (!IS_QLA81XX(ha) && !IS_QLA83XX(ha) && !IS_QLA8044(ha) &&
5576 !IS_QLA27XX(ha) && !IS_QLA28XX(ha))
5577 return QLA_FUNCTION_FAILED;
5578 mcp->mb[0] = MBC_GET_PORT_CONFIG;
5579 mcp->out_mb = MBX_0;
5580 mcp->in_mb = MBX_4|MBX_3|MBX_2|MBX_1|MBX_0;
5581 mcp->tov = MBX_TOV_SECONDS;
5582 mcp->flags = 0;
5583
5584 rval = qla2x00_mailbox_command(vha, mcp);
5585
5586 if (rval != QLA_SUCCESS) {
5587 ql_dbg(ql_dbg_mbx, vha, 0x110a,
5588 "Failed=%x mb[0]=%x.\n", rval, mcp->mb[0]);
5589 } else {
5590 /* Copy all bits to preserve original value */
5591 memcpy(mb, &mcp->mb[1], sizeof(uint16_t) * 4);
5592
5593 ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x110b,
5594 "Done %s.\n", __func__);
5595 }
5596 return rval;
5597 }
5598
5599 int
5600 qla81xx_set_port_config(scsi_qla_host_t *vha, uint16_t *mb)
5601 {
5602 int rval;
5603 mbx_cmd_t mc;
5604 mbx_cmd_t *mcp = &mc;
5605
5606 ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x110c,
5607 "Entered %s.\n", __func__);
5608
5609 mcp->mb[0] = MBC_SET_PORT_CONFIG;
5610 /* Copy all bits to preserve original setting */
5611 memcpy(&mcp->mb[1], mb, sizeof(uint16_t) * 4);
5612 mcp->out_mb = MBX_4|MBX_3|MBX_2|MBX_1|MBX_0;
5613 mcp->in_mb = MBX_0;
5614 mcp->tov = MBX_TOV_SECONDS;
5615 mcp->flags = 0;
5616 rval = qla2x00_mailbox_command(vha, mcp);
5617
5618 if (rval != QLA_SUCCESS) {
5619 ql_dbg(ql_dbg_mbx, vha, 0x110d,
5620 "Failed=%x mb[0]=%x.\n", rval, mcp->mb[0]);
5621 } else
5622 ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x110e,
5623 "Done %s.\n", __func__);
5624
5625 return rval;
5626 }
5627
5628
5629 int
5630 qla24xx_set_fcp_prio(scsi_qla_host_t *vha, uint16_t loop_id, uint16_t priority,
5631 uint16_t *mb)
5632 {
5633 int rval;
5634 mbx_cmd_t mc;
5635 mbx_cmd_t *mcp = &mc;
5636 struct qla_hw_data *ha = vha->hw;
5637
5638 ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x110f,
5639 "Entered %s.\n", __func__);
5640
5641 if (!IS_QLA24XX_TYPE(ha) && !IS_QLA25XX(ha))
5642 return QLA_FUNCTION_FAILED;
5643
5644 mcp->mb[0] = MBC_PORT_PARAMS;
5645 mcp->mb[1] = loop_id;
5646 if (ha->flags.fcp_prio_enabled)
5647 mcp->mb[2] = BIT_1;
5648 else
5649 mcp->mb[2] = BIT_2;
5650 mcp->mb[4] = priority & 0xf;
5651 mcp->mb[9] = vha->vp_idx;
5652 mcp->out_mb = MBX_9|MBX_4|MBX_3|MBX_2|MBX_1|MBX_0;
5653 mcp->in_mb = MBX_4|MBX_3|MBX_1|MBX_0;
5654 mcp->tov = 30;
5655 mcp->flags = 0;
5656 rval = qla2x00_mailbox_command(vha, mcp);
5657 if (mb != NULL) {
5658 mb[0] = mcp->mb[0];
5659 mb[1] = mcp->mb[1];
5660 mb[3] = mcp->mb[3];
5661 mb[4] = mcp->mb[4];
5662 }
5663
5664 if (rval != QLA_SUCCESS) {
5665 ql_dbg(ql_dbg_mbx, vha, 0x10cd, "Failed=%x.\n", rval);
5666 } else {
5667 ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x10cc,
5668 "Done %s.\n", __func__);
5669 }
5670
5671 return rval;
5672 }
5673
5674 int
5675 qla2x00_get_thermal_temp(scsi_qla_host_t *vha, uint16_t *temp)
5676 {
5677 int rval = QLA_FUNCTION_FAILED;
5678 struct qla_hw_data *ha = vha->hw;
5679 uint8_t byte;
5680
5681 if (!IS_FWI2_CAPABLE(ha) || IS_QLA24XX_TYPE(ha) || IS_QLA81XX(ha)) {
5682 ql_dbg(ql_dbg_mbx, vha, 0x1150,
5683 "Thermal not supported by this card.\n");
5684 return rval;
5685 }
5686
5687 if (IS_QLA25XX(ha)) {
5688 if (ha->pdev->subsystem_vendor == PCI_VENDOR_ID_QLOGIC &&
5689 ha->pdev->subsystem_device == 0x0175) {
5690 rval = qla2x00_read_sfp(vha, 0, &byte,
5691 0x98, 0x1, 1, BIT_13|BIT_0);
5692 *temp = byte;
5693 return rval;
5694 }
5695 if (ha->pdev->subsystem_vendor == PCI_VENDOR_ID_HP &&
5696 ha->pdev->subsystem_device == 0x338e) {
5697 rval = qla2x00_read_sfp(vha, 0, &byte,
5698 0x98, 0x1, 1, BIT_15|BIT_14|BIT_0);
5699 *temp = byte;
5700 return rval;
5701 }
5702 ql_dbg(ql_dbg_mbx, vha, 0x10c9,
5703 "Thermal not supported by this card.\n");
5704 return rval;
5705 }
5706
5707 if (IS_QLA82XX(ha)) {
5708 *temp = qla82xx_read_temperature(vha);
5709 rval = QLA_SUCCESS;
5710 return rval;
5711 } else if (IS_QLA8044(ha)) {
5712 *temp = qla8044_read_temperature(vha);
5713 rval = QLA_SUCCESS;
5714 return rval;
5715 }
5716
5717 rval = qla2x00_read_asic_temperature(vha, temp);
5718 return rval;
5719 }
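/*
 * Illustrative sketch (not part of the driver): a sysfs-style reader only
 * trusts *temp when QLA_SUCCESS is returned, since the underlying source
 * (SFP read, 82xx/8044 register, or the ASIC temperature mailbox) depends
 * on the adapter type.  The show-routine framing below is hypothetical.
 *
 *	uint16_t temp = 0;
 *
 *	if (qla2x00_get_thermal_temp(vha, &temp) == QLA_SUCCESS)
 *		return scnprintf(buf, PAGE_SIZE, "%d\n", temp);
 *	return scnprintf(buf, PAGE_SIZE, "\n");
 */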
5720
5721 int
5722 qla82xx_mbx_intr_enable(scsi_qla_host_t *vha)
5723 {
5724 int rval;
5725 struct qla_hw_data *ha = vha->hw;
5726 mbx_cmd_t mc;
5727 mbx_cmd_t *mcp = &mc;
5728
5729 ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x1017,
5730 "Entered %s.\n", __func__);
5731
5732 if (!IS_FWI2_CAPABLE(ha))
5733 return QLA_FUNCTION_FAILED;
5734
5735 memset(mcp, 0, sizeof(mbx_cmd_t));
5736 mcp->mb[0] = MBC_TOGGLE_INTERRUPT;
5737 mcp->mb[1] = 1;
5738
5739 mcp->out_mb = MBX_1|MBX_0;
5740 mcp->in_mb = MBX_0;
5741 mcp->tov = 30;
5742 mcp->flags = 0;
5743
5744 rval = qla2x00_mailbox_command(vha, mcp);
5745 if (rval != QLA_SUCCESS) {
5746 ql_dbg(ql_dbg_mbx, vha, 0x1016,
5747 "Failed=%x mb[0]=%x.\n", rval, mcp->mb[0]);
5748 } else {
5749 ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x100e,
5750 "Done %s.\n", __func__);
5751 }
5752
5753 return rval;
5754 }
5755
5756 int
5757 qla82xx_mbx_intr_disable(scsi_qla_host_t *vha)
5758 {
5759 int rval;
5760 struct qla_hw_data *ha = vha->hw;
5761 mbx_cmd_t mc;
5762 mbx_cmd_t *mcp = &mc;
5763
5764 ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x100d,
5765 "Entered %s.\n", __func__);
5766
5767 if (!IS_P3P_TYPE(ha))
5768 return QLA_FUNCTION_FAILED;
5769
5770 memset(mcp, 0, sizeof(mbx_cmd_t));
5771 mcp->mb[0] = MBC_TOGGLE_INTERRUPT;
5772 mcp->mb[1] = 0;
5773
5774 mcp->out_mb = MBX_1|MBX_0;
5775 mcp->in_mb = MBX_0;
5776 mcp->tov = 30;
5777 mcp->flags = 0;
5778
5779 rval = qla2x00_mailbox_command(vha, mcp);
5780 if (rval != QLA_SUCCESS) {
5781 ql_dbg(ql_dbg_mbx, vha, 0x100c,
5782 "Failed=%x mb[0]=%x.\n", rval, mcp->mb[0]);
5783 } else {
5784 ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x100b,
5785 "Done %s.\n", __func__);
5786 }
5787
5788 return rval;
5789 }
5790
5791 int
5792 qla82xx_md_get_template_size(scsi_qla_host_t *vha)
5793 {
5794 struct qla_hw_data *ha = vha->hw;
5795 mbx_cmd_t mc;
5796 mbx_cmd_t *mcp = &mc;
5797 int rval = QLA_FUNCTION_FAILED;
5798
5799 ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x111f,
5800 "Entered %s.\n", __func__);
5801
5802 memset(mcp->mb, 0, sizeof(mcp->mb));
5803 mcp->mb[0] = LSW(MBC_DIAGNOSTIC_MINIDUMP_TEMPLATE);
5804 mcp->mb[1] = MSW(MBC_DIAGNOSTIC_MINIDUMP_TEMPLATE);
5805 mcp->mb[2] = LSW(RQST_TMPLT_SIZE);
5806 mcp->mb[3] = MSW(RQST_TMPLT_SIZE);
5807
5808 mcp->out_mb = MBX_3|MBX_2|MBX_1|MBX_0;
5809 mcp->in_mb = MBX_14|MBX_13|MBX_12|MBX_11|MBX_10|MBX_9|MBX_8|
5810 MBX_7|MBX_6|MBX_5|MBX_4|MBX_3|MBX_2|MBX_1|MBX_0;
5811
5812 mcp->flags = MBX_DMA_OUT|MBX_DMA_IN|IOCTL_CMD;
5813 mcp->tov = MBX_TOV_SECONDS;
5814 rval = qla2x00_mailbox_command(vha, mcp);
5815
5816 /* Always copy back return mailbox values. */
5817 if (rval != QLA_SUCCESS) {
5818 ql_dbg(ql_dbg_mbx, vha, 0x1120,
5819 "mailbox command FAILED=0x%x, subcode=%x.\n",
5820 (mcp->mb[1] << 16) | mcp->mb[0],
5821 (mcp->mb[3] << 16) | mcp->mb[2]);
5822 } else {
5823 ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x1121,
5824 "Done %s.\n", __func__);
5825 ha->md_template_size = ((mcp->mb[3] << 16) | mcp->mb[2]);
5826 if (!ha->md_template_size) {
5827 ql_dbg(ql_dbg_mbx, vha, 0x1122,
5828 "Null template size obtained.\n");
5829 rval = QLA_FUNCTION_FAILED;
5830 }
5831 }
5832 return rval;
5833 }
5834
5835 int
5836 qla82xx_md_get_template(scsi_qla_host_t *vha)
5837 {
5838 struct qla_hw_data *ha = vha->hw;
5839 mbx_cmd_t mc;
5840 mbx_cmd_t *mcp = &mc;
5841 int rval = QLA_FUNCTION_FAILED;
5842
5843 ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x1123,
5844 "Entered %s.\n", __func__);
5845
5846 ha->md_tmplt_hdr = dma_alloc_coherent(&ha->pdev->dev,
5847 ha->md_template_size, &ha->md_tmplt_hdr_dma, GFP_KERNEL);
5848 if (!ha->md_tmplt_hdr) {
5849 ql_log(ql_log_warn, vha, 0x1124,
5850 "Unable to allocate memory for Minidump template.\n");
5851 return rval;
5852 }
5853
5854 memset(mcp->mb, 0, sizeof(mcp->mb));
5855 mcp->mb[0] = LSW(MBC_DIAGNOSTIC_MINIDUMP_TEMPLATE);
5856 mcp->mb[1] = MSW(MBC_DIAGNOSTIC_MINIDUMP_TEMPLATE);
5857 mcp->mb[2] = LSW(RQST_TMPLT);
5858 mcp->mb[3] = MSW(RQST_TMPLT);
5859 mcp->mb[4] = LSW(LSD(ha->md_tmplt_hdr_dma));
5860 mcp->mb[5] = MSW(LSD(ha->md_tmplt_hdr_dma));
5861 mcp->mb[6] = LSW(MSD(ha->md_tmplt_hdr_dma));
5862 mcp->mb[7] = MSW(MSD(ha->md_tmplt_hdr_dma));
5863 mcp->mb[8] = LSW(ha->md_template_size);
5864 mcp->mb[9] = MSW(ha->md_template_size);
5865
5866 mcp->flags = MBX_DMA_OUT|MBX_DMA_IN|IOCTL_CMD;
5867 mcp->tov = MBX_TOV_SECONDS;
5868 mcp->out_mb = MBX_11|MBX_10|MBX_9|MBX_8|
5869 MBX_7|MBX_6|MBX_5|MBX_4|MBX_3|MBX_2|MBX_1|MBX_0;
5870 mcp->in_mb = MBX_3|MBX_2|MBX_1|MBX_0;
5871 rval = qla2x00_mailbox_command(vha, mcp);
5872
5873 if (rval != QLA_SUCCESS) {
5874 ql_dbg(ql_dbg_mbx, vha, 0x1125,
5875 "mailbox command FAILED=0x%x, subcode=%x.\n",
5876 ((mcp->mb[1] << 16) | mcp->mb[0]),
5877 ((mcp->mb[3] << 16) | mcp->mb[2]));
5878 } else
5879 ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x1126,
5880 "Done %s.\n", __func__);
5881 return rval;
5882 }
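/*
 * Illustrative sketch (not part of the driver): minidump preparation is a
 * two-step sequence: query the template size first, then fetch the
 * template into the buffer this routine allocates.  Follow-up validation
 * of the template header is assumed, not shown.
 *
 *	if (qla82xx_md_get_template_size(vha) == QLA_SUCCESS)
 *		rval = qla82xx_md_get_template(vha);
 */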
5883
5884 int
5885 qla8044_md_get_template(scsi_qla_host_t *vha)
5886 {
5887 struct qla_hw_data *ha = vha->hw;
5888 mbx_cmd_t mc;
5889 mbx_cmd_t *mcp = &mc;
5890 int rval = QLA_FUNCTION_FAILED;
5891 int offset = 0, size = MINIDUMP_SIZE_36K;
5892
5893 ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0xb11f,
5894 "Entered %s.\n", __func__);
5895
5896 ha->md_tmplt_hdr = dma_alloc_coherent(&ha->pdev->dev,
5897 ha->md_template_size, &ha->md_tmplt_hdr_dma, GFP_KERNEL);
5898 if (!ha->md_tmplt_hdr) {
5899 ql_log(ql_log_warn, vha, 0xb11b,
5900 "Unable to allocate memory for Minidump template.\n");
5901 return rval;
5902 }
5903
5904 memset(mcp->mb, 0, sizeof(mcp->mb));
5905 while (offset < ha->md_template_size) {
5906 mcp->mb[0] = LSW(MBC_DIAGNOSTIC_MINIDUMP_TEMPLATE);
5907 mcp->mb[1] = MSW(MBC_DIAGNOSTIC_MINIDUMP_TEMPLATE);
5908 mcp->mb[2] = LSW(RQST_TMPLT);
5909 mcp->mb[3] = MSW(RQST_TMPLT);
5910 mcp->mb[4] = LSW(LSD(ha->md_tmplt_hdr_dma + offset));
5911 mcp->mb[5] = MSW(LSD(ha->md_tmplt_hdr_dma + offset));
5912 mcp->mb[6] = LSW(MSD(ha->md_tmplt_hdr_dma + offset));
5913 mcp->mb[7] = MSW(MSD(ha->md_tmplt_hdr_dma + offset));
5914 mcp->mb[8] = LSW(size);
5915 mcp->mb[9] = MSW(size);
5916 mcp->mb[10] = LSW(offset);
5917 mcp->mb[11] = MSW(offset);
5918 mcp->flags = MBX_DMA_OUT|MBX_DMA_IN|IOCTL_CMD;
5919 mcp->tov = MBX_TOV_SECONDS;
5920 mcp->out_mb = MBX_11|MBX_10|MBX_9|MBX_8|
5921 MBX_7|MBX_6|MBX_5|MBX_4|MBX_3|MBX_2|MBX_1|MBX_0;
5922 mcp->in_mb = MBX_3|MBX_2|MBX_1|MBX_0;
5923 rval = qla2x00_mailbox_command(vha, mcp);
5924
5925 if (rval != QLA_SUCCESS) {
5926 ql_dbg(ql_dbg_mbx, vha, 0xb11c,
5927 "mailbox command FAILED=0x%x, subcode=%x.\n",
5928 ((mcp->mb[1] << 16) | mcp->mb[0]),
5929 ((mcp->mb[3] << 16) | mcp->mb[2]));
5930 return rval;
5931 } else
5932 ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0xb11d,
5933 "Done %s.\n", __func__);
5934 offset = offset + size;
5935 }
5936 return rval;
5937 }
5938
5939 int
5940 qla81xx_set_led_config(scsi_qla_host_t *vha, uint16_t *led_cfg)
5941 {
5942 int rval;
5943 struct qla_hw_data *ha = vha->hw;
5944 mbx_cmd_t mc;
5945 mbx_cmd_t *mcp = &mc;
5946
5947 if (!IS_QLA81XX(ha) && !IS_QLA8031(ha))
5948 return QLA_FUNCTION_FAILED;
5949
5950 ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x1133,
5951 "Entered %s.\n", __func__);
5952
5953 memset(mcp, 0, sizeof(mbx_cmd_t));
5954 mcp->mb[0] = MBC_SET_LED_CONFIG;
5955 mcp->mb[1] = led_cfg[0];
5956 mcp->mb[2] = led_cfg[1];
5957 if (IS_QLA8031(ha)) {
5958 mcp->mb[3] = led_cfg[2];
5959 mcp->mb[4] = led_cfg[3];
5960 mcp->mb[5] = led_cfg[4];
5961 mcp->mb[6] = led_cfg[5];
5962 }
5963
5964 mcp->out_mb = MBX_2|MBX_1|MBX_0;
5965 if (IS_QLA8031(ha))
5966 mcp->out_mb |= MBX_6|MBX_5|MBX_4|MBX_3;
5967 mcp->in_mb = MBX_0;
5968 mcp->tov = 30;
5969 mcp->flags = 0;
5970
5971 rval = qla2x00_mailbox_command(vha, mcp);
5972 if (rval != QLA_SUCCESS) {
5973 ql_dbg(ql_dbg_mbx, vha, 0x1134,
5974 "Failed=%x mb[0]=%x.\n", rval, mcp->mb[0]);
5975 } else {
5976 ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x1135,
5977 "Done %s.\n", __func__);
5978 }
5979
5980 return rval;
5981 }
5982
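/*
 * qla81xx_get_led_config
 * Read the current LED configuration via MBC_GET_LED_CONFIG.
 * Returns two configuration words on ISP81xx and six on ISP8031.
 */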
5983 int
5984 qla81xx_get_led_config(scsi_qla_host_t *vha, uint16_t *led_cfg)
5985 {
5986 int rval;
5987 struct qla_hw_data *ha = vha->hw;
5988 mbx_cmd_t mc;
5989 mbx_cmd_t *mcp = &mc;
5990
5991 if (!IS_QLA81XX(ha) && !IS_QLA8031(ha))
5992 return QLA_FUNCTION_FAILED;
5993
5994 ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x1136,
5995 "Entered %s.\n", __func__);
5996
5997 memset(mcp, 0, sizeof(mbx_cmd_t));
5998 mcp->mb[0] = MBC_GET_LED_CONFIG;
5999
6000 mcp->out_mb = MBX_0;
6001 mcp->in_mb = MBX_2|MBX_1|MBX_0;
6002 if (IS_QLA8031(ha))
6003 mcp->in_mb |= MBX_6|MBX_5|MBX_4|MBX_3;
6004 mcp->tov = 30;
6005 mcp->flags = 0;
6006
6007 rval = qla2x00_mailbox_command(vha, mcp);
6008 if (rval != QLA_SUCCESS) {
6009 ql_dbg(ql_dbg_mbx, vha, 0x1137,
6010 "Failed=%x mb[0]=%x.\n", rval, mcp->mb[0]);
6011 } else {
6012 led_cfg[0] = mcp->mb[1];
6013 led_cfg[1] = mcp->mb[2];
6014 if (IS_QLA8031(ha)) {
6015 led_cfg[2] = mcp->mb[3];
6016 led_cfg[3] = mcp->mb[4];
6017 led_cfg[4] = mcp->mb[5];
6018 led_cfg[5] = mcp->mb[6];
6019 }
6020 ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x1138,
6021 "Done %s.\n", __func__);
6022 }
6023
6024 return rval;
6025 }
6026
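/*
 * qla82xx_mbx_beacon_ctl
 * Turn the beacon LED on or off on P3P-type adapters by issuing
 * MBC_SET_LED_CONFIG with mb[7] = 0xE (enable) or 0xD (disable).
 */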
6027 int
6028 qla82xx_mbx_beacon_ctl(scsi_qla_host_t *vha, int enable)
6029 {
6030 int rval;
6031 struct qla_hw_data *ha = vha->hw;
6032 mbx_cmd_t mc;
6033 mbx_cmd_t *mcp = &mc;
6034
6035 if (!IS_P3P_TYPE(ha))
6036 return QLA_FUNCTION_FAILED;
6037
6038 ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x1127,
6039 "Entered %s.\n", __func__);
6040
6041 memset(mcp, 0, sizeof(mbx_cmd_t));
6042 mcp->mb[0] = MBC_SET_LED_CONFIG;
6043 if (enable)
6044 mcp->mb[7] = 0xE;
6045 else
6046 mcp->mb[7] = 0xD;
6047
6048 mcp->out_mb = MBX_7|MBX_0;
6049 mcp->in_mb = MBX_0;
6050 mcp->tov = MBX_TOV_SECONDS;
6051 mcp->flags = 0;
6052
6053 rval = qla2x00_mailbox_command(vha, mcp);
6054 if (rval != QLA_SUCCESS) {
6055 ql_dbg(ql_dbg_mbx, vha, 0x1128,
6056 "Failed=%x mb[0]=%x.\n", rval, mcp->mb[0]);
6057 } else {
6058 ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x1129,
6059 "Done %s.\n", __func__);
6060 }
6061
6062 return rval;
6063 }
6064
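/*
 * qla83xx_wr_reg
 * Write a 32-bit data value to a remote register via
 * MBC_WRITE_REMOTE_REG (ISP83xx/ISP27xx/ISP28xx).
 */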
6065 int
6066 qla83xx_wr_reg(scsi_qla_host_t *vha, uint32_t reg, uint32_t data)
6067 {
6068 int rval;
6069 struct qla_hw_data *ha = vha->hw;
6070 mbx_cmd_t mc;
6071 mbx_cmd_t *mcp = &mc;
6072
6073 if (!IS_QLA83XX(ha) && !IS_QLA27XX(ha) && !IS_QLA28XX(ha))
6074 return QLA_FUNCTION_FAILED;
6075
6076 ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x1130,
6077 "Entered %s.\n", __func__);
6078
6079 mcp->mb[0] = MBC_WRITE_REMOTE_REG;
6080 mcp->mb[1] = LSW(reg);
6081 mcp->mb[2] = MSW(reg);
6082 mcp->mb[3] = LSW(data);
6083 mcp->mb[4] = MSW(data);
6084 mcp->out_mb = MBX_4|MBX_3|MBX_2|MBX_1|MBX_0;
6085
6086 mcp->in_mb = MBX_1|MBX_0;
6087 mcp->tov = MBX_TOV_SECONDS;
6088 mcp->flags = 0;
6089 rval = qla2x00_mailbox_command(vha, mcp);
6090
6091 if (rval != QLA_SUCCESS) {
6092 ql_dbg(ql_dbg_mbx, vha, 0x1131,
6093 "Failed=%x mb[0]=%x.\n", rval, mcp->mb[0]);
6094 } else {
6095 ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x1132,
6096 "Done %s.\n", __func__);
6097 }
6098
6099 return rval;
6100 }
6101
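/*
 * qla2x00_port_logout
 * Perform an implicit LOGO for the given fcport's loop ID via
 * MBC_PORT_LOGOUT. Not supported on ISP2100/ISP2200.
 */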
6102 int
6103 qla2x00_port_logout(scsi_qla_host_t *vha, struct fc_port *fcport)
6104 {
6105 int rval;
6106 struct qla_hw_data *ha = vha->hw;
6107 mbx_cmd_t mc;
6108 mbx_cmd_t *mcp = &mc;
6109
6110 if (IS_QLA2100(ha) || IS_QLA2200(ha)) {
6111 ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x113b,
6112 "Implicit LOGO Unsupported.\n");
6113 return QLA_FUNCTION_FAILED;
6114 }
6115
6116
6117 ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x113c,
6118 "Entering %s.\n", __func__);
6119
6120 /* Perform Implicit LOGO. */
6121 mcp->mb[0] = MBC_PORT_LOGOUT;
6122 mcp->mb[1] = fcport->loop_id;
6123 mcp->mb[10] = BIT_15;
6124 mcp->out_mb = MBX_10|MBX_1|MBX_0;
6125 mcp->in_mb = MBX_0;
6126 mcp->tov = MBX_TOV_SECONDS;
6127 mcp->flags = 0;
6128 rval = qla2x00_mailbox_command(vha, mcp);
6129 if (rval != QLA_SUCCESS)
6130 ql_dbg(ql_dbg_mbx, vha, 0x113d,
6131 "Failed=%x mb[0]=%x.\n", rval, mcp->mb[0]);
6132 else
6133 ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x113e,
6134 "Done %s.\n", __func__);
6135
6136 return rval;
6137 }
6138
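/*
 * qla83xx_rd_reg
 * Read a 32-bit remote register via MBC_READ_REMOTE_REG.
 * CAMRAM reads may return 0xbad0bad0 during soft-reset, so the read
 * is retried for up to 2 seconds.
 */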
6139 int
6140 qla83xx_rd_reg(scsi_qla_host_t *vha, uint32_t reg, uint32_t *data)
6141 {
6142 int rval;
6143 mbx_cmd_t mc;
6144 mbx_cmd_t *mcp = &mc;
6145 struct qla_hw_data *ha = vha->hw;
6146 unsigned long retry_max_time = jiffies + (2 * HZ);
6147
6148 if (!IS_QLA83XX(ha) && !IS_QLA27XX(ha) && !IS_QLA28XX(ha))
6149 return QLA_FUNCTION_FAILED;
6150
6151 ql_dbg(ql_dbg_mbx, vha, 0x114b, "Entered %s.\n", __func__);
6152
6153 retry_rd_reg:
6154 mcp->mb[0] = MBC_READ_REMOTE_REG;
6155 mcp->mb[1] = LSW(reg);
6156 mcp->mb[2] = MSW(reg);
6157 mcp->out_mb = MBX_2|MBX_1|MBX_0;
6158 mcp->in_mb = MBX_4|MBX_3|MBX_1|MBX_0;
6159 mcp->tov = MBX_TOV_SECONDS;
6160 mcp->flags = 0;
6161 rval = qla2x00_mailbox_command(vha, mcp);
6162
6163 if (rval != QLA_SUCCESS) {
6164 ql_dbg(ql_dbg_mbx, vha, 0x114c,
6165 "Failed=%x mb[0]=%x mb[1]=%x.\n",
6166 rval, mcp->mb[0], mcp->mb[1]);
6167 } else {
6168 *data = (mcp->mb[3] | (mcp->mb[4] << 16));
6169 if (*data == QLA8XXX_BAD_VALUE) {
6170 /*
6171 * During soft-reset CAMRAM register reads might
6172 * return 0xbad0bad0. So retry for MAX of 2 sec
6173 * while reading camram registers.
6174 */
6175 if (time_after(jiffies, retry_max_time)) {
6176 ql_dbg(ql_dbg_mbx, vha, 0x1141,
6177 "Failure to read CAMRAM register. "
6178 "data=0x%x.\n", *data);
6179 return QLA_FUNCTION_FAILED;
6180 }
6181 msleep(100);
6182 goto retry_rd_reg;
6183 }
6184 ql_dbg(ql_dbg_mbx, vha, 0x1142, "Done %s.\n", __func__);
6185 }
6186
6187 return rval;
6188 }
6189
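/*
 * qla83xx_restart_nic_firmware
 * Restart the NIC-side firmware on ISP83xx via
 * MBC_RESTART_NIC_FIRMWARE; a firmware dump is taken on failure.
 */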
6190 int
6191 qla83xx_restart_nic_firmware(scsi_qla_host_t *vha)
6192 {
6193 int rval;
6194 mbx_cmd_t mc;
6195 mbx_cmd_t *mcp = &mc;
6196 struct qla_hw_data *ha = vha->hw;
6197
6198 if (!IS_QLA83XX(ha))
6199 return QLA_FUNCTION_FAILED;
6200
6201 ql_dbg(ql_dbg_mbx, vha, 0x1143, "Entered %s.\n", __func__);
6202
6203 mcp->mb[0] = MBC_RESTART_NIC_FIRMWARE;
6204 mcp->out_mb = MBX_0;
6205 mcp->in_mb = MBX_1|MBX_0;
6206 mcp->tov = MBX_TOV_SECONDS;
6207 mcp->flags = 0;
6208 rval = qla2x00_mailbox_command(vha, mcp);
6209
6210 if (rval != QLA_SUCCESS) {
6211 ql_dbg(ql_dbg_mbx, vha, 0x1144,
6212 "Failed=%x mb[0]=%x mb[1]=%x.\n",
6213 rval, mcp->mb[0], mcp->mb[1]);
6214 ha->isp_ops->fw_dump(vha, 0);
6215 } else {
6216 ql_dbg(ql_dbg_mbx, vha, 0x1145, "Done %s.\n", __func__);
6217 }
6218
6219 return rval;
6220 }
6221
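/*
 * qla83xx_access_control
 * Issue MBC_SET_ACCESS_CONTROL on ISP8031 to manage flash/driver
 * locks or to query the flash sector size, depending on the subcode
 * bits in 'options'.
 */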
6222 int
6223 qla83xx_access_control(scsi_qla_host_t *vha, uint16_t options,
6224 uint32_t start_addr, uint32_t end_addr, uint16_t *sector_size)
6225 {
6226 int rval;
6227 mbx_cmd_t mc;
6228 mbx_cmd_t *mcp = &mc;
6229 uint8_t subcode = (uint8_t)options;
6230 struct qla_hw_data *ha = vha->hw;
6231
6232 if (!IS_QLA8031(ha))
6233 return QLA_FUNCTION_FAILED;
6234
6235 ql_dbg(ql_dbg_mbx, vha, 0x1146, "Entered %s.\n", __func__);
6236
6237 mcp->mb[0] = MBC_SET_ACCESS_CONTROL;
6238 mcp->mb[1] = options;
6239 mcp->out_mb = MBX_1|MBX_0;
6240 if (subcode & BIT_2) {
6241 mcp->mb[2] = LSW(start_addr);
6242 mcp->mb[3] = MSW(start_addr);
6243 mcp->mb[4] = LSW(end_addr);
6244 mcp->mb[5] = MSW(end_addr);
6245 mcp->out_mb |= MBX_5|MBX_4|MBX_3|MBX_2;
6246 }
6247 mcp->in_mb = MBX_2|MBX_1|MBX_0;
6248 if (!(subcode & (BIT_2 | BIT_5)))
6249 mcp->in_mb |= MBX_4|MBX_3;
6250 mcp->tov = MBX_TOV_SECONDS;
6251 mcp->flags = 0;
6252 rval = qla2x00_mailbox_command(vha, mcp);
6253
6254 if (rval != QLA_SUCCESS) {
6255 ql_dbg(ql_dbg_mbx, vha, 0x1147,
6256 "Failed=%x mb[0]=%x mb[1]=%x mb[2]=%x mb[3]=%x mb[4]=%x.\n",
6257 rval, mcp->mb[0], mcp->mb[1], mcp->mb[2], mcp->mb[3],
6258 mcp->mb[4]);
6259 ha->isp_ops->fw_dump(vha, 0);
6260 } else {
6261 if (subcode & BIT_5)
6262 *sector_size = mcp->mb[1];
6263 else if (subcode & (BIT_6 | BIT_7)) {
6264 ql_dbg(ql_dbg_mbx, vha, 0x1148,
6265 "Driver-lock id=%x%x", mcp->mb[4], mcp->mb[3]);
6266 } else if (subcode & (BIT_3 | BIT_4)) {
6267 ql_dbg(ql_dbg_mbx, vha, 0x1149,
6268 "Flash-lock id=%x%x", mcp->mb[4], mcp->mb[3]);
6269 }
6270 ql_dbg(ql_dbg_mbx, vha, 0x114a, "Done %s.\n", __func__);
6271 }
6272
6273 return rval;
6274 }
6275
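/*
 * qla2x00_dump_mctp_data
 * Dump MCTP data (RISC RAM ID 0x40) of the given size to the supplied
 * DMA buffer using MBC_DUMP_RISC_RAM_EXTENDED.
 */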
6276 int
6277 qla2x00_dump_mctp_data(scsi_qla_host_t *vha, dma_addr_t req_dma, uint32_t addr,
6278 uint32_t size)
6279 {
6280 int rval;
6281 mbx_cmd_t mc;
6282 mbx_cmd_t *mcp = &mc;
6283
6284 if (!IS_MCTP_CAPABLE(vha->hw))
6285 return QLA_FUNCTION_FAILED;
6286
6287 ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x114f,
6288 "Entered %s.\n", __func__);
6289
6290 mcp->mb[0] = MBC_DUMP_RISC_RAM_EXTENDED;
6291 mcp->mb[1] = LSW(addr);
6292 mcp->mb[2] = MSW(req_dma);
6293 mcp->mb[3] = LSW(req_dma);
6294 mcp->mb[4] = MSW(size);
6295 mcp->mb[5] = LSW(size);
6296 mcp->mb[6] = MSW(MSD(req_dma));
6297 mcp->mb[7] = LSW(MSD(req_dma));
6298 mcp->mb[8] = MSW(addr);
6299 /* Set the RAM ID to valid. */
6300 /* For MCTP, the RAM ID is 0x40. */
6301 mcp->mb[10] = BIT_7 | 0x40;
6302
6303 mcp->out_mb = MBX_10|MBX_8|MBX_7|MBX_6|MBX_5|MBX_4|MBX_3|MBX_2|MBX_1|
6304 MBX_0;
6305
6306 mcp->in_mb = MBX_0;
6307 mcp->tov = MBX_TOV_SECONDS;
6308 mcp->flags = 0;
6309 rval = qla2x00_mailbox_command(vha, mcp);
6310
6311 if (rval != QLA_SUCCESS) {
6312 ql_dbg(ql_dbg_mbx, vha, 0x114e,
6313 "Failed=%x mb[0]=%x.\n", rval, mcp->mb[0]);
6314 } else {
6315 ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x114d,
6316 "Done %s.\n", __func__);
6317 }
6318
6319 return rval;
6320 }
6321
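/*
 * qla26xx_dport_diagnostics
 * Retrieve D-port diagnostics results into 'dd_buf' via
 * MBC_DPORT_DIAGNOSTICS; the buffer is DMA-mapped for the duration of
 * the command.
 */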
6322 int
6323 qla26xx_dport_diagnostics(scsi_qla_host_t *vha,
6324 void *dd_buf, uint size, uint options)
6325 {
6326 int rval;
6327 mbx_cmd_t mc;
6328 mbx_cmd_t *mcp = &mc;
6329 dma_addr_t dd_dma;
6330
6331 if (!IS_QLA83XX(vha->hw) && !IS_QLA27XX(vha->hw) &&
6332 !IS_QLA28XX(vha->hw))
6333 return QLA_FUNCTION_FAILED;
6334
6335 ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x119f,
6336 "Entered %s.\n", __func__);
6337
6338 dd_dma = dma_map_single(&vha->hw->pdev->dev,
6339 dd_buf, size, DMA_FROM_DEVICE);
6340 if (dma_mapping_error(&vha->hw->pdev->dev, dd_dma)) {
6341 ql_log(ql_log_warn, vha, 0x1194, "Failed to map dma buffer.\n");
6342 return QLA_MEMORY_ALLOC_FAILED;
6343 }
6344
6345 memset(dd_buf, 0, size);
6346
6347 mcp->mb[0] = MBC_DPORT_DIAGNOSTICS;
6348 mcp->mb[1] = options;
6349 mcp->mb[2] = MSW(LSD(dd_dma));
6350 mcp->mb[3] = LSW(LSD(dd_dma));
6351 mcp->mb[6] = MSW(MSD(dd_dma));
6352 mcp->mb[7] = LSW(MSD(dd_dma));
6353 mcp->mb[8] = size;
6354 mcp->out_mb = MBX_8|MBX_7|MBX_6|MBX_3|MBX_2|MBX_1|MBX_0;
6355 mcp->in_mb = MBX_3|MBX_2|MBX_1|MBX_0;
6356 mcp->buf_size = size;
6357 mcp->flags = MBX_DMA_IN;
6358 mcp->tov = MBX_TOV_SECONDS * 4;
6359 rval = qla2x00_mailbox_command(vha, mcp);
6360
6361 if (rval != QLA_SUCCESS) {
6362 ql_dbg(ql_dbg_mbx, vha, 0x1195, "Failed=%x.\n", rval);
6363 } else {
6364 ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x1196,
6365 "Done %s.\n", __func__);
6366 }
6367
6368 dma_unmap_single(&vha->hw->pdev->dev, dd_dma,
6369 size, DMA_FROM_DEVICE);
6370
6371 return rval;
6372 }
6373
6374 static void qla2x00_async_mb_sp_done(srb_t *sp, int res)
6375 {
6376 sp->u.iocb_cmd.u.mbx.rc = res;
6377
6378 complete(&sp->u.iocb_cmd.u.mbx.comp);
6379 /* don't free sp here. Let the caller do the free */
6380 }
6381
6382 /*
6383 * This routine uses the IOCB interface to send a mailbox command.
6384 * This allows non-critical (non chip-setup) commands to go
6385 * out in parallel.
6386 */
6387 int qla24xx_send_mb_cmd(struct scsi_qla_host *vha, mbx_cmd_t *mcp)
6388 {
6389 int rval = QLA_FUNCTION_FAILED;
6390 srb_t *sp;
6391 struct srb_iocb *c;
6392
6393 if (!vha->hw->flags.fw_started)
6394 goto done;
6395
6396 sp = qla2x00_get_sp(vha, NULL, GFP_KERNEL);
6397 if (!sp)
6398 goto done;
6399
6400 sp->type = SRB_MB_IOCB;
6401 sp->name = mb_to_str(mcp->mb[0]);
6402
6403 c = &sp->u.iocb_cmd;
6404 c->timeout = qla2x00_async_iocb_timeout;
6405 init_completion(&c->u.mbx.comp);
6406
6407 qla2x00_init_timer(sp, qla2x00_get_async_timeout(vha) + 2);
6408
6409 memcpy(sp->u.iocb_cmd.u.mbx.out_mb, mcp->mb, SIZEOF_IOCB_MB_REG);
6410
6411 sp->done = qla2x00_async_mb_sp_done;
6412
6413 rval = qla2x00_start_sp(sp);
6414 if (rval != QLA_SUCCESS) {
6415 ql_dbg(ql_dbg_mbx, vha, 0x1018,
6416 "%s: %s Failed submission. %x.\n",
6417 __func__, sp->name, rval);
6418 goto done_free_sp;
6419 }
6420
6421 ql_dbg(ql_dbg_mbx, vha, 0x113f, "MB:%s hndl %x submitted\n",
6422 sp->name, sp->handle);
6423
6424 wait_for_completion(&c->u.mbx.comp);
6425 memcpy(mcp->mb, sp->u.iocb_cmd.u.mbx.in_mb, SIZEOF_IOCB_MB_REG);
6426
6427 rval = c->u.mbx.rc;
6428 switch (rval) {
6429 case QLA_FUNCTION_TIMEOUT:
6430 ql_dbg(ql_dbg_mbx, vha, 0x1140, "%s: %s Timeout. %x.\n",
6431 __func__, sp->name, rval);
6432 break;
6433 case QLA_SUCCESS:
6434 ql_dbg(ql_dbg_mbx, vha, 0x119d, "%s: %s done.\n",
6435 __func__, sp->name);
6436 break;
6437 default:
6438 ql_dbg(ql_dbg_mbx, vha, 0x119e, "%s: %s Failed. %x.\n",
6439 __func__, sp->name, rval);
6440 break;
6441 }
6442
6443 done_free_sp:
6444 sp->free(sp);
6445 done:
6446 return rval;
6447 }
6448
6449 /*
6450 * qla24xx_gpdb_wait
6451 * NOTE: Do not call this routine from DPC thread
6452 */
6453 int qla24xx_gpdb_wait(struct scsi_qla_host *vha, fc_port_t *fcport, u8 opt)
6454 {
6455 int rval = QLA_FUNCTION_FAILED;
6456 dma_addr_t pd_dma;
6457 struct port_database_24xx *pd;
6458 struct qla_hw_data *ha = vha->hw;
6459 mbx_cmd_t mc;
6460
6461 if (!vha->hw->flags.fw_started)
6462 goto done;
6463
6464 pd = dma_pool_zalloc(ha->s_dma_pool, GFP_KERNEL, &pd_dma);
6465 if (pd == NULL) {
6466 ql_log(ql_log_warn, vha, 0xd047,
6467 "Failed to allocate port database structure.\n");
6468 goto done_free_sp;
6469 }
6470
6471 memset(&mc, 0, sizeof(mc));
6472 mc.mb[0] = MBC_GET_PORT_DATABASE;
6473 mc.mb[1] = cpu_to_le16(fcport->loop_id);
6474 mc.mb[2] = MSW(pd_dma);
6475 mc.mb[3] = LSW(pd_dma);
6476 mc.mb[6] = MSW(MSD(pd_dma));
6477 mc.mb[7] = LSW(MSD(pd_dma));
6478 mc.mb[9] = cpu_to_le16(vha->vp_idx);
6479 mc.mb[10] = cpu_to_le16((uint16_t)opt);
6480
6481 rval = qla24xx_send_mb_cmd(vha, &mc);
6482 if (rval != QLA_SUCCESS) {
6483 ql_dbg(ql_dbg_mbx, vha, 0x1193,
6484 "%s: %8phC fail\n", __func__, fcport->port_name);
6485 goto done_free_sp;
6486 }
6487
6488 rval = __qla24xx_parse_gpdb(vha, fcport, pd);
6489
6490 ql_dbg(ql_dbg_mbx, vha, 0x1197, "%s: %8phC done\n",
6491 __func__, fcport->port_name);
6492
6493 done_free_sp:
6494 if (pd)
6495 dma_pool_free(ha->s_dma_pool, pd, pd_dma);
6496 done:
6497 return rval;
6498 }
6499
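/*
 * __qla24xx_parse_gpdb
 * Parse a 24xx firmware port database entry: verify the login state,
 * then update the fcport's names, port_id, port type and
 * class-of-service fields.
 */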
6500 int __qla24xx_parse_gpdb(struct scsi_qla_host *vha, fc_port_t *fcport,
6501 struct port_database_24xx *pd)
6502 {
6503 int rval = QLA_SUCCESS;
6504 uint64_t zero = 0;
6505 u8 current_login_state, last_login_state;
6506
6507 if (NVME_TARGET(vha->hw, fcport)) {
6508 current_login_state = pd->current_login_state >> 4;
6509 last_login_state = pd->last_login_state >> 4;
6510 } else {
6511 current_login_state = pd->current_login_state & 0xf;
6512 last_login_state = pd->last_login_state & 0xf;
6513 }
6514
6515 /* Check for logged in state. */
6516 if (current_login_state != PDS_PRLI_COMPLETE) {
6517 ql_dbg(ql_dbg_mbx, vha, 0x119a,
6518 "Unable to verify login-state (%x/%x) for loop_id %x.\n",
6519 current_login_state, last_login_state, fcport->loop_id);
6520 rval = QLA_FUNCTION_FAILED;
6521 goto gpd_error_out;
6522 }
6523
6524 if (fcport->loop_id == FC_NO_LOOP_ID ||
6525 (memcmp(fcport->port_name, (uint8_t *)&zero, 8) &&
6526 memcmp(fcport->port_name, pd->port_name, 8))) {
6527 /* We lost the device mid way. */
6528 rval = QLA_NOT_LOGGED_IN;
6529 goto gpd_error_out;
6530 }
6531
6532 /* Names are little-endian. */
6533 memcpy(fcport->node_name, pd->node_name, WWN_SIZE);
6534 memcpy(fcport->port_name, pd->port_name, WWN_SIZE);
6535
6536 /* Get port_id of device. */
6537 fcport->d_id.b.domain = pd->port_id[0];
6538 fcport->d_id.b.area = pd->port_id[1];
6539 fcport->d_id.b.al_pa = pd->port_id[2];
6540 fcport->d_id.b.rsvd_1 = 0;
6541
6542 if (NVME_TARGET(vha->hw, fcport)) {
6543 fcport->port_type = FCT_NVME;
6544 if ((pd->prli_svc_param_word_3[0] & BIT_5) == 0)
6545 fcport->port_type |= FCT_NVME_INITIATOR;
6546 if ((pd->prli_svc_param_word_3[0] & BIT_4) == 0)
6547 fcport->port_type |= FCT_NVME_TARGET;
6548 if ((pd->prli_svc_param_word_3[0] & BIT_3) == 0)
6549 fcport->port_type |= FCT_NVME_DISCOVERY;
6550 } else {
6551 /* If not target, it must be initiator or unknown type. */
6552 if ((pd->prli_svc_param_word_3[0] & BIT_4) == 0)
6553 fcport->port_type = FCT_INITIATOR;
6554 else
6555 fcport->port_type = FCT_TARGET;
6556 }
6557 /* Passback COS information. */
6558 fcport->supported_classes = (pd->flags & PDF_CLASS_2) ?
6559 FC_COS_CLASS2 : FC_COS_CLASS3;
6560
6561 if (pd->prli_svc_param_word_3[0] & BIT_7) {
6562 fcport->flags |= FCF_CONF_COMP_SUPPORTED;
6563 fcport->conf_compl_supported = 1;
6564 }
6565
6566 gpd_error_out:
6567 return rval;
6568 }
6569
6570 /*
6571 * qla24xx_gidlist_wait
6572 * NOTE: don't call this routine from DPC thread.
6573 */
6574 int qla24xx_gidlist_wait(struct scsi_qla_host *vha,
6575 void *id_list, dma_addr_t id_list_dma, uint16_t *entries)
6576 {
6577 int rval = QLA_FUNCTION_FAILED;
6578 mbx_cmd_t mc;
6579
6580 if (!vha->hw->flags.fw_started)
6581 goto done;
6582
6583 memset(&mc, 0, sizeof(mc));
6584 mc.mb[0] = MBC_GET_ID_LIST;
6585 mc.mb[2] = MSW(id_list_dma);
6586 mc.mb[3] = LSW(id_list_dma);
6587 mc.mb[6] = MSW(MSD(id_list_dma));
6588 mc.mb[7] = LSW(MSD(id_list_dma));
6589 mc.mb[8] = 0;
6590 mc.mb[9] = cpu_to_le16(vha->vp_idx);
6591
6592 rval = qla24xx_send_mb_cmd(vha, &mc);
6593 if (rval != QLA_SUCCESS) {
6594 ql_dbg(ql_dbg_mbx, vha, 0x119b,
6595 "%s: fail\n", __func__);
6596 } else {
6597 *entries = mc.mb[1];
6598 ql_dbg(ql_dbg_mbx, vha, 0x119c,
6599 "%s: done\n", __func__);
6600 }
6601 done:
6602 return rval;
6603 }
6604
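/*
 * qla27xx_set_zio_threshold
 * Set the ZIO threshold value via MBC_GET_SET_ZIO_THRESHOLD
 * (mb[1] = 1 selects the "set" operation).
 */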
6605 int qla27xx_set_zio_threshold(scsi_qla_host_t *vha, uint16_t value)
6606 {
6607 int rval;
6608 mbx_cmd_t mc;
6609 mbx_cmd_t *mcp = &mc;
6610
6611 ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x1200,
6612 "Entered %s\n", __func__);
6613
6614 memset(mcp->mb, 0, sizeof(mcp->mb));
6615 mcp->mb[0] = MBC_GET_SET_ZIO_THRESHOLD;
6616 mcp->mb[1] = cpu_to_le16(1);
6617 mcp->mb[2] = cpu_to_le16(value);
6618 mcp->out_mb = MBX_2 | MBX_1 | MBX_0;
6619 mcp->in_mb = MBX_2 | MBX_0;
6620 mcp->tov = MBX_TOV_SECONDS;
6621 mcp->flags = 0;
6622
6623 rval = qla2x00_mailbox_command(vha, mcp);
6624
6625 ql_dbg(ql_dbg_mbx, vha, 0x1201, "%s %x\n",
6626 (rval != QLA_SUCCESS) ? "Failed" : "Done", rval);
6627
6628 return rval;
6629 }
6630
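/*
 * qla27xx_get_zio_threshold
 * Read the current ZIO threshold via MBC_GET_SET_ZIO_THRESHOLD
 * (mb[1] = 0 selects the "get" operation).
 */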
6631 int qla27xx_get_zio_threshold(scsi_qla_host_t *vha, uint16_t *value)
6632 {
6633 int rval;
6634 mbx_cmd_t mc;
6635 mbx_cmd_t *mcp = &mc;
6636
6637 ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x1203,
6638 "Entered %s\n", __func__);
6639
6640 memset(mcp->mb, 0, sizeof(mcp->mb));
6641 mcp->mb[0] = MBC_GET_SET_ZIO_THRESHOLD;
6642 mcp->mb[1] = cpu_to_le16(0);
6643 mcp->out_mb = MBX_1 | MBX_0;
6644 mcp->in_mb = MBX_2 | MBX_0;
6645 mcp->tov = MBX_TOV_SECONDS;
6646 mcp->flags = 0;
6647
6648 rval = qla2x00_mailbox_command(vha, mcp);
6649 if (rval == QLA_SUCCESS)
6650 *value = mc.mb[2];
6651
6652 ql_dbg(ql_dbg_mbx, vha, 0x1205, "%s %x\n",
6653 (rval != QLA_SUCCESS) ? "Failed" : "Done", rval);
6654
6655 return rval;
6656 }
6657
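/*
 * qla2x00_read_sfp_dev
 * Read the SFP transceiver pages (device addresses 0xa0 and 0xa2) in
 * SFP_BLOCK_SIZE chunks and copy up to 'count' bytes into 'buf'.
 */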
6658 int
6659 qla2x00_read_sfp_dev(struct scsi_qla_host *vha, char *buf, int count)
6660 {
6661 struct qla_hw_data *ha = vha->hw;
6662 uint16_t iter, addr, offset;
6663 dma_addr_t phys_addr;
6664 int rval, c;
6665 u8 *sfp_data;
6666
6667 memset(ha->sfp_data, 0, SFP_DEV_SIZE);
6668 addr = 0xa0;
6669 phys_addr = ha->sfp_data_dma;
6670 sfp_data = ha->sfp_data;
6671 offset = c = 0;
6672
6673 for (iter = 0; iter < SFP_DEV_SIZE / SFP_BLOCK_SIZE; iter++) {
6674 if (iter == 4) {
6675 /* Skip to next device address. */
6676 addr = 0xa2;
6677 offset = 0;
6678 }
6679
6680 rval = qla2x00_read_sfp(vha, phys_addr, sfp_data,
6681 addr, offset, SFP_BLOCK_SIZE, BIT_1);
6682 if (rval != QLA_SUCCESS) {
6683 ql_log(ql_log_warn, vha, 0x706d,
6684 "Unable to read SFP data (%x/%x/%x).\n", rval,
6685 addr, offset);
6686
6687 return rval;
6688 }
6689
6690 if (buf && (c < count)) {
6691 u16 sz;
6692
6693 if ((count - c) >= SFP_BLOCK_SIZE)
6694 sz = SFP_BLOCK_SIZE;
6695 else
6696 sz = count - c;
6697
6698 memcpy(buf, sfp_data, sz);
6699 buf += SFP_BLOCK_SIZE;
6700 c += sz;
6701 }
6702 phys_addr += SFP_BLOCK_SIZE;
6703 sfp_data += SFP_BLOCK_SIZE;
6704 offset += SFP_BLOCK_SIZE;
6705 }
6706
6707 return rval;
6708 }
6709
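/*
 * qla24xx_res_count_wait
 * Fetch the firmware resource counts (MBC_GET_RESOURCE_COUNTS) through
 * the IOCB mailbox path and copy the returned mailbox registers to
 * 'out_mb'.
 */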
6710 int qla24xx_res_count_wait(struct scsi_qla_host *vha,
6711 uint16_t *out_mb, int out_mb_sz)
6712 {
6713 int rval = QLA_FUNCTION_FAILED;
6714 mbx_cmd_t mc;
6715
6716 if (!vha->hw->flags.fw_started)
6717 goto done;
6718
6719 memset(&mc, 0, sizeof(mc));
6720 mc.mb[0] = MBC_GET_RESOURCE_COUNTS;
6721
6722 rval = qla24xx_send_mb_cmd(vha, &mc);
6723 if (rval != QLA_SUCCESS) {
6724 ql_dbg(ql_dbg_mbx, vha, 0xffff,
6725 "%s: fail\n", __func__);
6726 } else {
6727 if (out_mb_sz <= SIZEOF_IOCB_MB_REG)
6728 memcpy(out_mb, mc.mb, out_mb_sz);
6729 else
6730 memcpy(out_mb, mc.mb, SIZEOF_IOCB_MB_REG);
6731
6732 ql_dbg(ql_dbg_mbx, vha, 0xffff,
6733 "%s: done\n", __func__);
6734 }
6735 done:
6736 return rval;
6737 }
6738
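/*
 * qla28xx_secure_flash_update
 * Issue MBC_SECURE_FLASH_UPDATE for the given flash region and length,
 * passing the secure flash update block at 'sfub_dma_addr' to the
 * firmware.
 */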
6739 int qla28xx_secure_flash_update(scsi_qla_host_t *vha, uint16_t opts,
6740 uint16_t region, uint32_t len, dma_addr_t sfub_dma_addr,
6741 uint32_t sfub_len)
6742 {
6743 int rval;
6744 mbx_cmd_t mc;
6745 mbx_cmd_t *mcp = &mc;
6746
6747 mcp->mb[0] = MBC_SECURE_FLASH_UPDATE;
6748 mcp->mb[1] = opts;
6749 mcp->mb[2] = region;
6750 mcp->mb[3] = MSW(len);
6751 mcp->mb[4] = LSW(len);
6752 mcp->mb[5] = MSW(sfub_dma_addr);
6753 mcp->mb[6] = LSW(sfub_dma_addr);
6754 mcp->mb[7] = MSW(MSD(sfub_dma_addr));
6755 mcp->mb[8] = LSW(MSD(sfub_dma_addr));
6756 mcp->mb[9] = sfub_len;
6757 mcp->out_mb =
6758 MBX_9|MBX_8|MBX_7|MBX_6|MBX_5|MBX_4|MBX_3|MBX_2|MBX_1|MBX_0;
6759 mcp->in_mb = MBX_2|MBX_1|MBX_0;
6760 mcp->tov = MBX_TOV_SECONDS;
6761 mcp->flags = 0;
6762 rval = qla2x00_mailbox_command(vha, mcp);
6763
6764 if (rval != QLA_SUCCESS) {
6765 ql_dbg(ql_dbg_mbx, vha, 0xffff, "%s(%ld): failed rval 0x%x, %x %x %x",
6766 __func__, vha->host_no, rval, mcp->mb[0], mcp->mb[1],
6767 mcp->mb[2]);
6768 }
6769
6770 return rval;
6771 }
6772
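/*
 * qla2xxx_write_remote_register
 * Write a 32-bit value to a remote register via MBC_WRITE_REMOTE_REG.
 */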
6773 int qla2xxx_write_remote_register(scsi_qla_host_t *vha, uint32_t addr,
6774 uint32_t data)
6775 {
6776 int rval;
6777 mbx_cmd_t mc;
6778 mbx_cmd_t *mcp = &mc;
6779
6780 ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x10e8,
6781 "Entered %s.\n", __func__);
6782
6783 mcp->mb[0] = MBC_WRITE_REMOTE_REG;
6784 mcp->mb[1] = LSW(addr);
6785 mcp->mb[2] = MSW(addr);
6786 mcp->mb[3] = LSW(data);
6787 mcp->mb[4] = MSW(data);
6788 mcp->out_mb = MBX_4|MBX_3|MBX_2|MBX_1|MBX_0;
6789 mcp->in_mb = MBX_1|MBX_0;
6790 mcp->tov = MBX_TOV_SECONDS;
6791 mcp->flags = 0;
6792 rval = qla2x00_mailbox_command(vha, mcp);
6793
6794 if (rval != QLA_SUCCESS) {
6795 ql_dbg(ql_dbg_mbx, vha, 0x10e9,
6796 "Failed=%x mb[0]=%x.\n", rval, mcp->mb[0]);
6797 } else {
6798 ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x10ea,
6799 "Done %s.\n", __func__);
6800 }
6801
6802 return rval;
6803 }
6804
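/*
 * qla2xxx_read_remote_register
 * Read a 32-bit value from a remote register via MBC_READ_REMOTE_REG;
 * the result is assembled from mb[4] (high word) and mb[3] (low word).
 */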
6805 int qla2xxx_read_remote_register(scsi_qla_host_t *vha, uint32_t addr,
6806 uint32_t *data)
6807 {
6808 int rval;
6809 mbx_cmd_t mc;
6810 mbx_cmd_t *mcp = &mc;
6811
6812 ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x10e8,
6813 "Entered %s.\n", __func__);
6814
6815 mcp->mb[0] = MBC_READ_REMOTE_REG;
6816 mcp->mb[1] = LSW(addr);
6817 mcp->mb[2] = MSW(addr);
6818 mcp->out_mb = MBX_2|MBX_1|MBX_0;
6819 mcp->in_mb = MBX_4|MBX_3|MBX_2|MBX_1|MBX_0;
6820 mcp->tov = MBX_TOV_SECONDS;
6821 mcp->flags = 0;
6822 rval = qla2x00_mailbox_command(vha, mcp);
6823
6824 *data = (uint32_t)((((uint32_t)mcp->mb[4]) << 16) | mcp->mb[3]);
6825
6826 if (rval != QLA_SUCCESS) {
6827 ql_dbg(ql_dbg_mbx, vha, 0x10e9,
6828 "Failed=%x mb[0]=%x.\n", rval, mcp->mb[0]);
6829 } else {
6830 ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x10ea,
6831 "Done %s.\n", __func__);
6832 }
6833
6834 return rval;
6835 }
6836
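/*
 * ql26xx_led_config
 * Get or set the FC LED configuration (MBC_SET_GET_FC_LED_CONFIG).
 * BIT_0 of 'options' selects the set operation; BIT_1..BIT_3 select
 * which of the three LED words in 'led' are written.
 */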
6837 int
6838 ql26xx_led_config(scsi_qla_host_t *vha, uint16_t options, uint16_t *led)
6839 {
6840 struct qla_hw_data *ha = vha->hw;
6841 mbx_cmd_t mc;
6842 mbx_cmd_t *mcp = &mc;
6843 int rval;
6844
6845 if (!IS_QLA2031(ha) && !IS_QLA27XX(ha) && !IS_QLA28XX(ha))
6846 return QLA_FUNCTION_FAILED;
6847
6848 ql_dbg(ql_dbg_mbx, vha, 0x7070, "Entered %s (options=%x).\n",
6849 __func__, options);
6850
6851 mcp->mb[0] = MBC_SET_GET_FC_LED_CONFIG;
6852 mcp->mb[1] = options;
6853 mcp->out_mb = MBX_1|MBX_0;
6854 mcp->in_mb = MBX_1|MBX_0;
6855 if (options & BIT_0) {
6856 if (options & BIT_1) {
6857 mcp->mb[2] = led[2];
6858 mcp->out_mb |= MBX_2;
6859 }
6860 if (options & BIT_2) {
6861 mcp->mb[3] = led[0];
6862 mcp->out_mb |= MBX_3;
6863 }
6864 if (options & BIT_3) {
6865 mcp->mb[4] = led[1];
6866 mcp->out_mb |= MBX_4;
6867 }
6868 } else {
6869 mcp->in_mb |= MBX_4|MBX_3|MBX_2;
6870 }
6871 mcp->tov = MBX_TOV_SECONDS;
6872 mcp->flags = 0;
6873 rval = qla2x00_mailbox_command(vha, mcp);
6874 if (rval) {
6875 ql_dbg(ql_dbg_mbx, vha, 0x7071, "Failed %s %x (mb=%x,%x)\n",
6876 __func__, rval, mcp->mb[0], mcp->mb[1]);
6877 return rval;
6878 }
6879
6880 if (options & BIT_0) {
6881 ha->beacon_blink_led = 0;
6882 ql_dbg(ql_dbg_mbx, vha, 0x7072, "Done %s\n", __func__);
6883 } else {
6884 led[2] = mcp->mb[2];
6885 led[0] = mcp->mb[3];
6886 led[1] = mcp->mb[4];
6887 ql_dbg(ql_dbg_mbx, vha, 0x7073, "Done %s (led=%x,%x,%x)\n",
6888 __func__, led[0], led[1], led[2]);
6889 }
6890
6891 return rval;
6892 }