1 /*
2 * QLogic Fibre Channel HBA Driver
3 * Copyright (c) 2003-2014 QLogic Corporation
4 *
5 * See LICENSE.qla2xxx for copyright and licensing details.
6 */
7 #include "qla_def.h"
8 #include "qla_target.h"
9
10 #include <linux/delay.h>
11 #include <linux/gfp.h>
12
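/* Table mapping a few mailbox command opcodes to short names used in log messages (see mb_to_str()). */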
13 static struct mb_cmd_name {
14 uint16_t cmd;
15 const char *str;
16 } mb_str[] = {
17 {MBC_GET_PORT_DATABASE, "GPDB"},
18 {MBC_GET_ID_LIST, "GIDList"},
19 {MBC_GET_LINK_PRIV_STATS, "Stats"},
20 {MBC_GET_RESOURCE_COUNTS, "ResCnt"},
21 };
22
23 static const char *mb_to_str(uint16_t cmd)
24 {
25 int i;
26 struct mb_cmd_name *e;
27
28 for (i = 0; i < ARRAY_SIZE(mb_str); i++) {
29 e = mb_str + i;
30 if (cmd == e->cmd)
31 return e->str;
32 }
33 return "unknown";
34 }
35
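/*
 * Mailbox commands in this table are still permitted while an ISP abort is
 * pending; qla2x00_mailbox_command() consults it via is_rom_cmd() before
 * failing other commands with QLA_FUNCTION_TIMEOUT.
 */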
36 static struct rom_cmd {
37 uint16_t cmd;
38 } rom_cmds[] = {
39 { MBC_LOAD_RAM },
40 { MBC_EXECUTE_FIRMWARE },
41 { MBC_READ_RAM_WORD },
42 { MBC_MAILBOX_REGISTER_TEST },
43 { MBC_VERIFY_CHECKSUM },
44 { MBC_GET_FIRMWARE_VERSION },
45 { MBC_LOAD_RISC_RAM },
46 { MBC_DUMP_RISC_RAM },
47 { MBC_LOAD_RISC_RAM_EXTENDED },
48 { MBC_DUMP_RISC_RAM_EXTENDED },
49 { MBC_WRITE_RAM_WORD_EXTENDED },
50 { MBC_READ_RAM_EXTENDED },
51 { MBC_GET_RESOURCE_COUNTS },
52 { MBC_SET_FIRMWARE_OPTION },
53 { MBC_MID_INITIALIZE_FIRMWARE },
54 { MBC_GET_FIRMWARE_STATE },
55 { MBC_GET_MEM_OFFLOAD_CNTRL_STAT },
56 { MBC_GET_RETRY_COUNT },
57 { MBC_TRACE_CONTROL },
58 { MBC_INITIALIZE_MULTIQ },
59 { MBC_IOCB_COMMAND_A64 },
60 { MBC_GET_ADAPTER_LOOP_ID },
61 { MBC_READ_SFP },
62 };
63
64 static int is_rom_cmd(uint16_t cmd)
65 {
66 int i;
67 struct rom_cmd *wc;
68
69 for (i = 0; i < ARRAY_SIZE(rom_cmds); i++) {
70 wc = rom_cmds + i;
71 if (wc->cmd == cmd)
72 return 1;
73 }
74
75 return 0;
76 }
77
78 /*
79 * qla2x00_mailbox_command
80  *	Issue mailbox command and wait for completion.
81 *
82 * Input:
83 * ha = adapter block pointer.
84 * mcp = driver internal mbx struct pointer.
85 *
86 * Output:
87 * mb[MAX_MAILBOX_REGISTER_COUNT] = returned mailbox data.
88 *
89 * Returns:
90  *	0 : QLA_SUCCESS = cmd performed successfully
91 * 1 : QLA_FUNCTION_FAILED (error encountered)
92 * 6 : QLA_FUNCTION_TIMEOUT (timeout condition encountered)
93 *
94 * Context:
95 * Kernel context.
96 */
97 static int
98 qla2x00_mailbox_command(scsi_qla_host_t *vha, mbx_cmd_t *mcp)
99 {
100 int rval, i;
101 unsigned long flags = 0;
102 device_reg_t *reg;
103 uint8_t abort_active;
104 uint8_t io_lock_on;
105 uint16_t command = 0;
106 uint16_t *iptr;
107 uint16_t __iomem *optr;
108 uint32_t cnt;
109 uint32_t mboxes;
110 unsigned long wait_time;
111 struct qla_hw_data *ha = vha->hw;
112 scsi_qla_host_t *base_vha = pci_get_drvdata(ha->pdev);
113
114
115 ql_dbg(ql_dbg_mbx, vha, 0x1000, "Entered %s.\n", __func__);
116
117 if (ha->pdev->error_state > pci_channel_io_frozen) {
118 ql_log(ql_log_warn, vha, 0x1001,
119 "error_state is greater than pci_channel_io_frozen, "
120 "exiting.\n");
121 return QLA_FUNCTION_TIMEOUT;
122 }
123
124 if (vha->device_flags & DFLG_DEV_FAILED) {
125 ql_log(ql_log_warn, vha, 0x1002,
126 "Device in failed state, exiting.\n");
127 return QLA_FUNCTION_TIMEOUT;
128 }
129
130 /* if PCI error, then avoid mbx processing.*/
131 if (test_bit(PFLG_DISCONNECTED, &base_vha->dpc_flags) &&
132 test_bit(UNLOADING, &base_vha->dpc_flags)) {
133 ql_log(ql_log_warn, vha, 0xd04e,
134 "PCI error, exiting.\n");
135 return QLA_FUNCTION_TIMEOUT;
136 }
137
138 reg = ha->iobase;
139 io_lock_on = base_vha->flags.init_done;
140
141 rval = QLA_SUCCESS;
142 abort_active = test_bit(ABORT_ISP_ACTIVE, &base_vha->dpc_flags);
143
144
145 if (ha->flags.pci_channel_io_perm_failure) {
146 ql_log(ql_log_warn, vha, 0x1003,
147 "Perm failure on EEH timeout MBX, exiting.\n");
148 return QLA_FUNCTION_TIMEOUT;
149 }
150
151 if (IS_P3P_TYPE(ha) && ha->flags.isp82xx_fw_hung) {
152 /* Setting Link-Down error */
153 mcp->mb[0] = MBS_LINK_DOWN_ERROR;
154 ql_log(ql_log_warn, vha, 0x1004,
155 "FW hung = %d.\n", ha->flags.isp82xx_fw_hung);
156 return QLA_FUNCTION_TIMEOUT;
157 }
158
159 /* check if ISP abort is active and return cmd with timeout */
160 if ((test_bit(ABORT_ISP_ACTIVE, &base_vha->dpc_flags) ||
161 test_bit(ISP_ABORT_RETRY, &base_vha->dpc_flags) ||
162 test_bit(ISP_ABORT_NEEDED, &base_vha->dpc_flags)) &&
163 !is_rom_cmd(mcp->mb[0])) {
164 ql_log(ql_log_info, vha, 0x1005,
165 "Cmd 0x%x aborted with timeout since ISP Abort is pending\n",
166 mcp->mb[0]);
167 return QLA_FUNCTION_TIMEOUT;
168 }
169
170 /*
171 * Wait for active mailbox commands to finish by waiting at most tov
172 * seconds. This is to serialize actual issuing of mailbox cmds during
173 * non ISP abort time.
174 */
175 if (!wait_for_completion_timeout(&ha->mbx_cmd_comp, mcp->tov * HZ)) {
176 /* Timeout occurred. Return error. */
177 ql_log(ql_log_warn, vha, 0xd035,
178 "Cmd access timeout, cmd=0x%x, Exiting.\n",
179 mcp->mb[0]);
180 return QLA_FUNCTION_TIMEOUT;
181 }
182
183 ha->flags.mbox_busy = 1;
184 /* Save mailbox command for debug */
185 ha->mcp = mcp;
186
187 ql_dbg(ql_dbg_mbx, vha, 0x1006,
188 "Prepare to issue mbox cmd=0x%x.\n", mcp->mb[0]);
189
190 spin_lock_irqsave(&ha->hardware_lock, flags);
191
192 /* Load mailbox registers. */
193 if (IS_P3P_TYPE(ha))
194 optr = (uint16_t __iomem *)&reg->isp82.mailbox_in[0];
195 else if (IS_FWI2_CAPABLE(ha) && !(IS_P3P_TYPE(ha)))
196 optr = (uint16_t __iomem *)&reg->isp24.mailbox0;
197 else
198 optr = (uint16_t __iomem *)MAILBOX_REG(ha, &reg->isp, 0);
199
200 iptr = mcp->mb;
201 command = mcp->mb[0];
202 mboxes = mcp->out_mb;
203
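	/*
	 * Each set bit in out_mb selects a mailbox register to load; the loop
	 * below tests BIT_0 and shifts the mask once per register, so e.g.
	 * out_mb = MBX_2|MBX_1|MBX_0 writes only mb[0]-mb[2].
	 */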
204 ql_dbg(ql_dbg_mbx, vha, 0x1111,
205 "Mailbox registers (OUT):\n");
206 for (cnt = 0; cnt < ha->mbx_count; cnt++) {
207 if (IS_QLA2200(ha) && cnt == 8)
208 optr =
209 (uint16_t __iomem *)MAILBOX_REG(ha, &reg->isp, 8);
210 if (mboxes & BIT_0) {
211 ql_dbg(ql_dbg_mbx, vha, 0x1112,
212 "mbox[%d]<-0x%04x\n", cnt, *iptr);
213 WRT_REG_WORD(optr, *iptr);
214 }
215
216 mboxes >>= 1;
217 optr++;
218 iptr++;
219 }
220
221 ql_dbg(ql_dbg_mbx + ql_dbg_buffer, vha, 0x1117,
222 "I/O Address = %p.\n", optr);
223
224 /* Issue set host interrupt command to send cmd out. */
225 ha->flags.mbox_int = 0;
226 clear_bit(MBX_INTERRUPT, &ha->mbx_cmd_flags);
227
228 /* Unlock mbx registers and wait for interrupt */
229 ql_dbg(ql_dbg_mbx, vha, 0x100f,
230 "Going to unlock irq & waiting for interrupts. "
231 "jiffies=%lx.\n", jiffies);
232
233 /* Wait for mbx cmd completion until timeout */
234
235 if ((!abort_active && io_lock_on) || IS_NOPOLLING_TYPE(ha)) {
236 set_bit(MBX_INTR_WAIT, &ha->mbx_cmd_flags);
237
238 if (IS_P3P_TYPE(ha)) {
239 if (RD_REG_DWORD(&reg->isp82.hint) &
240 HINT_MBX_INT_PENDING) {
241 spin_unlock_irqrestore(&ha->hardware_lock,
242 flags);
243 ha->flags.mbox_busy = 0;
244 ql_dbg(ql_dbg_mbx, vha, 0x1010,
245 "Pending mailbox timeout, exiting.\n");
246 rval = QLA_FUNCTION_TIMEOUT;
247 goto premature_exit;
248 }
249 WRT_REG_DWORD(&reg->isp82.hint, HINT_MBX_INT_PENDING);
250 } else if (IS_FWI2_CAPABLE(ha))
251 WRT_REG_DWORD(&reg->isp24.hccr, HCCRX_SET_HOST_INT);
252 else
253 WRT_REG_WORD(&reg->isp.hccr, HCCR_SET_HOST_INT);
254 spin_unlock_irqrestore(&ha->hardware_lock, flags);
255
256 wait_time = jiffies;
257 if (!wait_for_completion_timeout(&ha->mbx_intr_comp,
258 mcp->tov * HZ)) {
259 ql_dbg(ql_dbg_mbx, vha, 0x117a,
260 "cmd=%x Timeout.\n", command);
261 spin_lock_irqsave(&ha->hardware_lock, flags);
262 clear_bit(MBX_INTR_WAIT, &ha->mbx_cmd_flags);
263 spin_unlock_irqrestore(&ha->hardware_lock, flags);
264 }
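		/* Warn when a command took noticeably long (over 5s), even if it completed. */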
265 if (time_after(jiffies, wait_time + 5 * HZ))
266 ql_log(ql_log_warn, vha, 0x1015, "cmd=0x%x, waited %d msecs\n",
267 command, jiffies_to_msecs(jiffies - wait_time));
268 } else {
269 ql_dbg(ql_dbg_mbx, vha, 0x1011,
270 "Cmd=%x Polling Mode.\n", command);
271
272 if (IS_P3P_TYPE(ha)) {
273 if (RD_REG_DWORD(&reg->isp82.hint) &
274 HINT_MBX_INT_PENDING) {
275 spin_unlock_irqrestore(&ha->hardware_lock,
276 flags);
277 ha->flags.mbox_busy = 0;
278 ql_dbg(ql_dbg_mbx, vha, 0x1012,
279 "Pending mailbox timeout, exiting.\n");
280 rval = QLA_FUNCTION_TIMEOUT;
281 goto premature_exit;
282 }
283 WRT_REG_DWORD(&reg->isp82.hint, HINT_MBX_INT_PENDING);
284 } else if (IS_FWI2_CAPABLE(ha))
285 WRT_REG_DWORD(&reg->isp24.hccr, HCCRX_SET_HOST_INT);
286 else
287 WRT_REG_WORD(&reg->isp.hccr, HCCR_SET_HOST_INT);
288 spin_unlock_irqrestore(&ha->hardware_lock, flags);
289
290 wait_time = jiffies + mcp->tov * HZ; /* wait at most tov secs */
291 while (!ha->flags.mbox_int) {
292 if (time_after(jiffies, wait_time))
293 break;
294
295 /* Check for pending interrupts. */
296 qla2x00_poll(ha->rsp_q_map[0]);
297
298 if (!ha->flags.mbox_int &&
299 !(IS_QLA2200(ha) &&
300 command == MBC_LOAD_RISC_RAM_EXTENDED))
301 msleep(10);
302 } /* while */
303 ql_dbg(ql_dbg_mbx, vha, 0x1013,
304 "Waited %d sec.\n",
305 (uint)((jiffies - (wait_time - (mcp->tov * HZ)))/HZ));
306 }
307
308 /* Check whether we timed out */
309 if (ha->flags.mbox_int) {
310 uint16_t *iptr2;
311
312 ql_dbg(ql_dbg_mbx, vha, 0x1014,
313 "Cmd=%x completed.\n", command);
314
315 /* Got interrupt. Clear the flag. */
316 ha->flags.mbox_int = 0;
317 clear_bit(MBX_INTERRUPT, &ha->mbx_cmd_flags);
318
319 if (IS_P3P_TYPE(ha) && ha->flags.isp82xx_fw_hung) {
320 ha->flags.mbox_busy = 0;
321 /* Setting Link-Down error */
322 mcp->mb[0] = MBS_LINK_DOWN_ERROR;
323 ha->mcp = NULL;
324 rval = QLA_FUNCTION_FAILED;
325 ql_log(ql_log_warn, vha, 0xd048,
326 "FW hung = %d.\n", ha->flags.isp82xx_fw_hung);
327 goto premature_exit;
328 }
329
330 if (ha->mailbox_out[0] != MBS_COMMAND_COMPLETE)
331 rval = QLA_FUNCTION_FAILED;
332
333 /* Load return mailbox registers. */
334 iptr2 = mcp->mb;
335 iptr = (uint16_t *)&ha->mailbox_out[0];
336 mboxes = mcp->in_mb;
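		/*
		 * As with out_mb above, each set bit in in_mb selects a
		 * returned mailbox register to copy back into mcp->mb[].
		 */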
337
338 ql_dbg(ql_dbg_mbx, vha, 0x1113,
339 "Mailbox registers (IN):\n");
340 for (cnt = 0; cnt < ha->mbx_count; cnt++) {
341 if (mboxes & BIT_0) {
342 *iptr2 = *iptr;
343 ql_dbg(ql_dbg_mbx, vha, 0x1114,
344 "mbox[%d]->0x%04x\n", cnt, *iptr2);
345 }
346
347 mboxes >>= 1;
348 iptr2++;
349 iptr++;
350 }
351 } else {
352
353 uint16_t mb[8];
354 uint32_t ictrl, host_status, hccr;
355 uint16_t w;
356
357 if (IS_FWI2_CAPABLE(ha)) {
358 mb[0] = RD_REG_WORD(&reg->isp24.mailbox0);
359 mb[1] = RD_REG_WORD(&reg->isp24.mailbox1);
360 mb[2] = RD_REG_WORD(&reg->isp24.mailbox2);
361 mb[3] = RD_REG_WORD(&reg->isp24.mailbox3);
362 mb[7] = RD_REG_WORD(&reg->isp24.mailbox7);
363 ictrl = RD_REG_DWORD(&reg->isp24.ictrl);
364 host_status = RD_REG_DWORD(&reg->isp24.host_status);
365 hccr = RD_REG_DWORD(&reg->isp24.hccr);
366
367 ql_log(ql_log_warn, vha, 0xd04c,
368 "MBX Command timeout for cmd %x, iocontrol=%x jiffies=%lx "
369 "mb[0-3]=[0x%x 0x%x 0x%x 0x%x] mb7 0x%x host_status 0x%x hccr 0x%x\n",
370 command, ictrl, jiffies, mb[0], mb[1], mb[2], mb[3],
371 mb[7], host_status, hccr);
372
373 } else {
374 mb[0] = RD_MAILBOX_REG(ha, &reg->isp, 0);
375 ictrl = RD_REG_WORD(&reg->isp.ictrl);
376 ql_dbg(ql_dbg_mbx + ql_dbg_buffer, vha, 0x1119,
377 "MBX Command timeout for cmd %x, iocontrol=%x jiffies=%lx "
378 "mb[0]=0x%x\n", command, ictrl, jiffies, mb[0]);
379 }
380 ql_dump_regs(ql_dbg_mbx + ql_dbg_buffer, vha, 0x1019);
381
382 		/* Capture FW dump only if the PCI device is active */
383 if (!pci_channel_offline(vha->hw->pdev)) {
384 pci_read_config_word(ha->pdev, PCI_VENDOR_ID, &w);
385 if (w == 0xffff || ictrl == 0xffffffff) {
386 				/* This is a special case: if the driver is
387 				 * being unloaded and the PCI device has gone
388 				 * into a bad state due to a PCI error, only
389 				 * the PCI ERR flag will be set; do a
390 				 * premature exit in that case.
391 				 */
392 ha->flags.mbox_busy = 0;
393 rval = QLA_FUNCTION_TIMEOUT;
394 goto premature_exit;
395 }
396
397 			/* Attempt to capture a firmware dump for further
398 			 * analysis of the current firmware state. There is
399 			 * no need to do this if we are intentionally
400 			 * generating a dump.
401 			 */
402 if (mcp->mb[0] != MBC_GEN_SYSTEM_ERROR)
403 ha->isp_ops->fw_dump(vha, 0);
404 rval = QLA_FUNCTION_TIMEOUT;
405 }
406 }
407
408 ha->flags.mbox_busy = 0;
409
410 /* Clean up */
411 ha->mcp = NULL;
412
413 if ((abort_active || !io_lock_on) && !IS_NOPOLLING_TYPE(ha)) {
414 ql_dbg(ql_dbg_mbx, vha, 0x101a,
415 "Checking for additional resp interrupt.\n");
416
417 /* polling mode for non isp_abort commands. */
418 qla2x00_poll(ha->rsp_q_map[0]);
419 }
420
421 if (rval == QLA_FUNCTION_TIMEOUT &&
422 mcp->mb[0] != MBC_GEN_SYSTEM_ERROR) {
423 if (!io_lock_on || (mcp->flags & IOCTL_CMD) ||
424 ha->flags.eeh_busy) {
425 /* not in dpc. schedule it for dpc to take over. */
426 ql_dbg(ql_dbg_mbx, vha, 0x101b,
427 "Timeout, schedule isp_abort_needed.\n");
428
429 if (!test_bit(ISP_ABORT_NEEDED, &vha->dpc_flags) &&
430 !test_bit(ABORT_ISP_ACTIVE, &vha->dpc_flags) &&
431 !test_bit(ISP_ABORT_RETRY, &vha->dpc_flags)) {
432 if (IS_QLA82XX(ha)) {
433 ql_dbg(ql_dbg_mbx, vha, 0x112a,
434 "disabling pause transmit on port "
435 "0 & 1.\n");
436 qla82xx_wr_32(ha,
437 QLA82XX_CRB_NIU + 0x98,
438 CRB_NIU_XG_PAUSE_CTL_P0|
439 CRB_NIU_XG_PAUSE_CTL_P1);
440 }
441 ql_log(ql_log_info, base_vha, 0x101c,
442 "Mailbox cmd timeout occurred, cmd=0x%x, "
443 "mb[0]=0x%x, eeh_busy=0x%x. Scheduling ISP "
444 "abort.\n", command, mcp->mb[0],
445 ha->flags.eeh_busy);
446 set_bit(ISP_ABORT_NEEDED, &vha->dpc_flags);
447 qla2xxx_wake_dpc(vha);
448 }
449 } else if (!abort_active) {
450 /* call abort directly since we are in the DPC thread */
451 ql_dbg(ql_dbg_mbx, vha, 0x101d,
452 "Timeout, calling abort_isp.\n");
453
454 if (!test_bit(ISP_ABORT_NEEDED, &vha->dpc_flags) &&
455 !test_bit(ABORT_ISP_ACTIVE, &vha->dpc_flags) &&
456 !test_bit(ISP_ABORT_RETRY, &vha->dpc_flags)) {
457 if (IS_QLA82XX(ha)) {
458 ql_dbg(ql_dbg_mbx, vha, 0x112b,
459 "disabling pause transmit on port "
460 "0 & 1.\n");
461 qla82xx_wr_32(ha,
462 QLA82XX_CRB_NIU + 0x98,
463 CRB_NIU_XG_PAUSE_CTL_P0|
464 CRB_NIU_XG_PAUSE_CTL_P1);
465 }
466 ql_log(ql_log_info, base_vha, 0x101e,
467 "Mailbox cmd timeout occurred, cmd=0x%x, "
468 "mb[0]=0x%x. Scheduling ISP abort ",
469 command, mcp->mb[0]);
470 set_bit(ABORT_ISP_ACTIVE, &vha->dpc_flags);
471 clear_bit(ISP_ABORT_NEEDED, &vha->dpc_flags);
472 /* Allow next mbx cmd to come in. */
473 complete(&ha->mbx_cmd_comp);
474 if (ha->isp_ops->abort_isp(vha)) {
475 /* Failed. retry later. */
476 set_bit(ISP_ABORT_NEEDED,
477 &vha->dpc_flags);
478 }
479 clear_bit(ABORT_ISP_ACTIVE, &vha->dpc_flags);
480 ql_dbg(ql_dbg_mbx, vha, 0x101f,
481 "Finished abort_isp.\n");
482 goto mbx_done;
483 }
484 }
485 }
486
487 premature_exit:
488 /* Allow next mbx cmd to come in. */
489 complete(&ha->mbx_cmd_comp);
490
491 mbx_done:
492 if (rval) {
493 if (ql2xextended_error_logging & (ql_dbg_disc|ql_dbg_mbx)) {
494 pr_warn("%s [%s]-%04x:%ld: **** Failed", QL_MSGHDR,
495 dev_name(&ha->pdev->dev), 0x1020+0x800,
496 vha->host_no);
497 mboxes = mcp->in_mb;
498 cnt = 4;
499 for (i = 0; i < ha->mbx_count && cnt; i++, mboxes >>= 1)
500 if (mboxes & BIT_0) {
501 printk(" mb[%u]=%x", i, mcp->mb[i]);
502 cnt--;
503 }
504 pr_warn(" cmd=%x ****\n", command);
505 }
506 if (IS_FWI2_CAPABLE(ha) && !(IS_P3P_TYPE(ha))) {
507 ql_dbg(ql_dbg_mbx, vha, 0x1198,
508 "host_status=%#x intr_ctrl=%#x intr_status=%#x\n",
509 RD_REG_DWORD(&reg->isp24.host_status),
510 RD_REG_DWORD(&reg->isp24.ictrl),
511 RD_REG_DWORD(&reg->isp24.istatus));
512 } else {
513 ql_dbg(ql_dbg_mbx, vha, 0x1206,
514 "ctrl_status=%#x ictrl=%#x istatus=%#x\n",
515 RD_REG_WORD(&reg->isp.ctrl_status),
516 RD_REG_WORD(&reg->isp.ictrl),
517 RD_REG_WORD(&reg->isp.istatus));
518 }
519 } else {
520 ql_dbg(ql_dbg_mbx, base_vha, 0x1021, "Done %s.\n", __func__);
521 }
522
523 return rval;
524 }
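
/*
 * Typical caller pattern (illustrative sketch only; the wrappers below,
 * e.g. qla2x00_get_fw_options(), are the authoritative examples):
 * fill mcp->mb[] with the opcode and parameters, set out_mb/in_mb to the
 * bitmaps of registers to send and to read back, pick a timeout, then:
 *
 *	mbx_cmd_t mc, *mcp = &mc;
 *
 *	mcp->mb[0] = MBC_GET_FIRMWARE_OPTION;
 *	mcp->out_mb = MBX_0;
 *	mcp->in_mb = MBX_3|MBX_2|MBX_1|MBX_0;
 *	mcp->tov = MBX_TOV_SECONDS;
 *	mcp->flags = 0;
 *	rval = qla2x00_mailbox_command(vha, mcp);
 */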
525
526 int
527 qla2x00_load_ram(scsi_qla_host_t *vha, dma_addr_t req_dma, uint32_t risc_addr,
528 uint32_t risc_code_size)
529 {
530 int rval;
531 struct qla_hw_data *ha = vha->hw;
532 mbx_cmd_t mc;
533 mbx_cmd_t *mcp = &mc;
534
535 ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x1022,
536 "Entered %s.\n", __func__);
537
538 if (MSW(risc_addr) || IS_FWI2_CAPABLE(ha)) {
539 mcp->mb[0] = MBC_LOAD_RISC_RAM_EXTENDED;
540 mcp->mb[8] = MSW(risc_addr);
541 mcp->out_mb = MBX_8|MBX_0;
542 } else {
543 mcp->mb[0] = MBC_LOAD_RISC_RAM;
544 mcp->out_mb = MBX_0;
545 }
546 mcp->mb[1] = LSW(risc_addr);
547 mcp->mb[2] = MSW(req_dma);
548 mcp->mb[3] = LSW(req_dma);
549 mcp->mb[6] = MSW(MSD(req_dma));
550 mcp->mb[7] = LSW(MSD(req_dma));
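	/*
	 * The 64-bit DMA address is split word-wise across mb[2,3,6,7];
	 * e.g. (assuming the usual LSW/MSW/MSD helpers) req_dma =
	 * 0x0000001234567890 yields mb[2]=0x3456, mb[3]=0x7890,
	 * mb[6]=0x0000, mb[7]=0x0012.
	 */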
551 mcp->out_mb |= MBX_7|MBX_6|MBX_3|MBX_2|MBX_1;
552 if (IS_FWI2_CAPABLE(ha)) {
553 mcp->mb[4] = MSW(risc_code_size);
554 mcp->mb[5] = LSW(risc_code_size);
555 mcp->out_mb |= MBX_5|MBX_4;
556 } else {
557 mcp->mb[4] = LSW(risc_code_size);
558 mcp->out_mb |= MBX_4;
559 }
560
561 mcp->in_mb = MBX_0;
562 mcp->tov = MBX_TOV_SECONDS;
563 mcp->flags = 0;
564 rval = qla2x00_mailbox_command(vha, mcp);
565
566 if (rval != QLA_SUCCESS) {
567 ql_dbg(ql_dbg_mbx, vha, 0x1023,
568 "Failed=%x mb[0]=%x.\n", rval, mcp->mb[0]);
569 } else {
570 ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x1024,
571 "Done %s.\n", __func__);
572 }
573
574 return rval;
575 }
576
577 #define EXTENDED_BB_CREDITS BIT_0
578 #define NVME_ENABLE_FLAG BIT_3
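/*
 * Helpers that build the long-range (LR) enable/distance bits placed in
 * mb[4] by qla2x00_execute_fw() below.
 */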
579 static inline uint16_t qla25xx_set_sfp_lr_dist(struct qla_hw_data *ha)
580 {
581 uint16_t mb4 = BIT_0;
582
583 if (IS_QLA83XX(ha) || IS_QLA27XX(ha))
584 mb4 |= ha->long_range_distance << LR_DIST_FW_POS;
585
586 return mb4;
587 }
588
589 static inline uint16_t qla25xx_set_nvr_lr_dist(struct qla_hw_data *ha)
590 {
591 uint16_t mb4 = BIT_0;
592
593 if (IS_QLA83XX(ha) || IS_QLA27XX(ha)) {
594 struct nvram_81xx *nv = ha->nvram;
595
596 mb4 |= LR_DIST_FW_FIELD(nv->enhanced_features);
597 }
598
599 return mb4;
600 }
601
602 /*
603 * qla2x00_execute_fw
604 * Start adapter firmware.
605 *
606 * Input:
607 * ha = adapter block pointer.
608 * TARGET_QUEUE_LOCK must be released.
609 * ADAPTER_STATE_LOCK must be released.
610 *
611 * Returns:
612 * qla2x00 local function return status code.
613 *
614 * Context:
615 * Kernel context.
616 */
617 int
618 qla2x00_execute_fw(scsi_qla_host_t *vha, uint32_t risc_addr)
619 {
620 int rval;
621 struct qla_hw_data *ha = vha->hw;
622 mbx_cmd_t mc;
623 mbx_cmd_t *mcp = &mc;
624
625 ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x1025,
626 "Entered %s.\n", __func__);
627
628 mcp->mb[0] = MBC_EXECUTE_FIRMWARE;
629 mcp->out_mb = MBX_0;
630 mcp->in_mb = MBX_0;
631 if (IS_FWI2_CAPABLE(ha)) {
632 mcp->mb[1] = MSW(risc_addr);
633 mcp->mb[2] = LSW(risc_addr);
634 mcp->mb[3] = 0;
635 mcp->mb[4] = 0;
636 ha->flags.using_lr_setting = 0;
637 if (IS_QLA25XX(ha) || IS_QLA81XX(ha) || IS_QLA83XX(ha) ||
638 IS_QLA27XX(ha)) {
639 if (ql2xautodetectsfp) {
640 if (ha->flags.detected_lr_sfp) {
641 mcp->mb[4] |=
642 qla25xx_set_sfp_lr_dist(ha);
643 ha->flags.using_lr_setting = 1;
644 }
645 } else {
646 struct nvram_81xx *nv = ha->nvram;
647 /* set LR distance if specified in nvram */
648 if (nv->enhanced_features &
649 NEF_LR_DIST_ENABLE) {
650 mcp->mb[4] |=
651 qla25xx_set_nvr_lr_dist(ha);
652 ha->flags.using_lr_setting = 1;
653 }
654 }
655 }
656
657 if (ql2xnvmeenable && IS_QLA27XX(ha))
658 mcp->mb[4] |= NVME_ENABLE_FLAG;
659
660 if (IS_QLA83XX(ha) || IS_QLA27XX(ha)) {
661 struct nvram_81xx *nv = ha->nvram;
662 /* set minimum speed if specified in nvram */
663 if (nv->min_link_speed >= 2 &&
664 nv->min_link_speed <= 5) {
665 mcp->mb[4] |= BIT_4;
666 mcp->mb[11] = nv->min_link_speed;
667 mcp->out_mb |= MBX_11;
668 mcp->in_mb |= BIT_5;
669 vha->min_link_speed_feat = nv->min_link_speed;
670 }
671 }
672
673 if (ha->flags.exlogins_enabled)
674 mcp->mb[4] |= ENABLE_EXTENDED_LOGIN;
675
676 if (ha->flags.exchoffld_enabled)
677 mcp->mb[4] |= ENABLE_EXCHANGE_OFFLD;
678
679 mcp->out_mb |= MBX_4|MBX_3|MBX_2|MBX_1;
680 mcp->in_mb |= MBX_3 | MBX_2 | MBX_1;
681 } else {
682 mcp->mb[1] = LSW(risc_addr);
683 mcp->out_mb |= MBX_1;
684 if (IS_QLA2322(ha) || IS_QLA6322(ha)) {
685 mcp->mb[2] = 0;
686 mcp->out_mb |= MBX_2;
687 }
688 }
689
690 mcp->tov = MBX_TOV_SECONDS;
691 mcp->flags = 0;
692 rval = qla2x00_mailbox_command(vha, mcp);
693
694 if (rval != QLA_SUCCESS) {
695 ql_dbg(ql_dbg_mbx, vha, 0x1026,
696 "Failed=%x mb[0]=%x.\n", rval, mcp->mb[0]);
697 } else {
698 if (IS_FWI2_CAPABLE(ha)) {
699 ha->fw_ability_mask = mcp->mb[3] << 16 | mcp->mb[2];
700 ql_dbg(ql_dbg_mbx, vha, 0x119a,
701 "fw_ability_mask=%x.\n", ha->fw_ability_mask);
702 ql_dbg(ql_dbg_mbx, vha, 0x1027,
703 "exchanges=%x.\n", mcp->mb[1]);
704 if (IS_QLA83XX(ha) || IS_QLA27XX(ha)) {
705 ha->max_speed_sup = mcp->mb[2] & BIT_0;
706 ql_dbg(ql_dbg_mbx, vha, 0x119b,
707 "Maximum speed supported=%s.\n",
708 ha->max_speed_sup ? "32Gps" : "16Gps");
709 if (vha->min_link_speed_feat) {
710 ha->min_link_speed = mcp->mb[5];
711 ql_dbg(ql_dbg_mbx, vha, 0x119c,
712 "Minimum speed set=%s.\n",
713 mcp->mb[5] == 5 ? "32Gps" :
714 mcp->mb[5] == 4 ? "16Gps" :
715 mcp->mb[5] == 3 ? "8Gps" :
716 mcp->mb[5] == 2 ? "4Gps" :
717 "unknown");
718 }
719 }
720 }
721 ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x1028,
722 "Done.\n");
723 }
724
725 return rval;
726 }
727
728 /*
729 * qla_get_exlogin_status
730 * Get extended login status
731  *	Uses the memory offload control/status mailbox
732 *
733 * Input:
734 * ha: adapter state pointer.
735  *	buf_sz: pointer for returned buffer size; ex_logins_cnt: pointer for returned extended login count
736 *
737 * Returns:
738 * qla2x00 local function status
739 *
740 * Context:
741 * Kernel context.
742 */
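/* mb[1] sub-opcode of MBC_GET_MEM_OFFLOAD_CNTRL_STAT; the related
 * sub-opcodes CONFIG_XLOGINS_MEM, FETCH_XCHOFFLD_STAT and
 * CONFIG_XCHOFFLD_MEM are defined before their users further below.
 */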
743 #define FETCH_XLOGINS_STAT 0x8
744 int
745 qla_get_exlogin_status(scsi_qla_host_t *vha, uint16_t *buf_sz,
746 uint16_t *ex_logins_cnt)
747 {
748 int rval;
749 mbx_cmd_t mc;
750 mbx_cmd_t *mcp = &mc;
751
752 ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x118f,
753 "Entered %s\n", __func__);
754
755 memset(mcp->mb, 0 , sizeof(mcp->mb));
756 mcp->mb[0] = MBC_GET_MEM_OFFLOAD_CNTRL_STAT;
757 mcp->mb[1] = FETCH_XLOGINS_STAT;
758 mcp->out_mb = MBX_1|MBX_0;
759 mcp->in_mb = MBX_10|MBX_4|MBX_0;
760 mcp->tov = MBX_TOV_SECONDS;
761 mcp->flags = 0;
762
763 rval = qla2x00_mailbox_command(vha, mcp);
764 if (rval != QLA_SUCCESS) {
765 ql_dbg(ql_dbg_mbx, vha, 0x1115, "Failed=%x.\n", rval);
766 } else {
767 *buf_sz = mcp->mb[4];
768 *ex_logins_cnt = mcp->mb[10];
769
770 ql_log(ql_log_info, vha, 0x1190,
771 "buffer size 0x%x, exchange login count=%d\n",
772 mcp->mb[4], mcp->mb[10]);
773
774 ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x1116,
775 "Done %s.\n", __func__);
776 }
777
778 return rval;
779 }
780
781 /*
782 * qla_set_exlogin_mem_cfg
783  *	Set extended login memory configuration
784  *	Mbx needs to be issued before init_cb is set
785 *
786 * Input:
787 * ha: adapter state pointer.
788 * buffer: buffer pointer
789 * phys_addr: physical address of buffer
790 * size: size of buffer
791 * TARGET_QUEUE_LOCK must be released
792  *	ADAPTER_STATE_LOCK must be released
793 *
794 * Returns:
795  *	qla2x00 local function status code.
796 *
797 * Context:
798 * Kernel context.
799 */
800 #define CONFIG_XLOGINS_MEM 0x3
801 int
802 qla_set_exlogin_mem_cfg(scsi_qla_host_t *vha, dma_addr_t phys_addr)
803 {
804 int rval;
805 mbx_cmd_t mc;
806 mbx_cmd_t *mcp = &mc;
807 struct qla_hw_data *ha = vha->hw;
808
809 ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x111a,
810 "Entered %s.\n", __func__);
811
812 memset(mcp->mb, 0 , sizeof(mcp->mb));
813 mcp->mb[0] = MBC_GET_MEM_OFFLOAD_CNTRL_STAT;
814 mcp->mb[1] = CONFIG_XLOGINS_MEM;
815 mcp->mb[2] = MSW(phys_addr);
816 mcp->mb[3] = LSW(phys_addr);
817 mcp->mb[6] = MSW(MSD(phys_addr));
818 mcp->mb[7] = LSW(MSD(phys_addr));
819 mcp->mb[8] = MSW(ha->exlogin_size);
820 mcp->mb[9] = LSW(ha->exlogin_size);
821 mcp->out_mb = MBX_9|MBX_8|MBX_7|MBX_6|MBX_3|MBX_2|MBX_1|MBX_0;
822 mcp->in_mb = MBX_11|MBX_0;
823 mcp->tov = MBX_TOV_SECONDS;
824 mcp->flags = 0;
825 rval = qla2x00_mailbox_command(vha, mcp);
826 if (rval != QLA_SUCCESS) {
827 /*EMPTY*/
828 ql_dbg(ql_dbg_mbx, vha, 0x111b, "Failed=%x.\n", rval);
829 } else {
830 ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x118c,
831 "Done %s.\n", __func__);
832 }
833
834 return rval;
835 }
836
837 /*
838 * qla_get_exchoffld_status
839 * Get exchange offload status
840  *	Uses the memory offload control/status mailbox
841 *
842 * Input:
843 * ha: adapter state pointer.
844  *	buf_sz: pointer for returned buffer size; ex_logins_cnt: pointer for returned exchange offload count
845 *
846 * Returns:
847 * qla2x00 local function status
848 *
849 * Context:
850 * Kernel context.
851 */
852 #define FETCH_XCHOFFLD_STAT 0x2
853 int
854 qla_get_exchoffld_status(scsi_qla_host_t *vha, uint16_t *buf_sz,
855 uint16_t *ex_logins_cnt)
856 {
857 int rval;
858 mbx_cmd_t mc;
859 mbx_cmd_t *mcp = &mc;
860
861 ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x1019,
862 "Entered %s\n", __func__);
863
864 memset(mcp->mb, 0 , sizeof(mcp->mb));
865 mcp->mb[0] = MBC_GET_MEM_OFFLOAD_CNTRL_STAT;
866 mcp->mb[1] = FETCH_XCHOFFLD_STAT;
867 mcp->out_mb = MBX_1|MBX_0;
868 mcp->in_mb = MBX_10|MBX_4|MBX_0;
869 mcp->tov = MBX_TOV_SECONDS;
870 mcp->flags = 0;
871
872 rval = qla2x00_mailbox_command(vha, mcp);
873 if (rval != QLA_SUCCESS) {
874 ql_dbg(ql_dbg_mbx, vha, 0x1155, "Failed=%x.\n", rval);
875 } else {
876 *buf_sz = mcp->mb[4];
877 *ex_logins_cnt = mcp->mb[10];
878
879 ql_log(ql_log_info, vha, 0x118e,
880 "buffer size 0x%x, exchange offload count=%d\n",
881 mcp->mb[4], mcp->mb[10]);
882
883 ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x1156,
884 "Done %s.\n", __func__);
885 }
886
887 return rval;
888 }
889
890 /*
891 * qla_set_exchoffld_mem_cfg
892 * Set exchange offload memory configuration
893  *	Mbx needs to be issued before init_cb is set
894 *
895 * Input:
896 * ha: adapter state pointer.
897 * buffer: buffer pointer
898 * phys_addr: physical address of buffer
899 * size: size of buffer
900 * TARGET_QUEUE_LOCK must be released
901  *	ADAPTER_STATE_LOCK must be released
902 *
903 * Returns:
904  *	qla2x00 local function status code.
905 *
906 * Context:
907 * Kernel context.
908 */
909 #define CONFIG_XCHOFFLD_MEM 0x3
910 int
911 qla_set_exchoffld_mem_cfg(scsi_qla_host_t *vha)
912 {
913 int rval;
914 mbx_cmd_t mc;
915 mbx_cmd_t *mcp = &mc;
916 struct qla_hw_data *ha = vha->hw;
917
918 ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x1157,
919 "Entered %s.\n", __func__);
920
921 memset(mcp->mb, 0 , sizeof(mcp->mb));
922 mcp->mb[0] = MBC_GET_MEM_OFFLOAD_CNTRL_STAT;
923 mcp->mb[1] = CONFIG_XCHOFFLD_MEM;
924 mcp->mb[2] = MSW(ha->exchoffld_buf_dma);
925 mcp->mb[3] = LSW(ha->exchoffld_buf_dma);
926 mcp->mb[6] = MSW(MSD(ha->exchoffld_buf_dma));
927 mcp->mb[7] = LSW(MSD(ha->exchoffld_buf_dma));
928 mcp->mb[8] = MSW(ha->exchoffld_size);
929 mcp->mb[9] = LSW(ha->exchoffld_size);
930 mcp->out_mb = MBX_9|MBX_8|MBX_7|MBX_6|MBX_3|MBX_2|MBX_1|MBX_0;
931 mcp->in_mb = MBX_11|MBX_0;
932 mcp->tov = MBX_TOV_SECONDS;
933 mcp->flags = 0;
934 rval = qla2x00_mailbox_command(vha, mcp);
935 if (rval != QLA_SUCCESS) {
936 /*EMPTY*/
937 ql_dbg(ql_dbg_mbx, vha, 0x1158, "Failed=%x.\n", rval);
938 } else {
939 ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x1192,
940 "Done %s.\n", __func__);
941 }
942
943 return rval;
944 }
945
946 /*
947 * qla2x00_get_fw_version
948 * Get firmware version.
949 *
950 * Input:
951 * ha: adapter state pointer.
952 * major: pointer for major number.
953 * minor: pointer for minor number.
954 * subminor: pointer for subminor number.
955 *
956 * Returns:
957 * qla2x00 local function return status code.
958 *
959 * Context:
960 * Kernel context.
961 */
962 int
963 qla2x00_get_fw_version(scsi_qla_host_t *vha)
964 {
965 int rval;
966 mbx_cmd_t mc;
967 mbx_cmd_t *mcp = &mc;
968 struct qla_hw_data *ha = vha->hw;
969
970 ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x1029,
971 "Entered %s.\n", __func__);
972
973 mcp->mb[0] = MBC_GET_FIRMWARE_VERSION;
974 mcp->out_mb = MBX_0;
975 mcp->in_mb = MBX_6|MBX_5|MBX_4|MBX_3|MBX_2|MBX_1|MBX_0;
976 if (IS_QLA81XX(vha->hw) || IS_QLA8031(ha) || IS_QLA8044(ha))
977 mcp->in_mb |= MBX_13|MBX_12|MBX_11|MBX_10|MBX_9|MBX_8;
978 if (IS_FWI2_CAPABLE(ha))
979 mcp->in_mb |= MBX_17|MBX_16|MBX_15;
980 if (IS_QLA27XX(ha))
981 mcp->in_mb |=
982 MBX_25|MBX_24|MBX_23|MBX_22|MBX_21|MBX_20|MBX_19|MBX_18|
983 MBX_14|MBX_13|MBX_11|MBX_10|MBX_9|MBX_8;
984
985 mcp->flags = 0;
986 mcp->tov = MBX_TOV_SECONDS;
987 rval = qla2x00_mailbox_command(vha, mcp);
988 if (rval != QLA_SUCCESS)
989 goto failed;
990
991 /* Return mailbox data. */
992 ha->fw_major_version = mcp->mb[1];
993 ha->fw_minor_version = mcp->mb[2];
994 ha->fw_subminor_version = mcp->mb[3];
995 ha->fw_attributes = mcp->mb[6];
996 if (IS_QLA2100(vha->hw) || IS_QLA2200(vha->hw))
997 ha->fw_memory_size = 0x1FFFF; /* Defaults to 128KB. */
998 else
999 ha->fw_memory_size = (mcp->mb[5] << 16) | mcp->mb[4];
1000
1001 if (IS_QLA81XX(vha->hw) || IS_QLA8031(vha->hw) || IS_QLA8044(ha)) {
1002 ha->mpi_version[0] = mcp->mb[10] & 0xff;
1003 ha->mpi_version[1] = mcp->mb[11] >> 8;
1004 ha->mpi_version[2] = mcp->mb[11] & 0xff;
1005 ha->mpi_capabilities = (mcp->mb[12] << 16) | mcp->mb[13];
1006 ha->phy_version[0] = mcp->mb[8] & 0xff;
1007 ha->phy_version[1] = mcp->mb[9] >> 8;
1008 ha->phy_version[2] = mcp->mb[9] & 0xff;
1009 }
1010
1011 if (IS_FWI2_CAPABLE(ha)) {
1012 ha->fw_attributes_h = mcp->mb[15];
1013 ha->fw_attributes_ext[0] = mcp->mb[16];
1014 ha->fw_attributes_ext[1] = mcp->mb[17];
1015 ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x1139,
1016 "%s: FW_attributes Upper: 0x%x, Lower: 0x%x.\n",
1017 __func__, mcp->mb[15], mcp->mb[6]);
1018 ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x112f,
1019 "%s: Ext_FwAttributes Upper: 0x%x, Lower: 0x%x.\n",
1020 __func__, mcp->mb[17], mcp->mb[16]);
1021
1022 if (ha->fw_attributes_h & 0x4)
1023 ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x118d,
1024 "%s: Firmware supports Extended Login 0x%x\n",
1025 __func__, ha->fw_attributes_h);
1026
1027 if (ha->fw_attributes_h & 0x8)
1028 ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x1191,
1029 "%s: Firmware supports Exchange Offload 0x%x\n",
1030 __func__, ha->fw_attributes_h);
1031
1032 /*
1033 * FW supports nvme and driver load parameter requested nvme.
1034 * BIT 26 of fw_attributes indicates NVMe support.
1035 */
1036 if ((ha->fw_attributes_h & 0x400) && ql2xnvmeenable) {
1037 vha->flags.nvme_enabled = 1;
1038 ql_log(ql_log_info, vha, 0xd302,
1039 "%s: FC-NVMe is Enabled (0x%x)\n",
1040 __func__, ha->fw_attributes_h);
1041 }
1042 }
1043
1044 if (IS_QLA27XX(ha)) {
1045 ha->mpi_version[0] = mcp->mb[10] & 0xff;
1046 ha->mpi_version[1] = mcp->mb[11] >> 8;
1047 ha->mpi_version[2] = mcp->mb[11] & 0xff;
1048 ha->pep_version[0] = mcp->mb[13] & 0xff;
1049 ha->pep_version[1] = mcp->mb[14] >> 8;
1050 ha->pep_version[2] = mcp->mb[14] & 0xff;
1051 ha->fw_shared_ram_start = (mcp->mb[19] << 16) | mcp->mb[18];
1052 ha->fw_shared_ram_end = (mcp->mb[21] << 16) | mcp->mb[20];
1053 ha->fw_ddr_ram_start = (mcp->mb[23] << 16) | mcp->mb[22];
1054 ha->fw_ddr_ram_end = (mcp->mb[25] << 16) | mcp->mb[24];
1055 }
1056
1057 failed:
1058 if (rval != QLA_SUCCESS) {
1059 /*EMPTY*/
1060 ql_dbg(ql_dbg_mbx, vha, 0x102a, "Failed=%x.\n", rval);
1061 } else {
1062 /*EMPTY*/
1063 ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x102b,
1064 "Done %s.\n", __func__);
1065 }
1066 return rval;
1067 }
1068
1069 /*
1070 * qla2x00_get_fw_options
1071  *	Get firmware options.
1072 *
1073 * Input:
1074 * ha = adapter block pointer.
1075 * fwopt = pointer for firmware options.
1076 *
1077 * Returns:
1078 * qla2x00 local function return status code.
1079 *
1080 * Context:
1081 * Kernel context.
1082 */
1083 int
1084 qla2x00_get_fw_options(scsi_qla_host_t *vha, uint16_t *fwopts)
1085 {
1086 int rval;
1087 mbx_cmd_t mc;
1088 mbx_cmd_t *mcp = &mc;
1089
1090 ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x102c,
1091 "Entered %s.\n", __func__);
1092
1093 mcp->mb[0] = MBC_GET_FIRMWARE_OPTION;
1094 mcp->out_mb = MBX_0;
1095 mcp->in_mb = MBX_3|MBX_2|MBX_1|MBX_0;
1096 mcp->tov = MBX_TOV_SECONDS;
1097 mcp->flags = 0;
1098 rval = qla2x00_mailbox_command(vha, mcp);
1099
1100 if (rval != QLA_SUCCESS) {
1101 /*EMPTY*/
1102 ql_dbg(ql_dbg_mbx, vha, 0x102d, "Failed=%x.\n", rval);
1103 } else {
1104 fwopts[0] = mcp->mb[0];
1105 fwopts[1] = mcp->mb[1];
1106 fwopts[2] = mcp->mb[2];
1107 fwopts[3] = mcp->mb[3];
1108
1109 ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x102e,
1110 "Done %s.\n", __func__);
1111 }
1112
1113 return rval;
1114 }
1115
1116
1117 /*
1118 * qla2x00_set_fw_options
1119 * Set firmware options.
1120 *
1121 * Input:
1122 * ha = adapter block pointer.
1123 * fwopt = pointer for firmware options.
1124 *
1125 * Returns:
1126 * qla2x00 local function return status code.
1127 *
1128 * Context:
1129 * Kernel context.
1130 */
1131 int
1132 qla2x00_set_fw_options(scsi_qla_host_t *vha, uint16_t *fwopts)
1133 {
1134 int rval;
1135 mbx_cmd_t mc;
1136 mbx_cmd_t *mcp = &mc;
1137
1138 ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x102f,
1139 "Entered %s.\n", __func__);
1140
1141 mcp->mb[0] = MBC_SET_FIRMWARE_OPTION;
1142 mcp->mb[1] = fwopts[1];
1143 mcp->mb[2] = fwopts[2];
1144 mcp->mb[3] = fwopts[3];
1145 mcp->out_mb = MBX_3|MBX_2|MBX_1|MBX_0;
1146 mcp->in_mb = MBX_0;
1147 if (IS_FWI2_CAPABLE(vha->hw)) {
1148 mcp->in_mb |= MBX_1;
1149 mcp->mb[10] = fwopts[10];
1150 mcp->out_mb |= MBX_10;
1151 } else {
1152 mcp->mb[10] = fwopts[10];
1153 mcp->mb[11] = fwopts[11];
1154 mcp->mb[12] = 0; /* Undocumented, but used */
1155 mcp->out_mb |= MBX_12|MBX_11|MBX_10;
1156 }
1157 mcp->tov = MBX_TOV_SECONDS;
1158 mcp->flags = 0;
1159 rval = qla2x00_mailbox_command(vha, mcp);
1160
1161 fwopts[0] = mcp->mb[0];
1162
1163 if (rval != QLA_SUCCESS) {
1164 /*EMPTY*/
1165 ql_dbg(ql_dbg_mbx, vha, 0x1030,
1166 "Failed=%x (%x/%x).\n", rval, mcp->mb[0], mcp->mb[1]);
1167 } else {
1168 /*EMPTY*/
1169 ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x1031,
1170 "Done %s.\n", __func__);
1171 }
1172
1173 return rval;
1174 }
1175
1176 /*
1177 * qla2x00_mbx_reg_test
1178 * Mailbox register wrap test.
1179 *
1180 * Input:
1181 * ha = adapter block pointer.
1182 * TARGET_QUEUE_LOCK must be released.
1183 * ADAPTER_STATE_LOCK must be released.
1184 *
1185 * Returns:
1186 * qla2x00 local function return status code.
1187 *
1188 * Context:
1189 * Kernel context.
1190 */
1191 int
1192 qla2x00_mbx_reg_test(scsi_qla_host_t *vha)
1193 {
1194 int rval;
1195 mbx_cmd_t mc;
1196 mbx_cmd_t *mcp = &mc;
1197
1198 ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x1032,
1199 "Entered %s.\n", __func__);
1200
1201 mcp->mb[0] = MBC_MAILBOX_REGISTER_TEST;
1202 mcp->mb[1] = 0xAAAA;
1203 mcp->mb[2] = 0x5555;
1204 mcp->mb[3] = 0xAA55;
1205 mcp->mb[4] = 0x55AA;
1206 mcp->mb[5] = 0xA5A5;
1207 mcp->mb[6] = 0x5A5A;
1208 mcp->mb[7] = 0x2525;
1209 mcp->out_mb = MBX_7|MBX_6|MBX_5|MBX_4|MBX_3|MBX_2|MBX_1|MBX_0;
1210 mcp->in_mb = MBX_7|MBX_6|MBX_5|MBX_4|MBX_3|MBX_2|MBX_1|MBX_0;
1211 mcp->tov = MBX_TOV_SECONDS;
1212 mcp->flags = 0;
1213 rval = qla2x00_mailbox_command(vha, mcp);
1214
1215 if (rval == QLA_SUCCESS) {
1216 if (mcp->mb[1] != 0xAAAA || mcp->mb[2] != 0x5555 ||
1217 mcp->mb[3] != 0xAA55 || mcp->mb[4] != 0x55AA)
1218 rval = QLA_FUNCTION_FAILED;
1219 if (mcp->mb[5] != 0xA5A5 || mcp->mb[6] != 0x5A5A ||
1220 mcp->mb[7] != 0x2525)
1221 rval = QLA_FUNCTION_FAILED;
1222 }
1223
1224 if (rval != QLA_SUCCESS) {
1225 /*EMPTY*/
1226 ql_dbg(ql_dbg_mbx, vha, 0x1033, "Failed=%x.\n", rval);
1227 } else {
1228 /*EMPTY*/
1229 ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x1034,
1230 "Done %s.\n", __func__);
1231 }
1232
1233 return rval;
1234 }
1235
1236 /*
1237 * qla2x00_verify_checksum
1238 * Verify firmware checksum.
1239 *
1240 * Input:
1241 * ha = adapter block pointer.
1242 * TARGET_QUEUE_LOCK must be released.
1243 * ADAPTER_STATE_LOCK must be released.
1244 *
1245 * Returns:
1246 * qla2x00 local function return status code.
1247 *
1248 * Context:
1249 * Kernel context.
1250 */
1251 int
1252 qla2x00_verify_checksum(scsi_qla_host_t *vha, uint32_t risc_addr)
1253 {
1254 int rval;
1255 mbx_cmd_t mc;
1256 mbx_cmd_t *mcp = &mc;
1257
1258 ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x1035,
1259 "Entered %s.\n", __func__);
1260
1261 mcp->mb[0] = MBC_VERIFY_CHECKSUM;
1262 mcp->out_mb = MBX_0;
1263 mcp->in_mb = MBX_0;
1264 if (IS_FWI2_CAPABLE(vha->hw)) {
1265 mcp->mb[1] = MSW(risc_addr);
1266 mcp->mb[2] = LSW(risc_addr);
1267 mcp->out_mb |= MBX_2|MBX_1;
1268 mcp->in_mb |= MBX_2|MBX_1;
1269 } else {
1270 mcp->mb[1] = LSW(risc_addr);
1271 mcp->out_mb |= MBX_1;
1272 mcp->in_mb |= MBX_1;
1273 }
1274
1275 mcp->tov = MBX_TOV_SECONDS;
1276 mcp->flags = 0;
1277 rval = qla2x00_mailbox_command(vha, mcp);
1278
1279 if (rval != QLA_SUCCESS) {
1280 ql_dbg(ql_dbg_mbx, vha, 0x1036,
1281 		    "Failed=%x checksum=%x.\n", rval, IS_FWI2_CAPABLE(vha->hw) ?
1282 (mcp->mb[2] << 16) | mcp->mb[1] : mcp->mb[1]);
1283 } else {
1284 ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x1037,
1285 "Done %s.\n", __func__);
1286 }
1287
1288 return rval;
1289 }
1290
1291 /*
1292 * qla2x00_issue_iocb
1293 * Issue IOCB using mailbox command
1294 *
1295 * Input:
1296 * ha = adapter state pointer.
1297 * buffer = buffer pointer.
1298 * phys_addr = physical address of buffer.
1299 * size = size of buffer.
1300 * TARGET_QUEUE_LOCK must be released.
1301 * ADAPTER_STATE_LOCK must be released.
1302 *
1303 * Returns:
1304 * qla2x00 local function return status code.
1305 *
1306 * Context:
1307 * Kernel context.
1308 */
1309 int
1310 qla2x00_issue_iocb_timeout(scsi_qla_host_t *vha, void *buffer,
1311 dma_addr_t phys_addr, size_t size, uint32_t tov)
1312 {
1313 int rval;
1314 mbx_cmd_t mc;
1315 mbx_cmd_t *mcp = &mc;
1316
1317 ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x1038,
1318 "Entered %s.\n", __func__);
1319
1320 mcp->mb[0] = MBC_IOCB_COMMAND_A64;
1321 mcp->mb[1] = 0;
1322 mcp->mb[2] = MSW(phys_addr);
1323 mcp->mb[3] = LSW(phys_addr);
1324 mcp->mb[6] = MSW(MSD(phys_addr));
1325 mcp->mb[7] = LSW(MSD(phys_addr));
1326 mcp->out_mb = MBX_7|MBX_6|MBX_3|MBX_2|MBX_1|MBX_0;
1327 mcp->in_mb = MBX_2|MBX_0;
1328 mcp->tov = tov;
1329 mcp->flags = 0;
1330 rval = qla2x00_mailbox_command(vha, mcp);
1331
1332 if (rval != QLA_SUCCESS) {
1333 /*EMPTY*/
1334 ql_dbg(ql_dbg_mbx, vha, 0x1039, "Failed=%x.\n", rval);
1335 } else {
1336 sts_entry_t *sts_entry = (sts_entry_t *) buffer;
1337
1338 /* Mask reserved bits. */
1339 sts_entry->entry_status &=
1340 IS_FWI2_CAPABLE(vha->hw) ? RF_MASK_24XX : RF_MASK;
1341 ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x103a,
1342 "Done %s.\n", __func__);
1343 }
1344
1345 return rval;
1346 }
1347
1348 int
1349 qla2x00_issue_iocb(scsi_qla_host_t *vha, void *buffer, dma_addr_t phys_addr,
1350 size_t size)
1351 {
1352 return qla2x00_issue_iocb_timeout(vha, buffer, phys_addr, size,
1353 MBX_TOV_SECONDS);
1354 }
1355
1356 /*
1357 * qla2x00_abort_command
1358 * Abort command aborts a specified IOCB.
1359 *
1360 * Input:
1361 * ha = adapter block pointer.
1362  *	sp = SRB structure pointer.
1363 *
1364 * Returns:
1365 * qla2x00 local function return status code.
1366 *
1367 * Context:
1368 * Kernel context.
1369 */
1370 int
1371 qla2x00_abort_command(srb_t *sp)
1372 {
1373 unsigned long flags = 0;
1374 int rval;
1375 uint32_t handle = 0;
1376 mbx_cmd_t mc;
1377 mbx_cmd_t *mcp = &mc;
1378 fc_port_t *fcport = sp->fcport;
1379 scsi_qla_host_t *vha = fcport->vha;
1380 struct qla_hw_data *ha = vha->hw;
1381 struct req_que *req;
1382 struct scsi_cmnd *cmd = GET_CMD_SP(sp);
1383
1384 ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x103b,
1385 "Entered %s.\n", __func__);
1386
1387 if (vha->flags.qpairs_available && sp->qpair)
1388 req = sp->qpair->req;
1389 else
1390 req = vha->req;
1391
1392 spin_lock_irqsave(&ha->hardware_lock, flags);
1393 for (handle = 1; handle < req->num_outstanding_cmds; handle++) {
1394 if (req->outstanding_cmds[handle] == sp)
1395 break;
1396 }
1397 spin_unlock_irqrestore(&ha->hardware_lock, flags);
1398
1399 if (handle == req->num_outstanding_cmds) {
1400 /* command not found */
1401 return QLA_FUNCTION_FAILED;
1402 }
1403
1404 mcp->mb[0] = MBC_ABORT_COMMAND;
1405 if (HAS_EXTENDED_IDS(ha))
1406 mcp->mb[1] = fcport->loop_id;
1407 else
1408 mcp->mb[1] = fcport->loop_id << 8;
1409 mcp->mb[2] = (uint16_t)handle;
1410 mcp->mb[3] = (uint16_t)(handle >> 16);
1411 mcp->mb[6] = (uint16_t)cmd->device->lun;
1412 mcp->out_mb = MBX_6|MBX_3|MBX_2|MBX_1|MBX_0;
1413 mcp->in_mb = MBX_0;
1414 mcp->tov = MBX_TOV_SECONDS;
1415 mcp->flags = 0;
1416 rval = qla2x00_mailbox_command(vha, mcp);
1417
1418 if (rval != QLA_SUCCESS) {
1419 ql_dbg(ql_dbg_mbx, vha, 0x103c, "Failed=%x.\n", rval);
1420 } else {
1421 ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x103d,
1422 "Done %s.\n", __func__);
1423 }
1424
1425 return rval;
1426 }
1427
1428 int
1429 qla2x00_abort_target(struct fc_port *fcport, uint64_t l, int tag)
1430 {
1431 int rval, rval2;
1432 mbx_cmd_t mc;
1433 mbx_cmd_t *mcp = &mc;
1434 scsi_qla_host_t *vha;
1435 struct req_que *req;
1436 struct rsp_que *rsp;
1437
1438 	l = l;		/* lun is unused by this mailbox command */
1439 vha = fcport->vha;
1440
1441 ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x103e,
1442 "Entered %s.\n", __func__);
1443
1444 req = vha->hw->req_q_map[0];
1445 rsp = req->rsp;
1446 mcp->mb[0] = MBC_ABORT_TARGET;
1447 mcp->out_mb = MBX_9|MBX_2|MBX_1|MBX_0;
1448 if (HAS_EXTENDED_IDS(vha->hw)) {
1449 mcp->mb[1] = fcport->loop_id;
1450 mcp->mb[10] = 0;
1451 mcp->out_mb |= MBX_10;
1452 } else {
1453 mcp->mb[1] = fcport->loop_id << 8;
1454 }
1455 mcp->mb[2] = vha->hw->loop_reset_delay;
1456 mcp->mb[9] = vha->vp_idx;
1457
1458 mcp->in_mb = MBX_0;
1459 mcp->tov = MBX_TOV_SECONDS;
1460 mcp->flags = 0;
1461 rval = qla2x00_mailbox_command(vha, mcp);
1462 if (rval != QLA_SUCCESS) {
1463 ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x103f,
1464 "Failed=%x.\n", rval);
1465 }
1466
1467 /* Issue marker IOCB. */
1468 rval2 = qla2x00_marker(vha, req, rsp, fcport->loop_id, 0,
1469 MK_SYNC_ID);
1470 if (rval2 != QLA_SUCCESS) {
1471 ql_dbg(ql_dbg_mbx, vha, 0x1040,
1472 "Failed to issue marker IOCB (%x).\n", rval2);
1473 } else {
1474 ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x1041,
1475 "Done %s.\n", __func__);
1476 }
1477
1478 return rval;
1479 }
1480
1481 int
1482 qla2x00_lun_reset(struct fc_port *fcport, uint64_t l, int tag)
1483 {
1484 int rval, rval2;
1485 mbx_cmd_t mc;
1486 mbx_cmd_t *mcp = &mc;
1487 scsi_qla_host_t *vha;
1488 struct req_que *req;
1489 struct rsp_que *rsp;
1490
1491 vha = fcport->vha;
1492
1493 ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x1042,
1494 "Entered %s.\n", __func__);
1495
1496 req = vha->hw->req_q_map[0];
1497 rsp = req->rsp;
1498 mcp->mb[0] = MBC_LUN_RESET;
1499 mcp->out_mb = MBX_9|MBX_3|MBX_2|MBX_1|MBX_0;
1500 if (HAS_EXTENDED_IDS(vha->hw))
1501 mcp->mb[1] = fcport->loop_id;
1502 else
1503 mcp->mb[1] = fcport->loop_id << 8;
1504 mcp->mb[2] = (u32)l;
1505 mcp->mb[3] = 0;
1506 mcp->mb[9] = vha->vp_idx;
1507
1508 mcp->in_mb = MBX_0;
1509 mcp->tov = MBX_TOV_SECONDS;
1510 mcp->flags = 0;
1511 rval = qla2x00_mailbox_command(vha, mcp);
1512 if (rval != QLA_SUCCESS) {
1513 ql_dbg(ql_dbg_mbx, vha, 0x1043, "Failed=%x.\n", rval);
1514 }
1515
1516 /* Issue marker IOCB. */
1517 rval2 = qla2x00_marker(vha, req, rsp, fcport->loop_id, l,
1518 MK_SYNC_ID_LUN);
1519 if (rval2 != QLA_SUCCESS) {
1520 ql_dbg(ql_dbg_mbx, vha, 0x1044,
1521 "Failed to issue marker IOCB (%x).\n", rval2);
1522 } else {
1523 ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x1045,
1524 "Done %s.\n", __func__);
1525 }
1526
1527 return rval;
1528 }
1529
1530 /*
1531 * qla2x00_get_adapter_id
1532 * Get adapter ID and topology.
1533 *
1534 * Input:
1535 * ha = adapter block pointer.
1536 * id = pointer for loop ID.
1537 * al_pa = pointer for AL_PA.
1538 * area = pointer for area.
1539 * domain = pointer for domain.
1540 * top = pointer for topology.
1541 * TARGET_QUEUE_LOCK must be released.
1542 * ADAPTER_STATE_LOCK must be released.
1543 *
1544 * Returns:
1545 * qla2x00 local function return status code.
1546 *
1547 * Context:
1548 * Kernel context.
1549 */
1550 int
1551 qla2x00_get_adapter_id(scsi_qla_host_t *vha, uint16_t *id, uint8_t *al_pa,
1552 uint8_t *area, uint8_t *domain, uint16_t *top, uint16_t *sw_cap)
1553 {
1554 int rval;
1555 mbx_cmd_t mc;
1556 mbx_cmd_t *mcp = &mc;
1557
1558 ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x1046,
1559 "Entered %s.\n", __func__);
1560
1561 mcp->mb[0] = MBC_GET_ADAPTER_LOOP_ID;
1562 mcp->mb[9] = vha->vp_idx;
1563 mcp->out_mb = MBX_9|MBX_0;
1564 mcp->in_mb = MBX_9|MBX_7|MBX_6|MBX_3|MBX_2|MBX_1|MBX_0;
1565 if (IS_CNA_CAPABLE(vha->hw))
1566 mcp->in_mb |= MBX_13|MBX_12|MBX_11|MBX_10;
1567 if (IS_FWI2_CAPABLE(vha->hw))
1568 mcp->in_mb |= MBX_19|MBX_18|MBX_17|MBX_16;
1569 if (IS_QLA27XX(vha->hw))
1570 mcp->in_mb |= MBX_15;
1571 mcp->tov = MBX_TOV_SECONDS;
1572 mcp->flags = 0;
1573 rval = qla2x00_mailbox_command(vha, mcp);
1574 if (mcp->mb[0] == MBS_COMMAND_ERROR)
1575 rval = QLA_COMMAND_ERROR;
1576 else if (mcp->mb[0] == MBS_INVALID_COMMAND)
1577 rval = QLA_INVALID_COMMAND;
1578
1579 /* Return data. */
1580 *id = mcp->mb[1];
1581 *al_pa = LSB(mcp->mb[2]);
1582 *area = MSB(mcp->mb[2]);
1583 *domain = LSB(mcp->mb[3]);
1584 *top = mcp->mb[6];
1585 *sw_cap = mcp->mb[7];
1586
1587 if (rval != QLA_SUCCESS) {
1588 /*EMPTY*/
1589 ql_dbg(ql_dbg_mbx, vha, 0x1047, "Failed=%x.\n", rval);
1590 } else {
1591 ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x1048,
1592 "Done %s.\n", __func__);
1593
1594 if (IS_CNA_CAPABLE(vha->hw)) {
1595 vha->fcoe_vlan_id = mcp->mb[9] & 0xfff;
1596 vha->fcoe_fcf_idx = mcp->mb[10];
1597 vha->fcoe_vn_port_mac[5] = mcp->mb[11] >> 8;
1598 vha->fcoe_vn_port_mac[4] = mcp->mb[11] & 0xff;
1599 vha->fcoe_vn_port_mac[3] = mcp->mb[12] >> 8;
1600 vha->fcoe_vn_port_mac[2] = mcp->mb[12] & 0xff;
1601 vha->fcoe_vn_port_mac[1] = mcp->mb[13] >> 8;
1602 vha->fcoe_vn_port_mac[0] = mcp->mb[13] & 0xff;
1603 }
1604 /* If FA-WWN supported */
1605 if (IS_FAWWN_CAPABLE(vha->hw)) {
1606 if (mcp->mb[7] & BIT_14) {
1607 vha->port_name[0] = MSB(mcp->mb[16]);
1608 vha->port_name[1] = LSB(mcp->mb[16]);
1609 vha->port_name[2] = MSB(mcp->mb[17]);
1610 vha->port_name[3] = LSB(mcp->mb[17]);
1611 vha->port_name[4] = MSB(mcp->mb[18]);
1612 vha->port_name[5] = LSB(mcp->mb[18]);
1613 vha->port_name[6] = MSB(mcp->mb[19]);
1614 vha->port_name[7] = LSB(mcp->mb[19]);
1615 fc_host_port_name(vha->host) =
1616 wwn_to_u64(vha->port_name);
1617 ql_dbg(ql_dbg_mbx, vha, 0x10ca,
1618 "FA-WWN acquired %016llx\n",
1619 wwn_to_u64(vha->port_name));
1620 }
1621 }
1622
1623 if (IS_QLA27XX(vha->hw))
1624 vha->bbcr = mcp->mb[15];
1625 }
1626
1627 return rval;
1628 }
1629
1630 /*
1631 * qla2x00_get_retry_cnt
1632 * Get current firmware login retry count and delay.
1633 *
1634 * Input:
1635 * ha = adapter block pointer.
1636 * retry_cnt = pointer to login retry count.
1637 * tov = pointer to login timeout value.
1638 *
1639 * Returns:
1640 * qla2x00 local function return status code.
1641 *
1642 * Context:
1643 * Kernel context.
1644 */
1645 int
1646 qla2x00_get_retry_cnt(scsi_qla_host_t *vha, uint8_t *retry_cnt, uint8_t *tov,
1647 uint16_t *r_a_tov)
1648 {
1649 int rval;
1650 uint16_t ratov;
1651 mbx_cmd_t mc;
1652 mbx_cmd_t *mcp = &mc;
1653
1654 ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x1049,
1655 "Entered %s.\n", __func__);
1656
1657 mcp->mb[0] = MBC_GET_RETRY_COUNT;
1658 mcp->out_mb = MBX_0;
1659 mcp->in_mb = MBX_3|MBX_2|MBX_1|MBX_0;
1660 mcp->tov = MBX_TOV_SECONDS;
1661 mcp->flags = 0;
1662 rval = qla2x00_mailbox_command(vha, mcp);
1663
1664 if (rval != QLA_SUCCESS) {
1665 /*EMPTY*/
1666 ql_dbg(ql_dbg_mbx, vha, 0x104a,
1667 "Failed=%x mb[0]=%x.\n", rval, mcp->mb[0]);
1668 } else {
1669 /* Convert returned data and check our values. */
1670 *r_a_tov = mcp->mb[3] / 2;
1671 ratov = (mcp->mb[3]/2) / 10; /* mb[3] value is in 100ms */
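		/*
		 * Worked example: if firmware reports mb[3] = 200 (20s in
		 * 100ms units per the comment above), *r_a_tov becomes 100
		 * and ratov 10 seconds.
		 */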
1672 if (mcp->mb[1] * ratov > (*retry_cnt) * (*tov)) {
1673 /* Update to the larger values */
1674 *retry_cnt = (uint8_t)mcp->mb[1];
1675 *tov = ratov;
1676 }
1677
1678 ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x104b,
1679 "Done %s mb3=%d ratov=%d.\n", __func__, mcp->mb[3], ratov);
1680 }
1681
1682 return rval;
1683 }
1684
1685 /*
1686 * qla2x00_init_firmware
1687 * Initialize adapter firmware.
1688 *
1689 * Input:
1690 * ha = adapter block pointer.
1691 * dptr = Initialization control block pointer.
1692 * size = size of initialization control block.
1693 * TARGET_QUEUE_LOCK must be released.
1694 * ADAPTER_STATE_LOCK must be released.
1695 *
1696 * Returns:
1697 * qla2x00 local function return status code.
1698 *
1699 * Context:
1700 * Kernel context.
1701 */
1702 int
1703 qla2x00_init_firmware(scsi_qla_host_t *vha, uint16_t size)
1704 {
1705 int rval;
1706 mbx_cmd_t mc;
1707 mbx_cmd_t *mcp = &mc;
1708 struct qla_hw_data *ha = vha->hw;
1709
1710 ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x104c,
1711 "Entered %s.\n", __func__);
1712
1713 if (IS_P3P_TYPE(ha) && ql2xdbwr)
1714 qla82xx_wr_32(ha, (uintptr_t __force)ha->nxdb_wr_ptr,
1715 (0x04 | (ha->portnum << 5) | (0 << 8) | (0 << 16)));
1716
1717 if (ha->flags.npiv_supported)
1718 mcp->mb[0] = MBC_MID_INITIALIZE_FIRMWARE;
1719 else
1720 mcp->mb[0] = MBC_INITIALIZE_FIRMWARE;
1721
1722 mcp->mb[1] = 0;
1723 mcp->mb[2] = MSW(ha->init_cb_dma);
1724 mcp->mb[3] = LSW(ha->init_cb_dma);
1725 mcp->mb[6] = MSW(MSD(ha->init_cb_dma));
1726 mcp->mb[7] = LSW(MSD(ha->init_cb_dma));
1727 mcp->out_mb = MBX_7|MBX_6|MBX_3|MBX_2|MBX_1|MBX_0;
1728 if (ha->ex_init_cb && ha->ex_init_cb->ex_version) {
1729 mcp->mb[1] = BIT_0;
1730 mcp->mb[10] = MSW(ha->ex_init_cb_dma);
1731 mcp->mb[11] = LSW(ha->ex_init_cb_dma);
1732 mcp->mb[12] = MSW(MSD(ha->ex_init_cb_dma));
1733 mcp->mb[13] = LSW(MSD(ha->ex_init_cb_dma));
1734 mcp->mb[14] = sizeof(*ha->ex_init_cb);
1735 mcp->out_mb |= MBX_14|MBX_13|MBX_12|MBX_11|MBX_10;
1736 }
1737 	/* mb[1] and mb[2] should normally be captured. */
1738 mcp->in_mb = MBX_2|MBX_1|MBX_0;
1739 if (IS_QLA83XX(ha) || IS_QLA27XX(ha))
1740 /* mb3 is additional info about the installed SFP. */
1741 mcp->in_mb |= MBX_3;
1742 mcp->buf_size = size;
1743 mcp->flags = MBX_DMA_OUT;
1744 mcp->tov = MBX_TOV_SECONDS;
1745 rval = qla2x00_mailbox_command(vha, mcp);
1746
1747 if (rval != QLA_SUCCESS) {
1748 /*EMPTY*/
1749 ql_dbg(ql_dbg_mbx, vha, 0x104d,
1750 "Failed=%x mb[0]=%x, mb[1]=%x, mb[2]=%x, mb[3]=%x,.\n",
1751 rval, mcp->mb[0], mcp->mb[1], mcp->mb[2], mcp->mb[3]);
1752 } else {
1753 if (IS_QLA27XX(ha)) {
1754 if (mcp->mb[2] == 6 || mcp->mb[3] == 2)
1755 ql_dbg(ql_dbg_mbx, vha, 0x119d,
1756 "Invalid SFP/Validation Failed\n");
1757 }
1758 ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x104e,
1759 "Done %s.\n", __func__);
1760 }
1761
1762 return rval;
1763 }
1764
1765
1766 /*
1767 * qla2x00_get_port_database
1768 * Issue normal/enhanced get port database mailbox command
1769 * and copy device name as necessary.
1770 *
1771 * Input:
1772 * ha = adapter state pointer.
1773 * dev = structure pointer.
1774 * opt = enhanced cmd option byte.
1775 *
1776 * Returns:
1777 * qla2x00 local function return status code.
1778 *
1779 * Context:
1780 * Kernel context.
1781 */
1782 int
1783 qla2x00_get_port_database(scsi_qla_host_t *vha, fc_port_t *fcport, uint8_t opt)
1784 {
1785 int rval;
1786 mbx_cmd_t mc;
1787 mbx_cmd_t *mcp = &mc;
1788 port_database_t *pd;
1789 struct port_database_24xx *pd24;
1790 dma_addr_t pd_dma;
1791 struct qla_hw_data *ha = vha->hw;
1792
1793 ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x104f,
1794 "Entered %s.\n", __func__);
1795
1796 pd24 = NULL;
1797 pd = dma_pool_zalloc(ha->s_dma_pool, GFP_KERNEL, &pd_dma);
1798 if (pd == NULL) {
1799 ql_log(ql_log_warn, vha, 0x1050,
1800 "Failed to allocate port database structure.\n");
1801 fcport->query = 0;
1802 return QLA_MEMORY_ALLOC_FAILED;
1803 }
1804
1805 mcp->mb[0] = MBC_GET_PORT_DATABASE;
1806 if (opt != 0 && !IS_FWI2_CAPABLE(ha))
1807 mcp->mb[0] = MBC_ENHANCED_GET_PORT_DATABASE;
1808 mcp->mb[2] = MSW(pd_dma);
1809 mcp->mb[3] = LSW(pd_dma);
1810 mcp->mb[6] = MSW(MSD(pd_dma));
1811 mcp->mb[7] = LSW(MSD(pd_dma));
1812 mcp->mb[9] = vha->vp_idx;
1813 mcp->out_mb = MBX_9|MBX_7|MBX_6|MBX_3|MBX_2|MBX_0;
1814 mcp->in_mb = MBX_0;
1815 if (IS_FWI2_CAPABLE(ha)) {
1816 mcp->mb[1] = fcport->loop_id;
1817 mcp->mb[10] = opt;
1818 mcp->out_mb |= MBX_10|MBX_1;
1819 mcp->in_mb |= MBX_1;
1820 } else if (HAS_EXTENDED_IDS(ha)) {
1821 mcp->mb[1] = fcport->loop_id;
1822 mcp->mb[10] = opt;
1823 mcp->out_mb |= MBX_10|MBX_1;
1824 } else {
1825 mcp->mb[1] = fcport->loop_id << 8 | opt;
1826 mcp->out_mb |= MBX_1;
1827 }
1828 mcp->buf_size = IS_FWI2_CAPABLE(ha) ?
1829 PORT_DATABASE_24XX_SIZE : PORT_DATABASE_SIZE;
1830 mcp->flags = MBX_DMA_IN;
1831 mcp->tov = (ha->login_timeout * 2) + (ha->login_timeout / 2);
1832 rval = qla2x00_mailbox_command(vha, mcp);
1833 if (rval != QLA_SUCCESS)
1834 goto gpd_error_out;
1835
1836 if (IS_FWI2_CAPABLE(ha)) {
1837 uint64_t zero = 0;
1838 u8 current_login_state, last_login_state;
1839
1840 pd24 = (struct port_database_24xx *) pd;
1841
1842 /* Check for logged in state. */
1843 if (fcport->fc4f_nvme) {
1844 current_login_state = pd24->current_login_state >> 4;
1845 last_login_state = pd24->last_login_state >> 4;
1846 } else {
1847 current_login_state = pd24->current_login_state & 0xf;
1848 last_login_state = pd24->last_login_state & 0xf;
1849 }
1850 fcport->current_login_state = pd24->current_login_state;
1851 fcport->last_login_state = pd24->last_login_state;
1852
1853 /* Check for logged in state. */
1854 if (current_login_state != PDS_PRLI_COMPLETE &&
1855 last_login_state != PDS_PRLI_COMPLETE) {
1856 ql_dbg(ql_dbg_mbx, vha, 0x119a,
1857 "Unable to verify login-state (%x/%x) for loop_id %x.\n",
1858 current_login_state, last_login_state,
1859 fcport->loop_id);
1860 rval = QLA_FUNCTION_FAILED;
1861
1862 if (!fcport->query)
1863 goto gpd_error_out;
1864 }
1865
1866 if (fcport->loop_id == FC_NO_LOOP_ID ||
1867 (memcmp(fcport->port_name, (uint8_t *)&zero, 8) &&
1868 memcmp(fcport->port_name, pd24->port_name, 8))) {
1869 /* We lost the device mid way. */
1870 rval = QLA_NOT_LOGGED_IN;
1871 goto gpd_error_out;
1872 }
1873
1874 /* Names are little-endian. */
1875 memcpy(fcport->node_name, pd24->node_name, WWN_SIZE);
1876 memcpy(fcport->port_name, pd24->port_name, WWN_SIZE);
1877
1878 /* Get port_id of device. */
1879 fcport->d_id.b.domain = pd24->port_id[0];
1880 fcport->d_id.b.area = pd24->port_id[1];
1881 fcport->d_id.b.al_pa = pd24->port_id[2];
1882 fcport->d_id.b.rsvd_1 = 0;
1883
1884 /* If not target must be initiator or unknown type. */
1885 if ((pd24->prli_svc_param_word_3[0] & BIT_4) == 0)
1886 fcport->port_type = FCT_INITIATOR;
1887 else
1888 fcport->port_type = FCT_TARGET;
1889
1890 /* Passback COS information. */
1891 fcport->supported_classes = (pd24->flags & PDF_CLASS_2) ?
1892 FC_COS_CLASS2 : FC_COS_CLASS3;
1893
1894 if (pd24->prli_svc_param_word_3[0] & BIT_7)
1895 fcport->flags |= FCF_CONF_COMP_SUPPORTED;
1896 } else {
1897 uint64_t zero = 0;
1898
1899 /* Check for logged in state. */
1900 if (pd->master_state != PD_STATE_PORT_LOGGED_IN &&
1901 pd->slave_state != PD_STATE_PORT_LOGGED_IN) {
1902 ql_dbg(ql_dbg_mbx, vha, 0x100a,
1903 "Unable to verify login-state (%x/%x) - "
1904 "portid=%02x%02x%02x.\n", pd->master_state,
1905 pd->slave_state, fcport->d_id.b.domain,
1906 fcport->d_id.b.area, fcport->d_id.b.al_pa);
1907 rval = QLA_FUNCTION_FAILED;
1908 goto gpd_error_out;
1909 }
1910
1911 if (fcport->loop_id == FC_NO_LOOP_ID ||
1912 (memcmp(fcport->port_name, (uint8_t *)&zero, 8) &&
1913 memcmp(fcport->port_name, pd->port_name, 8))) {
1914 /* We lost the device mid way. */
1915 rval = QLA_NOT_LOGGED_IN;
1916 goto gpd_error_out;
1917 }
1918
1919 /* Names are little-endian. */
1920 memcpy(fcport->node_name, pd->node_name, WWN_SIZE);
1921 memcpy(fcport->port_name, pd->port_name, WWN_SIZE);
1922
1923 /* Get port_id of device. */
1924 fcport->d_id.b.domain = pd->port_id[0];
1925 fcport->d_id.b.area = pd->port_id[3];
1926 fcport->d_id.b.al_pa = pd->port_id[2];
1927 fcport->d_id.b.rsvd_1 = 0;
1928
1929 /* If not target must be initiator or unknown type. */
1930 if ((pd->prli_svc_param_word_3[0] & BIT_4) == 0)
1931 fcport->port_type = FCT_INITIATOR;
1932 else
1933 fcport->port_type = FCT_TARGET;
1934
1935 /* Passback COS information. */
1936 fcport->supported_classes = (pd->options & BIT_4) ?
1937 FC_COS_CLASS2: FC_COS_CLASS3;
1938 }
1939
1940 gpd_error_out:
1941 dma_pool_free(ha->s_dma_pool, pd, pd_dma);
1942 fcport->query = 0;
1943
1944 if (rval != QLA_SUCCESS) {
1945 ql_dbg(ql_dbg_mbx, vha, 0x1052,
1946 "Failed=%x mb[0]=%x mb[1]=%x.\n", rval,
1947 mcp->mb[0], mcp->mb[1]);
1948 } else {
1949 ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x1053,
1950 "Done %s.\n", __func__);
1951 }
1952
1953 return rval;
1954 }
1955
1956 /*
1957 * qla2x00_get_firmware_state
1958 * Get adapter firmware state.
1959 *
1960 * Input:
1961 * ha = adapter block pointer.
1962 * dptr = pointer for firmware state.
1963 * TARGET_QUEUE_LOCK must be released.
1964 * ADAPTER_STATE_LOCK must be released.
1965 *
1966 * Returns:
1967 * qla2x00 local function return status code.
1968 *
1969 * Context:
1970 * Kernel context.
1971 */
1972 int
1973 qla2x00_get_firmware_state(scsi_qla_host_t *vha, uint16_t *states)
1974 {
1975 int rval;
1976 mbx_cmd_t mc;
1977 mbx_cmd_t *mcp = &mc;
1978 struct qla_hw_data *ha = vha->hw;
1979
1980 ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x1054,
1981 "Entered %s.\n", __func__);
1982
1983 mcp->mb[0] = MBC_GET_FIRMWARE_STATE;
1984 mcp->out_mb = MBX_0;
1985 if (IS_FWI2_CAPABLE(vha->hw))
1986 mcp->in_mb = MBX_6|MBX_5|MBX_4|MBX_3|MBX_2|MBX_1|MBX_0;
1987 else
1988 mcp->in_mb = MBX_1|MBX_0;
1989 mcp->tov = MBX_TOV_SECONDS;
1990 mcp->flags = 0;
1991 rval = qla2x00_mailbox_command(vha, mcp);
1992
1993 /* Return firmware states. */
1994 states[0] = mcp->mb[1];
1995 if (IS_FWI2_CAPABLE(vha->hw)) {
1996 states[1] = mcp->mb[2];
1997 states[2] = mcp->mb[3]; /* SFP info */
1998 states[3] = mcp->mb[4];
1999 states[4] = mcp->mb[5];
2000 states[5] = mcp->mb[6]; /* DPORT status */
2001 }
2002
2003 if (rval != QLA_SUCCESS) {
2004 /*EMPTY*/
2005 ql_dbg(ql_dbg_mbx, vha, 0x1055, "Failed=%x.\n", rval);
2006 } else {
2007 if (IS_QLA27XX(ha)) {
2008 if (mcp->mb[2] == 6 || mcp->mb[3] == 2)
2009 ql_dbg(ql_dbg_mbx, vha, 0x119e,
2010 "Invalid SFP/Validation Failed\n");
2011 }
2012 ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x1056,
2013 "Done %s.\n", __func__);
2014 }
2015
2016 return rval;
2017 }
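/*
 * Illustrative usage sketch (not part of the driver): the caller must
 * supply room for every state word filled in above -- six uint16_t
 * entries on FWI2-capable adapters.  FSTATE_READY is assumed here to be
 * the "firmware ready" state value from qla_def.h.
 *
 *	uint16_t state[6] = { 0 };
 *	bool fw_ready;
 *
 *	fw_ready = qla2x00_get_firmware_state(vha, state) == QLA_SUCCESS &&
 *	    state[0] == FSTATE_READY;
 *
 * On FWI2-capable parts state[2] then carries the SFP word and state[5]
 * the D-Port status word, mirroring the mb[3]/mb[6] mapping above.
 */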
2018
2019 /*
2020 * qla2x00_get_port_name
2021 * Issue get port name mailbox command.
2022 * Returned name is in big endian format.
2023 *
2024 * Input:
2025 * ha = adapter block pointer.
2026 * loop_id = loop ID of device.
2027 * name = pointer for name.
2028 * TARGET_QUEUE_LOCK must be released.
2029 * ADAPTER_STATE_LOCK must be released.
2030 *
2031 * Returns:
2032 * qla2x00 local function return status code.
2033 *
2034 * Context:
2035 * Kernel context.
2036 */
2037 int
2038 qla2x00_get_port_name(scsi_qla_host_t *vha, uint16_t loop_id, uint8_t *name,
2039 uint8_t opt)
2040 {
2041 int rval;
2042 mbx_cmd_t mc;
2043 mbx_cmd_t *mcp = &mc;
2044
2045 ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x1057,
2046 "Entered %s.\n", __func__);
2047
2048 mcp->mb[0] = MBC_GET_PORT_NAME;
2049 mcp->mb[9] = vha->vp_idx;
2050 mcp->out_mb = MBX_9|MBX_1|MBX_0;
2051 if (HAS_EXTENDED_IDS(vha->hw)) {
2052 mcp->mb[1] = loop_id;
2053 mcp->mb[10] = opt;
2054 mcp->out_mb |= MBX_10;
2055 } else {
2056 mcp->mb[1] = loop_id << 8 | opt;
2057 }
2058
2059 mcp->in_mb = MBX_7|MBX_6|MBX_3|MBX_2|MBX_1|MBX_0;
2060 mcp->tov = MBX_TOV_SECONDS;
2061 mcp->flags = 0;
2062 rval = qla2x00_mailbox_command(vha, mcp);
2063
2064 if (rval != QLA_SUCCESS) {
2065 /*EMPTY*/
2066 ql_dbg(ql_dbg_mbx, vha, 0x1058, "Failed=%x.\n", rval);
2067 } else {
2068 if (name != NULL) {
2069 /* This function returns name in big endian. */
2070 name[0] = MSB(mcp->mb[2]);
2071 name[1] = LSB(mcp->mb[2]);
2072 name[2] = MSB(mcp->mb[3]);
2073 name[3] = LSB(mcp->mb[3]);
2074 name[4] = MSB(mcp->mb[6]);
2075 name[5] = LSB(mcp->mb[6]);
2076 name[6] = MSB(mcp->mb[7]);
2077 name[7] = LSB(mcp->mb[7]);
2078 }
2079
2080 ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x1059,
2081 "Done %s.\n", __func__);
2082 }
2083
2084 return rval;
2085 }
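/*
 * Illustrative usage sketch (not part of the driver): fetch the
 * big-endian WWPN of a logged-in port into a WWN_SIZE buffer.  The opt
 * value is passed through to the firmware; 0 is used here purely for
 * illustration, and fcport is an assumed local fc_port pointer.
 *
 *	uint8_t wwpn[WWN_SIZE];
 *	int rc;
 *
 *	rc = qla2x00_get_port_name(vha, fcport->loop_id, wwpn, 0);
 *	if (rc == QLA_SUCCESS)
 *		memcpy(fcport->port_name, wwpn, WWN_SIZE);
 */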
2086
2087 /*
2088 * qla24xx_link_initialize
2089 * Issue link initialization mailbox command.
2090 *
2091 * Input:
2092 * ha = adapter block pointer.
2093 * TARGET_QUEUE_LOCK must be released.
2094 * ADAPTER_STATE_LOCK must be released.
2095 *
2096 * Returns:
2097 * qla2x00 local function return status code.
2098 *
2099 * Context:
2100 * Kernel context.
2101 */
2102 int
2103 qla24xx_link_initialize(scsi_qla_host_t *vha)
2104 {
2105 int rval;
2106 mbx_cmd_t mc;
2107 mbx_cmd_t *mcp = &mc;
2108
2109 ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x1152,
2110 "Entered %s.\n", __func__);
2111
2112 if (!IS_FWI2_CAPABLE(vha->hw) || IS_CNA_CAPABLE(vha->hw))
2113 return QLA_FUNCTION_FAILED;
2114
2115 mcp->mb[0] = MBC_LINK_INITIALIZATION;
2116 mcp->mb[1] = BIT_4;
2117 if (vha->hw->operating_mode == LOOP)
2118 mcp->mb[1] |= BIT_6;
2119 else
2120 mcp->mb[1] |= BIT_5;
2121 mcp->mb[2] = 0;
2122 mcp->mb[3] = 0;
2123 mcp->out_mb = MBX_3|MBX_2|MBX_1|MBX_0;
2124 mcp->in_mb = MBX_0;
2125 mcp->tov = MBX_TOV_SECONDS;
2126 mcp->flags = 0;
2127 rval = qla2x00_mailbox_command(vha, mcp);
2128
2129 if (rval != QLA_SUCCESS) {
2130 ql_dbg(ql_dbg_mbx, vha, 0x1153, "Failed=%x.\n", rval);
2131 } else {
2132 ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x1154,
2133 "Done %s.\n", __func__);
2134 }
2135
2136 return rval;
2137 }
2138
2139 /*
2140 * qla2x00_lip_reset
2141 * Issue LIP reset mailbox command.
2142 *
2143 * Input:
2144 * ha = adapter block pointer.
2145 * TARGET_QUEUE_LOCK must be released.
2146 * ADAPTER_STATE_LOCK must be released.
2147 *
2148 * Returns:
2149 * qla2x00 local function return status code.
2150 *
2151 * Context:
2152 * Kernel context.
2153 */
2154 int
2155 qla2x00_lip_reset(scsi_qla_host_t *vha)
2156 {
2157 int rval;
2158 mbx_cmd_t mc;
2159 mbx_cmd_t *mcp = &mc;
2160
2161 ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x105a,
2162 "Entered %s.\n", __func__);
2163
2164 if (IS_CNA_CAPABLE(vha->hw)) {
2165 /* Logout across all FCFs. */
2166 mcp->mb[0] = MBC_LIP_FULL_LOGIN;
2167 mcp->mb[1] = BIT_1;
2168 mcp->mb[2] = 0;
2169 mcp->out_mb = MBX_2|MBX_1|MBX_0;
2170 } else if (IS_FWI2_CAPABLE(vha->hw)) {
2171 mcp->mb[0] = MBC_LIP_FULL_LOGIN;
2172 mcp->mb[1] = BIT_6;
2173 mcp->mb[2] = 0;
2174 mcp->mb[3] = vha->hw->loop_reset_delay;
2175 mcp->out_mb = MBX_3|MBX_2|MBX_1|MBX_0;
2176 } else {
2177 mcp->mb[0] = MBC_LIP_RESET;
2178 mcp->out_mb = MBX_3|MBX_2|MBX_1|MBX_0;
2179 if (HAS_EXTENDED_IDS(vha->hw)) {
2180 mcp->mb[1] = 0x00ff;
2181 mcp->mb[10] = 0;
2182 mcp->out_mb |= MBX_10;
2183 } else {
2184 mcp->mb[1] = 0xff00;
2185 }
2186 mcp->mb[2] = vha->hw->loop_reset_delay;
2187 mcp->mb[3] = 0;
2188 }
2189 mcp->in_mb = MBX_0;
2190 mcp->tov = MBX_TOV_SECONDS;
2191 mcp->flags = 0;
2192 rval = qla2x00_mailbox_command(vha, mcp);
2193
2194 if (rval != QLA_SUCCESS) {
2195 /*EMPTY*/
2196 ql_dbg(ql_dbg_mbx, vha, 0x105b, "Failed=%x.\n", rval);
2197 } else {
2198 /*EMPTY*/
2199 ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x105c,
2200 "Done %s.\n", __func__);
2201 }
2202
2203 return rval;
2204 }
2205
2206 /*
2207 * qla2x00_send_sns
2208 * Send SNS command.
2209 *
2210 * Input:
2211 * ha = adapter block pointer.
2212 * sns = pointer for command.
2213 * cmd_size = command size.
2214 * buf_size = response/command size.
2215 * TARGET_QUEUE_LOCK must be released.
2216 * ADAPTER_STATE_LOCK must be released.
2217 *
2218 * Returns:
2219 * qla2x00 local function return status code.
2220 *
2221 * Context:
2222 * Kernel context.
2223 */
2224 int
2225 qla2x00_send_sns(scsi_qla_host_t *vha, dma_addr_t sns_phys_address,
2226 uint16_t cmd_size, size_t buf_size)
2227 {
2228 int rval;
2229 mbx_cmd_t mc;
2230 mbx_cmd_t *mcp = &mc;
2231
2232 ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x105d,
2233 "Entered %s.\n", __func__);
2234
2235 ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x105e,
2236 "Retry cnt=%d ratov=%d total tov=%d.\n",
2237 vha->hw->retry_count, vha->hw->login_timeout, mcp->tov);
2238
2239 mcp->mb[0] = MBC_SEND_SNS_COMMAND;
2240 mcp->mb[1] = cmd_size;
2241 mcp->mb[2] = MSW(sns_phys_address);
2242 mcp->mb[3] = LSW(sns_phys_address);
2243 mcp->mb[6] = MSW(MSD(sns_phys_address));
2244 mcp->mb[7] = LSW(MSD(sns_phys_address));
2245 mcp->out_mb = MBX_7|MBX_6|MBX_3|MBX_2|MBX_1|MBX_0;
2246 mcp->in_mb = MBX_0|MBX_1;
2247 mcp->buf_size = buf_size;
2248 mcp->flags = MBX_DMA_OUT|MBX_DMA_IN;
2249 mcp->tov = (vha->hw->login_timeout * 2) + (vha->hw->login_timeout / 2);
2250 rval = qla2x00_mailbox_command(vha, mcp);
2251
2252 if (rval != QLA_SUCCESS) {
2253 /*EMPTY*/
2254 ql_dbg(ql_dbg_mbx, vha, 0x105f,
2255 "Failed=%x mb[0]=%x mb[1]=%x.\n",
2256 rval, mcp->mb[0], mcp->mb[1]);
2257 } else {
2258 /*EMPTY*/
2259 ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x1060,
2260 "Done %s.\n", __func__);
2261 }
2262
2263 return rval;
2264 }
2265
2266 int
2267 qla24xx_login_fabric(scsi_qla_host_t *vha, uint16_t loop_id, uint8_t domain,
2268 uint8_t area, uint8_t al_pa, uint16_t *mb, uint8_t opt)
2269 {
2270 int rval;
2271
2272 struct logio_entry_24xx *lg;
2273 dma_addr_t lg_dma;
2274 uint32_t iop[2];
2275 struct qla_hw_data *ha = vha->hw;
2276 struct req_que *req;
2277
2278 ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x1061,
2279 "Entered %s.\n", __func__);
2280
2281 if (vha->vp_idx && vha->qpair)
2282 req = vha->qpair->req;
2283 else
2284 req = ha->req_q_map[0];
2285
2286 lg = dma_pool_zalloc(ha->s_dma_pool, GFP_KERNEL, &lg_dma);
2287 if (lg == NULL) {
2288 ql_log(ql_log_warn, vha, 0x1062,
2289 "Failed to allocate login IOCB.\n");
2290 return QLA_MEMORY_ALLOC_FAILED;
2291 }
2292
2293 lg->entry_type = LOGINOUT_PORT_IOCB_TYPE;
2294 lg->entry_count = 1;
2295 lg->handle = MAKE_HANDLE(req->id, lg->handle);
2296 lg->nport_handle = cpu_to_le16(loop_id);
2297 lg->control_flags = cpu_to_le16(LCF_COMMAND_PLOGI);
2298 if (opt & BIT_0)
2299 lg->control_flags |= cpu_to_le16(LCF_COND_PLOGI);
2300 if (opt & BIT_1)
2301 lg->control_flags |= cpu_to_le16(LCF_SKIP_PRLI);
2302 lg->port_id[0] = al_pa;
2303 lg->port_id[1] = area;
2304 lg->port_id[2] = domain;
2305 lg->vp_index = vha->vp_idx;
2306 rval = qla2x00_issue_iocb_timeout(vha, lg, lg_dma, 0,
2307 (ha->r_a_tov / 10 * 2) + 2);
2308 if (rval != QLA_SUCCESS) {
2309 ql_dbg(ql_dbg_mbx, vha, 0x1063,
2310 "Failed to issue login IOCB (%x).\n", rval);
2311 } else if (lg->entry_status != 0) {
2312 ql_dbg(ql_dbg_mbx, vha, 0x1064,
2313 "Failed to complete IOCB -- error status (%x).\n",
2314 lg->entry_status);
2315 rval = QLA_FUNCTION_FAILED;
2316 } else if (lg->comp_status != cpu_to_le16(CS_COMPLETE)) {
2317 iop[0] = le32_to_cpu(lg->io_parameter[0]);
2318 iop[1] = le32_to_cpu(lg->io_parameter[1]);
2319
2320 ql_dbg(ql_dbg_mbx, vha, 0x1065,
2321 "Failed to complete IOCB -- completion status (%x) "
2322 "ioparam=%x/%x.\n", le16_to_cpu(lg->comp_status),
2323 iop[0], iop[1]);
2324
2325 switch (iop[0]) {
2326 case LSC_SCODE_PORTID_USED:
2327 mb[0] = MBS_PORT_ID_USED;
2328 mb[1] = LSW(iop[1]);
2329 break;
2330 case LSC_SCODE_NPORT_USED:
2331 mb[0] = MBS_LOOP_ID_USED;
2332 break;
2333 case LSC_SCODE_NOLINK:
2334 case LSC_SCODE_NOIOCB:
2335 case LSC_SCODE_NOXCB:
2336 case LSC_SCODE_CMD_FAILED:
2337 case LSC_SCODE_NOFABRIC:
2338 case LSC_SCODE_FW_NOT_READY:
2339 case LSC_SCODE_NOT_LOGGED_IN:
2340 case LSC_SCODE_NOPCB:
2341 case LSC_SCODE_ELS_REJECT:
2342 case LSC_SCODE_CMD_PARAM_ERR:
2343 case LSC_SCODE_NONPORT:
2344 case LSC_SCODE_LOGGED_IN:
2345 case LSC_SCODE_NOFLOGI_ACC:
2346 default:
2347 mb[0] = MBS_COMMAND_ERROR;
2348 break;
2349 }
2350 } else {
2351 ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x1066,
2352 "Done %s.\n", __func__);
2353
2354 iop[0] = le32_to_cpu(lg->io_parameter[0]);
2355
2356 mb[0] = MBS_COMMAND_COMPLETE;
2357 mb[1] = 0;
2358 if (iop[0] & BIT_4) {
2359 if (iop[0] & BIT_8)
2360 mb[1] |= BIT_1;
2361 } else
2362 mb[1] = BIT_0;
2363
2364 /* Passback COS information. */
2365 mb[10] = 0;
2366 if (lg->io_parameter[7] || lg->io_parameter[8])
2367 mb[10] |= BIT_0; /* Class 2. */
2368 if (lg->io_parameter[9] || lg->io_parameter[10])
2369 mb[10] |= BIT_1; /* Class 3. */
2370 if (lg->io_parameter[0] & cpu_to_le32(BIT_7))
2371 mb[10] |= BIT_7; /* Confirmed Completion
2372 * Allowed
2373 */
2374 }
2375
2376 dma_pool_free(ha->s_dma_pool, lg, lg_dma);
2377
2378 return rval;
2379 }
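/*
 * Illustrative usage sketch (not part of the driver): the caller owns
 * interpretation of the mailbox-style status words this routine
 * synthesizes from the login IOCB.  The fcport pointer and the opt value
 * (BIT_0 = conditional PLOGI, per the flag handling above) are
 * assumptions for illustration; mb[] must cover at least index 10.
 *
 *	uint16_t mb[16] = { 0 };
 *	int rc;
 *
 *	rc = qla24xx_login_fabric(vha, fcport->loop_id,
 *	    fcport->d_id.b.domain, fcport->d_id.b.area,
 *	    fcport->d_id.b.al_pa, mb, BIT_0);
 *	if (rc == QLA_SUCCESS && mb[0] == MBS_PORT_ID_USED)
 *		fcport->loop_id = mb[1];
 *
 * MBS_PORT_ID_USED returns the loop ID already holding that port ID in
 * mb[1]; MBS_COMMAND_COMPLETE leaves class-of-service bits in mb[10].
 */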
2380
2381 /*
2382 * qla2x00_login_fabric
2383 * Issue login fabric port mailbox command.
2384 *
2385 * Input:
2386 * ha = adapter block pointer.
2387 * loop_id = device loop ID.
2388 * domain = device domain.
2389 * area = device area.
2390 * al_pa = device AL_PA.
2391 * mb = pointer for returned mailbox status words.
2392 * opt = command options.
2393 * TARGET_QUEUE_LOCK must be released.
2394 * ADAPTER_STATE_LOCK must be released.
2395 *
2396 * Returns:
2397 * qla2x00 local function return status code.
2398 *
2399 * Context:
2400 * Kernel context.
2401 */
2402 int
2403 qla2x00_login_fabric(scsi_qla_host_t *vha, uint16_t loop_id, uint8_t domain,
2404 uint8_t area, uint8_t al_pa, uint16_t *mb, uint8_t opt)
2405 {
2406 int rval;
2407 mbx_cmd_t mc;
2408 mbx_cmd_t *mcp = &mc;
2409 struct qla_hw_data *ha = vha->hw;
2410
2411 ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x1067,
2412 "Entered %s.\n", __func__);
2413
2414 mcp->mb[0] = MBC_LOGIN_FABRIC_PORT;
2415 mcp->out_mb = MBX_3|MBX_2|MBX_1|MBX_0;
2416 if (HAS_EXTENDED_IDS(ha)) {
2417 mcp->mb[1] = loop_id;
2418 mcp->mb[10] = opt;
2419 mcp->out_mb |= MBX_10;
2420 } else {
2421 mcp->mb[1] = (loop_id << 8) | opt;
2422 }
2423 mcp->mb[2] = domain;
2424 mcp->mb[3] = area << 8 | al_pa;
2425
2426 mcp->in_mb = MBX_7|MBX_6|MBX_2|MBX_1|MBX_0;
2427 mcp->tov = (ha->login_timeout * 2) + (ha->login_timeout / 2);
2428 mcp->flags = 0;
2429 rval = qla2x00_mailbox_command(vha, mcp);
2430
2431 /* Return mailbox statuses. */
2432 if (mb != NULL) {
2433 mb[0] = mcp->mb[0];
2434 mb[1] = mcp->mb[1];
2435 mb[2] = mcp->mb[2];
2436 mb[6] = mcp->mb[6];
2437 mb[7] = mcp->mb[7];
2438 /* COS retrieved from Get-Port-Database mailbox command. */
2439 mb[10] = 0;
2440 }
2441
2442 if (rval != QLA_SUCCESS) {
2443 /* RLU tmp code: need to change main mailbox_command function to
2444 * return ok even when the mailbox completion value is not
2445 * SUCCESS. The caller needs to be responsible to interpret
2446 * the return values of this mailbox command if we're not
2447 * to change too much of the existing code.
2448 */
2449 if (mcp->mb[0] == 0x4001 || mcp->mb[0] == 0x4002 ||
2450 mcp->mb[0] == 0x4003 || mcp->mb[0] == 0x4005 ||
2451 mcp->mb[0] == 0x4006)
2452 rval = QLA_SUCCESS;
2453
2454 /*EMPTY*/
2455 ql_dbg(ql_dbg_mbx, vha, 0x1068,
2456 "Failed=%x mb[0]=%x mb[1]=%x mb[2]=%x.\n",
2457 rval, mcp->mb[0], mcp->mb[1], mcp->mb[2]);
2458 } else {
2459 /*EMPTY*/
2460 ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x1069,
2461 "Done %s.\n", __func__);
2462 }
2463
2464 return rval;
2465 }
2466
2467 /*
2468 * qla2x00_login_local_device
2469 * Issue login loop port mailbox command.
2470 *
2471 * Input:
2472 * ha = adapter block pointer.
2473 * loop_id = device loop ID.
2474 * opt = command options.
2475 *
2476 * Returns:
2477 * Return status code.
2478 *
2479 * Context:
2480 * Kernel context.
2481 *
2482 */
2483 int
2484 qla2x00_login_local_device(scsi_qla_host_t *vha, fc_port_t *fcport,
2485 uint16_t *mb_ret, uint8_t opt)
2486 {
2487 int rval;
2488 mbx_cmd_t mc;
2489 mbx_cmd_t *mcp = &mc;
2490 struct qla_hw_data *ha = vha->hw;
2491
2492 ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x106a,
2493 "Entered %s.\n", __func__);
2494
2495 if (IS_FWI2_CAPABLE(ha))
2496 return qla24xx_login_fabric(vha, fcport->loop_id,
2497 fcport->d_id.b.domain, fcport->d_id.b.area,
2498 fcport->d_id.b.al_pa, mb_ret, opt);
2499
2500 mcp->mb[0] = MBC_LOGIN_LOOP_PORT;
2501 if (HAS_EXTENDED_IDS(ha))
2502 mcp->mb[1] = fcport->loop_id;
2503 else
2504 mcp->mb[1] = fcport->loop_id << 8;
2505 mcp->mb[2] = opt;
2506 mcp->out_mb = MBX_2|MBX_1|MBX_0;
2507 mcp->in_mb = MBX_7|MBX_6|MBX_1|MBX_0;
2508 mcp->tov = (ha->login_timeout * 2) + (ha->login_timeout / 2);
2509 mcp->flags = 0;
2510 rval = qla2x00_mailbox_command(vha, mcp);
2511
2512 /* Return mailbox statuses. */
2513 if (mb_ret != NULL) {
2514 mb_ret[0] = mcp->mb[0];
2515 mb_ret[1] = mcp->mb[1];
2516 mb_ret[6] = mcp->mb[6];
2517 mb_ret[7] = mcp->mb[7];
2518 }
2519
2520 if (rval != QLA_SUCCESS) {
2521 /* AV tmp code: need to change main mailbox_command function to
2522 * return ok even when the mailbox completion value is not
2523 * SUCCESS. The caller needs to be responsible to interpret
2524 * the return values of this mailbox command if we're not
2525 * to change too much of the existing code.
2526 */
2527 if (mcp->mb[0] == 0x4005 || mcp->mb[0] == 0x4006)
2528 rval = QLA_SUCCESS;
2529
2530 ql_dbg(ql_dbg_mbx, vha, 0x106b,
2531 "Failed=%x mb[0]=%x mb[1]=%x mb[6]=%x mb[7]=%x.\n",
2532 rval, mcp->mb[0], mcp->mb[1], mcp->mb[6], mcp->mb[7]);
2533 } else {
2534 /*EMPTY*/
2535 ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x106c,
2536 "Done %s.\n", __func__);
2537 }
2538
2539 return (rval);
2540 }
2541
2542 int
2543 qla24xx_fabric_logout(scsi_qla_host_t *vha, uint16_t loop_id, uint8_t domain,
2544 uint8_t area, uint8_t al_pa)
2545 {
2546 int rval;
2547 struct logio_entry_24xx *lg;
2548 dma_addr_t lg_dma;
2549 struct qla_hw_data *ha = vha->hw;
2550 struct req_que *req;
2551
2552 ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x106d,
2553 "Entered %s.\n", __func__);
2554
2555 lg = dma_pool_zalloc(ha->s_dma_pool, GFP_KERNEL, &lg_dma);
2556 if (lg == NULL) {
2557 ql_log(ql_log_warn, vha, 0x106e,
2558 "Failed to allocate logout IOCB.\n");
2559 return QLA_MEMORY_ALLOC_FAILED;
2560 }
2561
2562 req = vha->req;
2563 lg->entry_type = LOGINOUT_PORT_IOCB_TYPE;
2564 lg->entry_count = 1;
2565 lg->handle = MAKE_HANDLE(req->id, lg->handle);
2566 lg->nport_handle = cpu_to_le16(loop_id);
2567 lg->control_flags =
2568 cpu_to_le16(LCF_COMMAND_LOGO|LCF_IMPL_LOGO|
2569 LCF_FREE_NPORT);
2570 lg->port_id[0] = al_pa;
2571 lg->port_id[1] = area;
2572 lg->port_id[2] = domain;
2573 lg->vp_index = vha->vp_idx;
2574 rval = qla2x00_issue_iocb_timeout(vha, lg, lg_dma, 0,
2575 (ha->r_a_tov / 10 * 2) + 2);
2576 if (rval != QLA_SUCCESS) {
2577 ql_dbg(ql_dbg_mbx, vha, 0x106f,
2578 "Failed to issue logout IOCB (%x).\n", rval);
2579 } else if (lg->entry_status != 0) {
2580 ql_dbg(ql_dbg_mbx, vha, 0x1070,
2581 "Failed to complete IOCB -- error status (%x).\n",
2582 lg->entry_status);
2583 rval = QLA_FUNCTION_FAILED;
2584 } else if (lg->comp_status != cpu_to_le16(CS_COMPLETE)) {
2585 ql_dbg(ql_dbg_mbx, vha, 0x1071,
2586 "Failed to complete IOCB -- completion status (%x) "
2587 "ioparam=%x/%x.\n", le16_to_cpu(lg->comp_status),
2588 le32_to_cpu(lg->io_parameter[0]),
2589 le32_to_cpu(lg->io_parameter[1]));
2590 } else {
2591 /*EMPTY*/
2592 ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x1072,
2593 "Done %s.\n", __func__);
2594 }
2595
2596 dma_pool_free(ha->s_dma_pool, lg, lg_dma);
2597
2598 return rval;
2599 }
2600
2601 /*
2602 * qla2x00_fabric_logout
2603 * Issue logout fabric port mailbox command.
2604 *
2605 * Input:
2606 * ha = adapter block pointer.
2607 * loop_id = device loop ID.
2608 * TARGET_QUEUE_LOCK must be released.
2609 * ADAPTER_STATE_LOCK must be released.
2610 *
2611 * Returns:
2612 * qla2x00 local function return status code.
2613 *
2614 * Context:
2615 * Kernel context.
2616 */
2617 int
2618 qla2x00_fabric_logout(scsi_qla_host_t *vha, uint16_t loop_id, uint8_t domain,
2619 uint8_t area, uint8_t al_pa)
2620 {
2621 int rval;
2622 mbx_cmd_t mc;
2623 mbx_cmd_t *mcp = &mc;
2624
2625 ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x1073,
2626 "Entered %s.\n", __func__);
2627
2628 mcp->mb[0] = MBC_LOGOUT_FABRIC_PORT;
2629 mcp->out_mb = MBX_1|MBX_0;
2630 if (HAS_EXTENDED_IDS(vha->hw)) {
2631 mcp->mb[1] = loop_id;
2632 mcp->mb[10] = 0;
2633 mcp->out_mb |= MBX_10;
2634 } else {
2635 mcp->mb[1] = loop_id << 8;
2636 }
2637
2638 mcp->in_mb = MBX_1|MBX_0;
2639 mcp->tov = MBX_TOV_SECONDS;
2640 mcp->flags = 0;
2641 rval = qla2x00_mailbox_command(vha, mcp);
2642
2643 if (rval != QLA_SUCCESS) {
2644 /*EMPTY*/
2645 ql_dbg(ql_dbg_mbx, vha, 0x1074,
2646 "Failed=%x mb[1]=%x.\n", rval, mcp->mb[1]);
2647 } else {
2648 /*EMPTY*/
2649 ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x1075,
2650 "Done %s.\n", __func__);
2651 }
2652
2653 return rval;
2654 }
2655
2656 /*
2657 * qla2x00_full_login_lip
2658 * Issue full login LIP mailbox command.
2659 *
2660 * Input:
2661 * ha = adapter block pointer.
2662 * TARGET_QUEUE_LOCK must be released.
2663 * ADAPTER_STATE_LOCK must be released.
2664 *
2665 * Returns:
2666 * qla2x00 local function return status code.
2667 *
2668 * Context:
2669 * Kernel context.
2670 */
2671 int
2672 qla2x00_full_login_lip(scsi_qla_host_t *vha)
2673 {
2674 int rval;
2675 mbx_cmd_t mc;
2676 mbx_cmd_t *mcp = &mc;
2677
2678 ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x1076,
2679 "Entered %s.\n", __func__);
2680
2681 mcp->mb[0] = MBC_LIP_FULL_LOGIN;
2682 mcp->mb[1] = IS_FWI2_CAPABLE(vha->hw) ? BIT_3 : 0;
2683 mcp->mb[2] = 0;
2684 mcp->mb[3] = 0;
2685 mcp->out_mb = MBX_3|MBX_2|MBX_1|MBX_0;
2686 mcp->in_mb = MBX_0;
2687 mcp->tov = MBX_TOV_SECONDS;
2688 mcp->flags = 0;
2689 rval = qla2x00_mailbox_command(vha, mcp);
2690
2691 if (rval != QLA_SUCCESS) {
2692 /*EMPTY*/
2693 ql_dbg(ql_dbg_mbx, vha, 0x1077, "Failed=%x.\n", rval);
2694 } else {
2695 /*EMPTY*/
2696 ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x1078,
2697 "Done %s.\n", __func__);
2698 }
2699
2700 return rval;
2701 }
2702
2703 /*
2704 * qla2x00_get_id_list
2705 *
2706 * Input:
2707 * ha = adapter block pointer.
2708 *
2709 * Returns:
2710 * qla2x00 local function return status code.
2711 *
2712 * Context:
2713 * Kernel context.
2714 */
2715 int
2716 qla2x00_get_id_list(scsi_qla_host_t *vha, void *id_list, dma_addr_t id_list_dma,
2717 uint16_t *entries)
2718 {
2719 int rval;
2720 mbx_cmd_t mc;
2721 mbx_cmd_t *mcp = &mc;
2722
2723 ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x1079,
2724 "Entered %s.\n", __func__);
2725
2726 if (id_list == NULL)
2727 return QLA_FUNCTION_FAILED;
2728
2729 mcp->mb[0] = MBC_GET_ID_LIST;
2730 mcp->out_mb = MBX_0;
2731 if (IS_FWI2_CAPABLE(vha->hw)) {
2732 mcp->mb[2] = MSW(id_list_dma);
2733 mcp->mb[3] = LSW(id_list_dma);
2734 mcp->mb[6] = MSW(MSD(id_list_dma));
2735 mcp->mb[7] = LSW(MSD(id_list_dma));
2736 mcp->mb[8] = 0;
2737 mcp->mb[9] = vha->vp_idx;
2738 mcp->out_mb |= MBX_9|MBX_8|MBX_7|MBX_6|MBX_3|MBX_2;
2739 } else {
2740 mcp->mb[1] = MSW(id_list_dma);
2741 mcp->mb[2] = LSW(id_list_dma);
2742 mcp->mb[3] = MSW(MSD(id_list_dma));
2743 mcp->mb[6] = LSW(MSD(id_list_dma));
2744 mcp->out_mb |= MBX_6|MBX_3|MBX_2|MBX_1;
2745 }
2746 mcp->in_mb = MBX_1|MBX_0;
2747 mcp->tov = MBX_TOV_SECONDS;
2748 mcp->flags = 0;
2749 rval = qla2x00_mailbox_command(vha, mcp);
2750
2751 if (rval != QLA_SUCCESS) {
2752 /*EMPTY*/
2753 ql_dbg(ql_dbg_mbx, vha, 0x107a, "Failed=%x.\n", rval);
2754 } else {
2755 *entries = mcp->mb[1];
2756 ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x107b,
2757 "Done %s.\n", __func__);
2758 }
2759
2760 return rval;
2761 }
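/*
 * Illustrative usage sketch (not part of the driver): the ID list must
 * live in DMA-able memory.  The driver normally reuses its pre-allocated
 * gid_list buffer in qla_hw_data; those field names are assumptions
 * here.
 *
 *	struct qla_hw_data *ha = vha->hw;
 *	uint16_t entries = 0;
 *	int rc;
 *
 *	rc = qla2x00_get_id_list(vha, ha->gid_list, ha->gid_list_dma,
 *	    &entries);
 *
 * On success, entries holds the number of ID-list elements written to
 * the buffer (taken from mb[1] above).
 */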
2762
2763 /*
2764 * qla2x00_get_resource_cnts
2765 * Get current firmware resource counts.
2766 *
2767 * Input:
2768 * ha = adapter block pointer.
2769 *
2770 * Returns:
2771 * qla2x00 local function return status code.
2772 *
2773 * Context:
2774 * Kernel context.
2775 */
2776 int
2777 qla2x00_get_resource_cnts(scsi_qla_host_t *vha)
2778 {
2779 struct qla_hw_data *ha = vha->hw;
2780 int rval;
2781 mbx_cmd_t mc;
2782 mbx_cmd_t *mcp = &mc;
2783
2784 ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x107c,
2785 "Entered %s.\n", __func__);
2786
2787 mcp->mb[0] = MBC_GET_RESOURCE_COUNTS;
2788 mcp->out_mb = MBX_0;
2789 mcp->in_mb = MBX_11|MBX_10|MBX_7|MBX_6|MBX_3|MBX_2|MBX_1|MBX_0;
2790 if (IS_QLA81XX(vha->hw) || IS_QLA83XX(vha->hw) || IS_QLA27XX(vha->hw))
2791 mcp->in_mb |= MBX_12;
2792 mcp->tov = MBX_TOV_SECONDS;
2793 mcp->flags = 0;
2794 rval = qla2x00_mailbox_command(vha, mcp);
2795
2796 if (rval != QLA_SUCCESS) {
2797 /*EMPTY*/
2798 ql_dbg(ql_dbg_mbx, vha, 0x107d,
2799 "Failed mb[0]=%x.\n", mcp->mb[0]);
2800 } else {
2801 ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x107e,
2802 "Done %s mb1=%x mb2=%x mb3=%x mb6=%x mb7=%x mb10=%x "
2803 "mb11=%x mb12=%x.\n", __func__, mcp->mb[1], mcp->mb[2],
2804 mcp->mb[3], mcp->mb[6], mcp->mb[7], mcp->mb[10],
2805 mcp->mb[11], mcp->mb[12]);
2806
2807 ha->orig_fw_tgt_xcb_count = mcp->mb[1];
2808 ha->cur_fw_tgt_xcb_count = mcp->mb[2];
2809 ha->cur_fw_xcb_count = mcp->mb[3];
2810 ha->orig_fw_xcb_count = mcp->mb[6];
2811 ha->cur_fw_iocb_count = mcp->mb[7];
2812 ha->orig_fw_iocb_count = mcp->mb[10];
2813 if (ha->flags.npiv_supported)
2814 ha->max_npiv_vports = mcp->mb[11];
2815 if (IS_QLA81XX(ha) || IS_QLA83XX(ha) || IS_QLA27XX(ha))
2816 ha->fw_max_fcf_count = mcp->mb[12];
2817 }
2818
2819 return (rval);
2820 }
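/*
 * Illustrative usage sketch (not part of the driver): this call has no
 * output parameters; the counts are cached straight into qla_hw_data as
 * shown above, so callers simply read them back afterwards.
 *
 *	struct qla_hw_data *ha = vha->hw;
 *
 *	if (qla2x00_get_resource_cnts(vha) == QLA_SUCCESS)
 *		ql_log(ql_log_info, vha, 0x0000,
 *		    "exchanges %d/%d iocbs %d/%d\n",
 *		    ha->cur_fw_xcb_count, ha->orig_fw_xcb_count,
 *		    ha->cur_fw_iocb_count, ha->orig_fw_iocb_count);
 *
 * The 0x0000 message id is a placeholder, not an allocated qla2xxx
 * debug id.
 */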
2821
2822 /*
2823 * qla2x00_get_fcal_position_map
2824 * Get FCAL (LILP) position map using mailbox command
2825 *
2826 * Input:
2827 * ha = adapter state pointer.
2828 * pos_map = buffer pointer (can be NULL).
2829 *
2830 * Returns:
2831 * qla2x00 local function return status code.
2832 *
2833 * Context:
2834 * Kernel context.
2835 */
2836 int
2837 qla2x00_get_fcal_position_map(scsi_qla_host_t *vha, char *pos_map)
2838 {
2839 int rval;
2840 mbx_cmd_t mc;
2841 mbx_cmd_t *mcp = &mc;
2842 char *pmap;
2843 dma_addr_t pmap_dma;
2844 struct qla_hw_data *ha = vha->hw;
2845
2846 ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x107f,
2847 "Entered %s.\n", __func__);
2848
2849 pmap = dma_pool_zalloc(ha->s_dma_pool, GFP_KERNEL, &pmap_dma);
2850 if (pmap == NULL) {
2851 ql_log(ql_log_warn, vha, 0x1080,
2852 "Memory alloc failed.\n");
2853 return QLA_MEMORY_ALLOC_FAILED;
2854 }
2855
2856 mcp->mb[0] = MBC_GET_FC_AL_POSITION_MAP;
2857 mcp->mb[2] = MSW(pmap_dma);
2858 mcp->mb[3] = LSW(pmap_dma);
2859 mcp->mb[6] = MSW(MSD(pmap_dma));
2860 mcp->mb[7] = LSW(MSD(pmap_dma));
2861 mcp->out_mb = MBX_7|MBX_6|MBX_3|MBX_2|MBX_0;
2862 mcp->in_mb = MBX_1|MBX_0;
2863 mcp->buf_size = FCAL_MAP_SIZE;
2864 mcp->flags = MBX_DMA_IN;
2865 mcp->tov = (ha->login_timeout * 2) + (ha->login_timeout / 2);
2866 rval = qla2x00_mailbox_command(vha, mcp);
2867
2868 if (rval == QLA_SUCCESS) {
2869 ql_dbg(ql_dbg_mbx + ql_dbg_buffer, vha, 0x1081,
2870 "mb0/mb1=%x/%X FC/AL position map size (%x).\n",
2871 mcp->mb[0], mcp->mb[1], (unsigned)pmap[0]);
2872 ql_dump_buffer(ql_dbg_mbx + ql_dbg_buffer, vha, 0x111d,
2873 pmap, pmap[0] + 1);
2874
2875 if (pos_map)
2876 memcpy(pos_map, pmap, FCAL_MAP_SIZE);
2877 }
2878 dma_pool_free(ha->s_dma_pool, pmap, pmap_dma);
2879
2880 if (rval != QLA_SUCCESS) {
2881 ql_dbg(ql_dbg_mbx, vha, 0x1082, "Failed=%x.\n", rval);
2882 } else {
2883 ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x1083,
2884 "Done %s.\n", __func__);
2885 }
2886
2887 return rval;
2888 }
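/*
 * Illustrative usage sketch (not part of the driver): the caller's
 * buffer must be at least FCAL_MAP_SIZE bytes; the first byte of the
 * returned map is its length, as logged above.
 *
 *	char lilp_map[FCAL_MAP_SIZE];
 *	bool have_map;
 *
 *	have_map = qla2x00_get_fcal_position_map(vha, lilp_map) ==
 *	    QLA_SUCCESS && lilp_map[0] != 0;
 *
 * When have_map is true, lilp_map[0] gives the map length and the
 * following bytes list the AL_PA values on the loop (standard LILP
 * layout; stated as background, not taken from this file).
 */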
2889
2890 /*
2891 * qla2x00_get_link_status
2892 *
2893 * Input:
2894 * ha = adapter block pointer.
2895 * loop_id = device loop ID.
2896 * stats = pointer to link statistics return buffer (DMA-able).
2897 * stats_dma = DMA address of the statistics buffer.
2898 *
2899 * Returns:
2900 * qla2x00 local function return status code (QLA_SUCCESS, or
2901 * QLA_FUNCTION_FAILED on a mailbox/completion error).
2902 */
2903 int
2904 qla2x00_get_link_status(scsi_qla_host_t *vha, uint16_t loop_id,
2905 struct link_statistics *stats, dma_addr_t stats_dma)
2906 {
2907 int rval;
2908 mbx_cmd_t mc;
2909 mbx_cmd_t *mcp = &mc;
2910 uint32_t *iter = (void *)stats;
2911 ushort dwords = offsetof(typeof(*stats), link_up_cnt)/sizeof(*iter);
2912 struct qla_hw_data *ha = vha->hw;
2913
2914 ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x1084,
2915 "Entered %s.\n", __func__);
2916
2917 mcp->mb[0] = MBC_GET_LINK_STATUS;
2918 mcp->mb[2] = MSW(LSD(stats_dma));
2919 mcp->mb[3] = LSW(LSD(stats_dma));
2920 mcp->mb[6] = MSW(MSD(stats_dma));
2921 mcp->mb[7] = LSW(MSD(stats_dma));
2922 mcp->out_mb = MBX_7|MBX_6|MBX_3|MBX_2|MBX_0;
2923 mcp->in_mb = MBX_0;
2924 if (IS_FWI2_CAPABLE(ha)) {
2925 mcp->mb[1] = loop_id;
2926 mcp->mb[4] = 0;
2927 mcp->mb[10] = 0;
2928 mcp->out_mb |= MBX_10|MBX_4|MBX_1;
2929 mcp->in_mb |= MBX_1;
2930 } else if (HAS_EXTENDED_IDS(ha)) {
2931 mcp->mb[1] = loop_id;
2932 mcp->mb[10] = 0;
2933 mcp->out_mb |= MBX_10|MBX_1;
2934 } else {
2935 mcp->mb[1] = loop_id << 8;
2936 mcp->out_mb |= MBX_1;
2937 }
2938 mcp->tov = MBX_TOV_SECONDS;
2939 mcp->flags = IOCTL_CMD;
2940 rval = qla2x00_mailbox_command(vha, mcp);
2941
2942 if (rval == QLA_SUCCESS) {
2943 if (mcp->mb[0] != MBS_COMMAND_COMPLETE) {
2944 ql_dbg(ql_dbg_mbx, vha, 0x1085,
2945 "Failed=%x mb[0]=%x.\n", rval, mcp->mb[0]);
2946 rval = QLA_FUNCTION_FAILED;
2947 } else {
2948 /* Re-endianize - firmware data is le32. */
2949 ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x1086,
2950 "Done %s.\n", __func__);
2951 for ( ; dwords--; iter++)
2952 le32_to_cpus(iter);
2953 }
2954 } else {
2955 /* Failed. */
2956 ql_dbg(ql_dbg_mbx, vha, 0x1087, "Failed=%x.\n", rval);
2957 }
2958
2959 return rval;
2960 }
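/*
 * Illustrative usage sketch (not part of the driver): the statistics
 * buffer must be DMA-able; a throwaway coherent allocation is shown
 * here as an assumption, the driver itself keeps a longer-lived buffer.
 *
 *	struct qla_hw_data *ha = vha->hw;
 *	struct link_statistics *st;
 *	dma_addr_t st_dma;
 *	int rc = QLA_MEMORY_ALLOC_FAILED;
 *
 *	st = dma_alloc_coherent(&ha->pdev->dev, sizeof(*st), &st_dma,
 *	    GFP_KERNEL);
 *	if (st) {
 *		rc = qla2x00_get_link_status(vha, vha->loop_id, st, st_dma);
 *		dma_free_coherent(&ha->pdev->dev, sizeof(*st), st, st_dma);
 *	}
 *
 * On success the counters preceding link_up_cnt (per the dwords
 * computation above) are already converted to CPU endianness.
 */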
2961
2962 int
2963 qla24xx_get_isp_stats(scsi_qla_host_t *vha, struct link_statistics *stats,
2964 dma_addr_t stats_dma, uint16_t options)
2965 {
2966 int rval;
2967 mbx_cmd_t mc;
2968 mbx_cmd_t *mcp = &mc;
2969 uint32_t *iter, dwords;
2970
2971 ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x1088,
2972 "Entered %s.\n", __func__);
2973
2974 memset(&mc, 0, sizeof(mc));
2975 mc.mb[0] = MBC_GET_LINK_PRIV_STATS;
2976 mc.mb[2] = MSW(stats_dma);
2977 mc.mb[3] = LSW(stats_dma);
2978 mc.mb[6] = MSW(MSD(stats_dma));
2979 mc.mb[7] = LSW(MSD(stats_dma));
2980 mc.mb[8] = sizeof(struct link_statistics) / 4;
2981 mc.mb[9] = cpu_to_le16(vha->vp_idx);
2982 mc.mb[10] = cpu_to_le16(options);
2983
2984 rval = qla24xx_send_mb_cmd(vha, &mc);
2985
2986 if (rval == QLA_SUCCESS) {
2987 if (mcp->mb[0] != MBS_COMMAND_COMPLETE) {
2988 ql_dbg(ql_dbg_mbx, vha, 0x1089,
2989 "Failed mb[0]=%x.\n", mcp->mb[0]);
2990 rval = QLA_FUNCTION_FAILED;
2991 } else {
2992 ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x108a,
2993 "Done %s.\n", __func__);
2994 /* Re-endianize - firmware data is le32. */
2995 dwords = sizeof(struct link_statistics) / 4;
2996 iter = &stats->link_fail_cnt;
2997 for ( ; dwords--; iter++)
2998 le32_to_cpus(iter);
2999 }
3000 } else {
3001 /* Failed. */
3002 ql_dbg(ql_dbg_mbx, vha, 0x108b, "Failed=%x.\n", rval);
3003 }
3004
3005 return rval;
3006 }
3007
3008 int
3009 qla24xx_abort_command(srb_t *sp)
3010 {
3011 int rval;
3012 unsigned long flags = 0;
3013
3014 struct abort_entry_24xx *abt;
3015 dma_addr_t abt_dma;
3016 uint32_t handle;
3017 fc_port_t *fcport = sp->fcport;
3018 struct scsi_qla_host *vha = fcport->vha;
3019 struct qla_hw_data *ha = vha->hw;
3020 struct req_que *req = vha->req;
3021
3022 ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x108c,
3023 "Entered %s.\n", __func__);
3024
3025 if (vha->flags.qpairs_available && sp->qpair)
3026 req = sp->qpair->req;
3027
3028 if (ql2xasynctmfenable)
3029 return qla24xx_async_abort_command(sp);
3030
3031 spin_lock_irqsave(&ha->hardware_lock, flags);
3032 for (handle = 1; handle < req->num_outstanding_cmds; handle++) {
3033 if (req->outstanding_cmds[handle] == sp)
3034 break;
3035 }
3036 spin_unlock_irqrestore(&ha->hardware_lock, flags);
3037 if (handle == req->num_outstanding_cmds) {
3038 /* Command not found. */
3039 return QLA_FUNCTION_FAILED;
3040 }
3041
3042 abt = dma_pool_zalloc(ha->s_dma_pool, GFP_KERNEL, &abt_dma);
3043 if (abt == NULL) {
3044 ql_log(ql_log_warn, vha, 0x108d,
3045 "Failed to allocate abort IOCB.\n");
3046 return QLA_MEMORY_ALLOC_FAILED;
3047 }
3048
3049 abt->entry_type = ABORT_IOCB_TYPE;
3050 abt->entry_count = 1;
3051 abt->handle = MAKE_HANDLE(req->id, abt->handle);
3052 abt->nport_handle = cpu_to_le16(fcport->loop_id);
3053 abt->handle_to_abort = MAKE_HANDLE(req->id, handle);
3054 abt->port_id[0] = fcport->d_id.b.al_pa;
3055 abt->port_id[1] = fcport->d_id.b.area;
3056 abt->port_id[2] = fcport->d_id.b.domain;
3057 abt->vp_index = fcport->vha->vp_idx;
3058
3059 abt->req_que_no = cpu_to_le16(req->id);
3060
3061 rval = qla2x00_issue_iocb(vha, abt, abt_dma, 0);
3062 if (rval != QLA_SUCCESS) {
3063 ql_dbg(ql_dbg_mbx, vha, 0x108e,
3064 "Failed to issue IOCB (%x).\n", rval);
3065 } else if (abt->entry_status != 0) {
3066 ql_dbg(ql_dbg_mbx, vha, 0x108f,
3067 "Failed to complete IOCB -- error status (%x).\n",
3068 abt->entry_status);
3069 rval = QLA_FUNCTION_FAILED;
3070 } else if (abt->nport_handle != cpu_to_le16(0)) {
3071 ql_dbg(ql_dbg_mbx, vha, 0x1090,
3072 "Failed to complete IOCB -- completion status (%x).\n",
3073 le16_to_cpu(abt->nport_handle));
3074 if (abt->nport_handle == CS_IOCB_ERROR)
3075 rval = QLA_FUNCTION_PARAMETER_ERROR;
3076 else
3077 rval = QLA_FUNCTION_FAILED;
3078 } else {
3079 ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x1091,
3080 "Done %s.\n", __func__);
3081 }
3082
3083 dma_pool_free(ha->s_dma_pool, abt, abt_dma);
3084
3085 return rval;
3086 }
3087
3088 struct tsk_mgmt_cmd {
3089 union {
3090 struct tsk_mgmt_entry tsk;
3091 struct sts_entry_24xx sts;
3092 } p;
3093 };
3094
3095 static int
3096 __qla24xx_issue_tmf(char *name, uint32_t type, struct fc_port *fcport,
3097 uint64_t l, int tag)
3098 {
3099 int rval, rval2;
3100 struct tsk_mgmt_cmd *tsk;
3101 struct sts_entry_24xx *sts;
3102 dma_addr_t tsk_dma;
3103 scsi_qla_host_t *vha;
3104 struct qla_hw_data *ha;
3105 struct req_que *req;
3106 struct rsp_que *rsp;
3107 struct qla_qpair *qpair;
3108
3109 vha = fcport->vha;
3110 ha = vha->hw;
3111 req = vha->req;
3112
3113 ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x1092,
3114 "Entered %s.\n", __func__);
3115
3116 if (vha->vp_idx && vha->qpair) {
3117 /* NPIV port */
3118 qpair = vha->qpair;
3119 rsp = qpair->rsp;
3120 req = qpair->req;
3121 } else {
3122 rsp = req->rsp;
3123 }
3124
3125 tsk = dma_pool_zalloc(ha->s_dma_pool, GFP_KERNEL, &tsk_dma);
3126 if (tsk == NULL) {
3127 ql_log(ql_log_warn, vha, 0x1093,
3128 "Failed to allocate task management IOCB.\n");
3129 return QLA_MEMORY_ALLOC_FAILED;
3130 }
3131
3132 tsk->p.tsk.entry_type = TSK_MGMT_IOCB_TYPE;
3133 tsk->p.tsk.entry_count = 1;
3134 tsk->p.tsk.handle = MAKE_HANDLE(req->id, tsk->p.tsk.handle);
3135 tsk->p.tsk.nport_handle = cpu_to_le16(fcport->loop_id);
3136 tsk->p.tsk.timeout = cpu_to_le16(ha->r_a_tov / 10 * 2);
3137 tsk->p.tsk.control_flags = cpu_to_le32(type);
3138 tsk->p.tsk.port_id[0] = fcport->d_id.b.al_pa;
3139 tsk->p.tsk.port_id[1] = fcport->d_id.b.area;
3140 tsk->p.tsk.port_id[2] = fcport->d_id.b.domain;
3141 tsk->p.tsk.vp_index = fcport->vha->vp_idx;
3142 if (type == TCF_LUN_RESET) {
3143 int_to_scsilun(l, &tsk->p.tsk.lun);
3144 host_to_fcp_swap((uint8_t *)&tsk->p.tsk.lun,
3145 sizeof(tsk->p.tsk.lun));
3146 }
3147
3148 sts = &tsk->p.sts;
3149 rval = qla2x00_issue_iocb(vha, tsk, tsk_dma, 0);
3150 if (rval != QLA_SUCCESS) {
3151 ql_dbg(ql_dbg_mbx, vha, 0x1094,
3152 "Failed to issue %s reset IOCB (%x).\n", name, rval);
3153 } else if (sts->entry_status != 0) {
3154 ql_dbg(ql_dbg_mbx, vha, 0x1095,
3155 "Failed to complete IOCB -- error status (%x).\n",
3156 sts->entry_status);
3157 rval = QLA_FUNCTION_FAILED;
3158 } else if (sts->comp_status != cpu_to_le16(CS_COMPLETE)) {
3159 ql_dbg(ql_dbg_mbx, vha, 0x1096,
3160 "Failed to complete IOCB -- completion status (%x).\n",
3161 le16_to_cpu(sts->comp_status));
3162 rval = QLA_FUNCTION_FAILED;
3163 } else if (le16_to_cpu(sts->scsi_status) &
3164 SS_RESPONSE_INFO_LEN_VALID) {
3165 if (le32_to_cpu(sts->rsp_data_len) < 4) {
3166 ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x1097,
3167 "Ignoring inconsistent data length -- not enough "
3168 "response info (%d).\n",
3169 le32_to_cpu(sts->rsp_data_len));
3170 } else if (sts->data[3]) {
3171 ql_dbg(ql_dbg_mbx, vha, 0x1098,
3172 "Failed to complete IOCB -- response (%x).\n",
3173 sts->data[3]);
3174 rval = QLA_FUNCTION_FAILED;
3175 }
3176 }
3177
3178 /* Issue marker IOCB. */
3179 rval2 = qla2x00_marker(vha, req, rsp, fcport->loop_id, l,
3180 type == TCF_LUN_RESET ? MK_SYNC_ID_LUN: MK_SYNC_ID);
3181 if (rval2 != QLA_SUCCESS) {
3182 ql_dbg(ql_dbg_mbx, vha, 0x1099,
3183 "Failed to issue marker IOCB (%x).\n", rval2);
3184 } else {
3185 ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x109a,
3186 "Done %s.\n", __func__);
3187 }
3188
3189 dma_pool_free(ha->s_dma_pool, tsk, tsk_dma);
3190
3191 return rval;
3192 }
3193
3194 int
3195 qla24xx_abort_target(struct fc_port *fcport, uint64_t l, int tag)
3196 {
3197 struct qla_hw_data *ha = fcport->vha->hw;
3198
3199 if ((ql2xasynctmfenable) && IS_FWI2_CAPABLE(ha))
3200 return qla2x00_async_tm_cmd(fcport, TCF_TARGET_RESET, l, tag);
3201
3202 return __qla24xx_issue_tmf("Target", TCF_TARGET_RESET, fcport, l, tag);
3203 }
3204
3205 int
3206 qla24xx_lun_reset(struct fc_port *fcport, uint64_t l, int tag)
3207 {
3208 struct qla_hw_data *ha = fcport->vha->hw;
3209
3210 if ((ql2xasynctmfenable) && IS_FWI2_CAPABLE(ha))
3211 return qla2x00_async_tm_cmd(fcport, TCF_LUN_RESET, l, tag);
3212
3213 return __qla24xx_issue_tmf("Lun", TCF_LUN_RESET, fcport, l, tag);
3214 }
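/*
 * Illustrative usage sketch (not part of the driver): both wrappers fall
 * back to the synchronous task-management IOCB when the async path is
 * disabled.  The LUN number, tag and the escalation from LUN reset to
 * target reset are placeholders for illustration; real callers pass the
 * values handed down by the SCSI midlayer.
 *
 *	int rc;
 *
 *	rc = qla24xx_lun_reset(fcport, 0, 0);
 *	if (rc != QLA_SUCCESS)
 *		rc = qla24xx_abort_target(fcport, 0, 0);
 *
 * A marker IOCB is issued internally after the task-management IOCB
 * completes, so the caller does not need to send one itself.
 */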
3215
3216 int
3217 qla2x00_system_error(scsi_qla_host_t *vha)
3218 {
3219 int rval;
3220 mbx_cmd_t mc;
3221 mbx_cmd_t *mcp = &mc;
3222 struct qla_hw_data *ha = vha->hw;
3223
3224 if (!IS_QLA23XX(ha) && !IS_FWI2_CAPABLE(ha))
3225 return QLA_FUNCTION_FAILED;
3226
3227 ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x109b,
3228 "Entered %s.\n", __func__);
3229
3230 mcp->mb[0] = MBC_GEN_SYSTEM_ERROR;
3231 mcp->out_mb = MBX_0;
3232 mcp->in_mb = MBX_0;
3233 mcp->tov = 5;
3234 mcp->flags = 0;
3235 rval = qla2x00_mailbox_command(vha, mcp);
3236
3237 if (rval != QLA_SUCCESS) {
3238 ql_dbg(ql_dbg_mbx, vha, 0x109c, "Failed=%x.\n", rval);
3239 } else {
3240 ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x109d,
3241 "Done %s.\n", __func__);
3242 }
3243
3244 return rval;
3245 }
3246
3247 int
3248 qla2x00_write_serdes_word(scsi_qla_host_t *vha, uint16_t addr, uint16_t data)
3249 {
3250 int rval;
3251 mbx_cmd_t mc;
3252 mbx_cmd_t *mcp = &mc;
3253
3254 if (!IS_QLA25XX(vha->hw) && !IS_QLA2031(vha->hw) &&
3255 !IS_QLA27XX(vha->hw))
3256 return QLA_FUNCTION_FAILED;
3257
3258 ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x1182,
3259 "Entered %s.\n", __func__);
3260
3261 mcp->mb[0] = MBC_WRITE_SERDES;
3262 mcp->mb[1] = addr;
3263 if (IS_QLA2031(vha->hw))
3264 mcp->mb[2] = data & 0xff;
3265 else
3266 mcp->mb[2] = data;
3267
3268 mcp->mb[3] = 0;
3269 mcp->out_mb = MBX_3|MBX_2|MBX_1|MBX_0;
3270 mcp->in_mb = MBX_0;
3271 mcp->tov = MBX_TOV_SECONDS;
3272 mcp->flags = 0;
3273 rval = qla2x00_mailbox_command(vha, mcp);
3274
3275 if (rval != QLA_SUCCESS) {
3276 ql_dbg(ql_dbg_mbx, vha, 0x1183,
3277 "Failed=%x mb[0]=%x.\n", rval, mcp->mb[0]);
3278 } else {
3279 ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x1184,
3280 "Done %s.\n", __func__);
3281 }
3282
3283 return rval;
3284 }
3285
3286 int
3287 qla2x00_read_serdes_word(scsi_qla_host_t *vha, uint16_t addr, uint16_t *data)
3288 {
3289 int rval;
3290 mbx_cmd_t mc;
3291 mbx_cmd_t *mcp = &mc;
3292
3293 if (!IS_QLA25XX(vha->hw) && !IS_QLA2031(vha->hw) &&
3294 !IS_QLA27XX(vha->hw))
3295 return QLA_FUNCTION_FAILED;
3296
3297 ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x1185,
3298 "Entered %s.\n", __func__);
3299
3300 mcp->mb[0] = MBC_READ_SERDES;
3301 mcp->mb[1] = addr;
3302 mcp->mb[3] = 0;
3303 mcp->out_mb = MBX_3|MBX_1|MBX_0;
3304 mcp->in_mb = MBX_1|MBX_0;
3305 mcp->tov = MBX_TOV_SECONDS;
3306 mcp->flags = 0;
3307 rval = qla2x00_mailbox_command(vha, mcp);
3308
3309 if (IS_QLA2031(vha->hw))
3310 *data = mcp->mb[1] & 0xff;
3311 else
3312 *data = mcp->mb[1];
3313
3314 if (rval != QLA_SUCCESS) {
3315 ql_dbg(ql_dbg_mbx, vha, 0x1186,
3316 "Failed=%x mb[0]=%x.\n", rval, mcp->mb[0]);
3317 } else {
3318 ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x1187,
3319 "Done %s.\n", __func__);
3320 }
3321
3322 return rval;
3323 }
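/*
 * Illustrative usage sketch (not part of the driver): a read-modify-write
 * of a serdes register using the two helpers above.  The register
 * address and the bit being set are placeholders, not real serdes
 * definitions.
 *
 *	uint16_t val = 0;
 *
 *	if (qla2x00_read_serdes_word(vha, 0x0000, &val) == QLA_SUCCESS) {
 *		val |= BIT_0;
 *		qla2x00_write_serdes_word(vha, 0x0000, val);
 *	}
 *
 * Note that on ISP2031 only the low byte of the word is significant, as
 * both helpers mask the data with 0xff for that chip.
 */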
3324
3325 int
3326 qla8044_write_serdes_word(scsi_qla_host_t *vha, uint32_t addr, uint32_t data)
3327 {
3328 int rval;
3329 mbx_cmd_t mc;
3330 mbx_cmd_t *mcp = &mc;
3331
3332 if (!IS_QLA8044(vha->hw))
3333 return QLA_FUNCTION_FAILED;
3334
3335 ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x11a0,
3336 "Entered %s.\n", __func__);
3337
3338 mcp->mb[0] = MBC_SET_GET_ETH_SERDES_REG;
3339 mcp->mb[1] = HCS_WRITE_SERDES;
3340 mcp->mb[3] = LSW(addr);
3341 mcp->mb[4] = MSW(addr);
3342 mcp->mb[5] = LSW(data);
3343 mcp->mb[6] = MSW(data);
3344 mcp->out_mb = MBX_6|MBX_5|MBX_4|MBX_3|MBX_1|MBX_0;
3345 mcp->in_mb = MBX_0;
3346 mcp->tov = MBX_TOV_SECONDS;
3347 mcp->flags = 0;
3348 rval = qla2x00_mailbox_command(vha, mcp);
3349
3350 if (rval != QLA_SUCCESS) {
3351 ql_dbg(ql_dbg_mbx, vha, 0x11a1,
3352 "Failed=%x mb[0]=%x.\n", rval, mcp->mb[0]);
3353 } else {
3354 ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x1188,
3355 "Done %s.\n", __func__);
3356 }
3357
3358 return rval;
3359 }
3360
3361 int
3362 qla8044_read_serdes_word(scsi_qla_host_t *vha, uint32_t addr, uint32_t *data)
3363 {
3364 int rval;
3365 mbx_cmd_t mc;
3366 mbx_cmd_t *mcp = &mc;
3367
3368 if (!IS_QLA8044(vha->hw))
3369 return QLA_FUNCTION_FAILED;
3370
3371 ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x1189,
3372 "Entered %s.\n", __func__);
3373
3374 mcp->mb[0] = MBC_SET_GET_ETH_SERDES_REG;
3375 mcp->mb[1] = HCS_READ_SERDES;
3376 mcp->mb[3] = LSW(addr);
3377 mcp->mb[4] = MSW(addr);
3378 mcp->out_mb = MBX_4|MBX_3|MBX_1|MBX_0;
3379 mcp->in_mb = MBX_2|MBX_1|MBX_0;
3380 mcp->tov = MBX_TOV_SECONDS;
3381 mcp->flags = 0;
3382 rval = qla2x00_mailbox_command(vha, mcp);
3383
3384 *data = mcp->mb[2] << 16 | mcp->mb[1];
3385
3386 if (rval != QLA_SUCCESS) {
3387 ql_dbg(ql_dbg_mbx, vha, 0x118a,
3388 "Failed=%x mb[0]=%x.\n", rval, mcp->mb[0]);
3389 } else {
3390 ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x118b,
3391 "Done %s.\n", __func__);
3392 }
3393
3394 return rval;
3395 }
3396
3397 /**
3398 * qla2x00_set_serdes_params() - Set serdes transmit parameters.
3399 * @vha: HA context
3400 * @sw_em_1g: serdes parameter word applied for 1 Gb/s operation
3401 * @sw_em_2g: serdes parameter word applied for 2 Gb/s operation
3402 * @sw_em_4g: serdes parameter word applied for 4 Gb/s operation
3403 *
3404 * Returns qla2x00 local function return status code.
3405 */
3406 int
3407 qla2x00_set_serdes_params(scsi_qla_host_t *vha, uint16_t sw_em_1g,
3408 uint16_t sw_em_2g, uint16_t sw_em_4g)
3409 {
3410 int rval;
3411 mbx_cmd_t mc;
3412 mbx_cmd_t *mcp = &mc;
3413
3414 ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x109e,
3415 "Entered %s.\n", __func__);
3416
3417 mcp->mb[0] = MBC_SERDES_PARAMS;
3418 mcp->mb[1] = BIT_0;
3419 mcp->mb[2] = sw_em_1g | BIT_15;
3420 mcp->mb[3] = sw_em_2g | BIT_15;
3421 mcp->mb[4] = sw_em_4g | BIT_15;
3422 mcp->out_mb = MBX_4|MBX_3|MBX_2|MBX_1|MBX_0;
3423 mcp->in_mb = MBX_0;
3424 mcp->tov = MBX_TOV_SECONDS;
3425 mcp->flags = 0;
3426 rval = qla2x00_mailbox_command(vha, mcp);
3427
3428 if (rval != QLA_SUCCESS) {
3429 /*EMPTY*/
3430 ql_dbg(ql_dbg_mbx, vha, 0x109f,
3431 "Failed=%x mb[0]=%x.\n", rval, mcp->mb[0]);
3432 } else {
3433 /*EMPTY*/
3434 ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x10a0,
3435 "Done %s.\n", __func__);
3436 }
3437
3438 return rval;
3439 }
3440
3441 int
3442 qla2x00_stop_firmware(scsi_qla_host_t *vha)
3443 {
3444 int rval;
3445 mbx_cmd_t mc;
3446 mbx_cmd_t *mcp = &mc;
3447
3448 if (!IS_FWI2_CAPABLE(vha->hw))
3449 return QLA_FUNCTION_FAILED;
3450
3451 ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x10a1,
3452 "Entered %s.\n", __func__);
3453
3454 mcp->mb[0] = MBC_STOP_FIRMWARE;
3455 mcp->mb[1] = 0;
3456 mcp->out_mb = MBX_1|MBX_0;
3457 mcp->in_mb = MBX_0;
3458 mcp->tov = 5;
3459 mcp->flags = 0;
3460 rval = qla2x00_mailbox_command(vha, mcp);
3461
3462 if (rval != QLA_SUCCESS) {
3463 ql_dbg(ql_dbg_mbx, vha, 0x10a2, "Failed=%x.\n", rval);
3464 if (mcp->mb[0] == MBS_INVALID_COMMAND)
3465 rval = QLA_INVALID_COMMAND;
3466 } else {
3467 ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x10a3,
3468 "Done %s.\n", __func__);
3469 }
3470
3471 return rval;
3472 }
3473
3474 int
3475 qla2x00_enable_eft_trace(scsi_qla_host_t *vha, dma_addr_t eft_dma,
3476 uint16_t buffers)
3477 {
3478 int rval;
3479 mbx_cmd_t mc;
3480 mbx_cmd_t *mcp = &mc;
3481
3482 ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x10a4,
3483 "Entered %s.\n", __func__);
3484
3485 if (!IS_FWI2_CAPABLE(vha->hw))
3486 return QLA_FUNCTION_FAILED;
3487
3488 if (unlikely(pci_channel_offline(vha->hw->pdev)))
3489 return QLA_FUNCTION_FAILED;
3490
3491 mcp->mb[0] = MBC_TRACE_CONTROL;
3492 mcp->mb[1] = TC_EFT_ENABLE;
3493 mcp->mb[2] = LSW(eft_dma);
3494 mcp->mb[3] = MSW(eft_dma);
3495 mcp->mb[4] = LSW(MSD(eft_dma));
3496 mcp->mb[5] = MSW(MSD(eft_dma));
3497 mcp->mb[6] = buffers;
3498 mcp->mb[7] = TC_AEN_DISABLE;
3499 mcp->out_mb = MBX_7|MBX_6|MBX_5|MBX_4|MBX_3|MBX_2|MBX_1|MBX_0;
3500 mcp->in_mb = MBX_1|MBX_0;
3501 mcp->tov = MBX_TOV_SECONDS;
3502 mcp->flags = 0;
3503 rval = qla2x00_mailbox_command(vha, mcp);
3504 if (rval != QLA_SUCCESS) {
3505 ql_dbg(ql_dbg_mbx, vha, 0x10a5,
3506 "Failed=%x mb[0]=%x mb[1]=%x.\n",
3507 rval, mcp->mb[0], mcp->mb[1]);
3508 } else {
3509 ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x10a6,
3510 "Done %s.\n", __func__);
3511 }
3512
3513 return rval;
3514 }
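/*
 * Illustrative usage sketch (not part of the driver): the EFT buffer
 * must be DMA-coherent and sized for the requested number of buffers.
 * The driver normally keeps it in ha->eft/ha->eft_dma and passes
 * EFT_NUM_BUFFERS; those names are assumed here from qla_def.h.
 *
 *	struct qla_hw_data *ha = vha->hw;
 *	int rc;
 *
 *	rc = qla2x00_enable_eft_trace(vha, ha->eft_dma, EFT_NUM_BUFFERS);
 *	if (rc != QLA_SUCCESS)
 *		ql_log(ql_log_warn, vha, 0x0000,
 *		    "EFT trace not enabled (%x)\n", rc);
 *
 * The 0x0000 message id is a placeholder.  qla2x00_disable_eft_trace()
 * below is the matching teardown call.
 */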
3515
3516 int
3517 qla2x00_disable_eft_trace(scsi_qla_host_t *vha)
3518 {
3519 int rval;
3520 mbx_cmd_t mc;
3521 mbx_cmd_t *mcp = &mc;
3522
3523 ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x10a7,
3524 "Entered %s.\n", __func__);
3525
3526 if (!IS_FWI2_CAPABLE(vha->hw))
3527 return QLA_FUNCTION_FAILED;
3528
3529 if (unlikely(pci_channel_offline(vha->hw->pdev)))
3530 return QLA_FUNCTION_FAILED;
3531
3532 mcp->mb[0] = MBC_TRACE_CONTROL;
3533 mcp->mb[1] = TC_EFT_DISABLE;
3534 mcp->out_mb = MBX_1|MBX_0;
3535 mcp->in_mb = MBX_1|MBX_0;
3536 mcp->tov = MBX_TOV_SECONDS;
3537 mcp->flags = 0;
3538 rval = qla2x00_mailbox_command(vha, mcp);
3539 if (rval != QLA_SUCCESS) {
3540 ql_dbg(ql_dbg_mbx, vha, 0x10a8,
3541 "Failed=%x mb[0]=%x mb[1]=%x.\n",
3542 rval, mcp->mb[0], mcp->mb[1]);
3543 } else {
3544 ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x10a9,
3545 "Done %s.\n", __func__);
3546 }
3547
3548 return rval;
3549 }
3550
3551 int
3552 qla2x00_enable_fce_trace(scsi_qla_host_t *vha, dma_addr_t fce_dma,
3553 uint16_t buffers, uint16_t *mb, uint32_t *dwords)
3554 {
3555 int rval;
3556 mbx_cmd_t mc;
3557 mbx_cmd_t *mcp = &mc;
3558
3559 ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x10aa,
3560 "Entered %s.\n", __func__);
3561
3562 if (!IS_QLA25XX(vha->hw) && !IS_QLA81XX(vha->hw) &&
3563 !IS_QLA83XX(vha->hw) && !IS_QLA27XX(vha->hw))
3564 return QLA_FUNCTION_FAILED;
3565
3566 if (unlikely(pci_channel_offline(vha->hw->pdev)))
3567 return QLA_FUNCTION_FAILED;
3568
3569 mcp->mb[0] = MBC_TRACE_CONTROL;
3570 mcp->mb[1] = TC_FCE_ENABLE;
3571 mcp->mb[2] = LSW(fce_dma);
3572 mcp->mb[3] = MSW(fce_dma);
3573 mcp->mb[4] = LSW(MSD(fce_dma));
3574 mcp->mb[5] = MSW(MSD(fce_dma));
3575 mcp->mb[6] = buffers;
3576 mcp->mb[7] = TC_AEN_DISABLE;
3577 mcp->mb[8] = 0;
3578 mcp->mb[9] = TC_FCE_DEFAULT_RX_SIZE;
3579 mcp->mb[10] = TC_FCE_DEFAULT_TX_SIZE;
3580 mcp->out_mb = MBX_10|MBX_9|MBX_8|MBX_7|MBX_6|MBX_5|MBX_4|MBX_3|MBX_2|
3581 MBX_1|MBX_0;
3582 mcp->in_mb = MBX_6|MBX_5|MBX_4|MBX_3|MBX_2|MBX_1|MBX_0;
3583 mcp->tov = MBX_TOV_SECONDS;
3584 mcp->flags = 0;
3585 rval = qla2x00_mailbox_command(vha, mcp);
3586 if (rval != QLA_SUCCESS) {
3587 ql_dbg(ql_dbg_mbx, vha, 0x10ab,
3588 "Failed=%x mb[0]=%x mb[1]=%x.\n",
3589 rval, mcp->mb[0], mcp->mb[1]);
3590 } else {
3591 ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x10ac,
3592 "Done %s.\n", __func__);
3593
3594 if (mb)
3595 memcpy(mb, mcp->mb, 8 * sizeof(*mb));
3596 if (dwords)
3597 *dwords = buffers;
3598 }
3599
3600 return rval;
3601 }
3602
3603 int
3604 qla2x00_disable_fce_trace(scsi_qla_host_t *vha, uint64_t *wr, uint64_t *rd)
3605 {
3606 int rval;
3607 mbx_cmd_t mc;
3608 mbx_cmd_t *mcp = &mc;
3609
3610 ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x10ad,
3611 "Entered %s.\n", __func__);
3612
3613 if (!IS_FWI2_CAPABLE(vha->hw))
3614 return QLA_FUNCTION_FAILED;
3615
3616 if (unlikely(pci_channel_offline(vha->hw->pdev)))
3617 return QLA_FUNCTION_FAILED;
3618
3619 mcp->mb[0] = MBC_TRACE_CONTROL;
3620 mcp->mb[1] = TC_FCE_DISABLE;
3621 mcp->mb[2] = TC_FCE_DISABLE_TRACE;
3622 mcp->out_mb = MBX_2|MBX_1|MBX_0;
3623 mcp->in_mb = MBX_9|MBX_8|MBX_7|MBX_6|MBX_5|MBX_4|MBX_3|MBX_2|
3624 MBX_1|MBX_0;
3625 mcp->tov = MBX_TOV_SECONDS;
3626 mcp->flags = 0;
3627 rval = qla2x00_mailbox_command(vha, mcp);
3628 if (rval != QLA_SUCCESS) {
3629 ql_dbg(ql_dbg_mbx, vha, 0x10ae,
3630 "Failed=%x mb[0]=%x mb[1]=%x.\n",
3631 rval, mcp->mb[0], mcp->mb[1]);
3632 } else {
3633 ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x10af,
3634 "Done %s.\n", __func__);
3635
3636 if (wr)
3637 *wr = (uint64_t) mcp->mb[5] << 48 |
3638 (uint64_t) mcp->mb[4] << 32 |
3639 (uint64_t) mcp->mb[3] << 16 |
3640 (uint64_t) mcp->mb[2];
3641 if (rd)
3642 *rd = (uint64_t) mcp->mb[9] << 48 |
3643 (uint64_t) mcp->mb[8] << 32 |
3644 (uint64_t) mcp->mb[7] << 16 |
3645 (uint64_t) mcp->mb[6];
3646 }
3647
3648 return rval;
3649 }
3650
3651 int
3652 qla2x00_get_idma_speed(scsi_qla_host_t *vha, uint16_t loop_id,
3653 uint16_t *port_speed, uint16_t *mb)
3654 {
3655 int rval;
3656 mbx_cmd_t mc;
3657 mbx_cmd_t *mcp = &mc;
3658
3659 ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x10b0,
3660 "Entered %s.\n", __func__);
3661
3662 if (!IS_IIDMA_CAPABLE(vha->hw))
3663 return QLA_FUNCTION_FAILED;
3664
3665 mcp->mb[0] = MBC_PORT_PARAMS;
3666 mcp->mb[1] = loop_id;
3667 mcp->mb[2] = mcp->mb[3] = 0;
3668 mcp->mb[9] = vha->vp_idx;
3669 mcp->out_mb = MBX_9|MBX_3|MBX_2|MBX_1|MBX_0;
3670 mcp->in_mb = MBX_3|MBX_1|MBX_0;
3671 mcp->tov = MBX_TOV_SECONDS;
3672 mcp->flags = 0;
3673 rval = qla2x00_mailbox_command(vha, mcp);
3674
3675 /* Return mailbox statuses. */
3676 if (mb != NULL) {
3677 mb[0] = mcp->mb[0];
3678 mb[1] = mcp->mb[1];
3679 mb[3] = mcp->mb[3];
3680 }
3681
3682 if (rval != QLA_SUCCESS) {
3683 ql_dbg(ql_dbg_mbx, vha, 0x10b1, "Failed=%x.\n", rval);
3684 } else {
3685 ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x10b2,
3686 "Done %s.\n", __func__);
3687 if (port_speed)
3688 *port_speed = mcp->mb[3];
3689 }
3690
3691 return rval;
3692 }
3693
3694 int
3695 qla2x00_set_idma_speed(scsi_qla_host_t *vha, uint16_t loop_id,
3696 uint16_t port_speed, uint16_t *mb)
3697 {
3698 int rval;
3699 mbx_cmd_t mc;
3700 mbx_cmd_t *mcp = &mc;
3701
3702 ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x10b3,
3703 "Entered %s.\n", __func__);
3704
3705 if (!IS_IIDMA_CAPABLE(vha->hw))
3706 return QLA_FUNCTION_FAILED;
3707
3708 mcp->mb[0] = MBC_PORT_PARAMS;
3709 mcp->mb[1] = loop_id;
3710 mcp->mb[2] = BIT_0;
3711 if (IS_CNA_CAPABLE(vha->hw))
3712 mcp->mb[3] = port_speed & (BIT_5|BIT_4|BIT_3|BIT_2|BIT_1|BIT_0);
3713 else
3714 mcp->mb[3] = port_speed & (BIT_2|BIT_1|BIT_0);
3715 mcp->mb[9] = vha->vp_idx;
3716 mcp->out_mb = MBX_9|MBX_3|MBX_2|MBX_1|MBX_0;
3717 mcp->in_mb = MBX_3|MBX_1|MBX_0;
3718 mcp->tov = MBX_TOV_SECONDS;
3719 mcp->flags = 0;
3720 rval = qla2x00_mailbox_command(vha, mcp);
3721
3722 /* Return mailbox statuses. */
3723 if (mb != NULL) {
3724 mb[0] = mcp->mb[0];
3725 mb[1] = mcp->mb[1];
3726 mb[3] = mcp->mb[3];
3727 }
3728
3729 if (rval != QLA_SUCCESS) {
3730 ql_dbg(ql_dbg_mbx, vha, 0x10b4,
3731 "Failed=%x.\n", rval);
3732 } else {
3733 ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x10b5,
3734 "Done %s.\n", __func__);
3735 }
3736
3737 return rval;
3738 }
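/*
 * Illustrative usage sketch (not part of the driver): query the current
 * iIDMA setting and program it back unchanged, which exercises both
 * helpers without altering the link.  The mb[] array only needs to cover
 * indexes 0, 1 and 3, per the assignments above; fcport is an assumed
 * fc_port pointer.
 *
 *	uint16_t speed = 0, mb[4] = { 0 };
 *	int rc;
 *
 *	rc = qla2x00_get_idma_speed(vha, fcport->loop_id, &speed, mb);
 *	if (rc == QLA_SUCCESS)
 *		rc = qla2x00_set_idma_speed(vha, fcport->loop_id, speed, mb);
 *
 * On CNA parts the set path masks the speed to six bits, otherwise to
 * three, as shown above.
 */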
3739
3740 void
3741 qla24xx_report_id_acquisition(scsi_qla_host_t *vha,
3742 struct vp_rpt_id_entry_24xx *rptid_entry)
3743 {
3744 struct qla_hw_data *ha = vha->hw;
3745 scsi_qla_host_t *vp = NULL;
3746 unsigned long flags;
3747 int found;
3748 port_id_t id;
3749 struct fc_port *fcport;
3750
3751 ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x10b6,
3752 "Entered %s.\n", __func__);
3753
3754 if (rptid_entry->entry_status != 0)
3755 return;
3756
3757 id.b.domain = rptid_entry->port_id[2];
3758 id.b.area = rptid_entry->port_id[1];
3759 id.b.al_pa = rptid_entry->port_id[0];
3760 id.b.rsvd_1 = 0;
3761 ha->flags.n2n_ae = 0;
3762
3763 if (rptid_entry->format == 0) {
3764 /* loop */
3765 ql_dbg(ql_dbg_async, vha, 0x10b7,
3766 "Format 0 : Number of VPs setup %d, number of "
3767 "VPs acquired %d.\n", rptid_entry->vp_setup,
3768 rptid_entry->vp_acquired);
3769 ql_dbg(ql_dbg_async, vha, 0x10b8,
3770 "Primary port id %02x%02x%02x.\n",
3771 rptid_entry->port_id[2], rptid_entry->port_id[1],
3772 rptid_entry->port_id[0]);
3773 ha->current_topology = ISP_CFG_NL;
3774 qlt_update_host_map(vha, id);
3775
3776 } else if (rptid_entry->format == 1) {
3777 /* fabric */
3778 ql_dbg(ql_dbg_async, vha, 0x10b9,
3779 "Format 1: VP[%d] enabled - status %d - with "
3780 "port id %02x%02x%02x.\n", rptid_entry->vp_idx,
3781 rptid_entry->vp_status,
3782 rptid_entry->port_id[2], rptid_entry->port_id[1],
3783 rptid_entry->port_id[0]);
3784 ql_dbg(ql_dbg_async, vha, 0x5075,
3785 "Format 1: Remote WWPN %8phC.\n",
3786 rptid_entry->u.f1.port_name);
3787
3788 ql_dbg(ql_dbg_async, vha, 0x5075,
3789 "Format 1: WWPN %8phC.\n",
3790 vha->port_name);
3791
3792 /* N2N. direct connect */
3793 if (IS_QLA27XX(ha) &&
3794 ((rptid_entry->u.f1.flags>>1) & 0x7) == 2) {
3795 /* if our portname is higher then initiate N2N login */
3796 if (wwn_to_u64(vha->port_name) >
3797 wwn_to_u64(rptid_entry->u.f1.port_name)) {
3798 // ??? qlt_update_host_map(vha, id);
3799 vha->n2n_id = 0x1;
3800 ql_dbg(ql_dbg_async, vha, 0x5075,
3801 "Format 1: Setting n2n_update_needed for id %d\n",
3802 vha->n2n_id);
3803 } else {
3804 ql_dbg(ql_dbg_async, vha, 0x5075,
3805 "Format 1: Remote login - Waiting for WWPN %8phC.\n",
3806 rptid_entry->u.f1.port_name);
3807 }
3808
3809 memcpy(vha->n2n_port_name, rptid_entry->u.f1.port_name,
3810 WWN_SIZE);
3811 set_bit(N2N_LOGIN_NEEDED, &vha->dpc_flags);
3812 set_bit(REGISTER_FC4_NEEDED, &vha->dpc_flags);
3813 set_bit(REGISTER_FDMI_NEEDED, &vha->dpc_flags);
3814 ha->flags.n2n_ae = 1;
3815 return;
3816 }
3817
3818 ha->flags.gpsc_supported = 1;
3819 ha->current_topology = ISP_CFG_F;
3820 /* buffer to buffer credit flag */
3821 vha->flags.bbcr_enable = (rptid_entry->u.f1.bbcr & 0xf) != 0;
3822
3823 if (rptid_entry->vp_idx == 0) {
3824 if (rptid_entry->vp_status == VP_STAT_COMPL) {
3825 /* FA-WWN is only for physical port */
3826 if (qla_ini_mode_enabled(vha) &&
3827 ha->flags.fawwpn_enabled &&
3828 (rptid_entry->u.f1.flags &
3829 BIT_6)) {
3830 memcpy(vha->port_name,
3831 rptid_entry->u.f1.port_name,
3832 WWN_SIZE);
3833 }
3834
3835 qlt_update_host_map(vha, id);
3836 }
3837
3838 set_bit(REGISTER_FC4_NEEDED, &vha->dpc_flags);
3839 set_bit(REGISTER_FDMI_NEEDED, &vha->dpc_flags);
3840 } else {
3841 if (rptid_entry->vp_status != VP_STAT_COMPL &&
3842 rptid_entry->vp_status != VP_STAT_ID_CHG) {
3843 ql_dbg(ql_dbg_mbx, vha, 0x10ba,
3844 "Could not acquire ID for VP[%d].\n",
3845 rptid_entry->vp_idx);
3846 return;
3847 }
3848
3849 found = 0;
3850 spin_lock_irqsave(&ha->vport_slock, flags);
3851 list_for_each_entry(vp, &ha->vp_list, list) {
3852 if (rptid_entry->vp_idx == vp->vp_idx) {
3853 found = 1;
3854 break;
3855 }
3856 }
3857 spin_unlock_irqrestore(&ha->vport_slock, flags);
3858
3859 if (!found)
3860 return;
3861
3862 qlt_update_host_map(vp, id);
3863
3864 /*
3865 * Cannot configure here as we are still sitting on the
3866 * response queue. Handle it in dpc context.
3867 */
3868 set_bit(VP_IDX_ACQUIRED, &vp->vp_flags);
3869 set_bit(REGISTER_FC4_NEEDED, &vp->dpc_flags);
3870 set_bit(REGISTER_FDMI_NEEDED, &vp->dpc_flags);
3871 }
3872 set_bit(VP_DPC_NEEDED, &vha->dpc_flags);
3873 qla2xxx_wake_dpc(vha);
3874 } else if (rptid_entry->format == 2) {
3875 ql_dbg(ql_dbg_async, vha, 0x505f,
3876 "RIDA: format 2/N2N Primary port id %02x%02x%02x.\n",
3877 rptid_entry->port_id[2], rptid_entry->port_id[1],
3878 rptid_entry->port_id[0]);
3879
3880 ql_dbg(ql_dbg_async, vha, 0x5075,
3881 "N2N: Remote WWPN %8phC.\n",
3882 rptid_entry->u.f2.port_name);
3883
3884 /* N2N. direct connect */
3885 ha->current_topology = ISP_CFG_N;
3886 ha->flags.rida_fmt2 = 1;
3887 vha->d_id.b.domain = rptid_entry->port_id[2];
3888 vha->d_id.b.area = rptid_entry->port_id[1];
3889 vha->d_id.b.al_pa = rptid_entry->port_id[0];
3890
3891 ha->flags.n2n_ae = 1;
3892 spin_lock_irqsave(&ha->vport_slock, flags);
3893 qlt_update_vp_map(vha, SET_AL_PA);
3894 spin_unlock_irqrestore(&ha->vport_slock, flags);
3895
3896 list_for_each_entry(fcport, &vha->vp_fcports, list) {
3897 fcport->scan_state = QLA_FCPORT_SCAN;
3898 }
3899
3900 fcport = qla2x00_find_fcport_by_wwpn(vha,
3901 rptid_entry->u.f2.port_name, 1);
3902
3903 if (fcport) {
3904 fcport->plogi_nack_done_deadline = jiffies + HZ;
3905 fcport->scan_state = QLA_FCPORT_FOUND;
3906 switch (fcport->disc_state) {
3907 case DSC_DELETED:
3908 ql_dbg(ql_dbg_disc, vha, 0x210d,
3909 "%s %d %8phC login\n",
3910 __func__, __LINE__, fcport->port_name);
3911 qla24xx_fcport_handle_login(vha, fcport);
3912 break;
3913 case DSC_DELETE_PEND:
3914 break;
3915 default:
3916 qlt_schedule_sess_for_deletion(fcport);
3917 break;
3918 }
3919 } else {
3920 id.b.al_pa = rptid_entry->u.f2.remote_nport_id[0];
3921 id.b.area = rptid_entry->u.f2.remote_nport_id[1];
3922 id.b.domain = rptid_entry->u.f2.remote_nport_id[2];
3923 qla24xx_post_newsess_work(vha, &id,
3924 rptid_entry->u.f2.port_name,
3925 rptid_entry->u.f2.node_name,
3926 NULL,
3927 FC4_TYPE_UNKNOWN);
3928 }
3929 }
3930 }
3931
3932 /*
3933 * qla24xx_modify_vp_config
3934 * Change VP configuration for vha
3935 *
3936 * Input:
3937 * vha = adapter block pointer.
3938 *
3939 * Returns:
3940 * qla2xxx local function return status code.
3941 *
3942 * Context:
3943 * Kernel context.
3944 */
3945 int
3946 qla24xx_modify_vp_config(scsi_qla_host_t *vha)
3947 {
3948 int rval;
3949 struct vp_config_entry_24xx *vpmod;
3950 dma_addr_t vpmod_dma;
3951 struct qla_hw_data *ha = vha->hw;
3952 struct scsi_qla_host *base_vha = pci_get_drvdata(ha->pdev);
3953
3954 /* This can be called by the parent */
3955
3956 ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x10bb,
3957 "Entered %s.\n", __func__);
3958
3959 vpmod = dma_pool_zalloc(ha->s_dma_pool, GFP_KERNEL, &vpmod_dma);
3960 if (!vpmod) {
3961 ql_log(ql_log_warn, vha, 0x10bc,
3962 "Failed to allocate modify VP IOCB.\n");
3963 return QLA_MEMORY_ALLOC_FAILED;
3964 }
3965
3966 vpmod->entry_type = VP_CONFIG_IOCB_TYPE;
3967 vpmod->entry_count = 1;
3968 vpmod->command = VCT_COMMAND_MOD_ENABLE_VPS;
3969 vpmod->vp_count = 1;
3970 vpmod->vp_index1 = vha->vp_idx;
3971 vpmod->options_idx1 = BIT_3|BIT_4|BIT_5;
3972
3973 qlt_modify_vp_config(vha, vpmod);
3974
3975 memcpy(vpmod->node_name_idx1, vha->node_name, WWN_SIZE);
3976 memcpy(vpmod->port_name_idx1, vha->port_name, WWN_SIZE);
3977 vpmod->entry_count = 1;
3978
3979 rval = qla2x00_issue_iocb(base_vha, vpmod, vpmod_dma, 0);
3980 if (rval != QLA_SUCCESS) {
3981 ql_dbg(ql_dbg_mbx, vha, 0x10bd,
3982 "Failed to issue VP config IOCB (%x).\n", rval);
3983 } else if (vpmod->comp_status != 0) {
3984 ql_dbg(ql_dbg_mbx, vha, 0x10be,
3985 "Failed to complete IOCB -- error status (%x).\n",
3986 vpmod->comp_status);
3987 rval = QLA_FUNCTION_FAILED;
3988 } else if (vpmod->comp_status != cpu_to_le16(CS_COMPLETE)) {
3989 ql_dbg(ql_dbg_mbx, vha, 0x10bf,
3990 "Failed to complete IOCB -- completion status (%x).\n",
3991 le16_to_cpu(vpmod->comp_status));
3992 rval = QLA_FUNCTION_FAILED;
3993 } else {
3994 /* EMPTY */
3995 ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x10c0,
3996 "Done %s.\n", __func__);
3997 fc_vport_set_state(vha->fc_vport, FC_VPORT_INITIALIZING);
3998 }
3999 dma_pool_free(ha->s_dma_pool, vpmod, vpmod_dma);
4000
4001 return rval;
4002 }
4003
4004 /*
4005 * qla2x00_send_change_request
4006 * Register to receive, or disable, RSCN notifications from the fabric controller
4007 *
4008 * Input:
4009 * ha = adapter block pointer
4010 * format = registration format:
4011 * 0 - Reserved
4012 * 1 - Fabric detected registration
4013 * 2 - N_port detected registration
4014 * 3 - Full registration
4015 * FF - clear registration
4016 * vp_idx = Virtual port index
4017 *
4018 * Returns:
4019 * qla2x00 local function return status code.
4020 *
4021 * Context:
4022 * Kernel context.
4023 */
4024
4025 int
4026 qla2x00_send_change_request(scsi_qla_host_t *vha, uint16_t format,
4027 uint16_t vp_idx)
4028 {
4029 int rval;
4030 mbx_cmd_t mc;
4031 mbx_cmd_t *mcp = &mc;
4032
4033 ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x10c7,
4034 "Entered %s.\n", __func__);
4035
4036 mcp->mb[0] = MBC_SEND_CHANGE_REQUEST;
4037 mcp->mb[1] = format;
4038 mcp->mb[9] = vp_idx;
4039 mcp->out_mb = MBX_9|MBX_1|MBX_0;
4040 mcp->in_mb = MBX_0|MBX_1;
4041 mcp->tov = MBX_TOV_SECONDS;
4042 mcp->flags = 0;
4043 rval = qla2x00_mailbox_command(vha, mcp);
4044
4045 if (rval == QLA_SUCCESS) {
4046 if (mcp->mb[0] != MBS_COMMAND_COMPLETE) {
4047 rval = BIT_1;
4048 }
4049 } else
4050 rval = BIT_1;
4051
4052 return rval;
4053 }
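
/*
 * Usage sketch (illustrative, not part of the driver): a caller that
 * wants full RSCN registration for a port would typically do the
 * following; format 0x3 is "Full registration" per the comment block
 * above, and 0xffff below is a placeholder message id, not one assigned
 * in this driver.
 *
 *	if (qla2x00_send_change_request(vha, 0x3, vha->vp_idx) !=
 *	    QLA_SUCCESS)
 *		ql_log(ql_log_warn, vha, 0xffff,
 *		    "RSCN registration failed.\n");
 */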
4054
4055 int
4056 qla2x00_dump_ram(scsi_qla_host_t *vha, dma_addr_t req_dma, uint32_t addr,
4057 uint32_t size)
4058 {
4059 int rval;
4060 mbx_cmd_t mc;
4061 mbx_cmd_t *mcp = &mc;
4062
4063 ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x1009,
4064 "Entered %s.\n", __func__);
4065
4066 if (MSW(addr) || IS_FWI2_CAPABLE(vha->hw)) {
4067 mcp->mb[0] = MBC_DUMP_RISC_RAM_EXTENDED;
4068 mcp->mb[8] = MSW(addr);
4069 mcp->out_mb = MBX_8|MBX_0;
4070 } else {
4071 mcp->mb[0] = MBC_DUMP_RISC_RAM;
4072 mcp->out_mb = MBX_0;
4073 }
4074 mcp->mb[1] = LSW(addr);
4075 mcp->mb[2] = MSW(req_dma);
4076 mcp->mb[3] = LSW(req_dma);
4077 mcp->mb[6] = MSW(MSD(req_dma));
4078 mcp->mb[7] = LSW(MSD(req_dma));
4079 mcp->out_mb |= MBX_7|MBX_6|MBX_3|MBX_2|MBX_1;
4080 if (IS_FWI2_CAPABLE(vha->hw)) {
4081 mcp->mb[4] = MSW(size);
4082 mcp->mb[5] = LSW(size);
4083 mcp->out_mb |= MBX_5|MBX_4;
4084 } else {
4085 mcp->mb[4] = LSW(size);
4086 mcp->out_mb |= MBX_4;
4087 }
4088
4089 mcp->in_mb = MBX_0;
4090 mcp->tov = MBX_TOV_SECONDS;
4091 mcp->flags = 0;
4092 rval = qla2x00_mailbox_command(vha, mcp);
4093
4094 if (rval != QLA_SUCCESS) {
4095 ql_dbg(ql_dbg_mbx, vha, 0x1008,
4096 "Failed=%x mb[0]=%x.\n", rval, mcp->mb[0]);
4097 } else {
4098 ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x1007,
4099 "Done %s.\n", __func__);
4100 }
4101
4102 return rval;
4103 }
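
/*
 * Usage sketch (illustrative only): qla2x00_dump_ram() expects a
 * DMA-able buffer; addr and size are forwarded to the firmware
 * unchanged. buf_len, risc_addr and dump_size below are placeholders
 * for values the caller already knows.
 *
 *	void *buf;
 *	dma_addr_t buf_dma;
 *	int rval;
 *
 *	buf = dma_alloc_coherent(&ha->pdev->dev, buf_len, &buf_dma,
 *	    GFP_KERNEL);
 *	if (buf) {
 *		rval = qla2x00_dump_ram(vha, buf_dma, risc_addr, dump_size);
 *		dma_free_coherent(&ha->pdev->dev, buf_len, buf, buf_dma);
 *	}
 */
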
4104 /* 84XX Support **************************************************************/
4105
4106 struct cs84xx_mgmt_cmd {
4107 union {
4108 struct verify_chip_entry_84xx req;
4109 struct verify_chip_rsp_84xx rsp;
4110 } p;
4111 };
4112
4113 int
4114 qla84xx_verify_chip(struct scsi_qla_host *vha, uint16_t *status)
4115 {
4116 int rval, retry;
4117 struct cs84xx_mgmt_cmd *mn;
4118 dma_addr_t mn_dma;
4119 uint16_t options;
4120 unsigned long flags;
4121 struct qla_hw_data *ha = vha->hw;
4122
4123 ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x10c8,
4124 "Entered %s.\n", __func__);
4125
4126 mn = dma_pool_alloc(ha->s_dma_pool, GFP_KERNEL, &mn_dma);
4127 if (mn == NULL) {
4128 return QLA_MEMORY_ALLOC_FAILED;
4129 }
4130
4131 /* Force Update? */
4132 options = ha->cs84xx->fw_update ? VCO_FORCE_UPDATE : 0;
4133 /* Diagnostic firmware? */
4134 /* options |= MENLO_DIAG_FW; */
4135 /* We update the firmware with only one data sequence. */
4136 options |= VCO_END_OF_DATA;
4137
4138 do {
4139 retry = 0;
4140 memset(mn, 0, sizeof(*mn));
4141 mn->p.req.entry_type = VERIFY_CHIP_IOCB_TYPE;
4142 mn->p.req.entry_count = 1;
4143 mn->p.req.options = cpu_to_le16(options);
4144
4145 ql_dbg(ql_dbg_mbx + ql_dbg_buffer, vha, 0x111c,
4146 "Dump of Verify Request.\n");
4147 ql_dump_buffer(ql_dbg_mbx + ql_dbg_buffer, vha, 0x111e,
4148 (uint8_t *)mn, sizeof(*mn));
4149
4150 rval = qla2x00_issue_iocb_timeout(vha, mn, mn_dma, 0, 120);
4151 if (rval != QLA_SUCCESS) {
4152 ql_dbg(ql_dbg_mbx, vha, 0x10cb,
4153 "Failed to issue verify IOCB (%x).\n", rval);
4154 goto verify_done;
4155 }
4156
4157 ql_dbg(ql_dbg_mbx + ql_dbg_buffer, vha, 0x1110,
4158 "Dump of Verify Response.\n");
4159 ql_dump_buffer(ql_dbg_mbx + ql_dbg_buffer, vha, 0x1118,
4160 (uint8_t *)mn, sizeof(*mn));
4161
4162 status[0] = le16_to_cpu(mn->p.rsp.comp_status);
4163 status[1] = status[0] == CS_VCS_CHIP_FAILURE ?
4164 le16_to_cpu(mn->p.rsp.failure_code) : 0;
4165 ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x10ce,
4166 "cs=%x fc=%x.\n", status[0], status[1]);
4167
4168 if (status[0] != CS_COMPLETE) {
4169 rval = QLA_FUNCTION_FAILED;
4170 if (!(options & VCO_DONT_UPDATE_FW)) {
4171 ql_dbg(ql_dbg_mbx, vha, 0x10cf,
4172 "Firmware update failed. Retrying "
4173 "without update firmware.\n");
4174 options |= VCO_DONT_UPDATE_FW;
4175 options &= ~VCO_FORCE_UPDATE;
4176 retry = 1;
4177 }
4178 } else {
4179 ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x10d0,
4180 "Firmware updated to %x.\n",
4181 le32_to_cpu(mn->p.rsp.fw_ver));
4182
4183 /* NOTE: we only update OP firmware. */
4184 spin_lock_irqsave(&ha->cs84xx->access_lock, flags);
4185 ha->cs84xx->op_fw_version =
4186 le32_to_cpu(mn->p.rsp.fw_ver);
4187 spin_unlock_irqrestore(&ha->cs84xx->access_lock,
4188 flags);
4189 }
4190 } while (retry);
4191
4192 verify_done:
4193 dma_pool_free(ha->s_dma_pool, mn, mn_dma);
4194
4195 if (rval != QLA_SUCCESS) {
4196 ql_dbg(ql_dbg_mbx, vha, 0x10d1,
4197 "Failed=%x.\n", rval);
4198 } else {
4199 ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x10d2,
4200 "Done %s.\n", __func__);
4201 }
4202
4203 return rval;
4204 }
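
/*
 * Usage sketch (illustrative only): the caller passes a two-element
 * status array. On failure, status[0] carries the completion status and
 * status[1] the failure code when the chip reports CS_VCS_CHIP_FAILURE.
 *
 *	uint16_t status[2];
 *	int rval;
 *
 *	rval = qla84xx_verify_chip(vha, status);
 */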
4205
4206 int
4207 qla25xx_init_req_que(struct scsi_qla_host *vha, struct req_que *req)
4208 {
4209 int rval;
4210 unsigned long flags;
4211 mbx_cmd_t mc;
4212 mbx_cmd_t *mcp = &mc;
4213 struct qla_hw_data *ha = vha->hw;
4214
4215 ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x10d3,
4216 "Entered %s.\n", __func__);
4217
4218 if (IS_SHADOW_REG_CAPABLE(ha))
4219 req->options |= BIT_13;
4220
4221 mcp->mb[0] = MBC_INITIALIZE_MULTIQ;
4222 mcp->mb[1] = req->options;
4223 mcp->mb[2] = MSW(LSD(req->dma));
4224 mcp->mb[3] = LSW(LSD(req->dma));
4225 mcp->mb[6] = MSW(MSD(req->dma));
4226 mcp->mb[7] = LSW(MSD(req->dma));
4227 mcp->mb[5] = req->length;
4228 if (req->rsp)
4229 mcp->mb[10] = req->rsp->id;
4230 mcp->mb[12] = req->qos;
4231 mcp->mb[11] = req->vp_idx;
4232 mcp->mb[13] = req->rid;
4233 if (IS_QLA83XX(ha) || IS_QLA27XX(ha))
4234 mcp->mb[15] = 0;
4235
4236 mcp->mb[4] = req->id;
4237 /* queue in-pointer index */
4238 mcp->mb[8] = 0;
4239 /* queue out-pointer index */
4240 mcp->mb[9] = *req->out_ptr = 0;
4241 mcp->out_mb = MBX_14|MBX_13|MBX_12|MBX_11|MBX_10|MBX_9|MBX_8|MBX_7|
4242 MBX_6|MBX_5|MBX_4|MBX_3|MBX_2|MBX_1|MBX_0;
4243 mcp->in_mb = MBX_0;
4244 mcp->flags = MBX_DMA_OUT;
4245 mcp->tov = MBX_TOV_SECONDS * 2;
4246
4247 if (IS_QLA81XX(ha) || IS_QLA83XX(ha) || IS_QLA27XX(ha))
4248 mcp->in_mb |= MBX_1;
4249 if (IS_QLA83XX(ha) || IS_QLA27XX(ha)) {
4250 mcp->out_mb |= MBX_15;
4251 /* debug q create issue in SR-IOV */
4252 mcp->in_mb |= MBX_9 | MBX_8 | MBX_7;
4253 }
4254
4255 spin_lock_irqsave(&ha->hardware_lock, flags);
4256 if (!(req->options & BIT_0)) {
4257 WRT_REG_DWORD(req->req_q_in, 0);
4258 if (!IS_QLA83XX(ha) && !IS_QLA27XX(ha))
4259 WRT_REG_DWORD(req->req_q_out, 0);
4260 }
4261 spin_unlock_irqrestore(&ha->hardware_lock, flags);
4262
4263 rval = qla2x00_mailbox_command(vha, mcp);
4264 if (rval != QLA_SUCCESS) {
4265 ql_dbg(ql_dbg_mbx, vha, 0x10d4,
4266 "Failed=%x mb[0]=%x.\n", rval, mcp->mb[0]);
4267 } else {
4268 ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x10d5,
4269 "Done %s.\n", __func__);
4270 }
4271
4272 return rval;
4273 }
4274
4275 int
4276 qla25xx_init_rsp_que(struct scsi_qla_host *vha, struct rsp_que *rsp)
4277 {
4278 int rval;
4279 unsigned long flags;
4280 mbx_cmd_t mc;
4281 mbx_cmd_t *mcp = &mc;
4282 struct qla_hw_data *ha = vha->hw;
4283
4284 ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x10d6,
4285 "Entered %s.\n", __func__);
4286
4287 if (IS_SHADOW_REG_CAPABLE(ha))
4288 rsp->options |= BIT_13;
4289
4290 mcp->mb[0] = MBC_INITIALIZE_MULTIQ;
4291 mcp->mb[1] = rsp->options;
4292 mcp->mb[2] = MSW(LSD(rsp->dma));
4293 mcp->mb[3] = LSW(LSD(rsp->dma));
4294 mcp->mb[6] = MSW(MSD(rsp->dma));
4295 mcp->mb[7] = LSW(MSD(rsp->dma));
4296 mcp->mb[5] = rsp->length;
4297 mcp->mb[14] = rsp->msix->entry;
4298 mcp->mb[13] = rsp->rid;
4299 if (IS_QLA83XX(ha) || IS_QLA27XX(ha))
4300 mcp->mb[15] = 0;
4301
4302 mcp->mb[4] = rsp->id;
4303 /* queue in-pointer index */
4304 mcp->mb[8] = *rsp->in_ptr = 0;
4305 /* queue out-pointer index */
4306 mcp->mb[9] = 0;
4307 mcp->out_mb = MBX_14|MBX_13|MBX_9|MBX_8|MBX_7
4308 |MBX_6|MBX_5|MBX_4|MBX_3|MBX_2|MBX_1|MBX_0;
4309 mcp->in_mb = MBX_0;
4310 mcp->flags = MBX_DMA_OUT;
4311 mcp->tov = MBX_TOV_SECONDS * 2;
4312
4313 if (IS_QLA81XX(ha)) {
4314 mcp->out_mb |= MBX_12|MBX_11|MBX_10;
4315 mcp->in_mb |= MBX_1;
4316 } else if (IS_QLA83XX(ha) || IS_QLA27XX(ha)) {
4317 mcp->out_mb |= MBX_15|MBX_12|MBX_11|MBX_10;
4318 mcp->in_mb |= MBX_1;
4319 /* debug q create issue in SR-IOV */
4320 mcp->in_mb |= MBX_9 | MBX_8 | MBX_7;
4321 }
4322
4323 spin_lock_irqsave(&ha->hardware_lock, flags);
4324 if (!(rsp->options & BIT_0)) {
4325 WRT_REG_DWORD(rsp->rsp_q_out, 0);
4326 if (!IS_QLA83XX(ha) && !IS_QLA27XX(ha))
4327 WRT_REG_DWORD(rsp->rsp_q_in, 0);
4328 }
4329
4330 spin_unlock_irqrestore(&ha->hardware_lock, flags);
4331
4332 rval = qla2x00_mailbox_command(vha, mcp);
4333 if (rval != QLA_SUCCESS) {
4334 ql_dbg(ql_dbg_mbx, vha, 0x10d7,
4335 "Failed=%x mb[0]=%x.\n", rval, mcp->mb[0]);
4336 } else {
4337 ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x10d8,
4338 "Done %s.\n", __func__);
4339 }
4340
4341 return rval;
4342 }
4343
4344 int
4345 qla81xx_idc_ack(scsi_qla_host_t *vha, uint16_t *mb)
4346 {
4347 int rval;
4348 mbx_cmd_t mc;
4349 mbx_cmd_t *mcp = &mc;
4350
4351 ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x10d9,
4352 "Entered %s.\n", __func__);
4353
4354 mcp->mb[0] = MBC_IDC_ACK;
4355 memcpy(&mcp->mb[1], mb, QLA_IDC_ACK_REGS * sizeof(uint16_t));
4356 mcp->out_mb = MBX_7|MBX_6|MBX_5|MBX_4|MBX_3|MBX_2|MBX_1|MBX_0;
4357 mcp->in_mb = MBX_0;
4358 mcp->tov = MBX_TOV_SECONDS;
4359 mcp->flags = 0;
4360 rval = qla2x00_mailbox_command(vha, mcp);
4361
4362 if (rval != QLA_SUCCESS) {
4363 ql_dbg(ql_dbg_mbx, vha, 0x10da,
4364 "Failed=%x mb[0]=%x.\n", rval, mcp->mb[0]);
4365 } else {
4366 ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x10db,
4367 "Done %s.\n", __func__);
4368 }
4369
4370 return rval;
4371 }
4372
4373 int
4374 qla81xx_fac_get_sector_size(scsi_qla_host_t *vha, uint32_t *sector_size)
4375 {
4376 int rval;
4377 mbx_cmd_t mc;
4378 mbx_cmd_t *mcp = &mc;
4379
4380 ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x10dc,
4381 "Entered %s.\n", __func__);
4382
4383 if (!IS_QLA81XX(vha->hw) && !IS_QLA83XX(vha->hw) &&
4384 !IS_QLA27XX(vha->hw))
4385 return QLA_FUNCTION_FAILED;
4386
4387 mcp->mb[0] = MBC_FLASH_ACCESS_CTRL;
4388 mcp->mb[1] = FAC_OPT_CMD_GET_SECTOR_SIZE;
4389 mcp->out_mb = MBX_1|MBX_0;
4390 mcp->in_mb = MBX_1|MBX_0;
4391 mcp->tov = MBX_TOV_SECONDS;
4392 mcp->flags = 0;
4393 rval = qla2x00_mailbox_command(vha, mcp);
4394
4395 if (rval != QLA_SUCCESS) {
4396 ql_dbg(ql_dbg_mbx, vha, 0x10dd,
4397 "Failed=%x mb[0]=%x mb[1]=%x.\n",
4398 rval, mcp->mb[0], mcp->mb[1]);
4399 } else {
4400 ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x10de,
4401 "Done %s.\n", __func__);
4402 *sector_size = mcp->mb[1];
4403 }
4404
4405 return rval;
4406 }
4407
4408 int
4409 qla81xx_fac_do_write_enable(scsi_qla_host_t *vha, int enable)
4410 {
4411 int rval;
4412 mbx_cmd_t mc;
4413 mbx_cmd_t *mcp = &mc;
4414
4415 if (!IS_QLA81XX(vha->hw) && !IS_QLA83XX(vha->hw) &&
4416 !IS_QLA27XX(vha->hw))
4417 return QLA_FUNCTION_FAILED;
4418
4419 ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x10df,
4420 "Entered %s.\n", __func__);
4421
4422 mcp->mb[0] = MBC_FLASH_ACCESS_CTRL;
4423 mcp->mb[1] = enable ? FAC_OPT_CMD_WRITE_ENABLE :
4424 FAC_OPT_CMD_WRITE_PROTECT;
4425 mcp->out_mb = MBX_1|MBX_0;
4426 mcp->in_mb = MBX_1|MBX_0;
4427 mcp->tov = MBX_TOV_SECONDS;
4428 mcp->flags = 0;
4429 rval = qla2x00_mailbox_command(vha, mcp);
4430
4431 if (rval != QLA_SUCCESS) {
4432 ql_dbg(ql_dbg_mbx, vha, 0x10e0,
4433 "Failed=%x mb[0]=%x mb[1]=%x.\n",
4434 rval, mcp->mb[0], mcp->mb[1]);
4435 } else {
4436 ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x10e1,
4437 "Done %s.\n", __func__);
4438 }
4439
4440 return rval;
4441 }
4442
4443 int
4444 qla81xx_fac_erase_sector(scsi_qla_host_t *vha, uint32_t start, uint32_t finish)
4445 {
4446 int rval;
4447 mbx_cmd_t mc;
4448 mbx_cmd_t *mcp = &mc;
4449
4450 if (!IS_QLA81XX(vha->hw) && !IS_QLA83XX(vha->hw) &&
4451 !IS_QLA27XX(vha->hw))
4452 return QLA_FUNCTION_FAILED;
4453
4454 ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x10e2,
4455 "Entered %s.\n", __func__);
4456
4457 mcp->mb[0] = MBC_FLASH_ACCESS_CTRL;
4458 mcp->mb[1] = FAC_OPT_CMD_ERASE_SECTOR;
4459 mcp->mb[2] = LSW(start);
4460 mcp->mb[3] = MSW(start);
4461 mcp->mb[4] = LSW(finish);
4462 mcp->mb[5] = MSW(finish);
4463 mcp->out_mb = MBX_5|MBX_4|MBX_3|MBX_2|MBX_1|MBX_0;
4464 mcp->in_mb = MBX_2|MBX_1|MBX_0;
4465 mcp->tov = MBX_TOV_SECONDS;
4466 mcp->flags = 0;
4467 rval = qla2x00_mailbox_command(vha, mcp);
4468
4469 if (rval != QLA_SUCCESS) {
4470 ql_dbg(ql_dbg_mbx, vha, 0x10e3,
4471 "Failed=%x mb[0]=%x mb[1]=%x mb[2]=%x.\n",
4472 rval, mcp->mb[0], mcp->mb[1], mcp->mb[2]);
4473 } else {
4474 ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x10e4,
4475 "Done %s.\n", __func__);
4476 }
4477
4478 return rval;
4479 }
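
/*
 * Usage sketch (illustrative only): the three FAC helpers above are
 * normally used together when erasing flash, roughly as below (error
 * handling trimmed; start/finish are sector-aligned flash addresses
 * the caller already knows).
 *
 *	uint32_t sec_sz;
 *
 *	if (qla81xx_fac_get_sector_size(vha, &sec_sz) == QLA_SUCCESS &&
 *	    qla81xx_fac_do_write_enable(vha, 1) == QLA_SUCCESS) {
 *		qla81xx_fac_erase_sector(vha, start, finish);
 *		qla81xx_fac_do_write_enable(vha, 0);
 *	}
 */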
4480
4481 int
4482 qla81xx_restart_mpi_firmware(scsi_qla_host_t *vha)
4483 {
4484 int rval = 0;
4485 mbx_cmd_t mc;
4486 mbx_cmd_t *mcp = &mc;
4487
4488 ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x10e5,
4489 "Entered %s.\n", __func__);
4490
4491 mcp->mb[0] = MBC_RESTART_MPI_FW;
4492 mcp->out_mb = MBX_0;
4493 mcp->in_mb = MBX_0|MBX_1;
4494 mcp->tov = MBX_TOV_SECONDS;
4495 mcp->flags = 0;
4496 rval = qla2x00_mailbox_command(vha, mcp);
4497
4498 if (rval != QLA_SUCCESS) {
4499 ql_dbg(ql_dbg_mbx, vha, 0x10e6,
4500 "Failed=%x mb[0]=%x mb[1]=%x.\n",
4501 rval, mcp->mb[0], mcp->mb[1]);
4502 } else {
4503 ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x10e7,
4504 "Done %s.\n", __func__);
4505 }
4506
4507 return rval;
4508 }
4509
4510 int
4511 qla82xx_set_driver_version(scsi_qla_host_t *vha, char *version)
4512 {
4513 int rval;
4514 mbx_cmd_t mc;
4515 mbx_cmd_t *mcp = &mc;
4516 int i;
4517 int len;
4518 uint16_t *str;
4519 struct qla_hw_data *ha = vha->hw;
4520
4521 if (!IS_P3P_TYPE(ha))
4522 return QLA_FUNCTION_FAILED;
4523
4524 ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x117b,
4525 "Entered %s.\n", __func__);
4526
4527 str = (void *)version;
4528 len = strlen(version);
4529
4530 mcp->mb[0] = MBC_SET_RNID_PARAMS;
4531 mcp->mb[1] = RNID_TYPE_SET_VERSION << 8;
4532 mcp->out_mb = MBX_1|MBX_0;
4533 for (i = 4; i < 16 && len; i++, str++, len -= 2) {
4534 mcp->mb[i] = cpu_to_le16p(str);
4535 mcp->out_mb |= 1<<i;
4536 }
4537 for (; i < 16; i++) {
4538 mcp->mb[i] = 0;
4539 mcp->out_mb |= 1<<i;
4540 }
4541 mcp->in_mb = MBX_1|MBX_0;
4542 mcp->tov = MBX_TOV_SECONDS;
4543 mcp->flags = 0;
4544 rval = qla2x00_mailbox_command(vha, mcp);
4545
4546 if (rval != QLA_SUCCESS) {
4547 ql_dbg(ql_dbg_mbx, vha, 0x117c,
4548 "Failed=%x mb[0]=%x,%x.\n", rval, mcp->mb[0], mcp->mb[1]);
4549 } else {
4550 ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x117d,
4551 "Done %s.\n", __func__);
4552 }
4553
4554 return rval;
4555 }
4556
4557 int
4558 qla25xx_set_driver_version(scsi_qla_host_t *vha, char *version)
4559 {
4560 int rval;
4561 mbx_cmd_t mc;
4562 mbx_cmd_t *mcp = &mc;
4563 int len;
4564 uint16_t dwlen;
4565 uint8_t *str;
4566 dma_addr_t str_dma;
4567 struct qla_hw_data *ha = vha->hw;
4568
4569 if (!IS_FWI2_CAPABLE(ha) || IS_QLA24XX_TYPE(ha) || IS_QLA81XX(ha) ||
4570 IS_P3P_TYPE(ha))
4571 return QLA_FUNCTION_FAILED;
4572
4573 ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x117e,
4574 "Entered %s.\n", __func__);
4575
4576 str = dma_pool_alloc(ha->s_dma_pool, GFP_KERNEL, &str_dma);
4577 if (!str) {
4578 ql_log(ql_log_warn, vha, 0x117f,
4579 "Failed to allocate driver version param.\n");
4580 return QLA_MEMORY_ALLOC_FAILED;
4581 }
4582
4583 memcpy(str, "\x7\x3\x11\x0", 4);
4584 dwlen = str[0];
4585 len = dwlen * 4 - 4;
4586 memset(str + 4, 0, len);
4587 if (len > strlen(version))
4588 len = strlen(version);
4589 memcpy(str + 4, version, len);
4590
4591 mcp->mb[0] = MBC_SET_RNID_PARAMS;
4592 mcp->mb[1] = RNID_TYPE_SET_VERSION << 8 | dwlen;
4593 mcp->mb[2] = MSW(LSD(str_dma));
4594 mcp->mb[3] = LSW(LSD(str_dma));
4595 mcp->mb[6] = MSW(MSD(str_dma));
4596 mcp->mb[7] = LSW(MSD(str_dma));
4597 mcp->out_mb = MBX_7|MBX_6|MBX_3|MBX_2|MBX_1|MBX_0;
4598 mcp->in_mb = MBX_1|MBX_0;
4599 mcp->tov = MBX_TOV_SECONDS;
4600 mcp->flags = 0;
4601 rval = qla2x00_mailbox_command(vha, mcp);
4602
4603 if (rval != QLA_SUCCESS) {
4604 ql_dbg(ql_dbg_mbx, vha, 0x1180,
4605 "Failed=%x mb[0]=%x,%x.\n", rval, mcp->mb[0], mcp->mb[1]);
4606 } else {
4607 ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x1181,
4608 "Done %s.\n", __func__);
4609 }
4610
4611 dma_pool_free(ha->s_dma_pool, str, str_dma);
4612
4613 return rval;
4614 }
4615
4616 int
4617 qla24xx_get_port_login_templ(scsi_qla_host_t *vha, dma_addr_t buf_dma,
4618 void *buf, uint16_t bufsiz)
4619 {
4620 int rval, i;
4621 mbx_cmd_t mc;
4622 mbx_cmd_t *mcp = &mc;
4623 uint32_t *bp;
4624
4625 if (!IS_FWI2_CAPABLE(vha->hw))
4626 return QLA_FUNCTION_FAILED;
4627
4628 ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x1159,
4629 "Entered %s.\n", __func__);
4630
4631 mcp->mb[0] = MBC_GET_RNID_PARAMS;
4632 mcp->mb[1] = RNID_TYPE_PORT_LOGIN << 8;
4633 mcp->mb[2] = MSW(buf_dma);
4634 mcp->mb[3] = LSW(buf_dma);
4635 mcp->mb[6] = MSW(MSD(buf_dma));
4636 mcp->mb[7] = LSW(MSD(buf_dma));
4637 mcp->mb[8] = bufsiz/4;
4638 mcp->out_mb = MBX_8|MBX_7|MBX_6|MBX_5|MBX_4|MBX_3|MBX_2|MBX_1|MBX_0;
4639 mcp->in_mb = MBX_1|MBX_0;
4640 mcp->tov = MBX_TOV_SECONDS;
4641 mcp->flags = 0;
4642 rval = qla2x00_mailbox_command(vha, mcp);
4643
4644 if (rval != QLA_SUCCESS) {
4645 ql_dbg(ql_dbg_mbx, vha, 0x115a,
4646 "Failed=%x mb[0]=%x,%x.\n", rval, mcp->mb[0], mcp->mb[1]);
4647 } else {
4648 ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x115b,
4649 "Done %s.\n", __func__);
4650 bp = (uint32_t *) buf;
4651 for (i = 0; i < (bufsiz-4)/4; i++, bp++)
4652 *bp = cpu_to_be32(*bp);
4653 }
4654
4655 return rval;
4656 }
4657
4658 static int
4659 qla2x00_read_asic_temperature(scsi_qla_host_t *vha, uint16_t *temp)
4660 {
4661 int rval;
4662 mbx_cmd_t mc;
4663 mbx_cmd_t *mcp = &mc;
4664
4665 if (!IS_FWI2_CAPABLE(vha->hw))
4666 return QLA_FUNCTION_FAILED;
4667
4668 ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x1159,
4669 "Entered %s.\n", __func__);
4670
4671 mcp->mb[0] = MBC_GET_RNID_PARAMS;
4672 mcp->mb[1] = RNID_TYPE_ASIC_TEMP << 8;
4673 mcp->out_mb = MBX_1|MBX_0;
4674 mcp->in_mb = MBX_1|MBX_0;
4675 mcp->tov = MBX_TOV_SECONDS;
4676 mcp->flags = 0;
4677 rval = qla2x00_mailbox_command(vha, mcp);
4678 *temp = mcp->mb[1];
4679
4680 if (rval != QLA_SUCCESS) {
4681 ql_dbg(ql_dbg_mbx, vha, 0x115a,
4682 "Failed=%x mb[0]=%x,%x.\n", rval, mcp->mb[0], mcp->mb[1]);
4683 } else {
4684 ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x115b,
4685 "Done %s.\n", __func__);
4686 }
4687
4688 return rval;
4689 }
4690
4691 int
4692 qla2x00_read_sfp(scsi_qla_host_t *vha, dma_addr_t sfp_dma, uint8_t *sfp,
4693 uint16_t dev, uint16_t off, uint16_t len, uint16_t opt)
4694 {
4695 int rval;
4696 mbx_cmd_t mc;
4697 mbx_cmd_t *mcp = &mc;
4698 struct qla_hw_data *ha = vha->hw;
4699
4700 ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x10e8,
4701 "Entered %s.\n", __func__);
4702
4703 if (!IS_FWI2_CAPABLE(ha))
4704 return QLA_FUNCTION_FAILED;
4705
4706 if (len == 1)
4707 opt |= BIT_0;
4708
4709 mcp->mb[0] = MBC_READ_SFP;
4710 mcp->mb[1] = dev;
4711 mcp->mb[2] = MSW(sfp_dma);
4712 mcp->mb[3] = LSW(sfp_dma);
4713 mcp->mb[6] = MSW(MSD(sfp_dma));
4714 mcp->mb[7] = LSW(MSD(sfp_dma));
4715 mcp->mb[8] = len;
4716 mcp->mb[9] = off;
4717 mcp->mb[10] = opt;
4718 mcp->out_mb = MBX_10|MBX_9|MBX_8|MBX_7|MBX_6|MBX_3|MBX_2|MBX_1|MBX_0;
4719 mcp->in_mb = MBX_1|MBX_0;
4720 mcp->tov = MBX_TOV_SECONDS;
4721 mcp->flags = 0;
4722 rval = qla2x00_mailbox_command(vha, mcp);
4723
4724 if (opt & BIT_0)
4725 *sfp = mcp->mb[1];
4726
4727 if (rval != QLA_SUCCESS) {
4728 ql_dbg(ql_dbg_mbx, vha, 0x10e9,
4729 "Failed=%x mb[0]=%x.\n", rval, mcp->mb[0]);
4730 if (mcp->mb[0] == MBS_COMMAND_ERROR &&
4731 mcp->mb[1] == 0x22)
4732 /* sfp is not there */
4733 rval = QLA_INTERFACE_ERROR;
4734 } else {
4735 ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x10ea,
4736 "Done %s.\n", __func__);
4737 }
4738
4739 return rval;
4740 }
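
/*
 * Usage sketch (illustrative only): with len == 1 the routine sets
 * BIT_0 in the option word itself and returns the byte in *sfp, so a
 * single-byte read needs no DMA buffer. dev_addr and reg_off are
 * placeholders; qla2x00_get_thermal_temp() later in this file is the
 * in-tree example of this pattern.
 *
 *	uint8_t byte;
 *	int rval;
 *
 *	rval = qla2x00_read_sfp(vha, 0, &byte, dev_addr, reg_off, 1, 0);
 *
 * On QLA_SUCCESS, "byte" holds the value read from the device.
 */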
4741
4742 int
4743 qla2x00_write_sfp(scsi_qla_host_t *vha, dma_addr_t sfp_dma, uint8_t *sfp,
4744 uint16_t dev, uint16_t off, uint16_t len, uint16_t opt)
4745 {
4746 int rval;
4747 mbx_cmd_t mc;
4748 mbx_cmd_t *mcp = &mc;
4749 struct qla_hw_data *ha = vha->hw;
4750
4751 ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x10eb,
4752 "Entered %s.\n", __func__);
4753
4754 if (!IS_FWI2_CAPABLE(ha))
4755 return QLA_FUNCTION_FAILED;
4756
4757 if (len == 1)
4758 opt |= BIT_0;
4759
4760 if (opt & BIT_0)
4761 len = *sfp;
4762
4763 mcp->mb[0] = MBC_WRITE_SFP;
4764 mcp->mb[1] = dev;
4765 mcp->mb[2] = MSW(sfp_dma);
4766 mcp->mb[3] = LSW(sfp_dma);
4767 mcp->mb[6] = MSW(MSD(sfp_dma));
4768 mcp->mb[7] = LSW(MSD(sfp_dma));
4769 mcp->mb[8] = len;
4770 mcp->mb[9] = off;
4771 mcp->mb[10] = opt;
4772 mcp->out_mb = MBX_10|MBX_9|MBX_8|MBX_7|MBX_6|MBX_3|MBX_2|MBX_1|MBX_0;
4773 mcp->in_mb = MBX_1|MBX_0;
4774 mcp->tov = MBX_TOV_SECONDS;
4775 mcp->flags = 0;
4776 rval = qla2x00_mailbox_command(vha, mcp);
4777
4778 if (rval != QLA_SUCCESS) {
4779 ql_dbg(ql_dbg_mbx, vha, 0x10ec,
4780 "Failed=%x mb[0]=%x.\n", rval, mcp->mb[0]);
4781 } else {
4782 ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x10ed,
4783 "Done %s.\n", __func__);
4784 }
4785
4786 return rval;
4787 }
4788
4789 int
4790 qla2x00_get_xgmac_stats(scsi_qla_host_t *vha, dma_addr_t stats_dma,
4791 uint16_t size_in_bytes, uint16_t *actual_size)
4792 {
4793 int rval;
4794 mbx_cmd_t mc;
4795 mbx_cmd_t *mcp = &mc;
4796
4797 ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x10ee,
4798 "Entered %s.\n", __func__);
4799
4800 if (!IS_CNA_CAPABLE(vha->hw))
4801 return QLA_FUNCTION_FAILED;
4802
4803 mcp->mb[0] = MBC_GET_XGMAC_STATS;
4804 mcp->mb[2] = MSW(stats_dma);
4805 mcp->mb[3] = LSW(stats_dma);
4806 mcp->mb[6] = MSW(MSD(stats_dma));
4807 mcp->mb[7] = LSW(MSD(stats_dma));
4808 mcp->mb[8] = size_in_bytes >> 2;
4809 mcp->out_mb = MBX_8|MBX_7|MBX_6|MBX_3|MBX_2|MBX_0;
4810 mcp->in_mb = MBX_2|MBX_1|MBX_0;
4811 mcp->tov = MBX_TOV_SECONDS;
4812 mcp->flags = 0;
4813 rval = qla2x00_mailbox_command(vha, mcp);
4814
4815 if (rval != QLA_SUCCESS) {
4816 ql_dbg(ql_dbg_mbx, vha, 0x10ef,
4817 "Failed=%x mb[0]=%x mb[1]=%x mb[2]=%x.\n",
4818 rval, mcp->mb[0], mcp->mb[1], mcp->mb[2]);
4819 } else {
4820 ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x10f0,
4821 "Done %s.\n", __func__);
4822
4823
4824 *actual_size = mcp->mb[2] << 2;
4825 }
4826
4827 return rval;
4828 }
4829
4830 int
4831 qla2x00_get_dcbx_params(scsi_qla_host_t *vha, dma_addr_t tlv_dma,
4832 uint16_t size)
4833 {
4834 int rval;
4835 mbx_cmd_t mc;
4836 mbx_cmd_t *mcp = &mc;
4837
4838 ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x10f1,
4839 "Entered %s.\n", __func__);
4840
4841 if (!IS_CNA_CAPABLE(vha->hw))
4842 return QLA_FUNCTION_FAILED;
4843
4844 mcp->mb[0] = MBC_GET_DCBX_PARAMS;
4845 mcp->mb[1] = 0;
4846 mcp->mb[2] = MSW(tlv_dma);
4847 mcp->mb[3] = LSW(tlv_dma);
4848 mcp->mb[6] = MSW(MSD(tlv_dma));
4849 mcp->mb[7] = LSW(MSD(tlv_dma));
4850 mcp->mb[8] = size;
4851 mcp->out_mb = MBX_8|MBX_7|MBX_6|MBX_3|MBX_2|MBX_1|MBX_0;
4852 mcp->in_mb = MBX_2|MBX_1|MBX_0;
4853 mcp->tov = MBX_TOV_SECONDS;
4854 mcp->flags = 0;
4855 rval = qla2x00_mailbox_command(vha, mcp);
4856
4857 if (rval != QLA_SUCCESS) {
4858 ql_dbg(ql_dbg_mbx, vha, 0x10f2,
4859 "Failed=%x mb[0]=%x mb[1]=%x mb[2]=%x.\n",
4860 rval, mcp->mb[0], mcp->mb[1], mcp->mb[2]);
4861 } else {
4862 ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x10f3,
4863 "Done %s.\n", __func__);
4864 }
4865
4866 return rval;
4867 }
4868
4869 int
4870 qla2x00_read_ram_word(scsi_qla_host_t *vha, uint32_t risc_addr, uint32_t *data)
4871 {
4872 int rval;
4873 mbx_cmd_t mc;
4874 mbx_cmd_t *mcp = &mc;
4875
4876 ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x10f4,
4877 "Entered %s.\n", __func__);
4878
4879 if (!IS_FWI2_CAPABLE(vha->hw))
4880 return QLA_FUNCTION_FAILED;
4881
4882 mcp->mb[0] = MBC_READ_RAM_EXTENDED;
4883 mcp->mb[1] = LSW(risc_addr);
4884 mcp->mb[8] = MSW(risc_addr);
4885 mcp->out_mb = MBX_8|MBX_1|MBX_0;
4886 mcp->in_mb = MBX_3|MBX_2|MBX_0;
4887 mcp->tov = 30;
4888 mcp->flags = 0;
4889 rval = qla2x00_mailbox_command(vha, mcp);
4890 if (rval != QLA_SUCCESS) {
4891 ql_dbg(ql_dbg_mbx, vha, 0x10f5,
4892 "Failed=%x mb[0]=%x.\n", rval, mcp->mb[0]);
4893 } else {
4894 ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x10f6,
4895 "Done %s.\n", __func__);
4896 *data = mcp->mb[3] << 16 | mcp->mb[2];
4897 }
4898
4899 return rval;
4900 }
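
/*
 * Usage sketch (illustrative only): reading one 32-bit word of RISC
 * RAM; risc_addr is a placeholder for an address the caller already
 * knows.
 *
 *	uint32_t word;
 *	int rval;
 *
 *	rval = qla2x00_read_ram_word(vha, risc_addr, &word);
 *
 * On QLA_SUCCESS, "word" holds the value read from risc_addr.
 */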
4901
4902 int
4903 qla2x00_loopback_test(scsi_qla_host_t *vha, struct msg_echo_lb *mreq,
4904 uint16_t *mresp)
4905 {
4906 int rval;
4907 mbx_cmd_t mc;
4908 mbx_cmd_t *mcp = &mc;
4909
4910 ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x10f7,
4911 "Entered %s.\n", __func__);
4912
4913 memset(mcp->mb, 0, sizeof(mcp->mb));
4914 mcp->mb[0] = MBC_DIAGNOSTIC_LOOP_BACK;
4915 mcp->mb[1] = mreq->options | BIT_6;	/* BIT_6 specifies 64-bit addressing */
4916
4917 /* transfer count */
4918 mcp->mb[10] = LSW(mreq->transfer_size);
4919 mcp->mb[11] = MSW(mreq->transfer_size);
4920
4921 /* send data address */
4922 mcp->mb[14] = LSW(mreq->send_dma);
4923 mcp->mb[15] = MSW(mreq->send_dma);
4924 mcp->mb[20] = LSW(MSD(mreq->send_dma));
4925 mcp->mb[21] = MSW(MSD(mreq->send_dma));
4926
4927 /* receive data address */
4928 mcp->mb[16] = LSW(mreq->rcv_dma);
4929 mcp->mb[17] = MSW(mreq->rcv_dma);
4930 mcp->mb[6] = LSW(MSD(mreq->rcv_dma));
4931 mcp->mb[7] = MSW(MSD(mreq->rcv_dma));
4932
4933 /* Iteration count */
4934 mcp->mb[18] = LSW(mreq->iteration_count);
4935 mcp->mb[19] = MSW(mreq->iteration_count);
4936
4937 mcp->out_mb = MBX_21|MBX_20|MBX_19|MBX_18|MBX_17|MBX_16|MBX_15|
4938 MBX_14|MBX_13|MBX_12|MBX_11|MBX_10|MBX_7|MBX_6|MBX_1|MBX_0;
4939 if (IS_CNA_CAPABLE(vha->hw))
4940 mcp->out_mb |= MBX_2;
4941 mcp->in_mb = MBX_19|MBX_18|MBX_3|MBX_2|MBX_1|MBX_0;
4942
4943 mcp->buf_size = mreq->transfer_size;
4944 mcp->tov = MBX_TOV_SECONDS;
4945 mcp->flags = MBX_DMA_OUT|MBX_DMA_IN|IOCTL_CMD;
4946
4947 rval = qla2x00_mailbox_command(vha, mcp);
4948
4949 if (rval != QLA_SUCCESS) {
4950 ql_dbg(ql_dbg_mbx, vha, 0x10f8,
4951 "Failed=%x mb[0]=%x mb[1]=%x mb[2]=%x mb[3]=%x mb[18]=%x "
4952 "mb[19]=%x.\n", rval, mcp->mb[0], mcp->mb[1], mcp->mb[2],
4953 mcp->mb[3], mcp->mb[18], mcp->mb[19]);
4954 } else {
4955 ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x10f9,
4956 "Done %s.\n", __func__);
4957 }
4958
4959 /* Copy mailbox information */
4960 memcpy(mresp, mcp->mb, 64);
4961 return rval;
4962 }
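
/*
 * Usage sketch (illustrative only): the diagnostic/BSG path builds a
 * struct msg_echo_lb roughly as below before calling the routine above.
 * send_buf_dma, rcv_buf_dma and xfer_len are placeholders for a
 * pre-mapped buffer pair and its length; rsp_mb must hold at least 64
 * bytes because the routine copies 64 bytes of mailbox data back.
 *
 *	struct msg_echo_lb elreq;
 *	uint16_t rsp_mb[32];
 *	int rval;
 *
 *	memset(&elreq, 0, sizeof(elreq));
 *	elreq.send_dma = send_buf_dma;
 *	elreq.rcv_dma = rcv_buf_dma;
 *	elreq.transfer_size = xfer_len;
 *	elreq.iteration_count = 1;
 *	rval = qla2x00_loopback_test(vha, &elreq, rsp_mb);
 */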
4963
4964 int
4965 qla2x00_echo_test(scsi_qla_host_t *vha, struct msg_echo_lb *mreq,
4966 uint16_t *mresp)
4967 {
4968 int rval;
4969 mbx_cmd_t mc;
4970 mbx_cmd_t *mcp = &mc;
4971 struct qla_hw_data *ha = vha->hw;
4972
4973 ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x10fa,
4974 "Entered %s.\n", __func__);
4975
4976 memset(mcp->mb, 0, sizeof(mcp->mb));
4977 mcp->mb[0] = MBC_DIAGNOSTIC_ECHO;
4978 /* BIT_6 specifies 64bit address */
4979 mcp->mb[1] = mreq->options | BIT_15 | BIT_6;
4980 if (IS_CNA_CAPABLE(ha)) {
4981 mcp->mb[2] = vha->fcoe_fcf_idx;
4982 }
4983 mcp->mb[16] = LSW(mreq->rcv_dma);
4984 mcp->mb[17] = MSW(mreq->rcv_dma);
4985 mcp->mb[6] = LSW(MSD(mreq->rcv_dma));
4986 mcp->mb[7] = MSW(MSD(mreq->rcv_dma));
4987
4988 mcp->mb[10] = LSW(mreq->transfer_size);
4989
4990 mcp->mb[14] = LSW(mreq->send_dma);
4991 mcp->mb[15] = MSW(mreq->send_dma);
4992 mcp->mb[20] = LSW(MSD(mreq->send_dma));
4993 mcp->mb[21] = MSW(MSD(mreq->send_dma));
4994
4995 mcp->out_mb = MBX_21|MBX_20|MBX_17|MBX_16|MBX_15|
4996 MBX_14|MBX_10|MBX_7|MBX_6|MBX_1|MBX_0;
4997 if (IS_CNA_CAPABLE(ha))
4998 mcp->out_mb |= MBX_2;
4999
5000 mcp->in_mb = MBX_0;
5001 if (IS_QLA24XX_TYPE(ha) || IS_QLA25XX(ha) ||
5002 IS_CNA_CAPABLE(ha) || IS_QLA2031(ha))
5003 mcp->in_mb |= MBX_1;
5004 if (IS_CNA_CAPABLE(ha) || IS_QLA2031(ha))
5005 mcp->in_mb |= MBX_3;
5006
5007 mcp->tov = MBX_TOV_SECONDS;
5008 mcp->flags = MBX_DMA_OUT|MBX_DMA_IN|IOCTL_CMD;
5009 mcp->buf_size = mreq->transfer_size;
5010
5011 rval = qla2x00_mailbox_command(vha, mcp);
5012
5013 if (rval != QLA_SUCCESS) {
5014 ql_dbg(ql_dbg_mbx, vha, 0x10fb,
5015 "Failed=%x mb[0]=%x mb[1]=%x.\n",
5016 rval, mcp->mb[0], mcp->mb[1]);
5017 } else {
5018 ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x10fc,
5019 "Done %s.\n", __func__);
5020 }
5021
5022 /* Copy mailbox information */
5023 memcpy(mresp, mcp->mb, 64);
5024 return rval;
5025 }
5026
5027 int
5028 qla84xx_reset_chip(scsi_qla_host_t *vha, uint16_t enable_diagnostic)
5029 {
5030 int rval;
5031 mbx_cmd_t mc;
5032 mbx_cmd_t *mcp = &mc;
5033
5034 ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x10fd,
5035 "Entered %s enable_diag=%d.\n", __func__, enable_diagnostic);
5036
5037 mcp->mb[0] = MBC_ISP84XX_RESET;
5038 mcp->mb[1] = enable_diagnostic;
5039 mcp->out_mb = MBX_1|MBX_0;
5040 mcp->in_mb = MBX_1|MBX_0;
5041 mcp->tov = MBX_TOV_SECONDS;
5042 mcp->flags = MBX_DMA_OUT|MBX_DMA_IN|IOCTL_CMD;
5043 rval = qla2x00_mailbox_command(vha, mcp);
5044
5045 if (rval != QLA_SUCCESS)
5046 ql_dbg(ql_dbg_mbx, vha, 0x10fe, "Failed=%x.\n", rval);
5047 else
5048 ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x10ff,
5049 "Done %s.\n", __func__);
5050
5051 return rval;
5052 }
5053
5054 int
5055 qla2x00_write_ram_word(scsi_qla_host_t *vha, uint32_t risc_addr, uint32_t data)
5056 {
5057 int rval;
5058 mbx_cmd_t mc;
5059 mbx_cmd_t *mcp = &mc;
5060
5061 ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x1100,
5062 "Entered %s.\n", __func__);
5063
5064 if (!IS_FWI2_CAPABLE(vha->hw))
5065 return QLA_FUNCTION_FAILED;
5066
5067 mcp->mb[0] = MBC_WRITE_RAM_WORD_EXTENDED;
5068 mcp->mb[1] = LSW(risc_addr);
5069 mcp->mb[2] = LSW(data);
5070 mcp->mb[3] = MSW(data);
5071 mcp->mb[8] = MSW(risc_addr);
5072 mcp->out_mb = MBX_8|MBX_3|MBX_2|MBX_1|MBX_0;
5073 mcp->in_mb = MBX_0;
5074 mcp->tov = 30;
5075 mcp->flags = 0;
5076 rval = qla2x00_mailbox_command(vha, mcp);
5077 if (rval != QLA_SUCCESS) {
5078 ql_dbg(ql_dbg_mbx, vha, 0x1101,
5079 "Failed=%x mb[0]=%x.\n", rval, mcp->mb[0]);
5080 } else {
5081 ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x1102,
5082 "Done %s.\n", __func__);
5083 }
5084
5085 return rval;
5086 }
5087
5088 int
5089 qla81xx_write_mpi_register(scsi_qla_host_t *vha, uint16_t *mb)
5090 {
5091 int rval;
5092 uint32_t stat, timer;
5093 uint16_t mb0 = 0;
5094 struct qla_hw_data *ha = vha->hw;
5095 struct device_reg_24xx __iomem *reg = &ha->iobase->isp24;
5096
5097 rval = QLA_SUCCESS;
5098
5099 ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x1103,
5100 "Entered %s.\n", __func__);
5101
5102 clear_bit(MBX_INTERRUPT, &ha->mbx_cmd_flags);
5103
5104 /* Write the MBC data to the registers */
5105 WRT_REG_WORD(&reg->mailbox0, MBC_WRITE_MPI_REGISTER);
5106 WRT_REG_WORD(&reg->mailbox1, mb[0]);
5107 WRT_REG_WORD(&reg->mailbox2, mb[1]);
5108 WRT_REG_WORD(&reg->mailbox3, mb[2]);
5109 WRT_REG_WORD(&reg->mailbox4, mb[3]);
5110
5111 WRT_REG_DWORD(&reg->hccr, HCCRX_SET_HOST_INT);
5112
5113 /* Poll for MBC interrupt */
5114 for (timer = 6000000; timer; timer--) {
5115 /* Check for pending interrupts. */
5116 stat = RD_REG_DWORD(&reg->host_status);
5117 if (stat & HSRX_RISC_INT) {
5118 stat &= 0xff;
5119
5120 if (stat == 0x1 || stat == 0x2 ||
5121 stat == 0x10 || stat == 0x11) {
5122 set_bit(MBX_INTERRUPT,
5123 &ha->mbx_cmd_flags);
5124 mb0 = RD_REG_WORD(&reg->mailbox0);
5125 WRT_REG_DWORD(&reg->hccr,
5126 HCCRX_CLR_RISC_INT);
5127 RD_REG_DWORD(&reg->hccr);
5128 break;
5129 }
5130 }
5131 udelay(5);
5132 }
5133
5134 if (test_and_clear_bit(MBX_INTERRUPT, &ha->mbx_cmd_flags))
5135 rval = mb0 & MBS_MASK;
5136 else
5137 rval = QLA_FUNCTION_FAILED;
5138
5139 if (rval != QLA_SUCCESS) {
5140 ql_dbg(ql_dbg_mbx, vha, 0x1104,
5141 "Failed=%x mb[0]=%x.\n", rval, mb[0]);
5142 } else {
5143 ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x1105,
5144 "Done %s.\n", __func__);
5145 }
5146
5147 return rval;
5148 }
5149
5150 int
5151 qla2x00_get_data_rate(scsi_qla_host_t *vha)
5152 {
5153 int rval;
5154 mbx_cmd_t mc;
5155 mbx_cmd_t *mcp = &mc;
5156 struct qla_hw_data *ha = vha->hw;
5157
5158 ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x1106,
5159 "Entered %s.\n", __func__);
5160
5161 if (!IS_FWI2_CAPABLE(ha))
5162 return QLA_FUNCTION_FAILED;
5163
5164 mcp->mb[0] = MBC_DATA_RATE;
5165 mcp->mb[1] = 0;
5166 mcp->out_mb = MBX_1|MBX_0;
5167 mcp->in_mb = MBX_2|MBX_1|MBX_0;
5168 if (IS_QLA83XX(ha) || IS_QLA27XX(ha))
5169 mcp->in_mb |= MBX_3;
5170 mcp->tov = MBX_TOV_SECONDS;
5171 mcp->flags = 0;
5172 rval = qla2x00_mailbox_command(vha, mcp);
5173 if (rval != QLA_SUCCESS) {
5174 ql_dbg(ql_dbg_mbx, vha, 0x1107,
5175 "Failed=%x mb[0]=%x.\n", rval, mcp->mb[0]);
5176 } else {
5177 ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x1108,
5178 "Done %s.\n", __func__);
5179 if (mcp->mb[1] != 0x7)
5180 ha->link_data_rate = mcp->mb[1];
5181 }
5182
5183 return rval;
5184 }
5185
5186 int
5187 qla81xx_get_port_config(scsi_qla_host_t *vha, uint16_t *mb)
5188 {
5189 int rval;
5190 mbx_cmd_t mc;
5191 mbx_cmd_t *mcp = &mc;
5192 struct qla_hw_data *ha = vha->hw;
5193
5194 ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x1109,
5195 "Entered %s.\n", __func__);
5196
5197 if (!IS_QLA81XX(ha) && !IS_QLA83XX(ha) && !IS_QLA8044(ha) &&
5198 !IS_QLA27XX(ha))
5199 return QLA_FUNCTION_FAILED;
5200 mcp->mb[0] = MBC_GET_PORT_CONFIG;
5201 mcp->out_mb = MBX_0;
5202 mcp->in_mb = MBX_4|MBX_3|MBX_2|MBX_1|MBX_0;
5203 mcp->tov = MBX_TOV_SECONDS;
5204 mcp->flags = 0;
5205
5206 rval = qla2x00_mailbox_command(vha, mcp);
5207
5208 if (rval != QLA_SUCCESS) {
5209 ql_dbg(ql_dbg_mbx, vha, 0x110a,
5210 "Failed=%x mb[0]=%x.\n", rval, mcp->mb[0]);
5211 } else {
5212 /* Copy all bits to preserve original value */
5213 memcpy(mb, &mcp->mb[1], sizeof(uint16_t) * 4);
5214
5215 ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x110b,
5216 "Done %s.\n", __func__);
5217 }
5218 return rval;
5219 }
5220
5221 int
5222 qla81xx_set_port_config(scsi_qla_host_t *vha, uint16_t *mb)
5223 {
5224 int rval;
5225 mbx_cmd_t mc;
5226 mbx_cmd_t *mcp = &mc;
5227
5228 ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x110c,
5229 "Entered %s.\n", __func__);
5230
5231 mcp->mb[0] = MBC_SET_PORT_CONFIG;
5232 /* Copy all bits to preserve original setting */
5233 memcpy(&mcp->mb[1], mb, sizeof(uint16_t) * 4);
5234 mcp->out_mb = MBX_4|MBX_3|MBX_2|MBX_1|MBX_0;
5235 mcp->in_mb = MBX_0;
5236 mcp->tov = MBX_TOV_SECONDS;
5237 mcp->flags = 0;
5238 rval = qla2x00_mailbox_command(vha, mcp);
5239
5240 if (rval != QLA_SUCCESS) {
5241 ql_dbg(ql_dbg_mbx, vha, 0x110d,
5242 "Failed=%x mb[0]=%x.\n", rval, mcp->mb[0]);
5243 } else
5244 ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x110e,
5245 "Done %s.\n", __func__);
5246
5247 return rval;
5248 }
5249
5250
5251 int
5252 qla24xx_set_fcp_prio(scsi_qla_host_t *vha, uint16_t loop_id, uint16_t priority,
5253 uint16_t *mb)
5254 {
5255 int rval;
5256 mbx_cmd_t mc;
5257 mbx_cmd_t *mcp = &mc;
5258 struct qla_hw_data *ha = vha->hw;
5259
5260 ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x110f,
5261 "Entered %s.\n", __func__);
5262
5263 if (!IS_QLA24XX_TYPE(ha) && !IS_QLA25XX(ha))
5264 return QLA_FUNCTION_FAILED;
5265
5266 mcp->mb[0] = MBC_PORT_PARAMS;
5267 mcp->mb[1] = loop_id;
5268 if (ha->flags.fcp_prio_enabled)
5269 mcp->mb[2] = BIT_1;
5270 else
5271 mcp->mb[2] = BIT_2;
5272 mcp->mb[4] = priority & 0xf;
5273 mcp->mb[9] = vha->vp_idx;
5274 mcp->out_mb = MBX_9|MBX_4|MBX_3|MBX_2|MBX_1|MBX_0;
5275 mcp->in_mb = MBX_4|MBX_3|MBX_1|MBX_0;
5276 mcp->tov = 30;
5277 mcp->flags = 0;
5278 rval = qla2x00_mailbox_command(vha, mcp);
5279 if (mb != NULL) {
5280 mb[0] = mcp->mb[0];
5281 mb[1] = mcp->mb[1];
5282 mb[3] = mcp->mb[3];
5283 mb[4] = mcp->mb[4];
5284 }
5285
5286 if (rval != QLA_SUCCESS) {
5287 ql_dbg(ql_dbg_mbx, vha, 0x10cd, "Failed=%x.\n", rval);
5288 } else {
5289 ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x10cc,
5290 "Done %s.\n", __func__);
5291 }
5292
5293 return rval;
5294 }
5295
5296 int
5297 qla2x00_get_thermal_temp(scsi_qla_host_t *vha, uint16_t *temp)
5298 {
5299 int rval = QLA_FUNCTION_FAILED;
5300 struct qla_hw_data *ha = vha->hw;
5301 uint8_t byte;
5302
5303 if (!IS_FWI2_CAPABLE(ha) || IS_QLA24XX_TYPE(ha) || IS_QLA81XX(ha)) {
5304 ql_dbg(ql_dbg_mbx, vha, 0x1150,
5305 "Thermal not supported by this card.\n");
5306 return rval;
5307 }
5308
5309 if (IS_QLA25XX(ha)) {
5310 if (ha->pdev->subsystem_vendor == PCI_VENDOR_ID_QLOGIC &&
5311 ha->pdev->subsystem_device == 0x0175) {
5312 rval = qla2x00_read_sfp(vha, 0, &byte,
5313 0x98, 0x1, 1, BIT_13|BIT_0);
5314 *temp = byte;
5315 return rval;
5316 }
5317 if (ha->pdev->subsystem_vendor == PCI_VENDOR_ID_HP &&
5318 ha->pdev->subsystem_device == 0x338e) {
5319 rval = qla2x00_read_sfp(vha, 0, &byte,
5320 0x98, 0x1, 1, BIT_15|BIT_14|BIT_0);
5321 *temp = byte;
5322 return rval;
5323 }
5324 ql_dbg(ql_dbg_mbx, vha, 0x10c9,
5325 "Thermal not supported by this card.\n");
5326 return rval;
5327 }
5328
5329 if (IS_QLA82XX(ha)) {
5330 *temp = qla82xx_read_temperature(vha);
5331 rval = QLA_SUCCESS;
5332 return rval;
5333 } else if (IS_QLA8044(ha)) {
5334 *temp = qla8044_read_temperature(vha);
5335 rval = QLA_SUCCESS;
5336 return rval;
5337 }
5338
5339 rval = qla2x00_read_asic_temperature(vha, temp);
5340 return rval;
5341 }
5342
5343 int
5344 qla82xx_mbx_intr_enable(scsi_qla_host_t *vha)
5345 {
5346 int rval;
5347 struct qla_hw_data *ha = vha->hw;
5348 mbx_cmd_t mc;
5349 mbx_cmd_t *mcp = &mc;
5350
5351 ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x1017,
5352 "Entered %s.\n", __func__);
5353
5354 if (!IS_FWI2_CAPABLE(ha))
5355 return QLA_FUNCTION_FAILED;
5356
5357 memset(mcp, 0, sizeof(mbx_cmd_t));
5358 mcp->mb[0] = MBC_TOGGLE_INTERRUPT;
5359 mcp->mb[1] = 1;
5360
5361 mcp->out_mb = MBX_1|MBX_0;
5362 mcp->in_mb = MBX_0;
5363 mcp->tov = 30;
5364 mcp->flags = 0;
5365
5366 rval = qla2x00_mailbox_command(vha, mcp);
5367 if (rval != QLA_SUCCESS) {
5368 ql_dbg(ql_dbg_mbx, vha, 0x1016,
5369 "Failed=%x mb[0]=%x.\n", rval, mcp->mb[0]);
5370 } else {
5371 ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x100e,
5372 "Done %s.\n", __func__);
5373 }
5374
5375 return rval;
5376 }
5377
5378 int
5379 qla82xx_mbx_intr_disable(scsi_qla_host_t *vha)
5380 {
5381 int rval;
5382 struct qla_hw_data *ha = vha->hw;
5383 mbx_cmd_t mc;
5384 mbx_cmd_t *mcp = &mc;
5385
5386 ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x100d,
5387 "Entered %s.\n", __func__);
5388
5389 if (!IS_P3P_TYPE(ha))
5390 return QLA_FUNCTION_FAILED;
5391
5392 memset(mcp, 0, sizeof(mbx_cmd_t));
5393 mcp->mb[0] = MBC_TOGGLE_INTERRUPT;
5394 mcp->mb[1] = 0;
5395
5396 mcp->out_mb = MBX_1|MBX_0;
5397 mcp->in_mb = MBX_0;
5398 mcp->tov = 30;
5399 mcp->flags = 0;
5400
5401 rval = qla2x00_mailbox_command(vha, mcp);
5402 if (rval != QLA_SUCCESS) {
5403 ql_dbg(ql_dbg_mbx, vha, 0x100c,
5404 "Failed=%x mb[0]=%x.\n", rval, mcp->mb[0]);
5405 } else {
5406 ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x100b,
5407 "Done %s.\n", __func__);
5408 }
5409
5410 return rval;
5411 }
5412
5413 int
5414 qla82xx_md_get_template_size(scsi_qla_host_t *vha)
5415 {
5416 struct qla_hw_data *ha = vha->hw;
5417 mbx_cmd_t mc;
5418 mbx_cmd_t *mcp = &mc;
5419 int rval = QLA_FUNCTION_FAILED;
5420
5421 ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x111f,
5422 "Entered %s.\n", __func__);
5423
5424 memset(mcp->mb, 0, sizeof(mcp->mb));
5425 mcp->mb[0] = LSW(MBC_DIAGNOSTIC_MINIDUMP_TEMPLATE);
5426 mcp->mb[1] = MSW(MBC_DIAGNOSTIC_MINIDUMP_TEMPLATE);
5427 mcp->mb[2] = LSW(RQST_TMPLT_SIZE);
5428 mcp->mb[3] = MSW(RQST_TMPLT_SIZE);
5429
5430 mcp->out_mb = MBX_3|MBX_2|MBX_1|MBX_0;
5431 mcp->in_mb = MBX_14|MBX_13|MBX_12|MBX_11|MBX_10|MBX_9|MBX_8|
5432 MBX_7|MBX_6|MBX_5|MBX_4|MBX_3|MBX_2|MBX_1|MBX_0;
5433
5434 mcp->flags = MBX_DMA_OUT|MBX_DMA_IN|IOCTL_CMD;
5435 mcp->tov = MBX_TOV_SECONDS;
5436 rval = qla2x00_mailbox_command(vha, mcp);
5437
5438 /* Always copy back return mailbox values. */
5439 if (rval != QLA_SUCCESS) {
5440 ql_dbg(ql_dbg_mbx, vha, 0x1120,
5441 "mailbox command FAILED=0x%x, subcode=%x.\n",
5442 (mcp->mb[1] << 16) | mcp->mb[0],
5443 (mcp->mb[3] << 16) | mcp->mb[2]);
5444 } else {
5445 ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x1121,
5446 "Done %s.\n", __func__);
5447 ha->md_template_size = ((mcp->mb[3] << 16) | mcp->mb[2]);
5448 if (!ha->md_template_size) {
5449 ql_dbg(ql_dbg_mbx, vha, 0x1122,
5450 "Null template size obtained.\n");
5451 rval = QLA_FUNCTION_FAILED;
5452 }
5453 }
5454 return rval;
5455 }
5456
5457 int
5458 qla82xx_md_get_template(scsi_qla_host_t *vha)
5459 {
5460 struct qla_hw_data *ha = vha->hw;
5461 mbx_cmd_t mc;
5462 mbx_cmd_t *mcp = &mc;
5463 int rval = QLA_FUNCTION_FAILED;
5464
5465 ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x1123,
5466 "Entered %s.\n", __func__);
5467
5468 ha->md_tmplt_hdr = dma_alloc_coherent(&ha->pdev->dev,
5469 ha->md_template_size, &ha->md_tmplt_hdr_dma, GFP_KERNEL);
5470 if (!ha->md_tmplt_hdr) {
5471 ql_log(ql_log_warn, vha, 0x1124,
5472 "Unable to allocate memory for Minidump template.\n");
5473 return rval;
5474 }
5475
5476 memset(mcp->mb, 0, sizeof(mcp->mb));
5477 mcp->mb[0] = LSW(MBC_DIAGNOSTIC_MINIDUMP_TEMPLATE);
5478 mcp->mb[1] = MSW(MBC_DIAGNOSTIC_MINIDUMP_TEMPLATE);
5479 mcp->mb[2] = LSW(RQST_TMPLT);
5480 mcp->mb[3] = MSW(RQST_TMPLT);
5481 mcp->mb[4] = LSW(LSD(ha->md_tmplt_hdr_dma));
5482 mcp->mb[5] = MSW(LSD(ha->md_tmplt_hdr_dma));
5483 mcp->mb[6] = LSW(MSD(ha->md_tmplt_hdr_dma));
5484 mcp->mb[7] = MSW(MSD(ha->md_tmplt_hdr_dma));
5485 mcp->mb[8] = LSW(ha->md_template_size);
5486 mcp->mb[9] = MSW(ha->md_template_size);
5487
5488 mcp->flags = MBX_DMA_OUT|MBX_DMA_IN|IOCTL_CMD;
5489 mcp->tov = MBX_TOV_SECONDS;
5490 mcp->out_mb = MBX_11|MBX_10|MBX_9|MBX_8|
5491 MBX_7|MBX_6|MBX_5|MBX_4|MBX_3|MBX_2|MBX_1|MBX_0;
5492 mcp->in_mb = MBX_3|MBX_2|MBX_1|MBX_0;
5493 rval = qla2x00_mailbox_command(vha, mcp);
5494
5495 if (rval != QLA_SUCCESS) {
5496 ql_dbg(ql_dbg_mbx, vha, 0x1125,
5497 "mailbox command FAILED=0x%x, subcode=%x.\n",
5498 ((mcp->mb[1] << 16) | mcp->mb[0]),
5499 ((mcp->mb[3] << 16) | mcp->mb[2]));
5500 } else
5501 ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x1126,
5502 "Done %s.\n", __func__);
5503 return rval;
5504 }
5505
5506 int
5507 qla8044_md_get_template(scsi_qla_host_t *vha)
5508 {
5509 struct qla_hw_data *ha = vha->hw;
5510 mbx_cmd_t mc;
5511 mbx_cmd_t *mcp = &mc;
5512 int rval = QLA_FUNCTION_FAILED;
5513 int offset = 0, size = MINIDUMP_SIZE_36K;
5514 ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0xb11f,
5515 "Entered %s.\n", __func__);
5516
5517 ha->md_tmplt_hdr = dma_alloc_coherent(&ha->pdev->dev,
5518 ha->md_template_size, &ha->md_tmplt_hdr_dma, GFP_KERNEL);
5519 if (!ha->md_tmplt_hdr) {
5520 ql_log(ql_log_warn, vha, 0xb11b,
5521 "Unable to allocate memory for Minidump template.\n");
5522 return rval;
5523 }
5524
5525 memset(mcp->mb, 0, sizeof(mcp->mb));
5526 while (offset < ha->md_template_size) {
5527 mcp->mb[0] = LSW(MBC_DIAGNOSTIC_MINIDUMP_TEMPLATE);
5528 mcp->mb[1] = MSW(MBC_DIAGNOSTIC_MINIDUMP_TEMPLATE);
5529 mcp->mb[2] = LSW(RQST_TMPLT);
5530 mcp->mb[3] = MSW(RQST_TMPLT);
5531 mcp->mb[4] = LSW(LSD(ha->md_tmplt_hdr_dma + offset));
5532 mcp->mb[5] = MSW(LSD(ha->md_tmplt_hdr_dma + offset));
5533 mcp->mb[6] = LSW(MSD(ha->md_tmplt_hdr_dma + offset));
5534 mcp->mb[7] = MSW(MSD(ha->md_tmplt_hdr_dma + offset));
5535 mcp->mb[8] = LSW(size);
5536 mcp->mb[9] = MSW(size);
5537 mcp->mb[10] = LSW(offset);
5538 mcp->mb[11] = MSW(offset);
5539 mcp->flags = MBX_DMA_OUT|MBX_DMA_IN|IOCTL_CMD;
5540 mcp->tov = MBX_TOV_SECONDS;
5541 mcp->out_mb = MBX_11|MBX_10|MBX_9|MBX_8|
5542 MBX_7|MBX_6|MBX_5|MBX_4|MBX_3|MBX_2|MBX_1|MBX_0;
5543 mcp->in_mb = MBX_3|MBX_2|MBX_1|MBX_0;
5544 rval = qla2x00_mailbox_command(vha, mcp);
5545
5546 if (rval != QLA_SUCCESS) {
5547 ql_dbg(ql_dbg_mbx, vha, 0xb11c,
5548 "mailbox command FAILED=0x%x, subcode=%x.\n",
5549 ((mcp->mb[1] << 16) | mcp->mb[0]),
5550 ((mcp->mb[3] << 16) | mcp->mb[2]));
5551 return rval;
5552 } else
5553 ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0xb11d,
5554 "Done %s.\n", __func__);
5555 offset = offset + size;
5556 }
5557 return rval;
5558 }
5559
5560 int
5561 qla81xx_set_led_config(scsi_qla_host_t *vha, uint16_t *led_cfg)
5562 {
5563 int rval;
5564 struct qla_hw_data *ha = vha->hw;
5565 mbx_cmd_t mc;
5566 mbx_cmd_t *mcp = &mc;
5567
5568 if (!IS_QLA81XX(ha) && !IS_QLA8031(ha))
5569 return QLA_FUNCTION_FAILED;
5570
5571 ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x1133,
5572 "Entered %s.\n", __func__);
5573
5574 memset(mcp, 0, sizeof(mbx_cmd_t));
5575 mcp->mb[0] = MBC_SET_LED_CONFIG;
5576 mcp->mb[1] = led_cfg[0];
5577 mcp->mb[2] = led_cfg[1];
5578 if (IS_QLA8031(ha)) {
5579 mcp->mb[3] = led_cfg[2];
5580 mcp->mb[4] = led_cfg[3];
5581 mcp->mb[5] = led_cfg[4];
5582 mcp->mb[6] = led_cfg[5];
5583 }
5584
5585 mcp->out_mb = MBX_2|MBX_1|MBX_0;
5586 if (IS_QLA8031(ha))
5587 mcp->out_mb |= MBX_6|MBX_5|MBX_4|MBX_3;
5588 mcp->in_mb = MBX_0;
5589 mcp->tov = 30;
5590 mcp->flags = 0;
5591
5592 rval = qla2x00_mailbox_command(vha, mcp);
5593 if (rval != QLA_SUCCESS) {
5594 ql_dbg(ql_dbg_mbx, vha, 0x1134,
5595 "Failed=%x mb[0]=%x.\n", rval, mcp->mb[0]);
5596 } else {
5597 ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x1135,
5598 "Done %s.\n", __func__);
5599 }
5600
5601 return rval;
5602 }
5603
5604 int
5605 qla81xx_get_led_config(scsi_qla_host_t *vha, uint16_t *led_cfg)
5606 {
5607 int rval;
5608 struct qla_hw_data *ha = vha->hw;
5609 mbx_cmd_t mc;
5610 mbx_cmd_t *mcp = &mc;
5611
5612 if (!IS_QLA81XX(ha) && !IS_QLA8031(ha))
5613 return QLA_FUNCTION_FAILED;
5614
5615 ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x1136,
5616 "Entered %s.\n", __func__);
5617
5618 memset(mcp, 0, sizeof(mbx_cmd_t));
5619 mcp->mb[0] = MBC_GET_LED_CONFIG;
5620
5621 mcp->out_mb = MBX_0;
5622 mcp->in_mb = MBX_2|MBX_1|MBX_0;
5623 if (IS_QLA8031(ha))
5624 mcp->in_mb |= MBX_6|MBX_5|MBX_4|MBX_3;
5625 mcp->tov = 30;
5626 mcp->flags = 0;
5627
5628 rval = qla2x00_mailbox_command(vha, mcp);
5629 if (rval != QLA_SUCCESS) {
5630 ql_dbg(ql_dbg_mbx, vha, 0x1137,
5631 "Failed=%x mb[0]=%x.\n", rval, mcp->mb[0]);
5632 } else {
5633 led_cfg[0] = mcp->mb[1];
5634 led_cfg[1] = mcp->mb[2];
5635 if (IS_QLA8031(ha)) {
5636 led_cfg[2] = mcp->mb[3];
5637 led_cfg[3] = mcp->mb[4];
5638 led_cfg[4] = mcp->mb[5];
5639 led_cfg[5] = mcp->mb[6];
5640 }
5641 ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x1138,
5642 "Done %s.\n", __func__);
5643 }
5644
5645 return rval;
5646 }
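
/*
 * Usage sketch (illustrative only): the get/set pair above is typically
 * used as a read-modify-write; new_scheme below is a placeholder for
 * whatever configuration value the caller wants to program.
 *
 *	uint16_t led_cfg[6] = { 0 };
 *
 *	if (qla81xx_get_led_config(vha, led_cfg) == QLA_SUCCESS) {
 *		led_cfg[0] = new_scheme;
 *		qla81xx_set_led_config(vha, led_cfg);
 *	}
 */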
5647
5648 int
5649 qla82xx_mbx_beacon_ctl(scsi_qla_host_t *vha, int enable)
5650 {
5651 int rval;
5652 struct qla_hw_data *ha = vha->hw;
5653 mbx_cmd_t mc;
5654 mbx_cmd_t *mcp = &mc;
5655
5656 if (!IS_P3P_TYPE(ha))
5657 return QLA_FUNCTION_FAILED;
5658
5659 ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x1127,
5660 "Entered %s.\n", __func__);
5661
5662 memset(mcp, 0, sizeof(mbx_cmd_t));
5663 mcp->mb[0] = MBC_SET_LED_CONFIG;
5664 if (enable)
5665 mcp->mb[7] = 0xE;
5666 else
5667 mcp->mb[7] = 0xD;
5668
5669 mcp->out_mb = MBX_7|MBX_0;
5670 mcp->in_mb = MBX_0;
5671 mcp->tov = MBX_TOV_SECONDS;
5672 mcp->flags = 0;
5673
5674 rval = qla2x00_mailbox_command(vha, mcp);
5675 if (rval != QLA_SUCCESS) {
5676 ql_dbg(ql_dbg_mbx, vha, 0x1128,
5677 "Failed=%x mb[0]=%x.\n", rval, mcp->mb[0]);
5678 } else {
5679 ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x1129,
5680 "Done %s.\n", __func__);
5681 }
5682
5683 return rval;
5684 }
5685
5686 int
5687 qla83xx_wr_reg(scsi_qla_host_t *vha, uint32_t reg, uint32_t data)
5688 {
5689 int rval;
5690 struct qla_hw_data *ha = vha->hw;
5691 mbx_cmd_t mc;
5692 mbx_cmd_t *mcp = &mc;
5693
5694 if (!IS_QLA83XX(ha) && !IS_QLA27XX(ha))
5695 return QLA_FUNCTION_FAILED;
5696
5697 ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x1130,
5698 "Entered %s.\n", __func__);
5699
5700 mcp->mb[0] = MBC_WRITE_REMOTE_REG;
5701 mcp->mb[1] = LSW(reg);
5702 mcp->mb[2] = MSW(reg);
5703 mcp->mb[3] = LSW(data);
5704 mcp->mb[4] = MSW(data);
5705 mcp->out_mb = MBX_4|MBX_3|MBX_2|MBX_1|MBX_0;
5706
5707 mcp->in_mb = MBX_1|MBX_0;
5708 mcp->tov = MBX_TOV_SECONDS;
5709 mcp->flags = 0;
5710 rval = qla2x00_mailbox_command(vha, mcp);
5711
5712 if (rval != QLA_SUCCESS) {
5713 ql_dbg(ql_dbg_mbx, vha, 0x1131,
5714 "Failed=%x mb[0]=%x.\n", rval, mcp->mb[0]);
5715 } else {
5716 ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x1132,
5717 "Done %s.\n", __func__);
5718 }
5719
5720 return rval;
5721 }
5722
5723 int
5724 qla2x00_port_logout(scsi_qla_host_t *vha, struct fc_port *fcport)
5725 {
5726 int rval;
5727 struct qla_hw_data *ha = vha->hw;
5728 mbx_cmd_t mc;
5729 mbx_cmd_t *mcp = &mc;
5730
5731 if (IS_QLA2100(ha) || IS_QLA2200(ha)) {
5732 ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x113b,
5733 "Implicit LOGO Unsupported.\n");
5734 return QLA_FUNCTION_FAILED;
5735 }
5736
5737
5738 ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x113c,
5739 "Entering %s.\n", __func__);
5740
5741 /* Perform Implicit LOGO. */
5742 mcp->mb[0] = MBC_PORT_LOGOUT;
5743 mcp->mb[1] = fcport->loop_id;
5744 mcp->mb[10] = BIT_15;
5745 mcp->out_mb = MBX_10|MBX_1|MBX_0;
5746 mcp->in_mb = MBX_0;
5747 mcp->tov = MBX_TOV_SECONDS;
5748 mcp->flags = 0;
5749 rval = qla2x00_mailbox_command(vha, mcp);
5750 if (rval != QLA_SUCCESS)
5751 ql_dbg(ql_dbg_mbx, vha, 0x113d,
5752 "Failed=%x mb[0]=%x.\n", rval, mcp->mb[0]);
5753 else
5754 ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x113e,
5755 "Done %s.\n", __func__);
5756
5757 return rval;
5758 }
5759
5760 int
5761 qla83xx_rd_reg(scsi_qla_host_t *vha, uint32_t reg, uint32_t *data)
5762 {
5763 int rval;
5764 mbx_cmd_t mc;
5765 mbx_cmd_t *mcp = &mc;
5766 struct qla_hw_data *ha = vha->hw;
5767 unsigned long retry_max_time = jiffies + (2 * HZ);
5768
5769 if (!IS_QLA83XX(ha) && !IS_QLA27XX(ha))
5770 return QLA_FUNCTION_FAILED;
5771
5772 ql_dbg(ql_dbg_mbx, vha, 0x114b, "Entered %s.\n", __func__);
5773
5774 retry_rd_reg:
5775 mcp->mb[0] = MBC_READ_REMOTE_REG;
5776 mcp->mb[1] = LSW(reg);
5777 mcp->mb[2] = MSW(reg);
5778 mcp->out_mb = MBX_2|MBX_1|MBX_0;
5779 mcp->in_mb = MBX_4|MBX_3|MBX_1|MBX_0;
5780 mcp->tov = MBX_TOV_SECONDS;
5781 mcp->flags = 0;
5782 rval = qla2x00_mailbox_command(vha, mcp);
5783
5784 if (rval != QLA_SUCCESS) {
5785 ql_dbg(ql_dbg_mbx, vha, 0x114c,
5786 "Failed=%x mb[0]=%x mb[1]=%x.\n",
5787 rval, mcp->mb[0], mcp->mb[1]);
5788 } else {
5789 *data = (mcp->mb[3] | (mcp->mb[4] << 16));
5790 if (*data == QLA8XXX_BAD_VALUE) {
5791 /*
5792 * During soft-reset CAMRAM register reads might
5793 * return 0xbad0bad0. So retry for MAX of 2 sec
5794 * while reading camram registers.
5795 */
5796 if (time_after(jiffies, retry_max_time)) {
5797 ql_dbg(ql_dbg_mbx, vha, 0x1141,
5798 "Failure to read CAMRAM register. "
5799 "data=0x%x.\n", *data);
5800 return QLA_FUNCTION_FAILED;
5801 }
5802 msleep(100);
5803 goto retry_rd_reg;
5804 }
5805 ql_dbg(ql_dbg_mbx, vha, 0x1142, "Done %s.\n", __func__);
5806 }
5807
5808 return rval;
5809 }
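
/*
 * Usage sketch (illustrative only): qla83xx_rd_reg() and
 * qla83xx_wr_reg() are usually combined for a read-modify-write of a
 * remote register; reg_off and set_bits are placeholders.
 *
 *	uint32_t val;
 *
 *	if (qla83xx_rd_reg(vha, reg_off, &val) == QLA_SUCCESS)
 *		qla83xx_wr_reg(vha, reg_off, val | set_bits);
 */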
5810
5811 int
5812 qla83xx_restart_nic_firmware(scsi_qla_host_t *vha)
5813 {
5814 int rval;
5815 mbx_cmd_t mc;
5816 mbx_cmd_t *mcp = &mc;
5817 struct qla_hw_data *ha = vha->hw;
5818
5819 if (!IS_QLA83XX(ha) && !IS_QLA27XX(ha))
5820 return QLA_FUNCTION_FAILED;
5821
5822 ql_dbg(ql_dbg_mbx, vha, 0x1143, "Entered %s.\n", __func__);
5823
5824 mcp->mb[0] = MBC_RESTART_NIC_FIRMWARE;
5825 mcp->out_mb = MBX_0;
5826 mcp->in_mb = MBX_1|MBX_0;
5827 mcp->tov = MBX_TOV_SECONDS;
5828 mcp->flags = 0;
5829 rval = qla2x00_mailbox_command(vha, mcp);
5830
5831 if (rval != QLA_SUCCESS) {
5832 ql_dbg(ql_dbg_mbx, vha, 0x1144,
5833 "Failed=%x mb[0]=%x mb[1]=%x.\n",
5834 rval, mcp->mb[0], mcp->mb[1]);
5835 ha->isp_ops->fw_dump(vha, 0);
5836 } else {
5837 ql_dbg(ql_dbg_mbx, vha, 0x1145, "Done %s.\n", __func__);
5838 }
5839
5840 return rval;
5841 }
5842
5843 int
5844 qla83xx_access_control(scsi_qla_host_t *vha, uint16_t options,
5845 uint32_t start_addr, uint32_t end_addr, uint16_t *sector_size)
5846 {
5847 int rval;
5848 mbx_cmd_t mc;
5849 mbx_cmd_t *mcp = &mc;
5850 uint8_t subcode = (uint8_t)options;
5851 struct qla_hw_data *ha = vha->hw;
5852
5853 if (!IS_QLA8031(ha))
5854 return QLA_FUNCTION_FAILED;
5855
5856 ql_dbg(ql_dbg_mbx, vha, 0x1146, "Entered %s.\n", __func__);
5857
5858 mcp->mb[0] = MBC_SET_ACCESS_CONTROL;
5859 mcp->mb[1] = options;
5860 mcp->out_mb = MBX_1|MBX_0;
5861 if (subcode & BIT_2) {
5862 mcp->mb[2] = LSW(start_addr);
5863 mcp->mb[3] = MSW(start_addr);
5864 mcp->mb[4] = LSW(end_addr);
5865 mcp->mb[5] = MSW(end_addr);
5866 mcp->out_mb |= MBX_5|MBX_4|MBX_3|MBX_2;
5867 }
5868 mcp->in_mb = MBX_2|MBX_1|MBX_0;
5869 if (!(subcode & (BIT_2 | BIT_5)))
5870 mcp->in_mb |= MBX_4|MBX_3;
5871 mcp->tov = MBX_TOV_SECONDS;
5872 mcp->flags = 0;
5873 rval = qla2x00_mailbox_command(vha, mcp);
5874
5875 if (rval != QLA_SUCCESS) {
5876 ql_dbg(ql_dbg_mbx, vha, 0x1147,
5877 "Failed=%x mb[0]=%x mb[1]=%x mb[2]=%x mb[3]=%x mb[4]=%x.\n",
5878 rval, mcp->mb[0], mcp->mb[1], mcp->mb[2], mcp->mb[3],
5879 mcp->mb[4]);
5880 ha->isp_ops->fw_dump(vha, 0);
5881 } else {
5882 if (subcode & BIT_5)
5883 *sector_size = mcp->mb[1];
5884 else if (subcode & (BIT_6 | BIT_7)) {
5885 ql_dbg(ql_dbg_mbx, vha, 0x1148,
5886 "Driver-lock id=%x%x", mcp->mb[4], mcp->mb[3]);
5887 } else if (subcode & (BIT_3 | BIT_4)) {
5888 ql_dbg(ql_dbg_mbx, vha, 0x1149,
5889 "Flash-lock id=%x%x", mcp->mb[4], mcp->mb[3]);
5890 }
5891 ql_dbg(ql_dbg_mbx, vha, 0x114a, "Done %s.\n", __func__);
5892 }
5893
5894 return rval;
5895 }
5896
5897 int
5898 qla2x00_dump_mctp_data(scsi_qla_host_t *vha, dma_addr_t req_dma, uint32_t addr,
5899 uint32_t size)
5900 {
5901 int rval;
5902 mbx_cmd_t mc;
5903 mbx_cmd_t *mcp = &mc;
5904
5905 if (!IS_MCTP_CAPABLE(vha->hw))
5906 return QLA_FUNCTION_FAILED;
5907
5908 ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x114f,
5909 "Entered %s.\n", __func__);
5910
5911 mcp->mb[0] = MBC_DUMP_RISC_RAM_EXTENDED;
5912 mcp->mb[1] = LSW(addr);
5913 mcp->mb[2] = MSW(req_dma);
5914 mcp->mb[3] = LSW(req_dma);
5915 mcp->mb[4] = MSW(size);
5916 mcp->mb[5] = LSW(size);
5917 mcp->mb[6] = MSW(MSD(req_dma));
5918 mcp->mb[7] = LSW(MSD(req_dma));
5919 mcp->mb[8] = MSW(addr);
5920 /* Setting RAM ID to valid */
5921 mcp->mb[10] = BIT_7;
5922 /* For MCTP, RAM ID is 0x40 */
5923 mcp->mb[10] |= 0x40;
5924 
5925 mcp->out_mb = MBX_10|MBX_8|MBX_7|MBX_6|MBX_5|MBX_4|MBX_3|MBX_2|MBX_1|
5926 MBX_0;
5927
5928 mcp->in_mb = MBX_0;
5929 mcp->tov = MBX_TOV_SECONDS;
5930 mcp->flags = 0;
5931 rval = qla2x00_mailbox_command(vha, mcp);
5932
5933 if (rval != QLA_SUCCESS) {
5934 ql_dbg(ql_dbg_mbx, vha, 0x114e,
5935 "Failed=%x mb[0]=%x.\n", rval, mcp->mb[0]);
5936 } else {
5937 ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x114d,
5938 "Done %s.\n", __func__);
5939 }
5940
5941 return rval;
5942 }
5943
5944 int
5945 qla26xx_dport_diagnostics(scsi_qla_host_t *vha,
5946 void *dd_buf, uint size, uint options)
5947 {
5948 int rval;
5949 mbx_cmd_t mc;
5950 mbx_cmd_t *mcp = &mc;
5951 dma_addr_t dd_dma;
5952
5953 if (!IS_QLA83XX(vha->hw) && !IS_QLA27XX(vha->hw))
5954 return QLA_FUNCTION_FAILED;
5955
5956 ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x119f,
5957 "Entered %s.\n", __func__);
5958
5959 dd_dma = dma_map_single(&vha->hw->pdev->dev,
5960 dd_buf, size, DMA_FROM_DEVICE);
5961 if (dma_mapping_error(&vha->hw->pdev->dev, dd_dma)) {
5962 ql_log(ql_log_warn, vha, 0x1194, "Failed to map dma buffer.\n");
5963 return QLA_MEMORY_ALLOC_FAILED;
5964 }
5965
5966 memset(dd_buf, 0, size);
5967
5968 mcp->mb[0] = MBC_DPORT_DIAGNOSTICS;
5969 mcp->mb[1] = options;
5970 mcp->mb[2] = MSW(LSD(dd_dma));
5971 mcp->mb[3] = LSW(LSD(dd_dma));
5972 mcp->mb[6] = MSW(MSD(dd_dma));
5973 mcp->mb[7] = LSW(MSD(dd_dma));
5974 mcp->mb[8] = size;
5975 mcp->out_mb = MBX_8|MBX_7|MBX_6|MBX_3|MBX_2|MBX_1|MBX_0;
5976 mcp->in_mb = MBX_3|MBX_2|MBX_1|MBX_0;
5977 mcp->buf_size = size;
5978 mcp->flags = MBX_DMA_IN;
5979 mcp->tov = MBX_TOV_SECONDS * 4;
5980 rval = qla2x00_mailbox_command(vha, mcp);
5981
5982 if (rval != QLA_SUCCESS) {
5983 ql_dbg(ql_dbg_mbx, vha, 0x1195, "Failed=%x.\n", rval);
5984 } else {
5985 ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x1196,
5986 "Done %s.\n", __func__);
5987 }
5988
5989 dma_unmap_single(&vha->hw->pdev->dev, dd_dma,
5990 size, DMA_FROM_DEVICE);
5991
5992 return rval;
5993 }
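
/*
 * Usage sketch (illustrative only): the caller supplies a plain kernel
 * buffer and the routine maps/unmaps it for DMA itself; the 64-byte
 * size and the options value below are placeholders for whatever the
 * caller (e.g. the BSG path) was given.
 *
 *	void *dd_buf;
 *	int rval;
 *
 *	dd_buf = kzalloc(64, GFP_KERNEL);
 *	if (dd_buf) {
 *		rval = qla26xx_dport_diagnostics(vha, dd_buf, 64, options);
 *		kfree(dd_buf);
 *	}
 */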
5994
5995 static void qla2x00_async_mb_sp_done(void *s, int res)
5996 {
5997 struct srb *sp = s;
5998
5999 sp->u.iocb_cmd.u.mbx.rc = res;
6000
6001 complete(&sp->u.iocb_cmd.u.mbx.comp);
6002 /* don't free sp here. Let the caller do the free */
6003 }
6004
6005 /*
6006 * This mailbox routine uses the IOCB interface to send an MB command.
6007 * This allows non-critical (non chip-setup) commands to go
6008 * out in parallel.
6009 */
6010 int qla24xx_send_mb_cmd(struct scsi_qla_host *vha, mbx_cmd_t *mcp)
6011 {
6012 int rval = QLA_FUNCTION_FAILED;
6013 srb_t *sp;
6014 struct srb_iocb *c;
6015
6016 if (!vha->hw->flags.fw_started)
6017 goto done;
6018
6019 sp = qla2x00_get_sp(vha, NULL, GFP_KERNEL);
6020 if (!sp)
6021 goto done;
6022
6023 sp->type = SRB_MB_IOCB;
6024 sp->name = mb_to_str(mcp->mb[0]);
6025
6026 qla2x00_init_timer(sp, qla2x00_get_async_timeout(vha) + 2);
6027
6028 memcpy(sp->u.iocb_cmd.u.mbx.out_mb, mcp->mb, SIZEOF_IOCB_MB_REG);
6029
6030 c = &sp->u.iocb_cmd;
6031 c->timeout = qla2x00_async_iocb_timeout;
6032 init_completion(&c->u.mbx.comp);
6033
6034 sp->done = qla2x00_async_mb_sp_done;
6035
6036 rval = qla2x00_start_sp(sp);
6037 if (rval != QLA_SUCCESS) {
6038 ql_dbg(ql_dbg_mbx, vha, 0x1018,
6039 "%s: %s Failed submission. %x.\n",
6040 __func__, sp->name, rval);
6041 goto done_free_sp;
6042 }
6043
6044 ql_dbg(ql_dbg_mbx, vha, 0x113f, "MB:%s hndl %x submitted\n",
6045 sp->name, sp->handle);
6046
6047 wait_for_completion(&c->u.mbx.comp);
6048 memcpy(mcp->mb, sp->u.iocb_cmd.u.mbx.in_mb, SIZEOF_IOCB_MB_REG);
6049
6050 rval = c->u.mbx.rc;
6051 switch (rval) {
6052 case QLA_FUNCTION_TIMEOUT:
6053 ql_dbg(ql_dbg_mbx, vha, 0x1140, "%s: %s Timeout. %x.\n",
6054 __func__, sp->name, rval);
6055 break;
6056 case QLA_SUCCESS:
6057 ql_dbg(ql_dbg_mbx, vha, 0x119d, "%s: %s done.\n",
6058 __func__, sp->name);
6059 sp->free(sp);
6060 break;
6061 default:
6062 ql_dbg(ql_dbg_mbx, vha, 0x119e, "%s: %s Failed. %x.\n",
6063 __func__, sp->name, rval);
6064 sp->free(sp);
6065 break;
6066 }
6067
6068 return rval;
6069
6070 done_free_sp:
6071 sp->free(sp);
6072 done:
6073 return rval;
6074 }
6075
6076 /*
6077 * qla24xx_gpdb_wait
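 *	Issues MBC_GET_PORT_DATABASE through the IOCB-based
 *	qla24xx_send_mb_cmd() path and parses the returned port
 *	database into the fcport via __qla24xx_parse_gpdb().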
6078 * NOTE: Do not call this routine from DPC thread
6079 */
6080 int qla24xx_gpdb_wait(struct scsi_qla_host *vha, fc_port_t *fcport, u8 opt)
6081 {
6082 int rval = QLA_FUNCTION_FAILED;
6083 dma_addr_t pd_dma;
6084 struct port_database_24xx *pd;
6085 struct qla_hw_data *ha = vha->hw;
6086 mbx_cmd_t mc;
6087
6088 if (!vha->hw->flags.fw_started)
6089 goto done;
6090
6091 pd = dma_pool_zalloc(ha->s_dma_pool, GFP_KERNEL, &pd_dma);
6092 if (pd == NULL) {
6093 ql_log(ql_log_warn, vha, 0xd047,
6094 "Failed to allocate port database structure.\n");
6095 goto done_free_sp;
6096 }
6097
6098 memset(&mc, 0, sizeof(mc));
6099 mc.mb[0] = MBC_GET_PORT_DATABASE;
6100 mc.mb[1] = cpu_to_le16(fcport->loop_id);
6101 mc.mb[2] = MSW(pd_dma);
6102 mc.mb[3] = LSW(pd_dma);
6103 mc.mb[6] = MSW(MSD(pd_dma));
6104 mc.mb[7] = LSW(MSD(pd_dma));
6105 mc.mb[9] = cpu_to_le16(vha->vp_idx);
6106 mc.mb[10] = cpu_to_le16((uint16_t)opt);
6107
6108 rval = qla24xx_send_mb_cmd(vha, &mc);
6109 if (rval != QLA_SUCCESS) {
6110 ql_dbg(ql_dbg_mbx, vha, 0x1193,
6111 "%s: %8phC fail\n", __func__, fcport->port_name);
6112 goto done_free_sp;
6113 }
6114
6115 rval = __qla24xx_parse_gpdb(vha, fcport, pd);
6116
6117 ql_dbg(ql_dbg_mbx, vha, 0x1197, "%s: %8phC done\n",
6118 __func__, fcport->port_name);
6119
6120 done_free_sp:
6121 if (pd)
6122 dma_pool_free(ha->s_dma_pool, pd, pd_dma);
6123 done:
6124 return rval;
6125 }
6126
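/*
 * __qla24xx_parse_gpdb
 *	Copy login state, WWNs, port_id, port type, class of service and
 *	confirmed-completion capability from a returned
 *	port_database_24xx into the fcport.
 *
 * Returns QLA_SUCCESS, QLA_FUNCTION_FAILED if the port is not in
 * PRLI-complete state, or QLA_NOT_LOGGED_IN if the device was lost.
 */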
6127 int __qla24xx_parse_gpdb(struct scsi_qla_host *vha, fc_port_t *fcport,
6128 struct port_database_24xx *pd)
6129 {
6130 int rval = QLA_SUCCESS;
6131 uint64_t zero = 0;
6132 u8 current_login_state, last_login_state;
6133
6134 if (fcport->fc4f_nvme) {
6135 current_login_state = pd->current_login_state >> 4;
6136 last_login_state = pd->last_login_state >> 4;
6137 } else {
6138 current_login_state = pd->current_login_state & 0xf;
6139 last_login_state = pd->last_login_state & 0xf;
6140 }
6141
6142 /* Check for logged in state. */
6143 if (current_login_state != PDS_PRLI_COMPLETE) {
6144 ql_dbg(ql_dbg_mbx, vha, 0x119a,
6145 "Unable to verify login-state (%x/%x) for loop_id %x.\n",
6146 current_login_state, last_login_state, fcport->loop_id);
6147 rval = QLA_FUNCTION_FAILED;
6148 goto gpd_error_out;
6149 }
6150
6151 if (fcport->loop_id == FC_NO_LOOP_ID ||
6152 (memcmp(fcport->port_name, (uint8_t *)&zero, 8) &&
6153 memcmp(fcport->port_name, pd->port_name, 8))) {
6154 		/* We lost the device midway. */
6155 rval = QLA_NOT_LOGGED_IN;
6156 goto gpd_error_out;
6157 }
6158
6159 /* Names are little-endian. */
6160 memcpy(fcport->node_name, pd->node_name, WWN_SIZE);
6161 memcpy(fcport->port_name, pd->port_name, WWN_SIZE);
6162
6163 /* Get port_id of device. */
6164 fcport->d_id.b.domain = pd->port_id[0];
6165 fcport->d_id.b.area = pd->port_id[1];
6166 fcport->d_id.b.al_pa = pd->port_id[2];
6167 fcport->d_id.b.rsvd_1 = 0;
6168
6169 if (fcport->fc4f_nvme) {
6170 fcport->nvme_prli_service_param =
6171 pd->prli_nvme_svc_param_word_3;
6172 fcport->port_type = FCT_NVME;
6173 } else {
6174 		/* If not a target, it must be an initiator or of unknown type. */
6175 if ((pd->prli_svc_param_word_3[0] & BIT_4) == 0)
6176 fcport->port_type = FCT_INITIATOR;
6177 else
6178 fcport->port_type = FCT_TARGET;
6179 }
6180 	/* Pass back class-of-service (COS) information. */
6181 fcport->supported_classes = (pd->flags & PDF_CLASS_2) ?
6182 FC_COS_CLASS2 : FC_COS_CLASS3;
6183
6184 if (pd->prli_svc_param_word_3[0] & BIT_7) {
6185 fcport->flags |= FCF_CONF_COMP_SUPPORTED;
6186 fcport->conf_compl_supported = 1;
6187 }
6188
6189 gpd_error_out:
6190 return rval;
6191 }
6192
6193 /*
6194  * qla24xx_gidlist_wait
6195 * NOTE: don't call this routine from DPC thread.
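 * Fetches the firmware ID list (MBC_GET_ID_LIST) into the caller's
 * buffer at id_list_dma; on success *entries is set from mb[1].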
6196 */
6197 int qla24xx_gidlist_wait(struct scsi_qla_host *vha,
6198 void *id_list, dma_addr_t id_list_dma, uint16_t *entries)
6199 {
6200 int rval = QLA_FUNCTION_FAILED;
6201 mbx_cmd_t mc;
6202
6203 if (!vha->hw->flags.fw_started)
6204 goto done;
6205
6206 memset(&mc, 0, sizeof(mc));
6207 mc.mb[0] = MBC_GET_ID_LIST;
6208 mc.mb[2] = MSW(id_list_dma);
6209 mc.mb[3] = LSW(id_list_dma);
6210 mc.mb[6] = MSW(MSD(id_list_dma));
6211 mc.mb[7] = LSW(MSD(id_list_dma));
6212 mc.mb[8] = 0;
6213 mc.mb[9] = cpu_to_le16(vha->vp_idx);
6214
6215 rval = qla24xx_send_mb_cmd(vha, &mc);
6216 if (rval != QLA_SUCCESS) {
6217 ql_dbg(ql_dbg_mbx, vha, 0x119b,
6218 "%s: fail\n", __func__);
6219 } else {
6220 *entries = mc.mb[1];
6221 ql_dbg(ql_dbg_mbx, vha, 0x119c,
6222 "%s: done\n", __func__);
6223 }
6224 done:
6225 return rval;
6226 }
6227
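/*
 * qla27xx_set_zio_threshold
 *	Set the ZIO threshold via MBC_GET_SET_ZIO_THRESHOLD
 *	(mb[1] = 1 selects the "set" operation, mb[2] = new value).
 */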
6228 int qla27xx_set_zio_threshold(scsi_qla_host_t *vha, uint16_t value)
6229 {
6230 int rval;
6231 mbx_cmd_t mc;
6232 mbx_cmd_t *mcp = &mc;
6233
6234 ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x1200,
6235 "Entered %s\n", __func__);
6236
6237 	memset(mcp->mb, 0, sizeof(mcp->mb));
6238 mcp->mb[0] = MBC_GET_SET_ZIO_THRESHOLD;
6239 mcp->mb[1] = cpu_to_le16(1);
6240 mcp->mb[2] = cpu_to_le16(value);
6241 mcp->out_mb = MBX_2 | MBX_1 | MBX_0;
6242 mcp->in_mb = MBX_2 | MBX_0;
6243 mcp->tov = MBX_TOV_SECONDS;
6244 mcp->flags = 0;
6245
6246 rval = qla2x00_mailbox_command(vha, mcp);
6247
6248 ql_dbg(ql_dbg_mbx, vha, 0x1201, "%s %x\n",
6249 (rval != QLA_SUCCESS) ? "Failed" : "Done", rval);
6250
6251 return rval;
6252 }
6253
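/*
 * qla27xx_get_zio_threshold
 *	Read the current ZIO threshold via MBC_GET_SET_ZIO_THRESHOLD
 *	(mb[1] = 0 selects the "get" operation); on success the value
 *	returned in mb[2] is stored in *value.
 */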
6254 int qla27xx_get_zio_threshold(scsi_qla_host_t *vha, uint16_t *value)
6255 {
6256 int rval;
6257 mbx_cmd_t mc;
6258 mbx_cmd_t *mcp = &mc;
6259
6260 ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x1203,
6261 "Entered %s\n", __func__);
6262
6263 memset(mcp->mb, 0, sizeof(mcp->mb));
6264 mcp->mb[0] = MBC_GET_SET_ZIO_THRESHOLD;
6265 mcp->mb[1] = cpu_to_le16(0);
6266 mcp->out_mb = MBX_1 | MBX_0;
6267 mcp->in_mb = MBX_2 | MBX_0;
6268 mcp->tov = MBX_TOV_SECONDS;
6269 mcp->flags = 0;
6270
6271 rval = qla2x00_mailbox_command(vha, mcp);
6272 if (rval == QLA_SUCCESS)
6273 *value = mc.mb[2];
6274
6275 ql_dbg(ql_dbg_mbx, vha, 0x1205, "%s %x\n",
6276 (rval != QLA_SUCCESS) ? "Failed" : "Done", rval);
6277
6278 return rval;
6279 }
6280
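/*
 * qla2x00_read_sfp_dev
 *	Read SFP transceiver data in SFP_BLOCK_SIZE chunks, device
 *	address 0xa0 first and then 0xa2, using the pre-allocated
 *	ha->sfp_data DMA buffer. If buf is non-NULL, up to count bytes
 *	of the data are copied back to the caller.
 */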
6281 int
6282 qla2x00_read_sfp_dev(struct scsi_qla_host *vha, char *buf, int count)
6283 {
6284 struct qla_hw_data *ha = vha->hw;
6285 uint16_t iter, addr, offset;
6286 dma_addr_t phys_addr;
6287 int rval, c;
6288 u8 *sfp_data;
6289
6290 memset(ha->sfp_data, 0, SFP_DEV_SIZE);
6291 addr = 0xa0;
6292 phys_addr = ha->sfp_data_dma;
6293 sfp_data = ha->sfp_data;
6294 offset = c = 0;
6295
6296 for (iter = 0; iter < SFP_DEV_SIZE / SFP_BLOCK_SIZE; iter++) {
6297 if (iter == 4) {
6298 /* Skip to next device address. */
6299 addr = 0xa2;
6300 offset = 0;
6301 }
6302
6303 rval = qla2x00_read_sfp(vha, phys_addr, sfp_data,
6304 addr, offset, SFP_BLOCK_SIZE, BIT_1);
6305 if (rval != QLA_SUCCESS) {
6306 ql_log(ql_log_warn, vha, 0x706d,
6307 "Unable to read SFP data (%x/%x/%x).\n", rval,
6308 addr, offset);
6309
6310 return rval;
6311 }
6312
6313 if (buf && (c < count)) {
6314 u16 sz;
6315
6316 if ((count - c) >= SFP_BLOCK_SIZE)
6317 sz = SFP_BLOCK_SIZE;
6318 else
6319 sz = count - c;
6320
6321 memcpy(buf, sfp_data, sz);
6322 buf += SFP_BLOCK_SIZE;
6323 c += sz;
6324 }
6325 phys_addr += SFP_BLOCK_SIZE;
6326 sfp_data += SFP_BLOCK_SIZE;
6327 offset += SFP_BLOCK_SIZE;
6328 }
6329
6330 return rval;
6331 }
6332
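/*
 * qla24xx_res_count_wait
 *	Issue MBC_GET_RESOURCE_COUNTS over the IOCB-based mailbox path
 *	and copy up to out_mb_sz bytes of the returned mailbox registers
 *	into out_mb.
 */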
6333 int qla24xx_res_count_wait(struct scsi_qla_host *vha,
6334 uint16_t *out_mb, int out_mb_sz)
6335 {
6336 int rval = QLA_FUNCTION_FAILED;
6337 mbx_cmd_t mc;
6338
6339 if (!vha->hw->flags.fw_started)
6340 goto done;
6341
6342 memset(&mc, 0, sizeof(mc));
6343 mc.mb[0] = MBC_GET_RESOURCE_COUNTS;
6344
6345 rval = qla24xx_send_mb_cmd(vha, &mc);
6346 if (rval != QLA_SUCCESS) {
6347 ql_dbg(ql_dbg_mbx, vha, 0xffff,
6348 "%s: fail\n", __func__);
6349 } else {
6350 if (out_mb_sz <= SIZEOF_IOCB_MB_REG)
6351 memcpy(out_mb, mc.mb, out_mb_sz);
6352 else
6353 memcpy(out_mb, mc.mb, SIZEOF_IOCB_MB_REG);
6354
6355 ql_dbg(ql_dbg_mbx, vha, 0xffff,
6356 "%s: done\n", __func__);
6357 }
6358 done:
6359 return rval;
6360 }