drivers/scsi/qla2xxx/qla_attr.c
1 /*
2 * QLogic Fibre Channel HBA Driver
3 * Copyright (c) 2003-2014 QLogic Corporation
4 *
5 * See LICENSE.qla2xxx for copyright and licensing details.
6 */
7 #include "qla_def.h"
8 #include "qla_target.h"
9
10 #include <linux/kthread.h>
11 #include <linux/vmalloc.h>
12 #include <linux/slab.h>
13 #include <linux/delay.h>
14
15 static int qla24xx_vport_disable(struct fc_vport *, bool);
16
17 /* SYSFS attributes --------------------------------------------------------- */
18
19 static ssize_t
20 qla2x00_sysfs_read_fw_dump(struct file *filp, struct kobject *kobj,
21 struct bin_attribute *bin_attr,
22 char *buf, loff_t off, size_t count)
23 {
24 struct scsi_qla_host *vha = shost_priv(dev_to_shost(container_of(kobj,
25 struct device, kobj)));
26 struct qla_hw_data *ha = vha->hw;
27 int rval = 0;
28
29 if (!(ha->fw_dump_reading || ha->mctp_dump_reading))
30 return 0;
31
32 mutex_lock(&ha->optrom_mutex);
33 if (IS_P3P_TYPE(ha)) {
34 if (off < ha->md_template_size) {
35 rval = memory_read_from_buffer(buf, count,
36 &off, ha->md_tmplt_hdr, ha->md_template_size);
37 } else {
38 off -= ha->md_template_size;
39 rval = memory_read_from_buffer(buf, count,
40 &off, ha->md_dump, ha->md_dump_size);
41 }
42 } else if (ha->mctp_dumped && ha->mctp_dump_reading) {
43 rval = memory_read_from_buffer(buf, count, &off, ha->mctp_dump,
44 MCTP_DUMP_SIZE);
45 } else if (ha->fw_dump_reading) {
46 rval = memory_read_from_buffer(buf, count, &off, ha->fw_dump,
47 ha->fw_dump_len);
48 } else {
49 rval = 0;
50 }
51 mutex_unlock(&ha->optrom_mutex);
52 return rval;
53 }
54
55 static ssize_t
56 qla2x00_sysfs_write_fw_dump(struct file *filp, struct kobject *kobj,
57 struct bin_attribute *bin_attr,
58 char *buf, loff_t off, size_t count)
59 {
60 struct scsi_qla_host *vha = shost_priv(dev_to_shost(container_of(kobj,
61 struct device, kobj)));
62 struct qla_hw_data *ha = vha->hw;
63 int reading;
64
65 if (off != 0)
66 return (0);
67
68 reading = simple_strtol(buf, NULL, 10);
69 switch (reading) {
70 case 0:
71 if (!ha->fw_dump_reading)
72 break;
73
74 ql_log(ql_log_info, vha, 0x705d,
75 "Firmware dump cleared on (%ld).\n", vha->host_no);
76
77 if (IS_P3P_TYPE(ha)) {
78 qla82xx_md_free(vha);
79 qla82xx_md_prep(vha);
80 }
81 ha->fw_dump_reading = 0;
82 ha->fw_dumped = 0;
83 break;
84 case 1:
85 if (ha->fw_dumped && !ha->fw_dump_reading) {
86 ha->fw_dump_reading = 1;
87
88 ql_log(ql_log_info, vha, 0x705e,
89 "Raw firmware dump ready for read on (%ld).\n",
90 vha->host_no);
91 }
92 break;
93 case 2:
94 qla2x00_alloc_fw_dump(vha);
95 break;
96 case 3:
97 if (IS_QLA82XX(ha)) {
98 qla82xx_idc_lock(ha);
99 qla82xx_set_reset_owner(vha);
100 qla82xx_idc_unlock(ha);
101 } else if (IS_QLA8044(ha)) {
102 qla8044_idc_lock(ha);
103 qla82xx_set_reset_owner(vha);
104 qla8044_idc_unlock(ha);
105 } else {
106 ha->fw_dump_mpi = 1;
107 qla2x00_system_error(vha);
108 }
109 break;
110 case 4:
111 if (IS_P3P_TYPE(ha)) {
112 if (ha->md_tmplt_hdr)
113 ql_dbg(ql_dbg_user, vha, 0x705b,
114 "MiniDump supported with this firmware.\n");
115 else
116 ql_dbg(ql_dbg_user, vha, 0x709d,
117 "MiniDump not supported with this firmware.\n");
118 }
119 break;
120 case 5:
121 if (IS_P3P_TYPE(ha))
122 set_bit(ISP_ABORT_NEEDED, &vha->dpc_flags);
123 break;
124 case 6:
125 if (!ha->mctp_dump_reading)
126 break;
127 ql_log(ql_log_info, vha, 0x70c1,
128 "MCTP dump cleared on (%ld).\n", vha->host_no);
129 ha->mctp_dump_reading = 0;
130 ha->mctp_dumped = 0;
131 break;
132 case 7:
133 if (ha->mctp_dumped && !ha->mctp_dump_reading) {
134 ha->mctp_dump_reading = 1;
135 ql_log(ql_log_info, vha, 0x70c2,
136 "Raw mctp dump ready for read on (%ld).\n",
137 vha->host_no);
138 }
139 break;
140 }
141 return count;
142 }
143
144 static struct bin_attribute sysfs_fw_dump_attr = {
145 .attr = {
146 .name = "fw_dump",
147 .mode = S_IRUSR | S_IWUSR,
148 },
149 .size = 0,
150 .read = qla2x00_sysfs_read_fw_dump,
151 .write = qla2x00_sysfs_write_fw_dump,
152 };
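/*
 * Usage sketch for the fw_dump control values handled above (the sysfs
 * path is illustrative and depends on the host number and kernel layout):
 *
 *   echo 1 > /sys/class/scsi_host/host0/device/fw_dump   # stage dump for read
 *   cat /sys/class/scsi_host/host0/device/fw_dump > fw.bin
 *   echo 0 > /sys/class/scsi_host/host0/device/fw_dump   # clear the dump
 *
 * Per the switch above: 2 (re)allocates the dump buffer, 3 forces a
 * firmware/system error so a dump is captured, 4 reports MiniDump support
 * (P3P parts), 5 requests an ISP abort (P3P parts), 6 clears an MCTP dump
 * and 7 marks an MCTP dump ready for reading.
 */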
153
154 static ssize_t
155 qla2x00_sysfs_read_nvram(struct file *filp, struct kobject *kobj,
156 struct bin_attribute *bin_attr,
157 char *buf, loff_t off, size_t count)
158 {
159 struct scsi_qla_host *vha = shost_priv(dev_to_shost(container_of(kobj,
160 struct device, kobj)));
161 struct qla_hw_data *ha = vha->hw;
162 uint32_t faddr;
163 struct active_regions active_regions = { };
164
165 if (!capable(CAP_SYS_ADMIN))
166 return 0;
167
168 mutex_lock(&ha->optrom_mutex);
169 if (qla2x00_chip_is_down(vha)) {
170 mutex_unlock(&ha->optrom_mutex);
171 return -EAGAIN;
172 }
173
174 if (!IS_NOCACHE_VPD_TYPE(ha)) {
175 mutex_unlock(&ha->optrom_mutex);
176 goto skip;
177 }
178
179 faddr = ha->flt_region_nvram;
180 if (IS_QLA28XX(ha)) {
181 qla28xx_get_aux_images(vha, &active_regions);
182 if (active_regions.aux.vpd_nvram == QLA27XX_SECONDARY_IMAGE)
183 faddr = ha->flt_region_nvram_sec;
184 }
185 ha->isp_ops->read_optrom(vha, ha->nvram, faddr << 2, ha->nvram_size);
186
187 mutex_unlock(&ha->optrom_mutex);
188
189 skip:
190 return memory_read_from_buffer(buf, count, &off, ha->nvram,
191 ha->nvram_size);
192 }
193
194 static ssize_t
195 qla2x00_sysfs_write_nvram(struct file *filp, struct kobject *kobj,
196 struct bin_attribute *bin_attr,
197 char *buf, loff_t off, size_t count)
198 {
199 struct scsi_qla_host *vha = shost_priv(dev_to_shost(container_of(kobj,
200 struct device, kobj)));
201 struct qla_hw_data *ha = vha->hw;
202 uint16_t cnt;
203
204 if (!capable(CAP_SYS_ADMIN) || off != 0 || count != ha->nvram_size ||
205 !ha->isp_ops->write_nvram)
206 return -EINVAL;
207
208 /* Checksum NVRAM. */
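/*
 * Note on the scheme below: FWI2-capable parts checksum the image as
 * little-endian 32-bit words over all but the last word and store the
 * two's complement in that last word; older parts do the same over
 * bytes. Either way the sum over the complete image comes out to zero.
 */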
209 if (IS_FWI2_CAPABLE(ha)) {
210 uint32_t *iter;
211 uint32_t chksum;
212
213 iter = (uint32_t *)buf;
214 chksum = 0;
215 for (cnt = 0; cnt < ((count >> 2) - 1); cnt++, iter++)
216 chksum += le32_to_cpu(*iter);
217 chksum = ~chksum + 1;
218 *iter = cpu_to_le32(chksum);
219 } else {
220 uint8_t *iter;
221 uint8_t chksum;
222
223 iter = (uint8_t *)buf;
224 chksum = 0;
225 for (cnt = 0; cnt < count - 1; cnt++)
226 chksum += *iter++;
227 chksum = ~chksum + 1;
228 *iter = chksum;
229 }
230
231 if (qla2x00_wait_for_hba_online(vha) != QLA_SUCCESS) {
232 ql_log(ql_log_warn, vha, 0x705f,
233 "HBA not online, failing NVRAM update.\n");
234 return -EAGAIN;
235 }
236
237 mutex_lock(&ha->optrom_mutex);
238 if (qla2x00_chip_is_down(vha)) {
239 mutex_unlock(&ha->optrom_mutex);
240 return -EAGAIN;
241 }
242
243 /* Write NVRAM. */
244 ha->isp_ops->write_nvram(vha, buf, ha->nvram_base, count);
245 ha->isp_ops->read_nvram(vha, ha->nvram, ha->nvram_base,
246 count);
247 mutex_unlock(&ha->optrom_mutex);
248
249 ql_dbg(ql_dbg_user, vha, 0x7060,
250 "Setting ISP_ABORT_NEEDED\n");
251 /* NVRAM settings take effect immediately. */
252 set_bit(ISP_ABORT_NEEDED, &vha->dpc_flags);
253 qla2xxx_wake_dpc(vha);
254 qla2x00_wait_for_chip_reset(vha);
255
256 return count;
257 }
258
259 static struct bin_attribute sysfs_nvram_attr = {
260 .attr = {
261 .name = "nvram",
262 .mode = S_IRUSR | S_IWUSR,
263 },
264 .size = 512,
265 .read = qla2x00_sysfs_read_nvram,
266 .write = qla2x00_sysfs_write_nvram,
267 };
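/*
 * A write to "nvram" must supply the complete image (ha->nvram_size bytes)
 * in a single write() at offset 0 and requires CAP_SYS_ADMIN; the handler
 * above recomputes the checksum, programs the part, reads it back and then
 * schedules an ISP abort so the new settings take effect. Illustrative
 * sequence (paths and block size are examples only):
 *
 *   dd if=/sys/class/scsi_host/host0/device/nvram of=nvram.bin
 *   ... edit nvram.bin without changing its size ...
 *   dd if=nvram.bin of=/sys/class/scsi_host/host0/device/nvram bs=64k
 *
 * bs= only needs to be large enough that the image goes out in one write.
 */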
268
269 static ssize_t
270 qla2x00_sysfs_read_optrom(struct file *filp, struct kobject *kobj,
271 struct bin_attribute *bin_attr,
272 char *buf, loff_t off, size_t count)
273 {
274 struct scsi_qla_host *vha = shost_priv(dev_to_shost(container_of(kobj,
275 struct device, kobj)));
276 struct qla_hw_data *ha = vha->hw;
277 ssize_t rval = 0;
278
279 mutex_lock(&ha->optrom_mutex);
280
281 if (ha->optrom_state != QLA_SREADING)
282 goto out;
283
284 rval = memory_read_from_buffer(buf, count, &off, ha->optrom_buffer,
285 ha->optrom_region_size);
286
287 out:
288 mutex_unlock(&ha->optrom_mutex);
289
290 return rval;
291 }
292
293 static ssize_t
294 qla2x00_sysfs_write_optrom(struct file *filp, struct kobject *kobj,
295 struct bin_attribute *bin_attr,
296 char *buf, loff_t off, size_t count)
297 {
298 struct scsi_qla_host *vha = shost_priv(dev_to_shost(container_of(kobj,
299 struct device, kobj)));
300 struct qla_hw_data *ha = vha->hw;
301
302 mutex_lock(&ha->optrom_mutex);
303
304 if (ha->optrom_state != QLA_SWRITING) {
305 mutex_unlock(&ha->optrom_mutex);
306 return -EINVAL;
307 }
308 if (off > ha->optrom_region_size) {
309 mutex_unlock(&ha->optrom_mutex);
310 return -ERANGE;
311 }
312 if (off + count > ha->optrom_region_size)
313 count = ha->optrom_region_size - off;
314
315 memcpy(&ha->optrom_buffer[off], buf, count);
316 mutex_unlock(&ha->optrom_mutex);
317
318 return count;
319 }
320
321 static struct bin_attribute sysfs_optrom_attr = {
322 .attr = {
323 .name = "optrom",
324 .mode = S_IRUSR | S_IWUSR,
325 },
326 .size = 0,
327 .read = qla2x00_sysfs_read_optrom,
328 .write = qla2x00_sysfs_write_optrom,
329 };
330
331 static ssize_t
332 qla2x00_sysfs_write_optrom_ctl(struct file *filp, struct kobject *kobj,
333 struct bin_attribute *bin_attr,
334 char *buf, loff_t off, size_t count)
335 {
336 struct scsi_qla_host *vha = shost_priv(dev_to_shost(container_of(kobj,
337 struct device, kobj)));
338 struct qla_hw_data *ha = vha->hw;
339 uint32_t start = 0;
340 uint32_t size = ha->optrom_size;
341 int val, valid;
342 ssize_t rval = count;
343
344 if (off)
345 return -EINVAL;
346
347 if (unlikely(pci_channel_offline(ha->pdev)))
348 return -EAGAIN;
349
350 if (sscanf(buf, "%d:%x:%x", &val, &start, &size) < 1)
351 return -EINVAL;
352 if (start > ha->optrom_size)
353 return -EINVAL;
354 if (size > ha->optrom_size - start)
355 size = ha->optrom_size - start;
356
357 mutex_lock(&ha->optrom_mutex);
358 if (qla2x00_chip_is_down(vha)) {
359 mutex_unlock(&ha->optrom_mutex);
360 return -EAGAIN;
361 }
362 switch (val) {
363 case 0:
364 if (ha->optrom_state != QLA_SREADING &&
365 ha->optrom_state != QLA_SWRITING) {
366 rval = -EINVAL;
367 goto out;
368 }
369 ha->optrom_state = QLA_SWAITING;
370
371 ql_dbg(ql_dbg_user, vha, 0x7061,
372 "Freeing flash region allocation -- 0x%x bytes.\n",
373 ha->optrom_region_size);
374
375 vfree(ha->optrom_buffer);
376 ha->optrom_buffer = NULL;
377 break;
378 case 1:
379 if (ha->optrom_state != QLA_SWAITING) {
380 rval = -EINVAL;
381 goto out;
382 }
383
384 ha->optrom_region_start = start;
385 ha->optrom_region_size = size;
386
387 ha->optrom_state = QLA_SREADING;
388 ha->optrom_buffer = vzalloc(ha->optrom_region_size);
389 if (ha->optrom_buffer == NULL) {
390 ql_log(ql_log_warn, vha, 0x7062,
391 "Unable to allocate memory for optrom retrieval "
392 "(%x).\n", ha->optrom_region_size);
393
394 ha->optrom_state = QLA_SWAITING;
395 rval = -ENOMEM;
396 goto out;
397 }
398
399 if (qla2x00_wait_for_hba_online(vha) != QLA_SUCCESS) {
400 ql_log(ql_log_warn, vha, 0x7063,
401 "HBA not online, failing NVRAM update.\n");
402 rval = -EAGAIN;
403 goto out;
404 }
405
406 ql_dbg(ql_dbg_user, vha, 0x7064,
407 "Reading flash region -- 0x%x/0x%x.\n",
408 ha->optrom_region_start, ha->optrom_region_size);
409
410 ha->isp_ops->read_optrom(vha, ha->optrom_buffer,
411 ha->optrom_region_start, ha->optrom_region_size);
412 break;
413 case 2:
414 if (ha->optrom_state != QLA_SWAITING) {
415 rval = -EINVAL;
416 goto out;
417 }
418
419 /*
420 * We need to be more restrictive on which FLASH regions are
421 * allowed to be updated via user-space. Regions accessible
422 * via this method include:
423 *
424 * ISP21xx/ISP22xx/ISP23xx type boards:
425 *
426 * 0x000000 -> 0x020000 -- Boot code.
427 *
428 * ISP2322/ISP24xx type boards:
429 *
430 * 0x000000 -> 0x07ffff -- Boot code.
431 * 0x080000 -> 0x0fffff -- Firmware.
432 *
433 * ISP25xx type boards:
434 *
435 * 0x000000 -> 0x07ffff -- Boot code.
436 * 0x080000 -> 0x0fffff -- Firmware.
437 * 0x120000 -> 0x12ffff -- VPD and HBA parameters.
438 *
439 * > ISP25xx type boards:
440 *
441 * None -- should go through BSG.
442 */
443 valid = 0;
444 if (ha->optrom_size == OPTROM_SIZE_2300 && start == 0)
445 valid = 1;
446 else if (IS_QLA24XX_TYPE(ha) || IS_QLA25XX(ha))
447 valid = 1;
448 if (!valid) {
449 ql_log(ql_log_warn, vha, 0x7065,
450 "Invalid start region 0x%x/0x%x.\n", start, size);
451 rval = -EINVAL;
452 goto out;
453 }
454
455 ha->optrom_region_start = start;
456 ha->optrom_region_size = size;
457
458 ha->optrom_state = QLA_SWRITING;
459 ha->optrom_buffer = vzalloc(ha->optrom_region_size);
460 if (ha->optrom_buffer == NULL) {
461 ql_log(ql_log_warn, vha, 0x7066,
462 "Unable to allocate memory for optrom update "
463 "(%x)\n", ha->optrom_region_size);
464
465 ha->optrom_state = QLA_SWAITING;
466 rval = -ENOMEM;
467 goto out;
468 }
469
470 ql_dbg(ql_dbg_user, vha, 0x7067,
471 "Staging flash region write -- 0x%x/0x%x.\n",
472 ha->optrom_region_start, ha->optrom_region_size);
473
474 break;
475 case 3:
476 if (ha->optrom_state != QLA_SWRITING) {
477 rval = -EINVAL;
478 goto out;
479 }
480
481 if (qla2x00_wait_for_hba_online(vha) != QLA_SUCCESS) {
482 ql_log(ql_log_warn, vha, 0x7068,
483 "HBA not online, failing flash update.\n");
484 rval = -EAGAIN;
485 goto out;
486 }
487
488 ql_dbg(ql_dbg_user, vha, 0x7069,
489 "Writing flash region -- 0x%x/0x%x.\n",
490 ha->optrom_region_start, ha->optrom_region_size);
491
492 rval = ha->isp_ops->write_optrom(vha, ha->optrom_buffer,
493 ha->optrom_region_start, ha->optrom_region_size);
494 if (rval)
495 rval = -EIO;
496 break;
497 default:
498 rval = -EINVAL;
499 }
500
501 out:
502 mutex_unlock(&ha->optrom_mutex);
503 return rval;
504 }
505
506 static struct bin_attribute sysfs_optrom_ctl_attr = {
507 .attr = {
508 .name = "optrom_ctl",
509 .mode = S_IWUSR,
510 },
511 .size = 0,
512 .write = qla2x00_sysfs_write_optrom_ctl,
513 };
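/*
 * Sketch of the staged flash access protocol implemented by optrom_ctl and
 * optrom above. The control string is parsed as "<op>:<start>:<size>" with
 * start/size in hex; offsets and sizes below are examples only:
 *
 *   echo "1:0:20000" > optrom_ctl   # stage a read of 0x20000 bytes @ 0
 *   dd if=optrom of=flash.bin       # copy the staged data out
 *   echo "0" > optrom_ctl           # free the staging buffer
 *
 *   echo "2:0:20000" > optrom_ctl   # allocate a write staging buffer
 *   dd if=flash.bin of=optrom       # fill the buffer
 *   echo "3" > optrom_ctl           # burn the staged region to flash
 *   echo "0" > optrom_ctl           # release the buffer
 */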
514
515 static ssize_t
516 qla2x00_sysfs_read_vpd(struct file *filp, struct kobject *kobj,
517 struct bin_attribute *bin_attr,
518 char *buf, loff_t off, size_t count)
519 {
520 struct scsi_qla_host *vha = shost_priv(dev_to_shost(container_of(kobj,
521 struct device, kobj)));
522 struct qla_hw_data *ha = vha->hw;
523 uint32_t faddr;
524 struct active_regions active_regions = { };
525
526 if (unlikely(pci_channel_offline(ha->pdev)))
527 return -EAGAIN;
528
529 if (!capable(CAP_SYS_ADMIN))
530 return -EINVAL;
531
532 if (IS_NOCACHE_VPD_TYPE(ha))
533 goto skip;
534
535 faddr = ha->flt_region_vpd << 2;
536
537 if (IS_QLA28XX(ha)) {
538 qla28xx_get_aux_images(vha, &active_regions);
539 if (active_regions.aux.vpd_nvram == QLA27XX_SECONDARY_IMAGE)
540 faddr = ha->flt_region_vpd_sec << 2;
541
542 ql_dbg(ql_dbg_init, vha, 0x7070,
543 "Loading %s nvram image.\n",
544 active_regions.aux.vpd_nvram == QLA27XX_PRIMARY_IMAGE ?
545 "primary" : "secondary");
546 }
547
548 mutex_lock(&ha->optrom_mutex);
549 if (qla2x00_chip_is_down(vha)) {
550 mutex_unlock(&ha->optrom_mutex);
551 return -EAGAIN;
552 }
553
554 ha->isp_ops->read_optrom(vha, ha->vpd, faddr, ha->vpd_size);
555 mutex_unlock(&ha->optrom_mutex);
556
558 skip:
559 return memory_read_from_buffer(buf, count, &off, ha->vpd, ha->vpd_size);
560 }
561
562 static ssize_t
563 qla2x00_sysfs_write_vpd(struct file *filp, struct kobject *kobj,
564 struct bin_attribute *bin_attr,
565 char *buf, loff_t off, size_t count)
566 {
567 struct scsi_qla_host *vha = shost_priv(dev_to_shost(container_of(kobj,
568 struct device, kobj)));
569 struct qla_hw_data *ha = vha->hw;
570 uint8_t *tmp_data;
571
572 if (unlikely(pci_channel_offline(ha->pdev)))
573 return 0;
574
575 if (qla2x00_chip_is_down(vha))
576 return 0;
577
578 if (!capable(CAP_SYS_ADMIN) || off != 0 || count != ha->vpd_size ||
579 !ha->isp_ops->write_nvram)
580 return 0;
581
582 if (qla2x00_wait_for_hba_online(vha) != QLA_SUCCESS) {
583 ql_log(ql_log_warn, vha, 0x706a,
584 "HBA not online, failing VPD update.\n");
585 return -EAGAIN;
586 }
587
588 mutex_lock(&ha->optrom_mutex);
589 if (qla2x00_chip_is_down(vha)) {
590 mutex_unlock(&ha->optrom_mutex);
591 return -EAGAIN;
592 }
593
594 /* Write NVRAM. */
595 ha->isp_ops->write_nvram(vha, buf, ha->vpd_base, count);
596 ha->isp_ops->read_nvram(vha, ha->vpd, ha->vpd_base, count);
597
598 /* Update flash version information for 4Gb & above. */
599 if (!IS_FWI2_CAPABLE(ha)) {
600 mutex_unlock(&ha->optrom_mutex);
601 return -EINVAL;
602 }
603
604 tmp_data = vmalloc(256);
605 if (!tmp_data) {
606 mutex_unlock(&ha->optrom_mutex);
607 ql_log(ql_log_warn, vha, 0x706b,
608 "Unable to allocate memory for VPD information update.\n");
609 return -ENOMEM;
610 }
611 ha->isp_ops->get_flash_version(vha, tmp_data);
612 vfree(tmp_data);
613
614 mutex_unlock(&ha->optrom_mutex);
615
616 return count;
617 }
618
619 static struct bin_attribute sysfs_vpd_attr = {
620 .attr = {
621 .name = "vpd",
622 .mode = S_IRUSR | S_IWUSR,
623 },
624 .size = 0,
625 .read = qla2x00_sysfs_read_vpd,
626 .write = qla2x00_sysfs_write_vpd,
627 };
628
629 static ssize_t
630 qla2x00_sysfs_read_sfp(struct file *filp, struct kobject *kobj,
631 struct bin_attribute *bin_attr,
632 char *buf, loff_t off, size_t count)
633 {
634 struct scsi_qla_host *vha = shost_priv(dev_to_shost(container_of(kobj,
635 struct device, kobj)));
636 int rval;
637
638 if (!capable(CAP_SYS_ADMIN) || off != 0 || count < SFP_DEV_SIZE)
639 return 0;
640
641 mutex_lock(&vha->hw->optrom_mutex);
642 if (qla2x00_chip_is_down(vha)) {
643 mutex_unlock(&vha->hw->optrom_mutex);
644 return 0;
645 }
646
647 rval = qla2x00_read_sfp_dev(vha, buf, count);
648 mutex_unlock(&vha->hw->optrom_mutex);
649
650 if (rval)
651 return -EIO;
652
653 return count;
654 }
655
656 static struct bin_attribute sysfs_sfp_attr = {
657 .attr = {
658 .name = "sfp",
659 .mode = S_IRUSR | S_IWUSR,
660 },
661 .size = SFP_DEV_SIZE,
662 .read = qla2x00_sysfs_read_sfp,
663 };
664
665 static ssize_t
666 qla2x00_sysfs_write_reset(struct file *filp, struct kobject *kobj,
667 struct bin_attribute *bin_attr,
668 char *buf, loff_t off, size_t count)
669 {
670 struct scsi_qla_host *vha = shost_priv(dev_to_shost(container_of(kobj,
671 struct device, kobj)));
672 struct qla_hw_data *ha = vha->hw;
673 struct scsi_qla_host *base_vha = pci_get_drvdata(ha->pdev);
674 int type;
675 uint32_t idc_control;
676 uint8_t *tmp_data = NULL;
677
678 if (off != 0)
679 return -EINVAL;
680
681 type = simple_strtol(buf, NULL, 10);
682 switch (type) {
683 case 0x2025c:
684 ql_log(ql_log_info, vha, 0x706e,
685 "Issuing ISP reset.\n");
686
687 scsi_block_requests(vha->host);
688 if (IS_QLA82XX(ha)) {
689 ha->flags.isp82xx_no_md_cap = 1;
690 qla82xx_idc_lock(ha);
691 qla82xx_set_reset_owner(vha);
692 qla82xx_idc_unlock(ha);
693 } else if (IS_QLA8044(ha)) {
694 qla8044_idc_lock(ha);
695 idc_control = qla8044_rd_reg(ha,
696 QLA8044_IDC_DRV_CTRL);
697 qla8044_wr_reg(ha, QLA8044_IDC_DRV_CTRL,
698 (idc_control | GRACEFUL_RESET_BIT1));
699 qla82xx_set_reset_owner(vha);
700 qla8044_idc_unlock(ha);
701 } else {
702 set_bit(ISP_ABORT_NEEDED, &vha->dpc_flags);
703 qla2xxx_wake_dpc(vha);
704 }
705 qla2x00_wait_for_chip_reset(vha);
706 scsi_unblock_requests(vha->host);
707 break;
708 case 0x2025d:
709 if (!IS_QLA81XX(ha) && !IS_QLA83XX(ha))
710 return -EPERM;
711
712 ql_log(ql_log_info, vha, 0x706f,
713 "Issuing MPI reset.\n");
714
715 if (IS_QLA83XX(ha) || IS_QLA27XX(ha) || IS_QLA28XX(ha)) {
716 uint32_t idc_control;
717
718 qla83xx_idc_lock(vha, 0);
719 __qla83xx_get_idc_control(vha, &idc_control);
720 idc_control |= QLA83XX_IDC_GRACEFUL_RESET;
721 __qla83xx_set_idc_control(vha, idc_control);
722 qla83xx_wr_reg(vha, QLA83XX_IDC_DEV_STATE,
723 QLA8XXX_DEV_NEED_RESET);
724 qla83xx_idc_audit(vha, IDC_AUDIT_TIMESTAMP);
725 qla83xx_idc_unlock(vha, 0);
726 break;
727 } else {
728 /* Make sure FC side is not in reset */
729 WARN_ON_ONCE(qla2x00_wait_for_hba_online(vha) !=
730 QLA_SUCCESS);
731
732 /* Issue MPI reset */
733 scsi_block_requests(vha->host);
734 if (qla81xx_restart_mpi_firmware(vha) != QLA_SUCCESS)
735 ql_log(ql_log_warn, vha, 0x7070,
736 "MPI reset failed.\n");
737 scsi_unblock_requests(vha->host);
738 break;
739 }
740 case 0x2025e:
741 if (!IS_P3P_TYPE(ha) || vha != base_vha) {
742 ql_log(ql_log_info, vha, 0x7071,
743 "FCoE ctx reset not supported.\n");
744 return -EPERM;
745 }
746
747 ql_log(ql_log_info, vha, 0x7072,
748 "Issuing FCoE ctx reset.\n");
749 set_bit(FCOE_CTX_RESET_NEEDED, &vha->dpc_flags);
750 qla2xxx_wake_dpc(vha);
751 qla2x00_wait_for_fcoe_ctx_reset(vha);
752 break;
753 case 0x2025f:
754 if (!IS_QLA8031(ha))
755 return -EPERM;
756 ql_log(ql_log_info, vha, 0x70bc,
757 "Disabling Reset by IDC control\n");
758 qla83xx_idc_lock(vha, 0);
759 __qla83xx_get_idc_control(vha, &idc_control);
760 idc_control |= QLA83XX_IDC_RESET_DISABLED;
761 __qla83xx_set_idc_control(vha, idc_control);
762 qla83xx_idc_unlock(vha, 0);
763 break;
764 case 0x20260:
765 if (!IS_QLA8031(ha))
766 return -EPERM;
767 ql_log(ql_log_info, vha, 0x70bd,
768 "Enabling Reset by IDC control\n");
769 qla83xx_idc_lock(vha, 0);
770 __qla83xx_get_idc_control(vha, &idc_control);
771 idc_control &= ~QLA83XX_IDC_RESET_DISABLED;
772 __qla83xx_set_idc_control(vha, idc_control);
773 qla83xx_idc_unlock(vha, 0);
774 break;
775 case 0x20261:
776 ql_dbg(ql_dbg_user, vha, 0x70e0,
777 "Updating cache versions without reset ");
778
779 tmp_data = vmalloc(256);
780 if (!tmp_data) {
781 ql_log(ql_log_warn, vha, 0x70e1,
782 "Unable to allocate memory for VPD information update.\n");
783 return -ENOMEM;
784 }
785 ha->isp_ops->get_flash_version(vha, tmp_data);
786 vfree(tmp_data);
787 break;
788 }
789 return count;
790 }
791
792 static struct bin_attribute sysfs_reset_attr = {
793 .attr = {
794 .name = "reset",
795 .mode = S_IWUSR,
796 },
797 .size = 0,
798 .write = qla2x00_sysfs_write_reset,
799 };
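/*
 * Values accepted by the "reset" attribute, per the switch in
 * qla2x00_sysfs_write_reset() above (written in decimal, e.g.
 * "echo 131676 > reset" for 0x2025c, since the buffer is parsed base 10):
 *   0x2025c - full ISP reset (SCSI requests are blocked around the reset)
 *   0x2025d - MPI reset (ISP81xx/83xx class parts only)
 *   0x2025e - FCoE context reset (P3P parts, base port only)
 *   0x2025f - disable reset via IDC control (ISP8031)
 *   0x20260 - enable reset via IDC control (ISP8031)
 *   0x20261 - refresh cached flash version info without a reset
 */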
800
801 static ssize_t
802 qla2x00_issue_logo(struct file *filp, struct kobject *kobj,
803 struct bin_attribute *bin_attr,
804 char *buf, loff_t off, size_t count)
805 {
806 struct scsi_qla_host *vha = shost_priv(dev_to_shost(container_of(kobj,
807 struct device, kobj)));
808 int type;
809 port_id_t did;
810
811 if (!capable(CAP_SYS_ADMIN))
812 return 0;
813
814 if (unlikely(pci_channel_offline(vha->hw->pdev)))
815 return 0;
816
817 if (qla2x00_chip_is_down(vha))
818 return 0;
819
820 type = simple_strtol(buf, NULL, 10);
821
822 did.b.domain = (type & 0x00ff0000) >> 16;
823 did.b.area = (type & 0x0000ff00) >> 8;
824 did.b.al_pa = (type & 0x000000ff);
825
826 ql_log(ql_log_info, vha, 0xd04d, "portid=%02x%02x%02x done\n",
827 did.b.domain, did.b.area, did.b.al_pa);
828
829 ql_log(ql_log_info, vha, 0x70e4, "%s: %d\n", __func__, type);
830
831 qla24xx_els_dcmd_iocb(vha, ELS_DCMD_LOGO, did);
832 return count;
833 }
834
835 static struct bin_attribute sysfs_issue_logo_attr = {
836 .attr = {
837 .name = "issue_logo",
838 .mode = S_IWUSR,
839 },
840 .size = 0,
841 .write = qla2x00_issue_logo,
842 };
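/*
 * The value written to "issue_logo" is parsed as a decimal integer whose
 * low 24 bits encode the destination port ID (domain:area:al_pa), as
 * unpacked above. Example (illustrative path): to send an explicit LOGO to
 * port ID 01:02:03, write 0x010203 = 66051:
 *
 *   echo 66051 > /sys/class/scsi_host/host0/device/issue_logo
 */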
843
844 static ssize_t
845 qla2x00_sysfs_read_xgmac_stats(struct file *filp, struct kobject *kobj,
846 struct bin_attribute *bin_attr,
847 char *buf, loff_t off, size_t count)
848 {
849 struct scsi_qla_host *vha = shost_priv(dev_to_shost(container_of(kobj,
850 struct device, kobj)));
851 struct qla_hw_data *ha = vha->hw;
852 int rval;
853 uint16_t actual_size;
854
855 if (!capable(CAP_SYS_ADMIN) || off != 0 || count > XGMAC_DATA_SIZE)
856 return 0;
857
858 if (unlikely(pci_channel_offline(ha->pdev)))
859 return 0;
860 mutex_lock(&vha->hw->optrom_mutex);
861 if (qla2x00_chip_is_down(vha)) {
862 mutex_unlock(&vha->hw->optrom_mutex);
863 return 0;
864 }
865
866 if (ha->xgmac_data)
867 goto do_read;
868
869 ha->xgmac_data = dma_alloc_coherent(&ha->pdev->dev, XGMAC_DATA_SIZE,
870 &ha->xgmac_data_dma, GFP_KERNEL);
871 if (!ha->xgmac_data) {
872 mutex_unlock(&vha->hw->optrom_mutex);
873 ql_log(ql_log_warn, vha, 0x7076,
874 "Unable to allocate memory for XGMAC read-data.\n");
875 return 0;
876 }
877
878 do_read:
879 actual_size = 0;
880 memset(ha->xgmac_data, 0, XGMAC_DATA_SIZE);
881
882 rval = qla2x00_get_xgmac_stats(vha, ha->xgmac_data_dma,
883 XGMAC_DATA_SIZE, &actual_size);
884
885 mutex_unlock(&vha->hw->optrom_mutex);
886 if (rval != QLA_SUCCESS) {
887 ql_log(ql_log_warn, vha, 0x7077,
888 "Unable to read XGMAC data (%x).\n", rval);
889 count = 0;
890 }
891
892 count = actual_size > count ? count : actual_size;
893 memcpy(buf, ha->xgmac_data, count);
894
895 return count;
896 }
897
898 static struct bin_attribute sysfs_xgmac_stats_attr = {
899 .attr = {
900 .name = "xgmac_stats",
901 .mode = S_IRUSR,
902 },
903 .size = 0,
904 .read = qla2x00_sysfs_read_xgmac_stats,
905 };
906
907 static ssize_t
908 qla2x00_sysfs_read_dcbx_tlv(struct file *filp, struct kobject *kobj,
909 struct bin_attribute *bin_attr,
910 char *buf, loff_t off, size_t count)
911 {
912 struct scsi_qla_host *vha = shost_priv(dev_to_shost(container_of(kobj,
913 struct device, kobj)));
914 struct qla_hw_data *ha = vha->hw;
915 int rval;
916
917 if (!capable(CAP_SYS_ADMIN) || off != 0 || count > DCBX_TLV_DATA_SIZE)
918 return 0;
919
920 if (ha->dcbx_tlv)
921 goto do_read;
922 mutex_lock(&vha->hw->optrom_mutex);
923 if (qla2x00_chip_is_down(vha)) {
924 mutex_unlock(&vha->hw->optrom_mutex);
925 return 0;
926 }
927
928 ha->dcbx_tlv = dma_alloc_coherent(&ha->pdev->dev, DCBX_TLV_DATA_SIZE,
929 &ha->dcbx_tlv_dma, GFP_KERNEL);
930 if (!ha->dcbx_tlv) {
931 mutex_unlock(&vha->hw->optrom_mutex);
932 ql_log(ql_log_warn, vha, 0x7078,
933 "Unable to allocate memory for DCBX TLV read-data.\n");
934 return -ENOMEM;
935 }
936
937 do_read:
938 memset(ha->dcbx_tlv, 0, DCBX_TLV_DATA_SIZE);
939
940 rval = qla2x00_get_dcbx_params(vha, ha->dcbx_tlv_dma,
941 DCBX_TLV_DATA_SIZE);
942
943 mutex_unlock(&vha->hw->optrom_mutex);
944
945 if (rval != QLA_SUCCESS) {
946 ql_log(ql_log_warn, vha, 0x7079,
947 "Unable to read DCBX TLV (%x).\n", rval);
948 return -EIO;
949 }
950
951 memcpy(buf, ha->dcbx_tlv, count);
952
953 return count;
954 }
955
956 static struct bin_attribute sysfs_dcbx_tlv_attr = {
957 .attr = {
958 .name = "dcbx_tlv",
959 .mode = S_IRUSR,
960 },
961 .size = 0,
962 .read = qla2x00_sysfs_read_dcbx_tlv,
963 };
964
965 static struct sysfs_entry {
966 char *name;
967 struct bin_attribute *attr;
968 int type;
969 } bin_file_entries[] = {
970 { "fw_dump", &sysfs_fw_dump_attr, },
971 { "nvram", &sysfs_nvram_attr, },
972 { "optrom", &sysfs_optrom_attr, },
973 { "optrom_ctl", &sysfs_optrom_ctl_attr, },
974 { "vpd", &sysfs_vpd_attr, 1 },
975 { "sfp", &sysfs_sfp_attr, 1 },
976 { "reset", &sysfs_reset_attr, },
977 { "issue_logo", &sysfs_issue_logo_attr, },
978 { "xgmac_stats", &sysfs_xgmac_stats_attr, 3 },
979 { "dcbx_tlv", &sysfs_dcbx_tlv_attr, 3 },
980 { NULL },
981 };
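/*
 * The "type" field gates attribute creation in qla2x00_alloc_sysfs_attr()
 * below: any non-zero type requires an FWI2-capable ISP, type 2 (unused in
 * this table) additionally requires an ISP25xx, and type 3 requires a
 * CNA-capable adapter. Entries with type 0 are always created.
 */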
982
983 void
984 qla2x00_alloc_sysfs_attr(scsi_qla_host_t *vha)
985 {
986 struct Scsi_Host *host = vha->host;
987 struct sysfs_entry *iter;
988 int ret;
989
990 for (iter = bin_file_entries; iter->name; iter++) {
991 if (iter->type && !IS_FWI2_CAPABLE(vha->hw))
992 continue;
993 if (iter->type == 2 && !IS_QLA25XX(vha->hw))
994 continue;
995 if (iter->type == 3 && !(IS_CNA_CAPABLE(vha->hw)))
996 continue;
997
998 ret = sysfs_create_bin_file(&host->shost_gendev.kobj,
999 iter->attr);
1000 if (ret)
1001 ql_log(ql_log_warn, vha, 0x00f3,
1002 "Unable to create sysfs %s binary attribute (%d).\n",
1003 iter->name, ret);
1004 else
1005 ql_dbg(ql_dbg_init, vha, 0x00f4,
1006 "Successfully created sysfs %s binary attribute.\n",
1007 iter->name);
1008 }
1009 }
1010
1011 void
1012 qla2x00_free_sysfs_attr(scsi_qla_host_t *vha, bool stop_beacon)
1013 {
1014 struct Scsi_Host *host = vha->host;
1015 struct sysfs_entry *iter;
1016 struct qla_hw_data *ha = vha->hw;
1017
1018 for (iter = bin_file_entries; iter->name; iter++) {
1019 if (iter->type && !IS_FWI2_CAPABLE(ha))
1020 continue;
1021 if (iter->type == 2 && !IS_QLA25XX(ha))
1022 continue;
1023 if (iter->type == 3 && !(IS_CNA_CAPABLE(ha)))
1024 continue;
1025 if (iter->type == 0x27 &&
1026 (!IS_QLA27XX(ha) || !IS_QLA28XX(ha)))
1027 continue;
1028
1029 sysfs_remove_bin_file(&host->shost_gendev.kobj,
1030 iter->attr);
1031 }
1032
1033 if (stop_beacon && ha->beacon_blink_led == 1)
1034 ha->isp_ops->beacon_off(vha);
1035 }
1036
1037 /* Scsi_Host attributes. */
1038
1039 static ssize_t
1040 qla2x00_driver_version_show(struct device *dev,
1041 struct device_attribute *attr, char *buf)
1042 {
1043 return scnprintf(buf, PAGE_SIZE, "%s\n", qla2x00_version_str);
1044 }
1045
1046 static ssize_t
1047 qla2x00_fw_version_show(struct device *dev,
1048 struct device_attribute *attr, char *buf)
1049 {
1050 scsi_qla_host_t *vha = shost_priv(class_to_shost(dev));
1051 struct qla_hw_data *ha = vha->hw;
1052 char fw_str[128];
1053
1054 return scnprintf(buf, PAGE_SIZE, "%s\n",
1055 ha->isp_ops->fw_version_str(vha, fw_str, sizeof(fw_str)));
1056 }
1057
1058 static ssize_t
1059 qla2x00_serial_num_show(struct device *dev, struct device_attribute *attr,
1060 char *buf)
1061 {
1062 scsi_qla_host_t *vha = shost_priv(class_to_shost(dev));
1063 struct qla_hw_data *ha = vha->hw;
1064 uint32_t sn;
1065
1066 if (IS_QLAFX00(vha->hw)) {
1067 return scnprintf(buf, PAGE_SIZE, "%s\n",
1068 vha->hw->mr.serial_num);
1069 } else if (IS_FWI2_CAPABLE(ha)) {
1070 qla2xxx_get_vpd_field(vha, "SN", buf, PAGE_SIZE - 1);
1071 return strlen(strcat(buf, "\n"));
1072 }
1073
1074 sn = ((ha->serial0 & 0x1f) << 16) | (ha->serial2 << 8) | ha->serial1;
1075 return scnprintf(buf, PAGE_SIZE, "%c%05d\n", 'A' + sn / 100000,
1076 sn % 100000);
1077 }
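/*
 * Worked example for the legacy (pre-FWI2) encoding above, with made-up
 * register values: serial0 = 0x01, serial2 = 0xe2, serial1 = 0x40 gives
 * sn = (0x01 << 16) | (0xe2 << 8) | 0x40 = 123456, reported as
 * 'A' + 123456 / 100000 = 'B' plus 123456 % 100000 = 23456, i.e. "B23456".
 */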
1078
1079 static ssize_t
1080 qla2x00_isp_name_show(struct device *dev, struct device_attribute *attr,
1081 char *buf)
1082 {
1083 scsi_qla_host_t *vha = shost_priv(class_to_shost(dev));
1084
1085 return scnprintf(buf, PAGE_SIZE, "ISP%04X\n", vha->hw->pdev->device);
1086 }
1087
1088 static ssize_t
1089 qla2x00_isp_id_show(struct device *dev, struct device_attribute *attr,
1090 char *buf)
1091 {
1092 scsi_qla_host_t *vha = shost_priv(class_to_shost(dev));
1093 struct qla_hw_data *ha = vha->hw;
1094
1095 if (IS_QLAFX00(vha->hw))
1096 return scnprintf(buf, PAGE_SIZE, "%s\n",
1097 vha->hw->mr.hw_version);
1098
1099 return scnprintf(buf, PAGE_SIZE, "%04x %04x %04x %04x\n",
1100 ha->product_id[0], ha->product_id[1], ha->product_id[2],
1101 ha->product_id[3]);
1102 }
1103
1104 static ssize_t
1105 qla2x00_model_name_show(struct device *dev, struct device_attribute *attr,
1106 char *buf)
1107 {
1108 scsi_qla_host_t *vha = shost_priv(class_to_shost(dev));
1109
1110 return scnprintf(buf, PAGE_SIZE, "%s\n", vha->hw->model_number);
1111 }
1112
1113 static ssize_t
1114 qla2x00_model_desc_show(struct device *dev, struct device_attribute *attr,
1115 char *buf)
1116 {
1117 scsi_qla_host_t *vha = shost_priv(class_to_shost(dev));
1118
1119 return scnprintf(buf, PAGE_SIZE, "%s\n", vha->hw->model_desc);
1120 }
1121
1122 static ssize_t
1123 qla2x00_pci_info_show(struct device *dev, struct device_attribute *attr,
1124 char *buf)
1125 {
1126 scsi_qla_host_t *vha = shost_priv(class_to_shost(dev));
1127 char pci_info[30];
1128
1129 return scnprintf(buf, PAGE_SIZE, "%s\n",
1130 vha->hw->isp_ops->pci_info_str(vha, pci_info,
1131 sizeof(pci_info)));
1132 }
1133
1134 static ssize_t
1135 qla2x00_link_state_show(struct device *dev, struct device_attribute *attr,
1136 char *buf)
1137 {
1138 scsi_qla_host_t *vha = shost_priv(class_to_shost(dev));
1139 struct qla_hw_data *ha = vha->hw;
1140 int len = 0;
1141
1142 if (atomic_read(&vha->loop_state) == LOOP_DOWN ||
1143 atomic_read(&vha->loop_state) == LOOP_DEAD ||
1144 vha->device_flags & DFLG_NO_CABLE)
1145 len = scnprintf(buf, PAGE_SIZE, "Link Down\n");
1146 else if (atomic_read(&vha->loop_state) != LOOP_READY ||
1147 qla2x00_chip_is_down(vha))
1148 len = scnprintf(buf, PAGE_SIZE, "Unknown Link State\n");
1149 else {
1150 len = scnprintf(buf, PAGE_SIZE, "Link Up - ");
1151
1152 switch (ha->current_topology) {
1153 case ISP_CFG_NL:
1154 len += scnprintf(buf + len, PAGE_SIZE-len, "Loop\n");
1155 break;
1156 case ISP_CFG_FL:
1157 len += scnprintf(buf + len, PAGE_SIZE-len, "FL_Port\n");
1158 break;
1159 case ISP_CFG_N:
1160 len += scnprintf(buf + len, PAGE_SIZE-len,
1161 "N_Port to N_Port\n");
1162 break;
1163 case ISP_CFG_F:
1164 len += scnprintf(buf + len, PAGE_SIZE-len, "F_Port\n");
1165 break;
1166 default:
1167 len += scnprintf(buf + len, PAGE_SIZE-len, "Loop\n");
1168 break;
1169 }
1170 }
1171 return len;
1172 }
1173
1174 static ssize_t
1175 qla2x00_zio_show(struct device *dev, struct device_attribute *attr,
1176 char *buf)
1177 {
1178 scsi_qla_host_t *vha = shost_priv(class_to_shost(dev));
1179 int len = 0;
1180
1181 switch (vha->hw->zio_mode) {
1182 case QLA_ZIO_MODE_6:
1183 len += scnprintf(buf + len, PAGE_SIZE-len, "Mode 6\n");
1184 break;
1185 case QLA_ZIO_DISABLED:
1186 len += scnprintf(buf + len, PAGE_SIZE-len, "Disabled\n");
1187 break;
1188 }
1189 return len;
1190 }
1191
1192 static ssize_t
1193 qla2x00_zio_store(struct device *dev, struct device_attribute *attr,
1194 const char *buf, size_t count)
1195 {
1196 scsi_qla_host_t *vha = shost_priv(class_to_shost(dev));
1197 struct qla_hw_data *ha = vha->hw;
1198 int val = 0;
1199 uint16_t zio_mode;
1200
1201 if (!IS_ZIO_SUPPORTED(ha))
1202 return -ENOTSUPP;
1203
1204 if (sscanf(buf, "%d", &val) != 1)
1205 return -EINVAL;
1206
1207 if (val)
1208 zio_mode = QLA_ZIO_MODE_6;
1209 else
1210 zio_mode = QLA_ZIO_DISABLED;
1211
1212 /* Update per-hba values and queue a reset. */
1213 if (zio_mode != QLA_ZIO_DISABLED || ha->zio_mode != QLA_ZIO_DISABLED) {
1214 ha->zio_mode = zio_mode;
1215 set_bit(ISP_ABORT_NEEDED, &vha->dpc_flags);
1216 }
1217 return strlen(buf);
1218 }
1219
1220 static ssize_t
1221 qla2x00_zio_timer_show(struct device *dev, struct device_attribute *attr,
1222 char *buf)
1223 {
1224 scsi_qla_host_t *vha = shost_priv(class_to_shost(dev));
1225
1226 return scnprintf(buf, PAGE_SIZE, "%d us\n", vha->hw->zio_timer * 100);
1227 }
1228
1229 static ssize_t
1230 qla2x00_zio_timer_store(struct device *dev, struct device_attribute *attr,
1231 const char *buf, size_t count)
1232 {
1233 scsi_qla_host_t *vha = shost_priv(class_to_shost(dev));
1234 int val = 0;
1235 uint16_t zio_timer;
1236
1237 if (sscanf(buf, "%d", &val) != 1)
1238 return -EINVAL;
1239 if (val > 25500 || val < 100)
1240 return -ERANGE;
1241
1242 zio_timer = (uint16_t)(val / 100);
1243 vha->hw->zio_timer = zio_timer;
1244
1245 return strlen(buf);
1246 }
1247
1248 static ssize_t
1249 qla_zio_threshold_show(struct device *dev, struct device_attribute *attr,
1250 char *buf)
1251 {
1252 scsi_qla_host_t *vha = shost_priv(class_to_shost(dev));
1253
1254 return scnprintf(buf, PAGE_SIZE, "%d exchanges\n",
1255 vha->hw->last_zio_threshold);
1256 }
1257
1258 static ssize_t
1259 qla_zio_threshold_store(struct device *dev, struct device_attribute *attr,
1260 const char *buf, size_t count)
1261 {
1262 scsi_qla_host_t *vha = shost_priv(class_to_shost(dev));
1263 int val = 0;
1264
1265 if (vha->hw->zio_mode != QLA_ZIO_MODE_6)
1266 return -EINVAL;
1267 if (sscanf(buf, "%d", &val) != 1)
1268 return -EINVAL;
1269 if (val < 0 || val > 256)
1270 return -ERANGE;
1271
1272 atomic_set(&vha->hw->zio_threshold, val);
1273 return strlen(buf);
1274 }
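/*
 * Quick reference for the three ZIO attributes above, per their store
 * handlers: "zio" - any non-zero value selects ZIO mode 6, zero disables
 * it, and the handler queues an ISP abort to apply the change (unless ZIO
 * was and remains disabled); "zio_timer" - delay in microseconds, range
 * 100-25500, kept internally in 100us units (e.g. "echo 500 > zio_timer"
 * stores 5 units); "zio_threshold" - number of exchanges, 0-256, accepted
 * only while ZIO mode 6 is active.
 */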
1275
1276 static ssize_t
1277 qla2x00_beacon_show(struct device *dev, struct device_attribute *attr,
1278 char *buf)
1279 {
1280 scsi_qla_host_t *vha = shost_priv(class_to_shost(dev));
1281 int len = 0;
1282
1283 if (vha->hw->beacon_blink_led)
1284 len += scnprintf(buf + len, PAGE_SIZE-len, "Enabled\n");
1285 else
1286 len += scnprintf(buf + len, PAGE_SIZE-len, "Disabled\n");
1287 return len;
1288 }
1289
1290 static ssize_t
1291 qla2x00_beacon_store(struct device *dev, struct device_attribute *attr,
1292 const char *buf, size_t count)
1293 {
1294 scsi_qla_host_t *vha = shost_priv(class_to_shost(dev));
1295 struct qla_hw_data *ha = vha->hw;
1296 int val = 0;
1297 int rval;
1298
1299 if (IS_QLA2100(ha) || IS_QLA2200(ha))
1300 return -EPERM;
1301
1302 if (sscanf(buf, "%d", &val) != 1)
1303 return -EINVAL;
1304
1305 mutex_lock(&vha->hw->optrom_mutex);
1306 if (qla2x00_chip_is_down(vha)) {
1307 mutex_unlock(&vha->hw->optrom_mutex);
1308 ql_log(ql_log_warn, vha, 0x707a,
1309 "Abort ISP active -- ignoring beacon request.\n");
1310 return -EBUSY;
1311 }
1312
1313 if (val)
1314 rval = ha->isp_ops->beacon_on(vha);
1315 else
1316 rval = ha->isp_ops->beacon_off(vha);
1317
1318 if (rval != QLA_SUCCESS)
1319 count = 0;
1320
1321 mutex_unlock(&vha->hw->optrom_mutex);
1322
1323 return count;
1324 }
1325
1326 static ssize_t
1327 qla2x00_beacon_config_show(struct device *dev, struct device_attribute *attr,
1328 char *buf)
1329 {
1330 scsi_qla_host_t *vha = shost_priv(class_to_shost(dev));
1331 struct qla_hw_data *ha = vha->hw;
1332 uint16_t led[3] = { 0 };
1333
1334 if (!IS_QLA2031(ha) && !IS_QLA27XX(ha) && !IS_QLA28XX(ha))
1335 return -EPERM;
1336
1337 if (ql26xx_led_config(vha, 0, led))
1338 return scnprintf(buf, PAGE_SIZE, "\n");
1339
1340 return scnprintf(buf, PAGE_SIZE, "%#04hx %#04hx %#04hx\n",
1341 led[0], led[1], led[2]);
1342 }
1343
1344 static ssize_t
1345 qla2x00_beacon_config_store(struct device *dev, struct device_attribute *attr,
1346 const char *buf, size_t count)
1347 {
1348 scsi_qla_host_t *vha = shost_priv(class_to_shost(dev));
1349 struct qla_hw_data *ha = vha->hw;
1350 uint16_t options = BIT_0;
1351 uint16_t led[3] = { 0 };
1352 uint16_t word[4];
1353 int n;
1354
1355 if (!IS_QLA2031(ha) && !IS_QLA27XX(ha) && !IS_QLA28XX(ha))
1356 return -EPERM;
1357
1358 n = sscanf(buf, "%hx %hx %hx %hx", word+0, word+1, word+2, word+3);
1359 if (n == 4) {
1360 if (word[0] == 3) {
1361 options |= BIT_3|BIT_2|BIT_1;
1362 led[0] = word[1];
1363 led[1] = word[2];
1364 led[2] = word[3];
1365 goto write;
1366 }
1367 return -EINVAL;
1368 }
1369
1370 if (n == 2) {
1371 /* check led index */
1372 if (word[0] == 0) {
1373 options |= BIT_2;
1374 led[0] = word[1];
1375 goto write;
1376 }
1377 if (word[0] == 1) {
1378 options |= BIT_3;
1379 led[1] = word[1];
1380 goto write;
1381 }
1382 if (word[0] == 2) {
1383 options |= BIT_1;
1384 led[2] = word[1];
1385 goto write;
1386 }
1387 return -EINVAL;
1388 }
1389
1390 return -EINVAL;
1391
1392 write:
1393 if (ql26xx_led_config(vha, options, led))
1394 return -EFAULT;
1395
1396 return count;
1397 }
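/*
 * Input formats accepted by beacon_config, per the parsing above (all
 * values are hex words; examples are illustrative):
 *
 *   echo "3 aaaa bbbb cccc" > beacon_config   # program all three LEDs
 *   echo "0 aaaa" > beacon_config             # program LED 0 only
 *   echo "1 bbbb" > beacon_config             # program LED 1 only
 *   echo "2 cccc" > beacon_config             # program LED 2 only
 *
 * Anything else is rejected with -EINVAL; reading the attribute returns
 * the current three LED words, or a blank line if the query fails.
 */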
1398
1399 static ssize_t
1400 qla2x00_optrom_bios_version_show(struct device *dev,
1401 struct device_attribute *attr, char *buf)
1402 {
1403 scsi_qla_host_t *vha = shost_priv(class_to_shost(dev));
1404 struct qla_hw_data *ha = vha->hw;
1405
1406 return scnprintf(buf, PAGE_SIZE, "%d.%02d\n", ha->bios_revision[1],
1407 ha->bios_revision[0]);
1408 }
1409
1410 static ssize_t
1411 qla2x00_optrom_efi_version_show(struct device *dev,
1412 struct device_attribute *attr, char *buf)
1413 {
1414 scsi_qla_host_t *vha = shost_priv(class_to_shost(dev));
1415 struct qla_hw_data *ha = vha->hw;
1416
1417 return scnprintf(buf, PAGE_SIZE, "%d.%02d\n", ha->efi_revision[1],
1418 ha->efi_revision[0]);
1419 }
1420
1421 static ssize_t
1422 qla2x00_optrom_fcode_version_show(struct device *dev,
1423 struct device_attribute *attr, char *buf)
1424 {
1425 scsi_qla_host_t *vha = shost_priv(class_to_shost(dev));
1426 struct qla_hw_data *ha = vha->hw;
1427
1428 return scnprintf(buf, PAGE_SIZE, "%d.%02d\n", ha->fcode_revision[1],
1429 ha->fcode_revision[0]);
1430 }
1431
1432 static ssize_t
1433 qla2x00_optrom_fw_version_show(struct device *dev,
1434 struct device_attribute *attr, char *buf)
1435 {
1436 scsi_qla_host_t *vha = shost_priv(class_to_shost(dev));
1437 struct qla_hw_data *ha = vha->hw;
1438
1439 return scnprintf(buf, PAGE_SIZE, "%d.%02d.%02d %d\n",
1440 ha->fw_revision[0], ha->fw_revision[1], ha->fw_revision[2],
1441 ha->fw_revision[3]);
1442 }
1443
1444 static ssize_t
1445 qla2x00_optrom_gold_fw_version_show(struct device *dev,
1446 struct device_attribute *attr, char *buf)
1447 {
1448 scsi_qla_host_t *vha = shost_priv(class_to_shost(dev));
1449 struct qla_hw_data *ha = vha->hw;
1450
1451 if (!IS_QLA81XX(ha) && !IS_QLA83XX(ha) &&
1452 !IS_QLA27XX(ha) && !IS_QLA28XX(ha))
1453 return scnprintf(buf, PAGE_SIZE, "\n");
1454
1455 return scnprintf(buf, PAGE_SIZE, "%d.%02d.%02d (%d)\n",
1456 ha->gold_fw_version[0], ha->gold_fw_version[1],
1457 ha->gold_fw_version[2], ha->gold_fw_version[3]);
1458 }
1459
1460 static ssize_t
1461 qla2x00_total_isp_aborts_show(struct device *dev,
1462 struct device_attribute *attr, char *buf)
1463 {
1464 scsi_qla_host_t *vha = shost_priv(class_to_shost(dev));
1465
1466 return scnprintf(buf, PAGE_SIZE, "%d\n",
1467 vha->qla_stats.total_isp_aborts);
1468 }
1469
1470 static ssize_t
1471 qla24xx_84xx_fw_version_show(struct device *dev,
1472 struct device_attribute *attr, char *buf)
1473 {
1474 int rval = QLA_SUCCESS;
1475 uint16_t status[2] = { 0 };
1476 scsi_qla_host_t *vha = shost_priv(class_to_shost(dev));
1477 struct qla_hw_data *ha = vha->hw;
1478
1479 if (!IS_QLA84XX(ha))
1480 return scnprintf(buf, PAGE_SIZE, "\n");
1481
1482 if (!ha->cs84xx->op_fw_version) {
1483 rval = qla84xx_verify_chip(vha, status);
1484
1485 if (!rval && !status[0])
1486 return scnprintf(buf, PAGE_SIZE, "%u\n",
1487 (uint32_t)ha->cs84xx->op_fw_version);
1488 }
1489
1490 return scnprintf(buf, PAGE_SIZE, "\n");
1491 }
1492
1493 static ssize_t
1494 qla2x00_serdes_version_show(struct device *dev, struct device_attribute *attr,
1495 char *buf)
1496 {
1497 scsi_qla_host_t *vha = shost_priv(class_to_shost(dev));
1498 struct qla_hw_data *ha = vha->hw;
1499
1500 if (!IS_QLA27XX(ha) && !IS_QLA28XX(ha))
1501 return scnprintf(buf, PAGE_SIZE, "\n");
1502
1503 return scnprintf(buf, PAGE_SIZE, "%d.%02d.%02d\n",
1504 ha->serdes_version[0], ha->serdes_version[1],
1505 ha->serdes_version[2]);
1506 }
1507
1508 static ssize_t
1509 qla2x00_mpi_version_show(struct device *dev, struct device_attribute *attr,
1510 char *buf)
1511 {
1512 scsi_qla_host_t *vha = shost_priv(class_to_shost(dev));
1513 struct qla_hw_data *ha = vha->hw;
1514
1515 if (!IS_QLA81XX(ha) && !IS_QLA8031(ha) && !IS_QLA8044(ha) &&
1516 !IS_QLA27XX(ha) && !IS_QLA28XX(ha))
1517 return scnprintf(buf, PAGE_SIZE, "\n");
1518
1519 return scnprintf(buf, PAGE_SIZE, "%d.%02d.%02d (%x)\n",
1520 ha->mpi_version[0], ha->mpi_version[1], ha->mpi_version[2],
1521 ha->mpi_capabilities);
1522 }
1523
1524 static ssize_t
1525 qla2x00_phy_version_show(struct device *dev, struct device_attribute *attr,
1526 char *buf)
1527 {
1528 scsi_qla_host_t *vha = shost_priv(class_to_shost(dev));
1529 struct qla_hw_data *ha = vha->hw;
1530
1531 if (!IS_QLA81XX(ha) && !IS_QLA8031(ha))
1532 return scnprintf(buf, PAGE_SIZE, "\n");
1533
1534 return scnprintf(buf, PAGE_SIZE, "%d.%02d.%02d\n",
1535 ha->phy_version[0], ha->phy_version[1], ha->phy_version[2]);
1536 }
1537
1538 static ssize_t
1539 qla2x00_flash_block_size_show(struct device *dev,
1540 struct device_attribute *attr, char *buf)
1541 {
1542 scsi_qla_host_t *vha = shost_priv(class_to_shost(dev));
1543 struct qla_hw_data *ha = vha->hw;
1544
1545 return scnprintf(buf, PAGE_SIZE, "0x%x\n", ha->fdt_block_size);
1546 }
1547
1548 static ssize_t
1549 qla2x00_vlan_id_show(struct device *dev, struct device_attribute *attr,
1550 char *buf)
1551 {
1552 scsi_qla_host_t *vha = shost_priv(class_to_shost(dev));
1553
1554 if (!IS_CNA_CAPABLE(vha->hw))
1555 return scnprintf(buf, PAGE_SIZE, "\n");
1556
1557 return scnprintf(buf, PAGE_SIZE, "%d\n", vha->fcoe_vlan_id);
1558 }
1559
1560 static ssize_t
1561 qla2x00_vn_port_mac_address_show(struct device *dev,
1562 struct device_attribute *attr, char *buf)
1563 {
1564 scsi_qla_host_t *vha = shost_priv(class_to_shost(dev));
1565
1566 if (!IS_CNA_CAPABLE(vha->hw))
1567 return scnprintf(buf, PAGE_SIZE, "\n");
1568
1569 return scnprintf(buf, PAGE_SIZE, "%pMR\n", vha->fcoe_vn_port_mac);
1570 }
1571
1572 static ssize_t
1573 qla2x00_fabric_param_show(struct device *dev, struct device_attribute *attr,
1574 char *buf)
1575 {
1576 scsi_qla_host_t *vha = shost_priv(class_to_shost(dev));
1577
1578 return scnprintf(buf, PAGE_SIZE, "%d\n", vha->hw->switch_cap);
1579 }
1580
1581 static ssize_t
1582 qla2x00_thermal_temp_show(struct device *dev,
1583 struct device_attribute *attr, char *buf)
1584 {
1585 scsi_qla_host_t *vha = shost_priv(class_to_shost(dev));
1586 uint16_t temp = 0;
1587 int rc;
1588
1589 mutex_lock(&vha->hw->optrom_mutex);
1590 if (qla2x00_chip_is_down(vha)) {
1591 mutex_unlock(&vha->hw->optrom_mutex);
1592 ql_log(ql_log_warn, vha, 0x70dc, "ISP reset active.\n");
1593 goto done;
1594 }
1595
1596 if (vha->hw->flags.eeh_busy) {
1597 mutex_unlock(&vha->hw->optrom_mutex);
1598 ql_log(ql_log_warn, vha, 0x70dd, "PCI EEH busy.\n");
1599 goto done;
1600 }
1601
1602 rc = qla2x00_get_thermal_temp(vha, &temp);
1603 mutex_unlock(&vha->hw->optrom_mutex);
1604 if (rc == QLA_SUCCESS)
1605 return scnprintf(buf, PAGE_SIZE, "%d\n", temp);
1606
1607 done:
1608 return scnprintf(buf, PAGE_SIZE, "\n");
1609 }
1610
1611 static ssize_t
1612 qla2x00_fw_state_show(struct device *dev, struct device_attribute *attr,
1613 char *buf)
1614 {
1615 scsi_qla_host_t *vha = shost_priv(class_to_shost(dev));
1616 int rval = QLA_FUNCTION_FAILED;
1617 uint16_t state[6];
1618 uint32_t pstate;
1619
1620 if (IS_QLAFX00(vha->hw)) {
1621 pstate = qlafx00_fw_state_show(dev, attr, buf);
1622 return scnprintf(buf, PAGE_SIZE, "0x%x\n", pstate);
1623 }
1624
1625 mutex_lock(&vha->hw->optrom_mutex);
1626 if (qla2x00_chip_is_down(vha)) {
1627 mutex_unlock(&vha->hw->optrom_mutex);
1628 ql_log(ql_log_warn, vha, 0x707c,
1629 "ISP reset active.\n");
1630 goto out;
1631 } else if (vha->hw->flags.eeh_busy) {
1632 mutex_unlock(&vha->hw->optrom_mutex);
1633 goto out;
1634 }
1635
1636 rval = qla2x00_get_firmware_state(vha, state);
1637 mutex_unlock(&vha->hw->optrom_mutex);
1638 out:
1639 if (rval != QLA_SUCCESS) {
1640 memset(state, -1, sizeof(state));
1641 rval = qla2x00_get_firmware_state(vha, state);
1642 }
1643
1644 return scnprintf(buf, PAGE_SIZE, "0x%x 0x%x 0x%x 0x%x 0x%x 0x%x\n",
1645 state[0], state[1], state[2], state[3], state[4], state[5]);
1646 }
1647
1648 static ssize_t
1649 qla2x00_diag_requests_show(struct device *dev,
1650 struct device_attribute *attr, char *buf)
1651 {
1652 scsi_qla_host_t *vha = shost_priv(class_to_shost(dev));
1653
1654 if (!IS_BIDI_CAPABLE(vha->hw))
1655 return scnprintf(buf, PAGE_SIZE, "\n");
1656
1657 return scnprintf(buf, PAGE_SIZE, "%llu\n", vha->bidi_stats.io_count);
1658 }
1659
1660 static ssize_t
1661 qla2x00_diag_megabytes_show(struct device *dev,
1662 struct device_attribute *attr, char *buf)
1663 {
1664 scsi_qla_host_t *vha = shost_priv(class_to_shost(dev));
1665
1666 if (!IS_BIDI_CAPABLE(vha->hw))
1667 return scnprintf(buf, PAGE_SIZE, "\n");
1668
1669 return scnprintf(buf, PAGE_SIZE, "%llu\n",
1670 vha->bidi_stats.transfer_bytes >> 20);
1671 }
1672
1673 static ssize_t
1674 qla2x00_fw_dump_size_show(struct device *dev, struct device_attribute *attr,
1675 char *buf)
1676 {
1677 scsi_qla_host_t *vha = shost_priv(class_to_shost(dev));
1678 struct qla_hw_data *ha = vha->hw;
1679 uint32_t size;
1680
1681 if (!ha->fw_dumped)
1682 size = 0;
1683 else if (IS_P3P_TYPE(ha))
1684 size = ha->md_template_size + ha->md_dump_size;
1685 else
1686 size = ha->fw_dump_len;
1687
1688 return scnprintf(buf, PAGE_SIZE, "%d\n", size);
1689 }
1690
1691 static ssize_t
1692 qla2x00_allow_cna_fw_dump_show(struct device *dev,
1693 struct device_attribute *attr, char *buf)
1694 {
1695 scsi_qla_host_t *vha = shost_priv(class_to_shost(dev));
1696
1697 if (!IS_P3P_TYPE(vha->hw))
1698 return scnprintf(buf, PAGE_SIZE, "\n");
1699 else
1700 return scnprintf(buf, PAGE_SIZE, "%s\n",
1701 vha->hw->allow_cna_fw_dump ? "true" : "false");
1702 }
1703
1704 static ssize_t
1705 qla2x00_allow_cna_fw_dump_store(struct device *dev,
1706 struct device_attribute *attr, const char *buf, size_t count)
1707 {
1708 scsi_qla_host_t *vha = shost_priv(class_to_shost(dev));
1709 int val = 0;
1710
1711 if (!IS_P3P_TYPE(vha->hw))
1712 return -EINVAL;
1713
1714 if (sscanf(buf, "%d", &val) != 1)
1715 return -EINVAL;
1716
1717 vha->hw->allow_cna_fw_dump = val != 0;
1718
1719 return strlen(buf);
1720 }
1721
1722 static ssize_t
1723 qla2x00_pep_version_show(struct device *dev, struct device_attribute *attr,
1724 char *buf)
1725 {
1726 scsi_qla_host_t *vha = shost_priv(class_to_shost(dev));
1727 struct qla_hw_data *ha = vha->hw;
1728
1729 if (!IS_QLA27XX(ha) && !IS_QLA28XX(ha))
1730 return scnprintf(buf, PAGE_SIZE, "\n");
1731
1732 return scnprintf(buf, PAGE_SIZE, "%d.%02d.%02d\n",
1733 ha->pep_version[0], ha->pep_version[1], ha->pep_version[2]);
1734 }
1735
1736 static ssize_t
1737 qla2x00_min_supported_speed_show(struct device *dev,
1738 struct device_attribute *attr, char *buf)
1739 {
1740 scsi_qla_host_t *vha = shost_priv(class_to_shost(dev));
1741 struct qla_hw_data *ha = vha->hw;
1742
1743 if (!IS_QLA27XX(ha) && !IS_QLA28XX(ha))
1744 return scnprintf(buf, PAGE_SIZE, "\n");
1745
1746 return scnprintf(buf, PAGE_SIZE, "%s\n",
1747 ha->min_supported_speed == 6 ? "64Gps" :
1748 ha->min_supported_speed == 5 ? "32Gps" :
1749 ha->min_supported_speed == 4 ? "16Gps" :
1750 ha->min_supported_speed == 3 ? "8Gps" :
1751 ha->min_supported_speed == 2 ? "4Gps" :
1752 ha->min_supported_speed != 0 ? "unknown" : "");
1753 }
1754
1755 static ssize_t
1756 qla2x00_max_supported_speed_show(struct device *dev,
1757 struct device_attribute *attr, char *buf)
1758 {
1759 scsi_qla_host_t *vha = shost_priv(class_to_shost(dev));
1760 struct qla_hw_data *ha = vha->hw;
1761
1762 if (!IS_QLA27XX(ha) && !IS_QLA28XX(ha))
1763 return scnprintf(buf, PAGE_SIZE, "\n");
1764
1765 return scnprintf(buf, PAGE_SIZE, "%s\n",
1766 ha->max_supported_speed == 2 ? "64Gps" :
1767 ha->max_supported_speed == 1 ? "32Gps" :
1768 ha->max_supported_speed == 0 ? "16Gps" : "unknown");
1769 }
1770
1771 static ssize_t
1772 qla2x00_port_speed_store(struct device *dev, struct device_attribute *attr,
1773 const char *buf, size_t count)
1774 {
1775 struct scsi_qla_host *vha = shost_priv(dev_to_shost(dev));
1776 ulong type, speed;
1777 int oldspeed, rval;
1778 int mode = QLA_SET_DATA_RATE_LR;
1779 struct qla_hw_data *ha = vha->hw;
1780
1781 if (!IS_QLA27XX(ha) && !IS_QLA28XX(ha)) {
1782 ql_log(ql_log_warn, vha, 0x70d8,
1783 "Speed setting not supported \n");
1784 return -EINVAL;
1785 }
1786
1787 rval = kstrtol(buf, 10, &type);
1788 if (rval)
1789 return rval;
1790 speed = type;
1791 if (type == 40 || type == 80 || type == 160 ||
1792 type == 320) {
1793 ql_dbg(ql_dbg_user, vha, 0x70d9,
1794 "Setting will be affected after a loss of sync\n");
1795 type = type/10;
1796 mode = QLA_SET_DATA_RATE_NOLR;
1797 }
1798
1799 oldspeed = ha->set_data_rate;
1800
1801 switch (type) {
1802 case 0:
1803 ha->set_data_rate = PORT_SPEED_AUTO;
1804 break;
1805 case 4:
1806 ha->set_data_rate = PORT_SPEED_4GB;
1807 break;
1808 case 8:
1809 ha->set_data_rate = PORT_SPEED_8GB;
1810 break;
1811 case 16:
1812 ha->set_data_rate = PORT_SPEED_16GB;
1813 break;
1814 case 32:
1815 ha->set_data_rate = PORT_SPEED_32GB;
1816 break;
1817 default:
1818 ql_log(ql_log_warn, vha, 0x1199,
1819 "Unrecognized speed setting:%lx. Setting Autoneg\n",
1820 speed);
1821 ha->set_data_rate = PORT_SPEED_AUTO;
1822 }
1823
1824 if (qla2x00_chip_is_down(vha) || (oldspeed == ha->set_data_rate))
1825 return -EINVAL;
1826
1827 ql_log(ql_log_info, vha, 0x70da,
1828 "Setting speed to %lx Gbps \n", type);
1829
1830 rval = qla2x00_set_data_rate(vha, mode);
1831 if (rval != QLA_SUCCESS)
1832 return -EIO;
1833
1834 return strlen(buf);
1835 }
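/*
 * Values accepted by port_speed (ISP27xx/28xx only), per the store handler
 * above: 0 requests autonegotiation, and 4, 8, 16 or 32 select the
 * corresponding rate in Gbps via QLA_SET_DATA_RATE_LR. Writing 40, 80, 160
 * or 320 selects the same rates but defers the change until the next loss
 * of sync (QLA_SET_DATA_RATE_NOLR). The write is rejected if the chip is
 * down or the requested rate already matches the current setting.
 */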
1836
1837 static ssize_t
1838 qla2x00_port_speed_show(struct device *dev, struct device_attribute *attr,
1839 char *buf)
1840 {
1841 struct scsi_qla_host *vha = shost_priv(dev_to_shost(dev));
1842 struct qla_hw_data *ha = vha->hw;
1843 ssize_t rval;
1844 char *spd[7] = {"0", "0", "0", "4", "8", "16", "32"};
1845
1846 rval = qla2x00_get_data_rate(vha);
1847 if (rval != QLA_SUCCESS) {
1848 ql_log(ql_log_warn, vha, 0x70db,
1849 "Unable to get port speed rval:%zd\n", rval);
1850 return -EINVAL;
1851 }
1852
1853 ql_log(ql_log_info, vha, 0x70d6,
1854 "port speed:%d\n", ha->link_data_rate);
1855
1856 return scnprintf(buf, PAGE_SIZE, "%s\n", spd[ha->link_data_rate]);
1857 }
1858
1859 /* ----- */
1860
1861 static ssize_t
1862 qlini_mode_show(struct device *dev, struct device_attribute *attr, char *buf)
1863 {
1864 scsi_qla_host_t *vha = shost_priv(class_to_shost(dev));
1865 int len = 0;
1866
1867 len += scnprintf(buf + len, PAGE_SIZE-len,
1868 "Supported options: enabled | disabled | dual | exclusive\n");
1869
1870 /* --- */
1871 len += scnprintf(buf + len, PAGE_SIZE-len, "Current selection: ");
1872
1873 switch (vha->qlini_mode) {
1874 case QLA2XXX_INI_MODE_EXCLUSIVE:
1875 len += scnprintf(buf + len, PAGE_SIZE-len,
1876 QLA2XXX_INI_MODE_STR_EXCLUSIVE);
1877 break;
1878 case QLA2XXX_INI_MODE_DISABLED:
1879 len += scnprintf(buf + len, PAGE_SIZE-len,
1880 QLA2XXX_INI_MODE_STR_DISABLED);
1881 break;
1882 case QLA2XXX_INI_MODE_ENABLED:
1883 len += scnprintf(buf + len, PAGE_SIZE-len,
1884 QLA2XXX_INI_MODE_STR_ENABLED);
1885 break;
1886 case QLA2XXX_INI_MODE_DUAL:
1887 len += scnprintf(buf + len, PAGE_SIZE-len,
1888 QLA2XXX_INI_MODE_STR_DUAL);
1889 break;
1890 }
1891 len += scnprintf(buf + len, PAGE_SIZE-len, "\n");
1892
1893 return len;
1894 }
1895
1896 static char *mode_to_str[] = {
1897 "exclusive",
1898 "disabled",
1899 "enabled",
1900 "dual",
1901 };
1902
1903 #define NEED_EXCH_OFFLOAD(_exchg) ((_exchg) > FW_DEF_EXCHANGES_CNT)
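/*
 * NEED_EXCH_OFFLOAD() is true when the requested exchange count exceeds the
 * firmware's default allocation (FW_DEF_EXCHANGES_CNT), i.e. when exchange
 * offload memory would be needed to satisfy it; qla_set_ini_mode() below
 * compares it against hw->flags.exchoffld_enabled to detect that the
 * offload requirement has flipped (eo_toggle).
 */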
1904 static int qla_set_ini_mode(scsi_qla_host_t *vha, int op)
1905 {
1906 int rc = 0;
1907 enum {
1908 NO_ACTION,
1909 MODE_CHANGE_ACCEPT,
1910 MODE_CHANGE_NO_ACTION,
1911 TARGET_STILL_ACTIVE,
1912 };
1913 int action = NO_ACTION;
1914 int set_mode = 0;
1915 u8 eo_toggle = 0; /* exchange offload flipped */
1916
1917 switch (vha->qlini_mode) {
1918 case QLA2XXX_INI_MODE_DISABLED:
1919 switch (op) {
1920 case QLA2XXX_INI_MODE_DISABLED:
1921 if (qla_tgt_mode_enabled(vha)) {
1922 if (NEED_EXCH_OFFLOAD(vha->u_ql2xexchoffld) !=
1923 vha->hw->flags.exchoffld_enabled)
1924 eo_toggle = 1;
1925 if (((vha->ql2xexchoffld !=
1926 vha->u_ql2xexchoffld) &&
1927 NEED_EXCH_OFFLOAD(vha->u_ql2xexchoffld)) ||
1928 eo_toggle) {
1929 /*
1930 * The number of exchanges to be offloaded
1931 * was tweaked or the offload option was
1932 * flipped
1933 */
1934 action = MODE_CHANGE_ACCEPT;
1935 } else {
1936 action = MODE_CHANGE_NO_ACTION;
1937 }
1938 } else {
1939 action = MODE_CHANGE_NO_ACTION;
1940 }
1941 break;
1942 case QLA2XXX_INI_MODE_EXCLUSIVE:
1943 if (qla_tgt_mode_enabled(vha)) {
1944 if (NEED_EXCH_OFFLOAD(vha->u_ql2xexchoffld) !=
1945 vha->hw->flags.exchoffld_enabled)
1946 eo_toggle = 1;
1947 if (((vha->ql2xexchoffld !=
1948 vha->u_ql2xexchoffld) &&
1949 NEED_EXCH_OFFLOAD(vha->u_ql2xexchoffld)) ||
1950 eo_toggle) {
1951 /*
1952 * The number of exchanges to be offloaded
1953 * was tweaked or the offload option was
1954 * flipped
1955 */
1956 action = MODE_CHANGE_ACCEPT;
1957 } else {
1958 action = MODE_CHANGE_NO_ACTION;
1959 }
1960 } else {
1961 action = MODE_CHANGE_ACCEPT;
1962 }
1963 break;
1964 case QLA2XXX_INI_MODE_DUAL:
1965 action = MODE_CHANGE_ACCEPT;
1966 /* active_mode is target only, reset it to dual */
1967 if (qla_tgt_mode_enabled(vha)) {
1968 set_mode = 1;
1969 action = MODE_CHANGE_ACCEPT;
1970 } else {
1971 action = MODE_CHANGE_NO_ACTION;
1972 }
1973 break;
1974
1975 case QLA2XXX_INI_MODE_ENABLED:
1976 if (qla_tgt_mode_enabled(vha))
1977 action = TARGET_STILL_ACTIVE;
1978 else {
1979 action = MODE_CHANGE_ACCEPT;
1980 set_mode = 1;
1981 }
1982 break;
1983 }
1984 break;
1985
1986 case QLA2XXX_INI_MODE_EXCLUSIVE:
1987 switch (op) {
1988 case QLA2XXX_INI_MODE_EXCLUSIVE:
1989 if (qla_tgt_mode_enabled(vha)) {
1990 if (NEED_EXCH_OFFLOAD(vha->u_ql2xexchoffld) !=
1991 vha->hw->flags.exchoffld_enabled)
1992 eo_toggle = 1;
1993 if (((vha->ql2xexchoffld !=
1994 vha->u_ql2xexchoffld) &&
1995 NEED_EXCH_OFFLOAD(vha->u_ql2xexchoffld)) ||
1996 eo_toggle)
1997 /*
1998 * The number of exchanges to be offloaded
1999 * was tweaked or the offload option was
2000 * flipped
2001 */
2002 action = MODE_CHANGE_ACCEPT;
2003 else
2004 action = NO_ACTION;
2005 } else
2006 action = NO_ACTION;
2007
2008 break;
2009
2010 case QLA2XXX_INI_MODE_DISABLED:
2011 if (qla_tgt_mode_enabled(vha)) {
2012 if (NEED_EXCH_OFFLOAD(vha->u_ql2xexchoffld) !=
2013 vha->hw->flags.exchoffld_enabled)
2014 eo_toggle = 1;
2015 if (((vha->ql2xexchoffld !=
2016 vha->u_ql2xexchoffld) &&
2017 NEED_EXCH_OFFLOAD(vha->u_ql2xexchoffld)) ||
2018 eo_toggle)
2019 action = MODE_CHANGE_ACCEPT;
2020 else
2021 action = MODE_CHANGE_NO_ACTION;
2022 } else
2023 action = MODE_CHANGE_NO_ACTION;
2024 break;
2025
2026 case QLA2XXX_INI_MODE_DUAL: /* exclusive -> dual */
2027 if (qla_tgt_mode_enabled(vha)) {
2028 action = MODE_CHANGE_ACCEPT;
2029 set_mode = 1;
2030 } else
2031 action = MODE_CHANGE_ACCEPT;
2032 break;
2033
2034 case QLA2XXX_INI_MODE_ENABLED:
2035 if (qla_tgt_mode_enabled(vha))
2036 action = TARGET_STILL_ACTIVE;
2037 else {
2038 if (vha->hw->flags.fw_started)
2039 action = MODE_CHANGE_NO_ACTION;
2040 else
2041 action = MODE_CHANGE_ACCEPT;
2042 }
2043 break;
2044 }
2045 break;
2046
2047 case QLA2XXX_INI_MODE_ENABLED:
2048 switch (op) {
2049 case QLA2XXX_INI_MODE_ENABLED:
2050 if (NEED_EXCH_OFFLOAD(vha->u_ql2xiniexchg) !=
2051 vha->hw->flags.exchoffld_enabled)
2052 eo_toggle = 1;
2053 if (((vha->ql2xiniexchg != vha->u_ql2xiniexchg) &&
2054 NEED_EXCH_OFFLOAD(vha->u_ql2xiniexchg)) ||
2055 eo_toggle)
2056 action = MODE_CHANGE_ACCEPT;
2057 else
2058 action = NO_ACTION;
2059 break;
2060 case QLA2XXX_INI_MODE_DUAL:
2061 case QLA2XXX_INI_MODE_DISABLED:
2062 action = MODE_CHANGE_ACCEPT;
2063 break;
2064 default:
2065 action = MODE_CHANGE_NO_ACTION;
2066 break;
2067 }
2068 break;
2069
2070 case QLA2XXX_INI_MODE_DUAL:
2071 switch (op) {
2072 case QLA2XXX_INI_MODE_DUAL:
2073 if (qla_tgt_mode_enabled(vha) ||
2074 qla_dual_mode_enabled(vha)) {
2075 if (NEED_EXCH_OFFLOAD(vha->u_ql2xexchoffld +
2076 vha->u_ql2xiniexchg) !=
2077 vha->hw->flags.exchoffld_enabled)
2078 eo_toggle = 1;
2079
2080 if ((((vha->ql2xexchoffld +
2081 vha->ql2xiniexchg) !=
2082 (vha->u_ql2xiniexchg +
2083 vha->u_ql2xexchoffld)) &&
2084 NEED_EXCH_OFFLOAD(vha->u_ql2xiniexchg +
2085 vha->u_ql2xexchoffld)) || eo_toggle)
2086 action = MODE_CHANGE_ACCEPT;
2087 else
2088 action = NO_ACTION;
2089 } else {
2090 if (NEED_EXCH_OFFLOAD(vha->u_ql2xexchoffld +
2091 vha->u_ql2xiniexchg) !=
2092 vha->hw->flags.exchoffld_enabled)
2093 eo_toggle = 1;
2094
2095 if ((((vha->ql2xexchoffld + vha->ql2xiniexchg)
2096 != (vha->u_ql2xiniexchg +
2097 vha->u_ql2xexchoffld)) &&
2098 NEED_EXCH_OFFLOAD(vha->u_ql2xiniexchg +
2099 vha->u_ql2xexchoffld)) || eo_toggle)
2100 action = MODE_CHANGE_NO_ACTION;
2101 else
2102 action = NO_ACTION;
2103 }
2104 break;
2105
2106 case QLA2XXX_INI_MODE_DISABLED:
2107 if (qla_tgt_mode_enabled(vha) ||
2108 qla_dual_mode_enabled(vha)) {
2109 /* turning off initiator mode */
2110 set_mode = 1;
2111 action = MODE_CHANGE_ACCEPT;
2112 } else {
2113 action = MODE_CHANGE_NO_ACTION;
2114 }
2115 break;
2116
2117 case QLA2XXX_INI_MODE_EXCLUSIVE:
2118 if (qla_tgt_mode_enabled(vha) ||
2119 qla_dual_mode_enabled(vha)) {
2120 set_mode = 1;
2121 action = MODE_CHANGE_ACCEPT;
2122 } else {
2123 action = MODE_CHANGE_ACCEPT;
2124 }
2125 break;
2126
2127 case QLA2XXX_INI_MODE_ENABLED:
2128 if (qla_tgt_mode_enabled(vha) ||
2129 qla_dual_mode_enabled(vha)) {
2130 action = TARGET_STILL_ACTIVE;
2131 } else {
2132 action = MODE_CHANGE_ACCEPT;
2133 }
2134 }
2135 break;
2136 }
2137
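/*
 * Apply the decision computed above: log the outcome and, on
 * MODE_CHANGE_ACCEPT, commit the user-requested mode and exchange
 * counts, then schedule an ISP abort so the new settings take effect.
 */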
2138 switch (action) {
2139 case MODE_CHANGE_ACCEPT:
2140 ql_log(ql_log_warn, vha, 0xffff,
2141 "Mode change accepted. From %s to %s, Tgt exchg %d|%d. ini exchg %d|%d\n",
2142 mode_to_str[vha->qlini_mode], mode_to_str[op],
2143 vha->ql2xexchoffld, vha->u_ql2xexchoffld,
2144 vha->ql2xiniexchg, vha->u_ql2xiniexchg);
2145
2146 vha->qlini_mode = op;
2147 vha->ql2xexchoffld = vha->u_ql2xexchoffld;
2148 vha->ql2xiniexchg = vha->u_ql2xiniexchg;
2149 if (set_mode)
2150 qlt_set_mode(vha);
2151 vha->flags.online = 1;
2152 set_bit(ISP_ABORT_NEEDED, &vha->dpc_flags);
2153 break;
2154
2155 case MODE_CHANGE_NO_ACTION:
2156 ql_log(ql_log_warn, vha, 0xffff,
2157 "Mode is set. No action taken. From %s to %s, Tgt exchg %d|%d. ini exchg %d|%d\n",
2158 mode_to_str[vha->qlini_mode], mode_to_str[op],
2159 vha->ql2xexchoffld, vha->u_ql2xexchoffld,
2160 vha->ql2xiniexchg, vha->u_ql2xiniexchg);
2161 vha->qlini_mode = op;
2162 vha->ql2xexchoffld = vha->u_ql2xexchoffld;
2163 vha->ql2xiniexchg = vha->u_ql2xiniexchg;
2164 break;
2165
2166 case TARGET_STILL_ACTIVE:
2167 ql_log(ql_log_warn, vha, 0xffff,
2168 "Target Mode is active. Unable to change Mode.\n");
2169 break;
2170
2171 case NO_ACTION:
2172 default:
2173 ql_log(ql_log_warn, vha, 0xffff,
2174 "Mode unchange. No action taken. %d|%d pct %d|%d.\n",
2175 vha->qlini_mode, op,
2176 vha->ql2xexchoffld, vha->u_ql2xexchoffld);
2177 break;
2178 }
2179
2180 return rc;
2181 }
2182
2183 static ssize_t
2184 qlini_mode_store(struct device *dev, struct device_attribute *attr,
2185 const char *buf, size_t count)
2186 {
2187 scsi_qla_host_t *vha = shost_priv(class_to_shost(dev));
2188 int ini;
2189
2190 if (!buf)
2191 return -EINVAL;
2192
2193 if (strncasecmp(QLA2XXX_INI_MODE_STR_EXCLUSIVE, buf,
2194 strlen(QLA2XXX_INI_MODE_STR_EXCLUSIVE)) == 0)
2195 ini = QLA2XXX_INI_MODE_EXCLUSIVE;
2196 else if (strncasecmp(QLA2XXX_INI_MODE_STR_DISABLED, buf,
2197 strlen(QLA2XXX_INI_MODE_STR_DISABLED)) == 0)
2198 ini = QLA2XXX_INI_MODE_DISABLED;
2199 else if (strncasecmp(QLA2XXX_INI_MODE_STR_ENABLED, buf,
2200 strlen(QLA2XXX_INI_MODE_STR_ENABLED)) == 0)
2201 ini = QLA2XXX_INI_MODE_ENABLED;
2202 else if (strncasecmp(QLA2XXX_INI_MODE_STR_DUAL, buf,
2203 strlen(QLA2XXX_INI_MODE_STR_DUAL)) == 0)
2204 ini = QLA2XXX_INI_MODE_DUAL;
2205 else
2206 return -EINVAL;
2207
2208 qla_set_ini_mode(vha, ini);
2209 return strlen(buf);
2210 }
2211
2212 static ssize_t
2213 ql2xexchoffld_show(struct device *dev, struct device_attribute *attr,
2214 char *buf)
2215 {
2216 scsi_qla_host_t *vha = shost_priv(class_to_shost(dev));
2217 int len = 0;
2218
2219 len += scnprintf(buf + len, PAGE_SIZE-len,
2220 "target exchange: new %d : current: %d\n\n",
2221 vha->u_ql2xexchoffld, vha->ql2xexchoffld);
2222
2223 len += scnprintf(buf + len, PAGE_SIZE-len,
2224 "Please (re)set operating mode via \"/sys/class/scsi_host/host%ld/qlini_mode\" to load new setting.\n",
2225 vha->host_no);
2226
2227 return len;
2228 }
2229
2230 static ssize_t
2231 ql2xexchoffld_store(struct device *dev, struct device_attribute *attr,
2232 const char *buf, size_t count)
2233 {
2234 scsi_qla_host_t *vha = shost_priv(class_to_shost(dev));
2235 int val = 0;
2236
2237 if (sscanf(buf, "%d", &val) != 1)
2238 return -EINVAL;
2239
2240 if (val > FW_MAX_EXCHANGES_CNT)
2241 val = FW_MAX_EXCHANGES_CNT;
2242 else if (val < 0)
2243 val = 0;
2244
2245 vha->u_ql2xexchoffld = val;
2246 return strlen(buf);
2247 }
2248
2249 static ssize_t
2250 ql2xiniexchg_show(struct device *dev, struct device_attribute *attr,
2251 char *buf)
2252 {
2253 scsi_qla_host_t *vha = shost_priv(class_to_shost(dev));
2254 int len = 0;
2255
2256 len += scnprintf(buf + len, PAGE_SIZE-len,
2257 "target exchange: new %d : current: %d\n\n",
2258 vha->u_ql2xiniexchg, vha->ql2xiniexchg);
2259
2260 len += scnprintf(buf + len, PAGE_SIZE-len,
2261 "Please (re)set operating mode via \"/sys/class/scsi_host/host%ld/qlini_mode\" to load new setting.\n",
2262 vha->host_no);
2263
2264 return len;
2265 }
2266
2267 static ssize_t
2268 ql2xiniexchg_store(struct device *dev, struct device_attribute *attr,
2269 const char *buf, size_t count)
2270 {
2271 scsi_qla_host_t *vha = shost_priv(class_to_shost(dev));
2272 int val = 0;
2273
2274 if (sscanf(buf, "%d", &val) != 1)
2275 return -EINVAL;
2276
2277 if (val > FW_MAX_EXCHANGES_CNT)
2278 val = FW_MAX_EXCHANGES_CNT;
2279 else if (val < 0)
2280 val = 0;
2281
2282 vha->u_ql2xiniexchg = val;
2283 return strlen(buf);
2284 }
2285
2286 static ssize_t
2287 qla2x00_dif_bundle_statistics_show(struct device *dev,
2288 struct device_attribute *attr, char *buf)
2289 {
2290 scsi_qla_host_t *vha = shost_priv(class_to_shost(dev));
2291 struct qla_hw_data *ha = vha->hw;
2292
2293 return scnprintf(buf, PAGE_SIZE,
2294 "cross=%llu read=%llu write=%llu kalloc=%llu dma_alloc=%llu unusable=%u\n",
2295 ha->dif_bundle_crossed_pages, ha->dif_bundle_reads,
2296 ha->dif_bundle_writes, ha->dif_bundle_kallocs,
2297 ha->dif_bundle_dma_allocs, ha->pool.unusable.count);
2298 }
2299
2300 static ssize_t
2301 qla2x00_fw_attr_show(struct device *dev,
2302 struct device_attribute *attr, char *buf)
2303 {
2304 scsi_qla_host_t *vha = shost_priv(class_to_shost(dev));
2305 struct qla_hw_data *ha = vha->hw;
2306
2307 if (!IS_QLA27XX(ha) && !IS_QLA28XX(ha))
2308 return scnprintf(buf, PAGE_SIZE, "\n");
2309
2310 return scnprintf(buf, PAGE_SIZE, "%llx\n",
2311 (uint64_t)ha->fw_attributes_ext[1] << 48 |
2312 (uint64_t)ha->fw_attributes_ext[0] << 32 |
2313 (uint64_t)ha->fw_attributes_h << 16 |
2314 (uint64_t)ha->fw_attributes);
2315 }
2316
2317 static ssize_t
2318 qla2x00_port_no_show(struct device *dev, struct device_attribute *attr,
2319 char *buf)
2320 {
2321 scsi_qla_host_t *vha = shost_priv(class_to_shost(dev));
2322
2323 return scnprintf(buf, PAGE_SIZE, "%u\n", vha->hw->port_no);
2324 }
2325
2326 static ssize_t
2327 qla2x00_dport_diagnostics_show(struct device *dev,
2328 struct device_attribute *attr, char *buf)
2329 {
2330 scsi_qla_host_t *vha = shost_priv(class_to_shost(dev));
2331
2332 if (!IS_QLA83XX(vha->hw) && !IS_QLA27XX(vha->hw) &&
2333 !IS_QLA28XX(vha->hw))
2334 return scnprintf(buf, PAGE_SIZE, "\n");
2335
2336 if (!*vha->dport_data)
2337 return scnprintf(buf, PAGE_SIZE, "\n");
2338
2339 return scnprintf(buf, PAGE_SIZE, "%04x %04x %04x %04x\n",
2340 vha->dport_data[0], vha->dport_data[1],
2341 vha->dport_data[2], vha->dport_data[3]);
2342 }
2343 static DEVICE_ATTR(dport_diagnostics, 0444,
2344 qla2x00_dport_diagnostics_show, NULL);
2345
2346 static DEVICE_ATTR(driver_version, S_IRUGO, qla2x00_driver_version_show, NULL);
2347 static DEVICE_ATTR(fw_version, S_IRUGO, qla2x00_fw_version_show, NULL);
2348 static DEVICE_ATTR(serial_num, S_IRUGO, qla2x00_serial_num_show, NULL);
2349 static DEVICE_ATTR(isp_name, S_IRUGO, qla2x00_isp_name_show, NULL);
2350 static DEVICE_ATTR(isp_id, S_IRUGO, qla2x00_isp_id_show, NULL);
2351 static DEVICE_ATTR(model_name, S_IRUGO, qla2x00_model_name_show, NULL);
2352 static DEVICE_ATTR(model_desc, S_IRUGO, qla2x00_model_desc_show, NULL);
2353 static DEVICE_ATTR(pci_info, S_IRUGO, qla2x00_pci_info_show, NULL);
2354 static DEVICE_ATTR(link_state, S_IRUGO, qla2x00_link_state_show, NULL);
2355 static DEVICE_ATTR(zio, S_IRUGO | S_IWUSR, qla2x00_zio_show, qla2x00_zio_store);
2356 static DEVICE_ATTR(zio_timer, S_IRUGO | S_IWUSR, qla2x00_zio_timer_show,
2357 qla2x00_zio_timer_store);
2358 static DEVICE_ATTR(beacon, S_IRUGO | S_IWUSR, qla2x00_beacon_show,
2359 qla2x00_beacon_store);
2360 static DEVICE_ATTR(beacon_config, 0644, qla2x00_beacon_config_show,
2361 qla2x00_beacon_config_store);
2362 static DEVICE_ATTR(optrom_bios_version, S_IRUGO,
2363 qla2x00_optrom_bios_version_show, NULL);
2364 static DEVICE_ATTR(optrom_efi_version, S_IRUGO,
2365 qla2x00_optrom_efi_version_show, NULL);
2366 static DEVICE_ATTR(optrom_fcode_version, S_IRUGO,
2367 qla2x00_optrom_fcode_version_show, NULL);
2368 static DEVICE_ATTR(optrom_fw_version, S_IRUGO, qla2x00_optrom_fw_version_show,
2369 NULL);
2370 static DEVICE_ATTR(optrom_gold_fw_version, S_IRUGO,
2371 qla2x00_optrom_gold_fw_version_show, NULL);
2372 static DEVICE_ATTR(84xx_fw_version, S_IRUGO, qla24xx_84xx_fw_version_show,
2373 NULL);
2374 static DEVICE_ATTR(total_isp_aborts, S_IRUGO, qla2x00_total_isp_aborts_show,
2375 NULL);
2376 static DEVICE_ATTR(serdes_version, 0444, qla2x00_serdes_version_show, NULL);
2377 static DEVICE_ATTR(mpi_version, S_IRUGO, qla2x00_mpi_version_show, NULL);
2378 static DEVICE_ATTR(phy_version, S_IRUGO, qla2x00_phy_version_show, NULL);
2379 static DEVICE_ATTR(flash_block_size, S_IRUGO, qla2x00_flash_block_size_show,
2380 NULL);
2381 static DEVICE_ATTR(vlan_id, S_IRUGO, qla2x00_vlan_id_show, NULL);
2382 static DEVICE_ATTR(vn_port_mac_address, S_IRUGO,
2383 qla2x00_vn_port_mac_address_show, NULL);
2384 static DEVICE_ATTR(fabric_param, S_IRUGO, qla2x00_fabric_param_show, NULL);
2385 static DEVICE_ATTR(fw_state, S_IRUGO, qla2x00_fw_state_show, NULL);
2386 static DEVICE_ATTR(thermal_temp, S_IRUGO, qla2x00_thermal_temp_show, NULL);
2387 static DEVICE_ATTR(diag_requests, S_IRUGO, qla2x00_diag_requests_show, NULL);
2388 static DEVICE_ATTR(diag_megabytes, S_IRUGO, qla2x00_diag_megabytes_show, NULL);
2389 static DEVICE_ATTR(fw_dump_size, S_IRUGO, qla2x00_fw_dump_size_show, NULL);
2390 static DEVICE_ATTR(allow_cna_fw_dump, S_IRUGO | S_IWUSR,
2391 qla2x00_allow_cna_fw_dump_show,
2392 qla2x00_allow_cna_fw_dump_store);
2393 static DEVICE_ATTR(pep_version, S_IRUGO, qla2x00_pep_version_show, NULL);
2394 static DEVICE_ATTR(min_supported_speed, 0444,
2395 qla2x00_min_supported_speed_show, NULL);
2396 static DEVICE_ATTR(max_supported_speed, 0444,
2397 qla2x00_max_supported_speed_show, NULL);
2398 static DEVICE_ATTR(zio_threshold, 0644,
2399 qla_zio_threshold_show,
2400 qla_zio_threshold_store);
2401 static DEVICE_ATTR_RW(qlini_mode);
2402 static DEVICE_ATTR_RW(ql2xexchoffld);
2403 static DEVICE_ATTR_RW(ql2xiniexchg);
2404 static DEVICE_ATTR(dif_bundle_statistics, 0444,
2405 qla2x00_dif_bundle_statistics_show, NULL);
2406 static DEVICE_ATTR(port_speed, 0644, qla2x00_port_speed_show,
2407 qla2x00_port_speed_store);
2408 static DEVICE_ATTR(port_no, 0444, qla2x00_port_no_show, NULL);
2409 static DEVICE_ATTR(fw_attr, 0444, qla2x00_fw_attr_show, NULL);
2410
2411
2412 struct device_attribute *qla2x00_host_attrs[] = {
2413 &dev_attr_driver_version,
2414 &dev_attr_fw_version,
2415 &dev_attr_serial_num,
2416 &dev_attr_isp_name,
2417 &dev_attr_isp_id,
2418 &dev_attr_model_name,
2419 &dev_attr_model_desc,
2420 &dev_attr_pci_info,
2421 &dev_attr_link_state,
2422 &dev_attr_zio,
2423 &dev_attr_zio_timer,
2424 &dev_attr_beacon,
2425 &dev_attr_beacon_config,
2426 &dev_attr_optrom_bios_version,
2427 &dev_attr_optrom_efi_version,
2428 &dev_attr_optrom_fcode_version,
2429 &dev_attr_optrom_fw_version,
2430 &dev_attr_84xx_fw_version,
2431 &dev_attr_total_isp_aborts,
2432 &dev_attr_serdes_version,
2433 &dev_attr_mpi_version,
2434 &dev_attr_phy_version,
2435 &dev_attr_flash_block_size,
2436 &dev_attr_vlan_id,
2437 &dev_attr_vn_port_mac_address,
2438 &dev_attr_fabric_param,
2439 &dev_attr_fw_state,
2440 &dev_attr_optrom_gold_fw_version,
2441 &dev_attr_thermal_temp,
2442 &dev_attr_diag_requests,
2443 &dev_attr_diag_megabytes,
2444 &dev_attr_fw_dump_size,
2445 &dev_attr_allow_cna_fw_dump,
2446 &dev_attr_pep_version,
2447 &dev_attr_min_supported_speed,
2448 &dev_attr_max_supported_speed,
2449 &dev_attr_zio_threshold,
2450 &dev_attr_dif_bundle_statistics,
2451 &dev_attr_port_speed,
2452 &dev_attr_port_no,
2453 &dev_attr_fw_attr,
2454 &dev_attr_dport_diagnostics,
2455 NULL, /* reserve for qlini_mode */
2456 NULL, /* reserve for ql2xiniexchg */
2457 NULL, /* reserve for ql2xexchoffld */
2458 NULL,
2459 };
2460
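/*
 * Fill the NULL slots reserved at the end of qla2x00_host_attrs[] with
 * the target-mode attributes (qlini_mode, ql2xiniexchg, ql2xexchoffld).
 */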
2461 void qla_insert_tgt_attrs(void)
2462 {
2463 struct device_attribute **attr;
2464
2465 /* advance to empty slot */
2466 for (attr = &qla2x00_host_attrs[0]; *attr; ++attr)
2467 continue;
2468
2469 *attr = &dev_attr_qlini_mode;
2470 attr++;
2471 *attr = &dev_attr_ql2xiniexchg;
2472 attr++;
2473 *attr = &dev_attr_ql2xexchoffld;
2474 }
2475
2476 /* Host attributes. */
2477
2478 static void
2479 qla2x00_get_host_port_id(struct Scsi_Host *shost)
2480 {
2481 scsi_qla_host_t *vha = shost_priv(shost);
2482
2483 fc_host_port_id(shost) = vha->d_id.b.domain << 16 |
2484 vha->d_id.b.area << 8 | vha->d_id.b.al_pa;
2485 }
2486
2487 static void
2488 qla2x00_get_host_speed(struct Scsi_Host *shost)
2489 {
2490 scsi_qla_host_t *vha = shost_priv(shost);
2491 u32 speed;
2492
2493 if (IS_QLAFX00(vha->hw)) {
2494 qlafx00_get_host_speed(shost);
2495 return;
2496 }
2497
2498 switch (vha->hw->link_data_rate) {
2499 case PORT_SPEED_1GB:
2500 speed = FC_PORTSPEED_1GBIT;
2501 break;
2502 case PORT_SPEED_2GB:
2503 speed = FC_PORTSPEED_2GBIT;
2504 break;
2505 case PORT_SPEED_4GB:
2506 speed = FC_PORTSPEED_4GBIT;
2507 break;
2508 case PORT_SPEED_8GB:
2509 speed = FC_PORTSPEED_8GBIT;
2510 break;
2511 case PORT_SPEED_10GB:
2512 speed = FC_PORTSPEED_10GBIT;
2513 break;
2514 case PORT_SPEED_16GB:
2515 speed = FC_PORTSPEED_16GBIT;
2516 break;
2517 case PORT_SPEED_32GB:
2518 speed = FC_PORTSPEED_32GBIT;
2519 break;
2520 case PORT_SPEED_64GB:
2521 speed = FC_PORTSPEED_64GBIT;
2522 break;
2523 default:
2524 speed = FC_PORTSPEED_UNKNOWN;
2525 break;
2526 }
2527
2528 fc_host_speed(shost) = speed;
2529 }
2530
2531 static void
2532 qla2x00_get_host_port_type(struct Scsi_Host *shost)
2533 {
2534 scsi_qla_host_t *vha = shost_priv(shost);
2535 uint32_t port_type;
2536
2537 if (vha->vp_idx) {
2538 fc_host_port_type(shost) = FC_PORTTYPE_NPIV;
2539 return;
2540 }
2541 switch (vha->hw->current_topology) {
2542 case ISP_CFG_NL:
2543 port_type = FC_PORTTYPE_LPORT;
2544 break;
2545 case ISP_CFG_FL:
2546 port_type = FC_PORTTYPE_NLPORT;
2547 break;
2548 case ISP_CFG_N:
2549 port_type = FC_PORTTYPE_PTP;
2550 break;
2551 case ISP_CFG_F:
2552 port_type = FC_PORTTYPE_NPORT;
2553 break;
2554 default:
2555 port_type = FC_PORTTYPE_UNKNOWN;
2556 break;
2557 }
2558
2559 fc_host_port_type(shost) = port_type;
2560 }
2561
2562 static void
2563 qla2x00_get_starget_node_name(struct scsi_target *starget)
2564 {
2565 struct Scsi_Host *host = dev_to_shost(starget->dev.parent);
2566 scsi_qla_host_t *vha = shost_priv(host);
2567 fc_port_t *fcport;
2568 u64 node_name = 0;
2569
2570 list_for_each_entry(fcport, &vha->vp_fcports, list) {
2571 if (fcport->rport &&
2572 starget->id == fcport->rport->scsi_target_id) {
2573 node_name = wwn_to_u64(fcport->node_name);
2574 break;
2575 }
2576 }
2577
2578 fc_starget_node_name(starget) = node_name;
2579 }
2580
2581 static void
2582 qla2x00_get_starget_port_name(struct scsi_target *starget)
2583 {
2584 struct Scsi_Host *host = dev_to_shost(starget->dev.parent);
2585 scsi_qla_host_t *vha = shost_priv(host);
2586 fc_port_t *fcport;
2587 u64 port_name = 0;
2588
2589 list_for_each_entry(fcport, &vha->vp_fcports, list) {
2590 if (fcport->rport &&
2591 starget->id == fcport->rport->scsi_target_id) {
2592 port_name = wwn_to_u64(fcport->port_name);
2593 break;
2594 }
2595 }
2596
2597 fc_starget_port_name(starget) = port_name;
2598 }
2599
2600 static void
2601 qla2x00_get_starget_port_id(struct scsi_target *starget)
2602 {
2603 struct Scsi_Host *host = dev_to_shost(starget->dev.parent);
2604 scsi_qla_host_t *vha = shost_priv(host);
2605 fc_port_t *fcport;
2606 uint32_t port_id = ~0U;
2607
2608 list_for_each_entry(fcport, &vha->vp_fcports, list) {
2609 if (fcport->rport &&
2610 starget->id == fcport->rport->scsi_target_id) {
2611 port_id = fcport->d_id.b.domain << 16 |
2612 fcport->d_id.b.area << 8 | fcport->d_id.b.al_pa;
2613 break;
2614 }
2615 }
2616
2617 fc_starget_port_id(starget) = port_id;
2618 }
2619
2620 static inline void
2621 qla2x00_set_rport_loss_tmo(struct fc_rport *rport, uint32_t timeout)
2622 {
2623 rport->dev_loss_tmo = timeout ? timeout : 1;
2624 }
2625
2626 static void
2627 qla2x00_dev_loss_tmo_callbk(struct fc_rport *rport)
2628 {
2629 struct Scsi_Host *host = rport_to_shost(rport);
2630 fc_port_t *fcport = *(fc_port_t **)rport->dd_data;
2631 unsigned long flags;
2632
2633 if (!fcport)
2634 return;
2635
2636 /* Now that the rport has been deleted, set the fcport state to
2637 * FCS_DEVICE_DEAD. */
2638 qla2x00_set_fcport_state(fcport, FCS_DEVICE_DEAD);
2639
2640 /*
2641 * Transport has effectively 'deleted' the rport, clear
2642 * all local references.
2643 */
2644 spin_lock_irqsave(host->host_lock, flags);
2645 fcport->rport = fcport->drport = NULL;
2646 *((fc_port_t **)rport->dd_data) = NULL;
2647 spin_unlock_irqrestore(host->host_lock, flags);
2648
2649 if (test_bit(ABORT_ISP_ACTIVE, &fcport->vha->dpc_flags))
2650 return;
2651
2652 if (unlikely(pci_channel_offline(fcport->vha->hw->pdev))) {
2653 qla2x00_abort_all_cmds(fcport->vha, DID_NO_CONNECT << 16);
2654 return;
2655 }
2656 }
2657
2658 static void
2659 qla2x00_terminate_rport_io(struct fc_rport *rport)
2660 {
2661 fc_port_t *fcport = *(fc_port_t **)rport->dd_data;
2662
2663 if (!fcport)
2664 return;
2665
2666 if (test_bit(UNLOADING, &fcport->vha->dpc_flags))
2667 return;
2668
2669 if (test_bit(ABORT_ISP_ACTIVE, &fcport->vha->dpc_flags))
2670 return;
2671
2672 if (unlikely(pci_channel_offline(fcport->vha->hw->pdev))) {
2673 qla2x00_abort_all_cmds(fcport->vha, DID_NO_CONNECT << 16);
2674 return;
2675 }
2676 /*
2677 * At this point all of the fcport's software states are cleared. Perform any
2678 * final cleanup of firmware resources (PCBs and XCBs).
2679 */
2680 if (fcport->loop_id != FC_NO_LOOP_ID) {
2681 if (IS_FWI2_CAPABLE(fcport->vha->hw))
2682 fcport->vha->hw->isp_ops->fabric_logout(fcport->vha,
2683 fcport->loop_id, fcport->d_id.b.domain,
2684 fcport->d_id.b.area, fcport->d_id.b.al_pa);
2685 else
2686 qla2x00_port_logout(fcport->vha, fcport);
2687 }
2688 }
2689
2690 static int
2691 qla2x00_issue_lip(struct Scsi_Host *shost)
2692 {
2693 scsi_qla_host_t *vha = shost_priv(shost);
2694
2695 if (IS_QLAFX00(vha->hw))
2696 return 0;
2697
2698 qla2x00_loop_reset(vha);
2699 return 0;
2700 }
2701
2702 static struct fc_host_statistics *
2703 qla2x00_get_fc_host_stats(struct Scsi_Host *shost)
2704 {
2705 scsi_qla_host_t *vha = shost_priv(shost);
2706 struct qla_hw_data *ha = vha->hw;
2707 struct scsi_qla_host *base_vha = pci_get_drvdata(ha->pdev);
2708 int rval;
2709 struct link_statistics *stats;
2710 dma_addr_t stats_dma;
2711 struct fc_host_statistics *p = &vha->fc_host_stat;
2712
2713 memset(p, -1, sizeof(*p));
2714
2715 if (IS_QLAFX00(vha->hw))
2716 goto done;
2717
2718 if (test_bit(UNLOADING, &vha->dpc_flags))
2719 goto done;
2720
2721 if (unlikely(pci_channel_offline(ha->pdev)))
2722 goto done;
2723
2724 if (qla2x00_chip_is_down(vha))
2725 goto done;
2726
2727 stats = dma_alloc_coherent(&ha->pdev->dev, sizeof(*stats), &stats_dma,
2728 GFP_KERNEL);
2729 if (!stats) {
2730 ql_log(ql_log_warn, vha, 0x707d,
2731 "Failed to allocate memory for stats.\n");
2732 goto done;
2733 }
2734
2735 rval = QLA_FUNCTION_FAILED;
2736 if (IS_FWI2_CAPABLE(ha)) {
2737 rval = qla24xx_get_isp_stats(base_vha, stats, stats_dma, 0);
2738 } else if (atomic_read(&base_vha->loop_state) == LOOP_READY &&
2739 !ha->dpc_active) {
2740 /* Must be in a 'READY' state for statistics retrieval. */
2741 rval = qla2x00_get_link_status(base_vha, base_vha->loop_id,
2742 stats, stats_dma);
2743 }
2744
2745 if (rval != QLA_SUCCESS)
2746 goto done_free;
2747
2748 p->link_failure_count = le32_to_cpu(stats->link_fail_cnt);
2749 p->loss_of_sync_count = le32_to_cpu(stats->loss_sync_cnt);
2750 p->loss_of_signal_count = le32_to_cpu(stats->loss_sig_cnt);
2751 p->prim_seq_protocol_err_count = le32_to_cpu(stats->prim_seq_err_cnt);
2752 p->invalid_tx_word_count = le32_to_cpu(stats->inval_xmit_word_cnt);
2753 p->invalid_crc_count = le32_to_cpu(stats->inval_crc_cnt);
2754 if (IS_FWI2_CAPABLE(ha)) {
2755 p->lip_count = le32_to_cpu(stats->lip_cnt);
2756 p->tx_frames = le32_to_cpu(stats->tx_frames);
2757 p->rx_frames = le32_to_cpu(stats->rx_frames);
2758 p->dumped_frames = le32_to_cpu(stats->discarded_frames);
2759 p->nos_count = le32_to_cpu(stats->nos_rcvd);
2760 p->error_frames =
2761 le32_to_cpu(stats->dropped_frames) +
2762 le32_to_cpu(stats->discarded_frames);
2763 if (IS_QLA83XX(ha) || IS_QLA27XX(ha) || IS_QLA28XX(ha)) {
2764 p->rx_words = le64_to_cpu(stats->fpm_recv_word_cnt);
2765 p->tx_words = le64_to_cpu(stats->fpm_xmit_word_cnt);
2766 } else {
2767 p->rx_words = vha->qla_stats.input_bytes;
2768 p->tx_words = vha->qla_stats.output_bytes;
2769 }
2770 }
2771 p->fcp_control_requests = vha->qla_stats.control_requests;
2772 p->fcp_input_requests = vha->qla_stats.input_requests;
2773 p->fcp_output_requests = vha->qla_stats.output_requests;
2774 p->fcp_input_megabytes = vha->qla_stats.input_bytes >> 20;
2775 p->fcp_output_megabytes = vha->qla_stats.output_bytes >> 20;
2776 p->seconds_since_last_reset =
2777 get_jiffies_64() - vha->qla_stats.jiffies_at_last_reset;
2778 do_div(p->seconds_since_last_reset, HZ);
2779
2780 done_free:
2781 dma_free_coherent(&ha->pdev->dev, sizeof(struct link_statistics),
2782 stats, stats_dma);
2783 done:
2784 return p;
2785 }
2786
2787 static void
2788 qla2x00_reset_host_stats(struct Scsi_Host *shost)
2789 {
2790 scsi_qla_host_t *vha = shost_priv(shost);
2791 struct qla_hw_data *ha = vha->hw;
2792 struct scsi_qla_host *base_vha = pci_get_drvdata(ha->pdev);
2793 struct link_statistics *stats;
2794 dma_addr_t stats_dma;
2795
2796 memset(&vha->qla_stats, 0, sizeof(vha->qla_stats));
2797 memset(&vha->fc_host_stat, 0, sizeof(vha->fc_host_stat));
2798
2799 vha->qla_stats.jiffies_at_last_reset = get_jiffies_64();
2800
2801 if (IS_FWI2_CAPABLE(ha)) {
2802 stats = dma_alloc_coherent(&ha->pdev->dev,
2803 sizeof(*stats), &stats_dma, GFP_KERNEL);
2804 if (!stats) {
2805 ql_log(ql_log_warn, vha, 0x70d7,
2806 "Failed to allocate memory for stats.\n");
2807 return;
2808 }
2809
2810 /* reset firmware statistics */
2811 qla24xx_get_isp_stats(base_vha, stats, stats_dma, BIT_0);
2812
2813 dma_free_coherent(&ha->pdev->dev, sizeof(*stats),
2814 stats, stats_dma);
2815 }
2816 }
2817
2818 static void
2819 qla2x00_get_host_symbolic_name(struct Scsi_Host *shost)
2820 {
2821 scsi_qla_host_t *vha = shost_priv(shost);
2822
2823 qla2x00_get_sym_node_name(vha, fc_host_symbolic_name(shost),
2824 sizeof(fc_host_symbolic_name(shost)));
2825 }
2826
2827 static void
2828 qla2x00_set_host_system_hostname(struct Scsi_Host *shost)
2829 {
2830 scsi_qla_host_t *vha = shost_priv(shost);
2831
2832 set_bit(REGISTER_FDMI_NEEDED, &vha->dpc_flags);
2833 }
2834
2835 static void
2836 qla2x00_get_host_fabric_name(struct Scsi_Host *shost)
2837 {
2838 scsi_qla_host_t *vha = shost_priv(shost);
2839 static const uint8_t node_name[WWN_SIZE] = {
2840 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF
2841 };
2842 u64 fabric_name = wwn_to_u64(node_name);
2843
2844 if (vha->device_flags & SWITCH_FOUND)
2845 fabric_name = wwn_to_u64(vha->fabric_node_name);
2846
2847 fc_host_fabric_name(shost) = fabric_name;
2848 }
2849
2850 static void
2851 qla2x00_get_host_port_state(struct Scsi_Host *shost)
2852 {
2853 scsi_qla_host_t *vha = shost_priv(shost);
2854 struct scsi_qla_host *base_vha = pci_get_drvdata(vha->hw->pdev);
2855
2856 if (!base_vha->flags.online) {
2857 fc_host_port_state(shost) = FC_PORTSTATE_OFFLINE;
2858 return;
2859 }
2860
2861 switch (atomic_read(&base_vha->loop_state)) {
2862 case LOOP_UPDATE:
2863 fc_host_port_state(shost) = FC_PORTSTATE_DIAGNOSTICS;
2864 break;
2865 case LOOP_DOWN:
2866 if (test_bit(LOOP_RESYNC_NEEDED, &base_vha->dpc_flags))
2867 fc_host_port_state(shost) = FC_PORTSTATE_DIAGNOSTICS;
2868 else
2869 fc_host_port_state(shost) = FC_PORTSTATE_LINKDOWN;
2870 break;
2871 case LOOP_DEAD:
2872 fc_host_port_state(shost) = FC_PORTSTATE_LINKDOWN;
2873 break;
2874 case LOOP_READY:
2875 fc_host_port_state(shost) = FC_PORTSTATE_ONLINE;
2876 break;
2877 default:
2878 fc_host_port_state(shost) = FC_PORTSTATE_UNKNOWN;
2879 break;
2880 }
2881 }
2882
2883 static int
2884 qla24xx_vport_create(struct fc_vport *fc_vport, bool disable)
2885 {
2886 int ret = 0;
2887 uint8_t qos = 0;
2888 scsi_qla_host_t *base_vha = shost_priv(fc_vport->shost);
2889 scsi_qla_host_t *vha = NULL;
2890 struct qla_hw_data *ha = base_vha->hw;
2891 int cnt;
2892 struct req_que *req = ha->req_q_map[0];
2893 struct qla_qpair *qpair;
2894
2895 ret = qla24xx_vport_create_req_sanity_check(fc_vport);
2896 if (ret) {
2897 ql_log(ql_log_warn, vha, 0x707e,
2898 "Vport sanity check failed, status %x\n", ret);
2899 return (ret);
2900 }
2901
2902 vha = qla24xx_create_vhost(fc_vport);
2903 if (vha == NULL) {
2904 ql_log(ql_log_warn, vha, 0x707f, "Vport create host failed.\n");
2905 return FC_VPORT_FAILED;
2906 }
2907 if (disable) {
2908 atomic_set(&vha->vp_state, VP_OFFLINE);
2909 fc_vport_set_state(fc_vport, FC_VPORT_DISABLED);
2910 } else
2911 atomic_set(&vha->vp_state, VP_FAILED);
2912
2913 /* ready to create vport */
2914 ql_log(ql_log_info, vha, 0x7080,
2915 "VP entry id %d assigned.\n", vha->vp_idx);
2916
2917 /* initialize vport states */
2918 atomic_set(&vha->loop_state, LOOP_DOWN);
2919 vha->vp_err_state = VP_ERR_PORTDWN;
2920 vha->vp_prev_err_state = VP_ERR_UNKWN;
2921 /* Check if physical ha port is Up */
2922 if (atomic_read(&base_vha->loop_state) == LOOP_DOWN ||
2923 atomic_read(&base_vha->loop_state) == LOOP_DEAD) {
2924 /* Don't retry or attempt login of this virtual port */
2925 ql_dbg(ql_dbg_user, vha, 0x7081,
2926 "Vport loop state is not UP.\n");
2927 atomic_set(&vha->loop_state, LOOP_DEAD);
2928 if (!disable)
2929 fc_vport_set_state(fc_vport, FC_VPORT_LINKDOWN);
2930 }
2931
2932 if (IS_T10_PI_CAPABLE(ha) && ql2xenabledif) {
2933 if (ha->fw_attributes & BIT_4) {
2934 int prot = 0, guard;
2935
2936 vha->flags.difdix_supported = 1;
2937 ql_dbg(ql_dbg_user, vha, 0x7082,
2938 "Registered for DIF/DIX type 1 and 3 protection.\n");
2939 if (ql2xenabledif == 1)
2940 prot = SHOST_DIX_TYPE0_PROTECTION;
2941 scsi_host_set_prot(vha->host,
2942 prot | SHOST_DIF_TYPE1_PROTECTION
2943 | SHOST_DIF_TYPE2_PROTECTION
2944 | SHOST_DIF_TYPE3_PROTECTION
2945 | SHOST_DIX_TYPE1_PROTECTION
2946 | SHOST_DIX_TYPE2_PROTECTION
2947 | SHOST_DIX_TYPE3_PROTECTION);
2948
2949 guard = SHOST_DIX_GUARD_CRC;
2950
2951 if (IS_PI_IPGUARD_CAPABLE(ha) &&
2952 (ql2xenabledif > 1 || IS_PI_DIFB_DIX0_CAPABLE(ha)))
2953 guard |= SHOST_DIX_GUARD_IP;
2954
2955 scsi_host_set_guard(vha->host, guard);
2956 } else
2957 vha->flags.difdix_supported = 0;
2958 }
2959
2960 if (scsi_add_host_with_dma(vha->host, &fc_vport->dev,
2961 &ha->pdev->dev)) {
2962 ql_dbg(ql_dbg_user, vha, 0x7083,
2963 "scsi_add_host failure for VP[%d].\n", vha->vp_idx);
2964 goto vport_create_failed_2;
2965 }
2966
2967 /* initialize attributes */
2968 fc_host_dev_loss_tmo(vha->host) = ha->port_down_retry_count;
2969 fc_host_node_name(vha->host) = wwn_to_u64(vha->node_name);
2970 fc_host_port_name(vha->host) = wwn_to_u64(vha->port_name);
2971 fc_host_supported_classes(vha->host) =
2972 fc_host_supported_classes(base_vha->host);
2973 fc_host_supported_speeds(vha->host) =
2974 fc_host_supported_speeds(base_vha->host);
2975
2976 qlt_vport_create(vha, ha);
2977 qla24xx_vport_disable(fc_vport, disable);
2978
2979 if (!ql2xmqsupport || !ha->npiv_info)
2980 goto vport_queue;
2981
2982 /* Create a request queue in QoS mode for the vport */
2983 for (cnt = 0; cnt < ha->nvram_npiv_size; cnt++) {
2984 if (memcmp(ha->npiv_info[cnt].port_name, vha->port_name, 8) == 0
2985 && memcmp(ha->npiv_info[cnt].node_name, vha->node_name,
2986 8) == 0) {
2987 qos = ha->npiv_info[cnt].q_qos;
2988 break;
2989 }
2990 }
2991
2992 if (qos) {
2993 qpair = qla2xxx_create_qpair(vha, qos, vha->vp_idx, true);
2994 if (!qpair)
2995 ql_log(ql_log_warn, vha, 0x7084,
2996 "Can't create qpair for VP[%d]\n",
2997 vha->vp_idx);
2998 else {
2999 ql_dbg(ql_dbg_multiq, vha, 0xc001,
3000 "Queue pair: %d Qos: %d) created for VP[%d]\n",
3001 qpair->id, qos, vha->vp_idx);
3002 ql_dbg(ql_dbg_user, vha, 0x7085,
3003 "Queue Pair: %d Qos: %d) created for VP[%d]\n",
3004 qpair->id, qos, vha->vp_idx);
3005 req = qpair->req;
3006 vha->qpair = qpair;
3007 }
3008 }
3009
3010 vport_queue:
3011 vha->req = req;
3012 return 0;
3013
3014 vport_create_failed_2:
3015 qla24xx_disable_vp(vha);
3016 qla24xx_deallocate_vp_id(vha);
3017 scsi_host_put(vha->host);
3018 return FC_VPORT_FAILED;
3019 }
3020
3021 static int
3022 qla24xx_vport_delete(struct fc_vport *fc_vport)
3023 {
3024 scsi_qla_host_t *vha = fc_vport->dd_data;
3025 struct qla_hw_data *ha = vha->hw;
3026 uint16_t id = vha->vp_idx;
3027
3028 set_bit(VPORT_DELETE, &vha->dpc_flags);
3029
3030 while (test_bit(LOOP_RESYNC_ACTIVE, &vha->dpc_flags) ||
3031 test_bit(FCPORT_UPDATE_NEEDED, &vha->dpc_flags))
3032 msleep(1000);
3033
3034 qla_nvme_delete(vha);
3035
3036 qla24xx_disable_vp(vha);
3037 qla2x00_wait_for_sess_deletion(vha);
3038
3039 vha->flags.delete_progress = 1;
3040
3041 qlt_remove_target(ha, vha);
3042
3043 fc_remove_host(vha->host);
3044
3045 scsi_remove_host(vha->host);
3046
3047 /* Allow the timer to run and drain queued items when removing the vp */
3048 qla24xx_deallocate_vp_id(vha);
3049
3050 if (vha->timer_active) {
3051 qla2x00_vp_stop_timer(vha);
3052 ql_dbg(ql_dbg_user, vha, 0x7086,
3053 "Timer for the VP[%d] has stopped\n", vha->vp_idx);
3054 }
3055
3056 qla2x00_free_fcports(vha);
3057
3058 mutex_lock(&ha->vport_lock);
3059 ha->cur_vport_count--;
3060 clear_bit(vha->vp_idx, ha->vp_idx_map);
3061 mutex_unlock(&ha->vport_lock);
3062
3063 dma_free_coherent(&ha->pdev->dev, vha->gnl.size, vha->gnl.l,
3064 vha->gnl.ldma);
3065
3066 vha->gnl.l = NULL;
3067
3068 vfree(vha->scan.l);
3069
3070 if (vha->qpair && vha->qpair->vp_idx == vha->vp_idx) {
3071 if (qla2xxx_delete_qpair(vha, vha->qpair) != QLA_SUCCESS)
3072 ql_log(ql_log_warn, vha, 0x7087,
3073 "Queue Pair delete failed.\n");
3074 }
3075
3076 ql_log(ql_log_info, vha, 0x7088, "VP[%d] deleted.\n", id);
3077 scsi_host_put(vha->host);
3078 return 0;
3079 }
3080
3081 static int
3082 qla24xx_vport_disable(struct fc_vport *fc_vport, bool disable)
3083 {
3084 scsi_qla_host_t *vha = fc_vport->dd_data;
3085
3086 if (disable)
3087 qla24xx_disable_vp(vha);
3088 else
3089 qla24xx_enable_vp(vha);
3090
3091 return 0;
3092 }
3093
3094 struct fc_function_template qla2xxx_transport_functions = {
3095
3096 .show_host_node_name = 1,
3097 .show_host_port_name = 1,
3098 .show_host_supported_classes = 1,
3099 .show_host_supported_speeds = 1,
3100
3101 .get_host_port_id = qla2x00_get_host_port_id,
3102 .show_host_port_id = 1,
3103 .get_host_speed = qla2x00_get_host_speed,
3104 .show_host_speed = 1,
3105 .get_host_port_type = qla2x00_get_host_port_type,
3106 .show_host_port_type = 1,
3107 .get_host_symbolic_name = qla2x00_get_host_symbolic_name,
3108 .show_host_symbolic_name = 1,
3109 .set_host_system_hostname = qla2x00_set_host_system_hostname,
3110 .show_host_system_hostname = 1,
3111 .get_host_fabric_name = qla2x00_get_host_fabric_name,
3112 .show_host_fabric_name = 1,
3113 .get_host_port_state = qla2x00_get_host_port_state,
3114 .show_host_port_state = 1,
3115
3116 .dd_fcrport_size = sizeof(struct fc_port *),
3117 .show_rport_supported_classes = 1,
3118
3119 .get_starget_node_name = qla2x00_get_starget_node_name,
3120 .show_starget_node_name = 1,
3121 .get_starget_port_name = qla2x00_get_starget_port_name,
3122 .show_starget_port_name = 1,
3123 .get_starget_port_id = qla2x00_get_starget_port_id,
3124 .show_starget_port_id = 1,
3125
3126 .set_rport_dev_loss_tmo = qla2x00_set_rport_loss_tmo,
3127 .show_rport_dev_loss_tmo = 1,
3128
3129 .issue_fc_host_lip = qla2x00_issue_lip,
3130 .dev_loss_tmo_callbk = qla2x00_dev_loss_tmo_callbk,
3131 .terminate_rport_io = qla2x00_terminate_rport_io,
3132 .get_fc_host_stats = qla2x00_get_fc_host_stats,
3133 .reset_fc_host_stats = qla2x00_reset_host_stats,
3134
3135 .vport_create = qla24xx_vport_create,
3136 .vport_disable = qla24xx_vport_disable,
3137 .vport_delete = qla24xx_vport_delete,
3138 .bsg_request = qla24xx_bsg_request,
3139 .bsg_timeout = qla24xx_bsg_timeout,
3140 };
3141
3142 struct fc_function_template qla2xxx_transport_vport_functions = {
3143
3144 .show_host_node_name = 1,
3145 .show_host_port_name = 1,
3146 .show_host_supported_classes = 1,
3147
3148 .get_host_port_id = qla2x00_get_host_port_id,
3149 .show_host_port_id = 1,
3150 .get_host_speed = qla2x00_get_host_speed,
3151 .show_host_speed = 1,
3152 .get_host_port_type = qla2x00_get_host_port_type,
3153 .show_host_port_type = 1,
3154 .get_host_symbolic_name = qla2x00_get_host_symbolic_name,
3155 .show_host_symbolic_name = 1,
3156 .set_host_system_hostname = qla2x00_set_host_system_hostname,
3157 .show_host_system_hostname = 1,
3158 .get_host_fabric_name = qla2x00_get_host_fabric_name,
3159 .show_host_fabric_name = 1,
3160 .get_host_port_state = qla2x00_get_host_port_state,
3161 .show_host_port_state = 1,
3162
3163 .dd_fcrport_size = sizeof(struct fc_port *),
3164 .show_rport_supported_classes = 1,
3165
3166 .get_starget_node_name = qla2x00_get_starget_node_name,
3167 .show_starget_node_name = 1,
3168 .get_starget_port_name = qla2x00_get_starget_port_name,
3169 .show_starget_port_name = 1,
3170 .get_starget_port_id = qla2x00_get_starget_port_id,
3171 .show_starget_port_id = 1,
3172
3173 .set_rport_dev_loss_tmo = qla2x00_set_rport_loss_tmo,
3174 .show_rport_dev_loss_tmo = 1,
3175
3176 .issue_fc_host_lip = qla2x00_issue_lip,
3177 .dev_loss_tmo_callbk = qla2x00_dev_loss_tmo_callbk,
3178 .terminate_rport_io = qla2x00_terminate_rport_io,
3179 .get_fc_host_stats = qla2x00_get_fc_host_stats,
3180 .reset_fc_host_stats = qla2x00_reset_host_stats,
3181
3182 .bsg_request = qla24xx_bsg_request,
3183 .bsg_timeout = qla24xx_bsg_timeout,
3184 };
3185
3186 void
3187 qla2x00_init_host_attr(scsi_qla_host_t *vha)
3188 {
3189 struct qla_hw_data *ha = vha->hw;
3190 u32 speeds = FC_PORTSPEED_UNKNOWN;
3191
3192 fc_host_dev_loss_tmo(vha->host) = ha->port_down_retry_count;
3193 fc_host_node_name(vha->host) = wwn_to_u64(vha->node_name);
3194 fc_host_port_name(vha->host) = wwn_to_u64(vha->port_name);
3195 fc_host_supported_classes(vha->host) = ha->base_qpair->enable_class_2 ?
3196 (FC_COS_CLASS2|FC_COS_CLASS3) : FC_COS_CLASS3;
3197 fc_host_max_npiv_vports(vha->host) = ha->max_npiv_vports;
3198 fc_host_npiv_vports_inuse(vha->host) = ha->cur_vport_count;
3199
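/*
 * Map the firmware-reported speed capability to FC transport flags. The
 * checks below treat max_supported_speed as an index (0=16Gb, 1=32Gb,
 * 2=64Gb) and min_supported_speed as 2=4Gb through 6=64Gb.
 */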
3200 if (IS_CNA_CAPABLE(ha))
3201 speeds = FC_PORTSPEED_10GBIT;
3202 else if (IS_QLA28XX(ha) || IS_QLA27XX(ha)) {
3203 if (ha->max_supported_speed == 2) {
3204 if (ha->min_supported_speed <= 6)
3205 speeds |= FC_PORTSPEED_64GBIT;
3206 }
3207 if (ha->max_supported_speed == 2 ||
3208 ha->max_supported_speed == 1) {
3209 if (ha->min_supported_speed <= 5)
3210 speeds |= FC_PORTSPEED_32GBIT;
3211 }
3212 if (ha->max_supported_speed == 2 ||
3213 ha->max_supported_speed == 1 ||
3214 ha->max_supported_speed == 0) {
3215 if (ha->min_supported_speed <= 4)
3216 speeds |= FC_PORTSPEED_16GBIT;
3217 }
3218 if (ha->max_supported_speed == 1 ||
3219 ha->max_supported_speed == 0) {
3220 if (ha->min_supported_speed <= 3)
3221 speeds |= FC_PORTSPEED_8GBIT;
3222 }
3223 if (ha->max_supported_speed == 0) {
3224 if (ha->min_supported_speed <= 2)
3225 speeds |= FC_PORTSPEED_4GBIT;
3226 }
3227 } else if (IS_QLA2031(ha))
3228 speeds = FC_PORTSPEED_16GBIT|FC_PORTSPEED_8GBIT|
3229 FC_PORTSPEED_4GBIT;
3230 else if (IS_QLA25XX(ha) || IS_QLAFX00(ha))
3231 speeds = FC_PORTSPEED_8GBIT|FC_PORTSPEED_4GBIT|
3232 FC_PORTSPEED_2GBIT|FC_PORTSPEED_1GBIT;
3233 else if (IS_QLA24XX_TYPE(ha))
3234 speeds = FC_PORTSPEED_4GBIT|FC_PORTSPEED_2GBIT|
3235 FC_PORTSPEED_1GBIT;
3236 else if (IS_QLA23XX(ha))
3237 speeds = FC_PORTSPEED_2GBIT|FC_PORTSPEED_1GBIT;
3238 else
3239 speeds = FC_PORTSPEED_1GBIT;
3240
3241 fc_host_supported_speeds(vha->host) = speeds;
3242 }