// SPDX-License-Identifier: GPL-2.0-or-later
/*
 * libata-eh.c - libata error handling
 *
 * Copyright 2006 Tejun Heo <htejun@gmail.com>
 *
 * libata documentation is available via 'make {ps|pdf}docs',
 * as Documentation/driver-api/libata.rst
 *
 * Hardware documentation available from http://www.t13.org/ and
 * http://www.sata-io.org/
 */

#include <linux/kernel.h>
#include <linux/blkdev.h>
#include <linux/export.h>
#include <linux/pci.h>
#include <scsi/scsi.h>
#include <scsi/scsi_host.h>
#include <scsi/scsi_eh.h>
#include <scsi/scsi_device.h>
#include <scsi/scsi_cmnd.h>
#include <scsi/scsi_dbg.h>
#include "../scsi/scsi_transport_api.h"

#include <linux/libata.h>

#include <trace/events/libata.h>
enum {
	/* speed down verdicts */
	ATA_EH_SPDN_NCQ_OFF		= (1 << 0),
	ATA_EH_SPDN_SPEED_DOWN		= (1 << 1),
	ATA_EH_SPDN_FALLBACK_TO_PIO	= (1 << 2),
	ATA_EH_SPDN_KEEP_ERRORS		= (1 << 3),

	/* error flags */
	ATA_EFLAG_IS_IO			= (1 << 0),
	ATA_EFLAG_DUBIOUS_XFER		= (1 << 1),
	ATA_EFLAG_OLD_ER		= (1 << 31),

	/* error categories */
	ATA_ECAT_NONE			= 0,
	ATA_ECAT_ATA_BUS		= 1,
	ATA_ECAT_TOUT_HSM		= 2,
	ATA_ECAT_UNK_DEV		= 3,
	ATA_ECAT_DUBIOUS_NONE		= 4,
	ATA_ECAT_DUBIOUS_ATA_BUS	= 5,
	ATA_ECAT_DUBIOUS_TOUT_HSM	= 6,
	ATA_ECAT_DUBIOUS_UNK_DEV	= 7,
	ATA_ECAT_NR			= 8,

	ATA_EH_CMD_DFL_TIMEOUT		= 5000,

	/* always put at least this amount of time between resets */
	ATA_EH_RESET_COOL_DOWN		= 5000,

	/* Waiting in ->prereset can never be reliable.  It's
	 * sometimes nice to wait there but it can't be depended upon;
	 * otherwise, we wouldn't be resetting.  Just give it enough
	 * time for most drives to spin up.
	 */
	ATA_EH_PRERESET_TIMEOUT		= 10000,
	ATA_EH_FASTDRAIN_INTERVAL	= 3000,

	/* probe speed down parameters, see ata_eh_schedule_probe() */
	ATA_EH_PROBE_TRIAL_INTERVAL	= 60000,	/* 1 min */
	ATA_EH_PROBE_TRIALS		= 2,
};
/* The following table determines how we sequence resets.  Each entry
 * represents timeout for that try.  The first try can be soft or
 * hardreset.  All others are hardreset if available.  In most cases
 * the first reset w/ 10sec timeout should succeed.  Following entries
 * are mostly for error handling, hotplug and those outlier devices that
 * take an exceptionally long time to recover from reset.
 */
static const unsigned long ata_eh_reset_timeouts[] = {
	10000,	/* most drives spin up by 10sec */
	10000,	/* > 99% working drives spin up before 20sec */
	35000,	/* give > 30 secs of idleness for outlier devices */
	 5000,	/* and sweet one last chance */
	ULONG_MAX, /* > 1 min has elapsed, give up */
};

static const unsigned int ata_eh_identify_timeouts[] = {
	 5000,	/* covers > 99% of successes and not too boring on failures */
	10000,	/* combined time till here is enough even for media access */
	30000,	/* for true idiots */
	UINT_MAX,
};

static const unsigned int ata_eh_revalidate_timeouts[] = {
	15000,	/* Some drives are slow to read log pages when waking-up */
	15000,	/* combined time till here is enough even for media access */
	UINT_MAX,
};

static const unsigned int ata_eh_flush_timeouts[] = {
	15000,	/* be generous with flush */
	15000,	/* ditto */
	30000,	/* and even more generous */
	UINT_MAX,
};

static const unsigned int ata_eh_other_timeouts[] = {
	 5000,	/* same rationale as identify timeout */
	10000,	/* ditto */
	/* but no merciful 30sec for other commands, it just isn't worth it */
	UINT_MAX,
};
struct ata_eh_cmd_timeout_ent {
	const u8		*commands;
	const unsigned int	*timeouts;
};

/* The following table determines timeouts to use for EH internal
 * commands.  Each table entry is a command class and matches the
 * commands the entry applies to and the timeout table to use.
 *
 * On the retry after a command timed out, the next timeout value from
 * the table is used.  If the table doesn't contain further entries,
 * the last value is used.
 *
 * ehc->cmd_timeout_idx keeps track of which timeout to use per
 * command class, so if SET_FEATURES times out on the first try, the
 * next try will use the second timeout value only for that class.
 */
#define CMDS(cmds...)	(const u8 []){ cmds, 0 }
static const struct ata_eh_cmd_timeout_ent
ata_eh_cmd_timeout_table[ATA_EH_CMD_TIMEOUT_TABLE_SIZE] = {
	{ .commands = CMDS(ATA_CMD_ID_ATA, ATA_CMD_ID_ATAPI),
	  .timeouts = ata_eh_identify_timeouts, },
	{ .commands = CMDS(ATA_CMD_READ_LOG_EXT, ATA_CMD_READ_LOG_DMA_EXT),
	  .timeouts = ata_eh_revalidate_timeouts, },
	{ .commands = CMDS(ATA_CMD_READ_NATIVE_MAX, ATA_CMD_READ_NATIVE_MAX_EXT),
	  .timeouts = ata_eh_other_timeouts, },
	{ .commands = CMDS(ATA_CMD_SET_MAX, ATA_CMD_SET_MAX_EXT),
	  .timeouts = ata_eh_other_timeouts, },
	{ .commands = CMDS(ATA_CMD_SET_FEATURES),
	  .timeouts = ata_eh_other_timeouts, },
	{ .commands = CMDS(ATA_CMD_INIT_DEV_PARAMS),
	  .timeouts = ata_eh_other_timeouts, },
	{ .commands = CMDS(ATA_CMD_FLUSH, ATA_CMD_FLUSH_EXT),
	  .timeouts = ata_eh_flush_timeouts },
};
#undef CMDS
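
/*
 * Illustrative note (added for clarity, not part of the original source):
 * CMDS() builds a zero-terminated compound literal, so the first entry
 * above expands to
 *
 *	.commands = (const u8 []){ ATA_CMD_ID_ATA, ATA_CMD_ID_ATAPI, 0 }
 *
 * which is what lets ata_lookup_timeout_table() below scan each command
 * class with a simple "while (*cur)" walk.
 */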
static void __ata_port_freeze(struct ata_port *ap);

#ifdef CONFIG_PM
static void ata_eh_handle_port_suspend(struct ata_port *ap);
static void ata_eh_handle_port_resume(struct ata_port *ap);
#else /* CONFIG_PM */
static void ata_eh_handle_port_suspend(struct ata_port *ap)
{ }

static void ata_eh_handle_port_resume(struct ata_port *ap)
{ }
#endif /* CONFIG_PM */
static __printf(2, 0) void __ata_ehi_pushv_desc(struct ata_eh_info *ehi,
				 const char *fmt, va_list args)
{
	ehi->desc_len += vscnprintf(ehi->desc + ehi->desc_len,
				     ATA_EH_DESC_LEN - ehi->desc_len,
				     fmt, args);
}
/**
 *	__ata_ehi_push_desc - push error description without adding separator
 *	@ehi: target EHI
 *	@fmt: printf format string
 *
 *	Format string according to @fmt and append it to @ehi->desc.
 *
 *	LOCKING:
 *	spin_lock_irqsave(host lock)
 */
void __ata_ehi_push_desc(struct ata_eh_info *ehi, const char *fmt, ...)
{
	va_list args;

	va_start(args, fmt);
	__ata_ehi_pushv_desc(ehi, fmt, args);
	va_end(args);
}
EXPORT_SYMBOL_GPL(__ata_ehi_push_desc);
/**
 *	ata_ehi_push_desc - push error description with separator
 *	@ehi: target EHI
 *	@fmt: printf format string
 *
 *	Format string according to @fmt and append it to @ehi->desc.
 *	If @ehi->desc is not empty, ", " is added in-between.
 *
 *	LOCKING:
 *	spin_lock_irqsave(host lock)
 */
void ata_ehi_push_desc(struct ata_eh_info *ehi, const char *fmt, ...)
{
	va_list args;

	if (ehi->desc_len)
		__ata_ehi_push_desc(ehi, ", ");

	va_start(args, fmt);
	__ata_ehi_pushv_desc(ehi, fmt, args);
	va_end(args);
}
EXPORT_SYMBOL_GPL(ata_ehi_push_desc);
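
/*
 * Usage sketch (hypothetical driver code, not part of this file): an
 * interrupt handler that wants its findings to show up in the EH report
 * might do, under the host lock:
 *
 *	struct ata_eh_info *ehi = &ap->link.eh_info;
 *
 *	ata_ehi_push_desc(ehi, "irq_stat 0x%08x", irq_stat);
 *	ata_ehi_push_desc(ehi, "controller self-disable");
 *
 * producing "irq_stat 0x00000010, controller self-disable" in the final
 * ata_eh_link_report() output.
 */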
/**
 *	ata_ehi_clear_desc - clean error description
 *	@ehi: target EHI
 *
 *	Clear @ehi->desc.
 *
 *	LOCKING:
 *	spin_lock_irqsave(host lock)
 */
void ata_ehi_clear_desc(struct ata_eh_info *ehi)
{
	ehi->desc[0] = '\0';
	ehi->desc_len = 0;
}
EXPORT_SYMBOL_GPL(ata_ehi_clear_desc);
/**
 *	ata_port_desc - append port description
 *	@ap: target ATA port
 *	@fmt: printf format string
 *
 *	Format string according to @fmt and append it to port
 *	description.  If port description is not empty, " " is added
 *	in-between.  This function is to be used while initializing
 *	ata_host.  The description is printed on host registration.
 *
 *	LOCKING:
 *	None.
 */
void ata_port_desc(struct ata_port *ap, const char *fmt, ...)
{
	va_list args;

	WARN_ON(!(ap->pflags & ATA_PFLAG_INITIALIZING));

	if (ap->link.eh_info.desc_len)
		__ata_ehi_push_desc(&ap->link.eh_info, " ");

	va_start(args, fmt);
	__ata_ehi_pushv_desc(&ap->link.eh_info, fmt, args);
	va_end(args);
}
EXPORT_SYMBOL_GPL(ata_port_desc);
#ifdef CONFIG_PCI
/**
 *	ata_port_pbar_desc - append PCI BAR description
 *	@ap: target ATA port
 *	@bar: target PCI BAR
 *	@offset: offset into PCI BAR
 *	@name: name of the area
 *
 *	If @offset is negative, this function formats a string which
 *	contains the name, address, size and type of the BAR and
 *	appends it to the port description.  If @offset is zero or
 *	positive, only name and offsetted address is appended.
 *
 *	LOCKING:
 *	None.
 */
void ata_port_pbar_desc(struct ata_port *ap, int bar, ssize_t offset,
			const char *name)
{
	struct pci_dev *pdev = to_pci_dev(ap->host->dev);
	char *type = "";
	unsigned long long start, len;

	if (pci_resource_flags(pdev, bar) & IORESOURCE_MEM)
		type = "m";
	else if (pci_resource_flags(pdev, bar) & IORESOURCE_IO)
		type = "i";

	start = (unsigned long long)pci_resource_start(pdev, bar);
	len = (unsigned long long)pci_resource_len(pdev, bar);

	if (offset < 0)
		ata_port_desc(ap, "%s %s%llu@0x%llx", name, type, len, start);
	else
		ata_port_desc(ap, "%s 0x%llx", name,
				start + (unsigned long long)offset);
}
EXPORT_SYMBOL_GPL(ata_port_pbar_desc);
#endif /* CONFIG_PCI */
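
/*
 * Usage sketch (assumed AHCI-style values, not part of this file): a PCI
 * driver typically describes its BAR once at init time:
 *
 *	ata_port_pbar_desc(ap, 5, -1, "abar");
 *	ata_port_pbar_desc(ap, 5, 0x100 + ap->port_no * 0x80, "port");
 *
 * With a memory BAR this appends something like
 * "abar m2048@0xfebf1000 port 0xfebf1100" to the port description.
 */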
static int ata_lookup_timeout_table(u8 cmd)
{
	int i;

	for (i = 0; i < ATA_EH_CMD_TIMEOUT_TABLE_SIZE; i++) {
		const u8 *cur;

		for (cur = ata_eh_cmd_timeout_table[i].commands; *cur; cur++)
			if (*cur == cmd)
				return i;
	}

	return -1;
}
/**
 *	ata_internal_cmd_timeout - determine timeout for an internal command
 *	@dev: target device
 *	@cmd: internal command to be issued
 *
 *	Determine timeout for internal command @cmd for @dev.
 *
 *	LOCKING:
 *	EH context.
 *
 *	RETURNS:
 *	Determined timeout.
 */
unsigned int ata_internal_cmd_timeout(struct ata_device *dev, u8 cmd)
{
	struct ata_eh_context *ehc = &dev->link->eh_context;
	int ent = ata_lookup_timeout_table(cmd);
	int idx;

	if (ent < 0)
		return ATA_EH_CMD_DFL_TIMEOUT;

	idx = ehc->cmd_timeout_idx[dev->devno][ent];
	return ata_eh_cmd_timeout_table[ent].timeouts[idx];
}
/**
 *	ata_internal_cmd_timed_out - notification for internal command timeout
 *	@dev: target device
 *	@cmd: internal command which timed out
 *
 *	Notify EH that internal command @cmd for @dev timed out.  This
 *	function should be called only for commands whose timeouts are
 *	determined using ata_internal_cmd_timeout().
 *
 *	LOCKING:
 *	EH context.
 */
void ata_internal_cmd_timed_out(struct ata_device *dev, u8 cmd)
{
	struct ata_eh_context *ehc = &dev->link->eh_context;
	int ent = ata_lookup_timeout_table(cmd);
	int idx;

	if (ent < 0)
		return;

	idx = ehc->cmd_timeout_idx[dev->devno][ent];
	if (ata_eh_cmd_timeout_table[ent].timeouts[idx + 1] != UINT_MAX)
		ehc->cmd_timeout_idx[dev->devno][ent]++;
}
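
/*
 * Worked example (illustrative, not part of the original source): with the
 * tables above, retries of a timed-out IDENTIFY escalate per device and
 * command class:
 *
 *	ata_internal_cmd_timeout(dev, ATA_CMD_ID_ATA);	// 1st try:  5000ms
 *	// on timeout, EH calls:
 *	ata_internal_cmd_timed_out(dev, ATA_CMD_ID_ATA);
 *	ata_internal_cmd_timeout(dev, ATA_CMD_ID_ATA);	// 2nd try: 10000ms
 *	// ... then 30000ms; the index stops advancing once the next slot
 *	// in ata_eh_identify_timeouts[] is the UINT_MAX sentinel.
 */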
static void ata_ering_record(struct ata_ering *ering, unsigned int eflags,
			     unsigned int err_mask)
{
	struct ata_ering_entry *ent;

	WARN_ON(!err_mask);

	ering->cursor++;
	ering->cursor %= ATA_ERING_SIZE;

	ent = &ering->ring[ering->cursor];
	ent->eflags = eflags;
	ent->err_mask = err_mask;
	ent->timestamp = get_jiffies_64();
}

static struct ata_ering_entry *ata_ering_top(struct ata_ering *ering)
{
	struct ata_ering_entry *ent = &ering->ring[ering->cursor];

	if (ent->err_mask)
		return ent;
	return NULL;
}
int ata_ering_map(struct ata_ering *ering,
		  int (*map_fn)(struct ata_ering_entry *, void *),
		  void *arg)
{
	int idx, rc = 0;
	struct ata_ering_entry *ent;

	idx = ering->cursor;
	do {
		ent = &ering->ring[idx];
		if (!ent->err_mask)
			break;
		rc = map_fn(ent, arg);
		if (rc)
			break;
		idx = (idx - 1 + ATA_ERING_SIZE) % ATA_ERING_SIZE;
	} while (idx != ering->cursor);

	return rc;
}
static int ata_ering_clear_cb(struct ata_ering_entry *ent, void *void_arg)
{
	ent->eflags |= ATA_EFLAG_OLD_ER;
	return 0;
}

static void ata_ering_clear(struct ata_ering *ering)
{
	ata_ering_map(ering, ata_ering_clear_cb, NULL);
}
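
/*
 * Illustrative sketch (not part of the original source): the error ring is
 * walked newest-first, and a callback stops the walk by returning non-zero.
 * Counting recent, still-relevant entries could look like:
 *
 *	static int count_recent_cb(struct ata_ering_entry *ent, void *arg)
 *	{
 *		if (ent->eflags & ATA_EFLAG_OLD_ER)
 *			return -1;		// cleared entry, stop here
 *		(*(int *)arg)++;
 *		return 0;
 *	}
 *
 *	int nr = 0;
 *	ata_ering_map(&dev->ering, count_recent_cb, &nr);
 *
 * count_recent_cb is a made-up helper.  Note that ata_ering_clear() doesn't
 * erase entries; it only marks them ATA_EFLAG_OLD_ER so callbacks such as
 * speed_down_verdict_cb() below skip them.
 */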
static unsigned int ata_eh_dev_action(struct ata_device *dev)
{
	struct ata_eh_context *ehc = &dev->link->eh_context;

	return ehc->i.action | ehc->i.dev_action[dev->devno];
}
static void ata_eh_clear_action(struct ata_link *link, struct ata_device *dev,
				struct ata_eh_info *ehi, unsigned int action)
{
	struct ata_device *tdev;

	if (!dev) {
		ehi->action &= ~action;
		ata_for_each_dev(tdev, link, ALL)
			ehi->dev_action[tdev->devno] &= ~action;
	} else {
		/* doesn't make sense for port-wide EH actions */
		WARN_ON(!(action & ATA_EH_PERDEV_MASK));

		/* break ehi->action into ehi->dev_action */
		if (ehi->action & action) {
			ata_for_each_dev(tdev, link, ALL)
				ehi->dev_action[tdev->devno] |=
					ehi->action & action;
			ehi->action &= ~action;
		}

		/* turn off the specified per-dev action */
		ehi->dev_action[dev->devno] &= ~action;
	}
}
/**
 *	ata_eh_acquire - acquire EH ownership
 *	@ap: ATA port to acquire EH ownership for
 *
 *	Acquire EH ownership for @ap.  This is the basic exclusion
 *	mechanism for ports sharing a host.  Only one port hanging off
 *	the same host can claim the ownership of EH.
 *
 *	LOCKING:
 *	EH context.
 */
void ata_eh_acquire(struct ata_port *ap)
{
	mutex_lock(&ap->host->eh_mutex);
	WARN_ON_ONCE(ap->host->eh_owner);
	ap->host->eh_owner = current;
}
/**
 *	ata_eh_release - release EH ownership
 *	@ap: ATA port to release EH ownership for
 *
 *	Release EH ownership for @ap.  The caller must have acquired
 *	EH ownership using ata_eh_acquire() previously.
 *
 *	LOCKING:
 *	EH context.
 */
void ata_eh_release(struct ata_port *ap)
{
	WARN_ON_ONCE(ap->host->eh_owner != current);
	ap->host->eh_owner = NULL;
	mutex_unlock(&ap->host->eh_mutex);
}
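
/*
 * Usage sketch (illustrative): host-wide EH exclusion brackets any EH work,
 * e.g.:
 *
 *	ata_eh_acquire(ap);
 *	// ... reset/recovery work; long waits go through helpers such as
 *	// ata_msleep(), which drop and re-take ownership internally so
 *	// sibling ports sharing the host can make progress ...
 *	ata_eh_release(ap);
 */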
static void ata_eh_unload(struct ata_port *ap)
{
	struct ata_link *link;
	struct ata_device *dev;
	unsigned long flags;

	/* Restore SControl IPM and SPD for the next driver and
	 * disable attached devices.
	 */
	ata_for_each_link(link, ap, PMP_FIRST) {
		sata_scr_write(link, SCR_CONTROL, link->saved_scontrol & 0xff0);
		ata_for_each_dev(dev, link, ALL)
			ata_dev_disable(dev);
	}

	/* freeze and set UNLOADED */
	spin_lock_irqsave(ap->lock, flags);

	ata_port_freeze(ap);			/* won't be thawed */
	ap->pflags &= ~ATA_PFLAG_EH_PENDING;	/* clear pending from freeze */
	ap->pflags |= ATA_PFLAG_UNLOADED;

	spin_unlock_irqrestore(ap->lock, flags);
}
/**
 *	ata_scsi_error - SCSI layer error handler callback
 *	@host: SCSI host on which error occurred
 *
 *	Handles SCSI-layer-thrown error events.
 *
 *	LOCKING:
 *	Inherited from SCSI layer (none, can sleep)
 */
void ata_scsi_error(struct Scsi_Host *host)
{
	struct ata_port *ap = ata_shost_to_port(host);
	unsigned long flags;
	LIST_HEAD(eh_work_q);

	spin_lock_irqsave(host->host_lock, flags);
	list_splice_init(&host->eh_cmd_q, &eh_work_q);
	spin_unlock_irqrestore(host->host_lock, flags);

	ata_scsi_cmd_error_handler(host, ap, &eh_work_q);

	/* If the timeout raced normal completion and there is nothing to
	 * recover (nr_timedout == 0), why exactly are we doing error
	 * recovery?
	 */
	ata_scsi_port_error_handler(host, ap);

	/* finish or retry handled scmd's and clean up */
	WARN_ON(!list_empty(&eh_work_q));
}
/**
 * ata_scsi_cmd_error_handler - error callback for a list of commands
 * @host:	scsi host containing the port
 * @ap:		ATA port within the host
 * @eh_work_q:	list of commands to process
 *
 * Process the given list of commands and return those finished to the
 * ap->eh_done_q.  This function is the first part of the libata error
 * handler which processes a given list of failed commands.
 */
void ata_scsi_cmd_error_handler(struct Scsi_Host *host, struct ata_port *ap,
				struct list_head *eh_work_q)
{
	int i;
	unsigned long flags;

	/* make sure sff pio task is not running */
	ata_sff_flush_pio_task(ap);

	/* synchronize with host lock and sort out timeouts */

	/* For new EH, all qcs are finished in one of three ways -
	 * normal completion, error completion, and SCSI timeout.
	 * Both completions can race against SCSI timeout.  When normal
	 * completion wins, the qc never reaches EH.  When error
	 * completion wins, the qc has ATA_QCFLAG_FAILED set.
	 *
	 * When SCSI timeout wins, things are a bit more complex.
	 * Normal or error completion can occur after the timeout but
	 * before this point.  In such cases, both types of
	 * completions are honored.  A scmd is determined to have
	 * timed out iff its associated qc is active and not failed.
	 */
	spin_lock_irqsave(ap->lock, flags);
	if (ap->ops->error_handler) {
		struct scsi_cmnd *scmd, *tmp;
		int nr_timedout = 0;

		/* This must occur under the ap->lock as we don't want
		   a polled recovery to race the real interrupt handler

		   The lost_interrupt handler checks for any completed but
		   non-notified command and completes much like an IRQ handler.

		   We then fall into the error recovery code which will treat
		   this as if normal completion won the race */

		if (ap->ops->lost_interrupt)
			ap->ops->lost_interrupt(ap);

		list_for_each_entry_safe(scmd, tmp, eh_work_q, eh_entry) {
			struct ata_queued_cmd *qc;

			ata_qc_for_each_raw(ap, qc, i) {
				if (qc->flags & ATA_QCFLAG_ACTIVE &&
				    qc->scsicmd == scmd)
					break;
			}

			if (i < ATA_MAX_QUEUE) {
				/* the scmd has an associated qc */
				if (!(qc->flags & ATA_QCFLAG_FAILED)) {
					/* which hasn't failed yet, timeout */
					qc->err_mask |= AC_ERR_TIMEOUT;
					qc->flags |= ATA_QCFLAG_FAILED;
					nr_timedout++;
				}
			} else {
				/* Normal completion occurred after
				 * SCSI timeout but before this point.
				 * Successfully complete it.
				 */
				scmd->retries = scmd->allowed;
				scsi_eh_finish_cmd(scmd, &ap->eh_done_q);
			}
		}

		/* If we have timed out qcs, they belong to EH from
		 * this point but the state of the controller is
		 * unknown.  Freeze the port to make sure the IRQ
		 * handler doesn't diddle with those qcs.  This must
		 * be done atomically w.r.t. setting QCFLAG_FAILED.
		 */
		if (nr_timedout)
			__ata_port_freeze(ap);

		/* initialize eh_tries */
		ap->eh_tries = ATA_EH_MAX_TRIES;
	}
	spin_unlock_irqrestore(ap->lock, flags);
}
EXPORT_SYMBOL(ata_scsi_cmd_error_handler);
/**
 * ata_scsi_port_error_handler - recover the port after the commands
 * @host:	SCSI host containing the port
 * @ap:		the ATA port
 *
 * Handle the recovery of the port @ap after all the commands
 * have been recovered.
 */
void ata_scsi_port_error_handler(struct Scsi_Host *host, struct ata_port *ap)
{
	unsigned long flags;

	/* invoke error handler */
	if (ap->ops->error_handler) {
		struct ata_link *link;

		/* acquire EH ownership */
		ata_eh_acquire(ap);
 repeat:
		/* kill fast drain timer */
		del_timer_sync(&ap->fastdrain_timer);

		/* process port resume request */
		ata_eh_handle_port_resume(ap);

		/* fetch & clear EH info */
		spin_lock_irqsave(ap->lock, flags);

		ata_for_each_link(link, ap, HOST_FIRST) {
			struct ata_eh_context *ehc = &link->eh_context;
			struct ata_device *dev;

			memset(&link->eh_context, 0, sizeof(link->eh_context));
			link->eh_context.i = link->eh_info;
			memset(&link->eh_info, 0, sizeof(link->eh_info));

			ata_for_each_dev(dev, link, ENABLED) {
				int devno = dev->devno;

				ehc->saved_xfer_mode[devno] = dev->xfer_mode;
				if (ata_ncq_enabled(dev))
					ehc->saved_ncq_enabled |= 1 << devno;
			}
		}

		ap->pflags |= ATA_PFLAG_EH_IN_PROGRESS;
		ap->pflags &= ~ATA_PFLAG_EH_PENDING;
		ap->excl_link = NULL;	/* don't maintain exclusion over EH */

		spin_unlock_irqrestore(ap->lock, flags);

		/* invoke EH, skip if unloading or suspended */
		if (!(ap->pflags & (ATA_PFLAG_UNLOADING | ATA_PFLAG_SUSPENDED)))
			ap->ops->error_handler(ap);
		else {
			/* if unloading, commence suicide */
			if ((ap->pflags & ATA_PFLAG_UNLOADING) &&
			    !(ap->pflags & ATA_PFLAG_UNLOADED))
				ata_eh_unload(ap);
			ata_eh_finish(ap);
		}

		/* process port suspend request */
		ata_eh_handle_port_suspend(ap);

		/* Exception might have happened after ->error_handler
		 * recovered the port but before this point.  Repeat
		 * EH in such case.
		 */
		spin_lock_irqsave(ap->lock, flags);

		if (ap->pflags & ATA_PFLAG_EH_PENDING) {
			if (--ap->eh_tries) {
				spin_unlock_irqrestore(ap->lock, flags);
				goto repeat;
			}
			ata_port_err(ap,
				     "EH pending after %d tries, giving up\n",
				     ATA_EH_MAX_TRIES);
			ap->pflags &= ~ATA_PFLAG_EH_PENDING;
		}

		/* this run is complete, make sure EH info is clear */
		ata_for_each_link(link, ap, HOST_FIRST)
			memset(&link->eh_info, 0, sizeof(link->eh_info));

		/* end eh (clear host_eh_scheduled) while holding
		 * ap->lock such that if exception occurs after this
		 * point but before EH completion, SCSI midlayer will
		 * re-initiate EH.
		 */
		ap->ops->end_eh(ap);

		spin_unlock_irqrestore(ap->lock, flags);
		ata_eh_release(ap);
	} else {
		WARN_ON(ata_qc_from_tag(ap, ap->link.active_tag) == NULL);
		ap->ops->eng_timeout(ap);
	}

	scsi_eh_flush_done_q(&ap->eh_done_q);

	/* clean up */
	spin_lock_irqsave(ap->lock, flags);

	if (ap->pflags & ATA_PFLAG_LOADING)
		ap->pflags &= ~ATA_PFLAG_LOADING;
	else if ((ap->pflags & ATA_PFLAG_SCSI_HOTPLUG) &&
		!(ap->flags & ATA_FLAG_SAS_HOST))
		schedule_delayed_work(&ap->hotplug_task, 0);

	if (ap->pflags & ATA_PFLAG_RECOVERED)
		ata_port_info(ap, "EH complete\n");

	ap->pflags &= ~(ATA_PFLAG_SCSI_HOTPLUG | ATA_PFLAG_RECOVERED);

	/* tell wait_eh that we're done */
	ap->pflags &= ~ATA_PFLAG_EH_IN_PROGRESS;
	wake_up_all(&ap->eh_wait_q);

	spin_unlock_irqrestore(ap->lock, flags);
}
EXPORT_SYMBOL_GPL(ata_scsi_port_error_handler);
/**
 *	ata_port_wait_eh - Wait for the currently pending EH to complete
 *	@ap: Port to wait EH for
 *
 *	Wait until the currently pending EH is complete.
 *
 *	LOCKING:
 *	Kernel thread context (may sleep).
 */
void ata_port_wait_eh(struct ata_port *ap)
{
	unsigned long flags;
	DEFINE_WAIT(wait);

 retry:
	spin_lock_irqsave(ap->lock, flags);

	while (ap->pflags & (ATA_PFLAG_EH_PENDING | ATA_PFLAG_EH_IN_PROGRESS)) {
		prepare_to_wait(&ap->eh_wait_q, &wait, TASK_UNINTERRUPTIBLE);
		spin_unlock_irqrestore(ap->lock, flags);
		schedule();
		spin_lock_irqsave(ap->lock, flags);
	}
	finish_wait(&ap->eh_wait_q, &wait);

	spin_unlock_irqrestore(ap->lock, flags);

	/* make sure SCSI EH is complete */
	if (scsi_host_in_recovery(ap->scsi_host)) {
		ata_msleep(ap, 10);
		goto retry;
	}
}
EXPORT_SYMBOL_GPL(ata_port_wait_eh);
static unsigned int ata_eh_nr_in_flight(struct ata_port *ap)
{
	struct ata_queued_cmd *qc;
	unsigned int tag;
	unsigned int nr = 0;

	/* count only non-internal commands */
	ata_qc_for_each(ap, qc, tag) {
		if (qc)
			nr++;
	}

	return nr;
}
void ata_eh_fastdrain_timerfn(struct timer_list *t)
{
	struct ata_port *ap = from_timer(ap, t, fastdrain_timer);
	unsigned long flags;
	unsigned int cnt;

	spin_lock_irqsave(ap->lock, flags);

	cnt = ata_eh_nr_in_flight(ap);

	/* are we done? */
	if (!cnt)
		goto out_unlock;

	if (cnt == ap->fastdrain_cnt) {
		struct ata_queued_cmd *qc;
		unsigned int tag;

		/* No progress during the last interval, tag all
		 * in-flight qcs as timed out and freeze the port.
		 */
		ata_qc_for_each(ap, qc, tag) {
			if (qc)
				qc->err_mask |= AC_ERR_TIMEOUT;
		}

		ata_port_freeze(ap);
	} else {
		/* some qcs have finished, give it another chance */
		ap->fastdrain_cnt = cnt;
		ap->fastdrain_timer.expires =
			ata_deadline(jiffies, ATA_EH_FASTDRAIN_INTERVAL);
		add_timer(&ap->fastdrain_timer);
	}

 out_unlock:
	spin_unlock_irqrestore(ap->lock, flags);
}
/**
 *	ata_eh_set_pending - set ATA_PFLAG_EH_PENDING and activate fast drain
 *	@ap: target ATA port
 *	@fastdrain: activate fast drain
 *
 *	Set ATA_PFLAG_EH_PENDING and activate fast drain if @fastdrain
 *	is non-zero and EH wasn't pending before.  Fast drain ensures
 *	that EH kicks in in a timely manner.
 *
 *	LOCKING:
 *	spin_lock_irqsave(host lock)
 */
static void ata_eh_set_pending(struct ata_port *ap, int fastdrain)
{
	unsigned int cnt;

	/* already scheduled? */
	if (ap->pflags & ATA_PFLAG_EH_PENDING)
		return;

	ap->pflags |= ATA_PFLAG_EH_PENDING;

	if (!fastdrain)
		return;

	/* do we have in-flight qcs? */
	cnt = ata_eh_nr_in_flight(ap);
	if (!cnt)
		return;

	/* activate fast drain */
	ap->fastdrain_cnt = cnt;
	ap->fastdrain_timer.expires =
		ata_deadline(jiffies, ATA_EH_FASTDRAIN_INTERVAL);
	add_timer(&ap->fastdrain_timer);
}
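
/*
 * Timeline sketch (illustrative): scheduling EH with @fastdrain while,
 * say, 3 qcs are in flight arms fastdrain_timer for
 * ATA_EH_FASTDRAIN_INTERVAL (3s).  When it fires,
 * ata_eh_fastdrain_timerfn() above either sees progress (fewer qcs:
 * re-arm and wait again) or no progress (same count: mark the stragglers
 * AC_ERR_TIMEOUT and freeze the port), so entry into EH is bounded
 * instead of waiting indefinitely for the drain.
 */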
/**
 *	ata_qc_schedule_eh - schedule qc for error handling
 *	@qc: command to schedule error handling for
 *
 *	Schedule error handling for @qc.  EH will kick in as soon as
 *	other commands are drained.
 *
 *	LOCKING:
 *	spin_lock_irqsave(host lock)
 */
void ata_qc_schedule_eh(struct ata_queued_cmd *qc)
{
	struct ata_port *ap = qc->ap;

	WARN_ON(!ap->ops->error_handler);

	qc->flags |= ATA_QCFLAG_FAILED;
	ata_eh_set_pending(ap, 1);

	/* The following will fail if timeout has already expired.
	 * ata_scsi_error() takes care of such scmds on EH entry.
	 * Note that ATA_QCFLAG_FAILED is unconditionally set after
	 * this function completes.
	 */
	blk_abort_request(scsi_cmd_to_rq(qc->scsicmd));
}
/**
 * ata_std_sched_eh - non-libsas ata_ports issue eh with this common routine
 * @ap: ATA port to schedule EH for
 *
 *	LOCKING: inherited from ata_port_schedule_eh
 *	spin_lock_irqsave(host lock)
 */
void ata_std_sched_eh(struct ata_port *ap)
{
	WARN_ON(!ap->ops->error_handler);

	if (ap->pflags & ATA_PFLAG_INITIALIZING)
		return;

	ata_eh_set_pending(ap, 1);
	scsi_schedule_eh(ap->scsi_host);

	trace_ata_std_sched_eh(ap);
}
EXPORT_SYMBOL_GPL(ata_std_sched_eh);
/**
 * ata_std_end_eh - non-libsas ata_ports complete eh with this common routine
 * @ap: ATA port to end EH for
 *
 * In the libata object model there is a 1:1 mapping of ata_port to
 * shost, so host fields can be directly manipulated under ap->lock, in
 * the libsas case we need to hold a lock at the ha->level to coordinate
 * these events.
 *
 *	LOCKING:
 *	spin_lock_irqsave(host lock)
 */
void ata_std_end_eh(struct ata_port *ap)
{
	struct Scsi_Host *host = ap->scsi_host;

	host->host_eh_scheduled = 0;
}
EXPORT_SYMBOL(ata_std_end_eh);
/**
 *	ata_port_schedule_eh - schedule error handling without a qc
 *	@ap: ATA port to schedule EH for
 *
 *	Schedule error handling for @ap.  EH will kick in as soon as
 *	all commands are drained.
 *
 *	LOCKING:
 *	spin_lock_irqsave(host lock)
 */
void ata_port_schedule_eh(struct ata_port *ap)
{
	/* see: ata_std_sched_eh, unless you know better */
	ap->ops->sched_eh(ap);
}
EXPORT_SYMBOL_GPL(ata_port_schedule_eh);
static int ata_do_link_abort(struct ata_port *ap, struct ata_link *link)
{
	struct ata_queued_cmd *qc;
	int tag, nr_aborted = 0;

	WARN_ON(!ap->ops->error_handler);

	/* we're gonna abort all commands, no need for fast drain */
	ata_eh_set_pending(ap, 0);

	/* include internal tag in iteration */
	ata_qc_for_each_with_internal(ap, qc, tag) {
		if (qc && (!link || qc->dev->link == link)) {
			qc->flags |= ATA_QCFLAG_FAILED;
			ata_qc_complete(qc);
			nr_aborted++;
		}
	}

	if (!nr_aborted)
		ata_port_schedule_eh(ap);

	return nr_aborted;
}
/**
 *	ata_link_abort - abort all qc's on the link
 *	@link: ATA link to abort qc's for
 *
 *	Abort all active qc's on @link and schedule EH.
 *
 *	LOCKING:
 *	spin_lock_irqsave(host lock)
 *
 *	RETURNS:
 *	Number of aborted qc's.
 */
int ata_link_abort(struct ata_link *link)
{
	return ata_do_link_abort(link->ap, link);
}
EXPORT_SYMBOL_GPL(ata_link_abort);
/**
 *	ata_port_abort - abort all qc's on the port
 *	@ap: ATA port to abort qc's for
 *
 *	Abort all active qc's of @ap and schedule EH.
 *
 *	LOCKING:
 *	spin_lock_irqsave(host lock)
 *
 *	RETURNS:
 *	Number of aborted qc's.
 */
int ata_port_abort(struct ata_port *ap)
{
	return ata_do_link_abort(ap, NULL);
}
EXPORT_SYMBOL_GPL(ata_port_abort);
/**
 *	__ata_port_freeze - freeze port
 *	@ap: ATA port to freeze
 *
 *	This function is called when HSM violation or some other
 *	condition disrupts normal operation of the port.  Frozen port
 *	is not allowed to perform any operation until the port is
 *	thawed, which usually follows a successful reset.
 *
 *	ap->ops->freeze() callback can be used for freezing the port
 *	hardware-wise (e.g. mask interrupt and stop DMA engine).  If a
 *	port cannot be frozen hardware-wise, the interrupt handler
 *	must ack and clear interrupts unconditionally while the port
 *	is frozen.
 *
 *	LOCKING:
 *	spin_lock_irqsave(host lock)
 */
static void __ata_port_freeze(struct ata_port *ap)
{
	WARN_ON(!ap->ops->error_handler);

	if (ap->ops->freeze)
		ap->ops->freeze(ap);

	ap->pflags |= ATA_PFLAG_FROZEN;

	trace_ata_port_freeze(ap);
}
/**
 *	ata_port_freeze - abort & freeze port
 *	@ap: ATA port to freeze
 *
 *	Abort and freeze @ap.  The freeze operation must be called
 *	first, because some hardware requires special operations
 *	before the taskfile registers are accessible.
 *
 *	LOCKING:
 *	spin_lock_irqsave(host lock)
 *
 *	RETURNS:
 *	Number of aborted commands.
 */
int ata_port_freeze(struct ata_port *ap)
{
	int nr_aborted;

	WARN_ON(!ap->ops->error_handler);

	__ata_port_freeze(ap);
	nr_aborted = ata_port_abort(ap);

	return nr_aborted;
}
EXPORT_SYMBOL_GPL(ata_port_freeze);
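
/*
 * Usage sketch (hypothetical LLDD, not part of this file): interrupt
 * handlers typically abort-and-freeze on fatal conditions while already
 * holding the host lock.  FOO_IRQ_FATAL and irq_stat below are made up:
 *
 *	if (irq_stat & FOO_IRQ_FATAL) {
 *		ata_ehi_push_desc(&ap->link.eh_info,
 *				  "fatal irq_stat 0x%08x", irq_stat);
 *		nr_aborted = ata_port_freeze(ap);
 *	}
 *
 * ata_port_freeze() both fails the in-flight qcs and sets
 * ATA_PFLAG_FROZEN, so the (now masked) IRQ can't touch them afterwards.
 */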
/**
 *	ata_eh_freeze_port - EH helper to freeze port
 *	@ap: ATA port to freeze
 *
 *	Freeze @ap.
 *
 *	LOCKING:
 *	None.
 */
void ata_eh_freeze_port(struct ata_port *ap)
{
	unsigned long flags;

	if (!ap->ops->error_handler)
		return;

	spin_lock_irqsave(ap->lock, flags);
	__ata_port_freeze(ap);
	spin_unlock_irqrestore(ap->lock, flags);
}
EXPORT_SYMBOL_GPL(ata_eh_freeze_port);
/**
 *	ata_eh_thaw_port - EH helper to thaw port
 *	@ap: ATA port to thaw
 *
 *	Thaw frozen port @ap.
 *
 *	LOCKING:
 *	None.
 */
void ata_eh_thaw_port(struct ata_port *ap)
{
	unsigned long flags;

	if (!ap->ops->error_handler)
		return;

	spin_lock_irqsave(ap->lock, flags);

	ap->pflags &= ~ATA_PFLAG_FROZEN;

	if (ap->ops->thaw)
		ap->ops->thaw(ap);

	spin_unlock_irqrestore(ap->lock, flags);

	trace_ata_port_thaw(ap);
}
static void ata_eh_scsidone(struct scsi_cmnd *scmd)
{
	/* nada */
}

static void __ata_eh_qc_complete(struct ata_queued_cmd *qc)
{
	struct ata_port *ap = qc->ap;
	struct scsi_cmnd *scmd = qc->scsicmd;
	unsigned long flags;

	spin_lock_irqsave(ap->lock, flags);
	qc->scsidone = ata_eh_scsidone;
	__ata_qc_complete(qc);
	WARN_ON(ata_tag_valid(qc->tag));
	spin_unlock_irqrestore(ap->lock, flags);

	scsi_eh_finish_cmd(scmd, &ap->eh_done_q);
}
/**
 * ata_eh_qc_complete - Complete an active ATA command from EH
 * @qc: Command to complete
 *
 *	Indicate to the mid and upper layers that an ATA command has
 *	completed.  To be used from EH.
 */
void ata_eh_qc_complete(struct ata_queued_cmd *qc)
{
	struct scsi_cmnd *scmd = qc->scsicmd;
	scmd->retries = scmd->allowed;
	__ata_eh_qc_complete(qc);
}
/**
 * ata_eh_qc_retry - Tell midlayer to retry an ATA command after EH
 * @qc: Command to retry
 *
 *	Indicate to the mid and upper layers that an ATA command
 *	should be retried.  To be used from EH.
 *
 *	SCSI midlayer limits the number of retries to scmd->allowed.
 *	scmd->allowed is incremented for commands which get retried
 *	due to unrelated failures (qc->err_mask is zero).
 */
void ata_eh_qc_retry(struct ata_queued_cmd *qc)
{
	struct scsi_cmnd *scmd = qc->scsicmd;
	if (!qc->err_mask)
		scmd->allowed++;
	__ata_eh_qc_complete(qc);
}
/**
 *	ata_dev_disable - disable ATA device
 *	@dev: ATA device to disable
 *
 *	Disable @dev.
 *
 *	LOCKING:
 *	EH context.
 */
void ata_dev_disable(struct ata_device *dev)
{
	if (!ata_dev_enabled(dev))
		return;

	ata_dev_warn(dev, "disable device\n");
	ata_acpi_on_disable(dev);
	ata_down_xfermask_limit(dev, ATA_DNXFER_FORCE_PIO0 | ATA_DNXFER_QUIET);
	dev->class++;

	/* From now till the next successful probe, ering is used to
	 * track probe failures.  Clear accumulated device error info.
	 */
	ata_ering_clear(&dev->ering);
}
EXPORT_SYMBOL_GPL(ata_dev_disable);
/**
 *	ata_eh_detach_dev - detach ATA device
 *	@dev: ATA device to detach
 *
 *	Detach @dev.
 *
 *	LOCKING:
 *	None.
 */
void ata_eh_detach_dev(struct ata_device *dev)
{
	struct ata_link *link = dev->link;
	struct ata_port *ap = link->ap;
	struct ata_eh_context *ehc = &link->eh_context;
	unsigned long flags;

	ata_dev_disable(dev);

	spin_lock_irqsave(ap->lock, flags);

	dev->flags &= ~ATA_DFLAG_DETACH;

	if (ata_scsi_offline_dev(dev)) {
		dev->flags |= ATA_DFLAG_DETACHED;
		ap->pflags |= ATA_PFLAG_SCSI_HOTPLUG;
	}

	/* clear per-dev EH info */
	ata_eh_clear_action(link, dev, &link->eh_info, ATA_EH_PERDEV_MASK);
	ata_eh_clear_action(link, dev, &link->eh_context.i, ATA_EH_PERDEV_MASK);
	ehc->saved_xfer_mode[dev->devno] = 0;
	ehc->saved_ncq_enabled &= ~(1 << dev->devno);

	spin_unlock_irqrestore(ap->lock, flags);
}
/**
 *	ata_eh_about_to_do - about to perform eh_action
 *	@link: target ATA link
 *	@dev: target ATA dev for per-dev action (can be NULL)
 *	@action: action about to be performed
 *
 *	Called just before performing EH actions to clear related bits
 *	in @link->eh_info such that eh actions are not unnecessarily
 *	repeated.
 *
 *	LOCKING:
 *	None.
 */
void ata_eh_about_to_do(struct ata_link *link, struct ata_device *dev,
			unsigned int action)
{
	struct ata_port *ap = link->ap;
	struct ata_eh_info *ehi = &link->eh_info;
	struct ata_eh_context *ehc = &link->eh_context;
	unsigned long flags;

	trace_ata_eh_about_to_do(link, dev ? dev->devno : 0, action);

	spin_lock_irqsave(ap->lock, flags);

	ata_eh_clear_action(link, dev, ehi, action);

	/* About to take EH action, set RECOVERED.  Ignore actions on
	 * slave links as master will do them again.
	 */
	if (!(ehc->i.flags & ATA_EHI_QUIET) && link != ap->slave_link)
		ap->pflags |= ATA_PFLAG_RECOVERED;

	spin_unlock_irqrestore(ap->lock, flags);
}
/**
 *	ata_eh_done - EH action complete
 *	@link: ATA link for which EH actions are complete
 *	@dev: target ATA dev for per-dev action (can be NULL)
 *	@action: action just completed
 *
 *	Called right after performing EH actions to clear related bits
 *	in @link->eh_context.
 *
 *	LOCKING:
 *	None.
 */
void ata_eh_done(struct ata_link *link, struct ata_device *dev,
		 unsigned int action)
{
	struct ata_eh_context *ehc = &link->eh_context;

	trace_ata_eh_done(link, dev ? dev->devno : 0, action);

	ata_eh_clear_action(link, dev, &ehc->i, action);
}
/**
 *	ata_err_string - convert err_mask to descriptive string
 *	@err_mask: error mask to convert to string
 *
 *	Convert @err_mask to descriptive string.  Errors are
 *	prioritized according to severity and only the most severe
 *	error is reported.
 *
 *	LOCKING:
 *	None.
 *
 *	RETURNS:
 *	Descriptive string for @err_mask
 */
static const char *ata_err_string(unsigned int err_mask)
{
	if (err_mask & AC_ERR_HOST_BUS)
		return "host bus error";
	if (err_mask & AC_ERR_ATA_BUS)
		return "ATA bus error";
	if (err_mask & AC_ERR_TIMEOUT)
		return "timeout";
	if (err_mask & AC_ERR_HSM)
		return "HSM violation";
	if (err_mask & AC_ERR_SYSTEM)
		return "internal error";
	if (err_mask & AC_ERR_MEDIA)
		return "media error";
	if (err_mask & AC_ERR_INVALID)
		return "invalid argument";
	if (err_mask & AC_ERR_DEV)
		return "device error";
	if (err_mask & AC_ERR_NCQ)
		return "NCQ error";
	if (err_mask & AC_ERR_NODEV_HINT)
		return "Polling detection error";
	return "unknown error";
}
/**
 *	atapi_eh_tur - perform ATAPI TEST_UNIT_READY
 *	@dev: target ATAPI device
 *	@r_sense_key: out parameter for sense_key
 *
 *	Perform ATAPI TEST_UNIT_READY.
 *
 *	LOCKING:
 *	EH context (may sleep).
 *
 *	RETURNS:
 *	0 on success, AC_ERR_* mask on failure.
 */
unsigned int atapi_eh_tur(struct ata_device *dev, u8 *r_sense_key)
{
	u8 cdb[ATAPI_CDB_LEN] = { TEST_UNIT_READY, 0, 0, 0, 0, 0 };
	struct ata_taskfile tf;
	unsigned int err_mask;

	ata_tf_init(dev, &tf);

	tf.flags |= ATA_TFLAG_ISADDR | ATA_TFLAG_DEVICE;
	tf.command = ATA_CMD_PACKET;
	tf.protocol = ATAPI_PROT_NODATA;

	err_mask = ata_exec_internal(dev, &tf, cdb, DMA_NONE, NULL, 0, 0);
	if (err_mask == AC_ERR_DEV)
		*r_sense_key = tf.error >> 4;
	return err_mask;
}
/**
 *	ata_eh_request_sense - perform REQUEST_SENSE_DATA_EXT
 *	@qc: qc to perform REQUEST_SENSE_DATA_EXT to
 *	@cmd: scsi command for which the sense code should be set
 *
 *	Perform REQUEST_SENSE_DATA_EXT after the device reported CHECK
 *	SENSE.  This function is an EH helper.
 *
 *	LOCKING:
 *	Kernel thread context (may sleep).
 */
static void ata_eh_request_sense(struct ata_queued_cmd *qc,
				 struct scsi_cmnd *cmd)
{
	struct ata_device *dev = qc->dev;
	struct ata_taskfile tf;
	unsigned int err_mask;

	if (qc->ap->pflags & ATA_PFLAG_FROZEN) {
		ata_dev_warn(dev, "sense data available but port frozen\n");
		return;
	}

	if (!cmd || qc->flags & ATA_QCFLAG_SENSE_VALID)
		return;

	if (!ata_id_sense_reporting_enabled(dev->id)) {
		ata_dev_warn(qc->dev, "sense data reporting disabled\n");
		return;
	}

	ata_tf_init(dev, &tf);
	tf.flags |= ATA_TFLAG_ISADDR | ATA_TFLAG_DEVICE;
	tf.flags |= ATA_TFLAG_LBA | ATA_TFLAG_LBA48;
	tf.command = ATA_CMD_REQ_SENSE_DATA;
	tf.protocol = ATA_PROT_NODATA;

	err_mask = ata_exec_internal(dev, &tf, NULL, DMA_NONE, NULL, 0, 0);
	/* Ignore err_mask; ATA_ERR might be set */
	if (tf.status & ATA_SENSE) {
		ata_scsi_set_sense(dev, cmd, tf.lbah, tf.lbam, tf.lbal);
		qc->flags |= ATA_QCFLAG_SENSE_VALID;
	} else {
		ata_dev_warn(dev, "request sense failed stat %02x emask %x\n",
			     tf.status, err_mask);
	}
}
/**
 *	atapi_eh_request_sense - perform ATAPI REQUEST_SENSE
 *	@dev: device to perform REQUEST_SENSE to
 *	@sense_buf: result sense data buffer (SCSI_SENSE_BUFFERSIZE bytes long)
 *	@dfl_sense_key: default sense key to use
 *
 *	Perform ATAPI REQUEST_SENSE after the device reported CHECK
 *	SENSE.  This function is an EH helper.
 *
 *	LOCKING:
 *	Kernel thread context (may sleep).
 *
 *	RETURNS:
 *	0 on success, AC_ERR_* mask on failure
 */
unsigned int atapi_eh_request_sense(struct ata_device *dev,
				    u8 *sense_buf, u8 dfl_sense_key)
{
	u8 cdb[ATAPI_CDB_LEN] =
		{ REQUEST_SENSE, 0, 0, 0, SCSI_SENSE_BUFFERSIZE, 0 };
	struct ata_port *ap = dev->link->ap;
	struct ata_taskfile tf;

	memset(sense_buf, 0, SCSI_SENSE_BUFFERSIZE);

	/* initialize sense_buf with the error register,
	 * for the case where they are -not- overwritten
	 */
	sense_buf[0] = 0x70;
	sense_buf[2] = dfl_sense_key;

	/* some devices time out if garbage left in tf */
	ata_tf_init(dev, &tf);

	tf.flags |= ATA_TFLAG_ISADDR | ATA_TFLAG_DEVICE;
	tf.command = ATA_CMD_PACKET;

	/* is it pointless to prefer PIO for "safety reasons"? */
	if (ap->flags & ATA_FLAG_PIO_DMA) {
		tf.protocol = ATAPI_PROT_DMA;
		tf.feature |= ATAPI_PKT_DMA;
	} else {
		tf.protocol = ATAPI_PROT_PIO;
		tf.lbam = SCSI_SENSE_BUFFERSIZE;
		tf.lbah = 0;
	}

	return ata_exec_internal(dev, &tf, cdb, DMA_FROM_DEVICE,
				 sense_buf, SCSI_SENSE_BUFFERSIZE, 0);
}
/**
 *	ata_eh_analyze_serror - analyze SError for a failed port
 *	@link: ATA link to analyze SError for
 *
 *	Analyze SError if available and further determine cause of
 *	failure.
 *
 *	LOCKING:
 *	None.
 */
static void ata_eh_analyze_serror(struct ata_link *link)
{
	struct ata_eh_context *ehc = &link->eh_context;
	u32 serror = ehc->i.serror;
	unsigned int err_mask = 0, action = 0;
	u32 hotplug_mask;

	if (serror & (SERR_PERSISTENT | SERR_DATA)) {
		err_mask |= AC_ERR_ATA_BUS;
		action |= ATA_EH_RESET;
	}
	if (serror & SERR_PROTOCOL) {
		err_mask |= AC_ERR_HSM;
		action |= ATA_EH_RESET;
	}
	if (serror & SERR_INTERNAL) {
		err_mask |= AC_ERR_SYSTEM;
		action |= ATA_EH_RESET;
	}

	/* Determine whether a hotplug event has occurred.  Both
	 * SError.N/X are considered hotplug events for enabled or
	 * host links.  For disabled PMP links, only N bit is
	 * considered as X bit is left at 1 for link plugging.
	 */
	if (link->lpm_policy > ATA_LPM_MAX_POWER)
		hotplug_mask = 0;	/* hotplug doesn't work w/ LPM */
	else if (!(link->flags & ATA_LFLAG_DISABLED) || ata_is_host_link(link))
		hotplug_mask = SERR_PHYRDY_CHG | SERR_DEV_XCHG;
	else
		hotplug_mask = SERR_PHYRDY_CHG;

	if (serror & hotplug_mask)
		ata_ehi_hotplugged(&ehc->i);

	ehc->i.err_mask |= err_mask;
	ehc->i.action |= action;
}
/**
 *	ata_eh_analyze_tf - analyze taskfile of a failed qc
 *	@qc: qc to analyze
 *	@tf: Taskfile registers to analyze
 *
 *	Analyze taskfile of @qc and further determine cause of
 *	failure.  This function also requests ATAPI sense data if
 *	available.
 *
 *	LOCKING:
 *	Kernel thread context (may sleep).
 *
 *	RETURNS:
 *	Determined recovery action
 */
static unsigned int ata_eh_analyze_tf(struct ata_queued_cmd *qc,
				      const struct ata_taskfile *tf)
{
	unsigned int tmp, action = 0;
	u8 stat = tf->status, err = tf->error;

	if ((stat & (ATA_BUSY | ATA_DRQ | ATA_DRDY)) != ATA_DRDY) {
		qc->err_mask |= AC_ERR_HSM;
		return ATA_EH_RESET;
	}

	if (stat & (ATA_ERR | ATA_DF)) {
		qc->err_mask |= AC_ERR_DEV;
		/*
		 * Sense data reporting does not work if the
		 * device fault bit is set.
		 */
		if (stat & ATA_DF)
			stat &= ~ATA_SENSE;
	} else {
		return 0;
	}

	switch (qc->dev->class) {
	case ATA_DEV_ZAC:
		if (stat & ATA_SENSE)
			ata_eh_request_sense(qc, qc->scsicmd);
		fallthrough;
	case ATA_DEV_ATA:
		if (err & ATA_ICRC)
			qc->err_mask |= AC_ERR_ATA_BUS;
		if (err & (ATA_UNC | ATA_AMNF))
			qc->err_mask |= AC_ERR_MEDIA;
		if (err & ATA_IDNF)
			qc->err_mask |= AC_ERR_INVALID;
		break;

	case ATA_DEV_ATAPI:
		if (!(qc->ap->pflags & ATA_PFLAG_FROZEN)) {
			tmp = atapi_eh_request_sense(qc->dev,
						qc->scsicmd->sense_buffer,
						qc->result_tf.error >> 4);
			if (!tmp)
				qc->flags |= ATA_QCFLAG_SENSE_VALID;
			else
				qc->err_mask |= tmp;
		}
	}

	if (qc->flags & ATA_QCFLAG_SENSE_VALID) {
		enum scsi_disposition ret = scsi_check_sense(qc->scsicmd);
		/*
		 * SUCCESS here means that the sense code could be
		 * evaluated and should be passed to the upper layers
		 * for correct evaluation.
		 * FAILED means the sense code could not be interpreted
		 * and the device would need to be reset.
		 * NEEDS_RETRY and ADD_TO_MLQUEUE means that the
		 * command would need to be retried.
		 */
		if (ret == NEEDS_RETRY || ret == ADD_TO_MLQUEUE) {
			qc->flags |= ATA_QCFLAG_RETRY;
			qc->err_mask |= AC_ERR_OTHER;
		} else if (ret != SUCCESS) {
			qc->err_mask |= AC_ERR_HSM;
		}
	}
	if (qc->err_mask & (AC_ERR_HSM | AC_ERR_TIMEOUT | AC_ERR_ATA_BUS))
		action |= ATA_EH_RESET;

	return action;
}
static int ata_eh_categorize_error(unsigned int eflags, unsigned int err_mask,
				   int *xfer_ok)
{
	int base = 0;

	if (!(eflags & ATA_EFLAG_DUBIOUS_XFER))
		*xfer_ok = 1;

	if (!*xfer_ok)
		base = ATA_ECAT_DUBIOUS_NONE;

	if (err_mask & AC_ERR_ATA_BUS)
		return base + ATA_ECAT_ATA_BUS;

	if (err_mask & AC_ERR_TIMEOUT)
		return base + ATA_ECAT_TOUT_HSM;

	if (eflags & ATA_EFLAG_IS_IO) {
		if (err_mask & AC_ERR_HSM)
			return base + ATA_ECAT_TOUT_HSM;
		if ((err_mask &
		     (AC_ERR_DEV|AC_ERR_MEDIA|AC_ERR_INVALID)) == AC_ERR_DEV)
			return base + ATA_ECAT_UNK_DEV;
	}

	return 0;
}

struct speed_down_verdict_arg {
	u64 since;
	int xfer_ok;
	int nr_errors[ATA_ECAT_NR];
};

static int speed_down_verdict_cb(struct ata_ering_entry *ent, void *void_arg)
{
	struct speed_down_verdict_arg *arg = void_arg;
	int cat;

	if ((ent->eflags & ATA_EFLAG_OLD_ER) || (ent->timestamp < arg->since))
		return -1;

	cat = ata_eh_categorize_error(ent->eflags, ent->err_mask,
				      &arg->xfer_ok);
	arg->nr_errors[cat]++;

	return 0;
}
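
/*
 * Worked example (illustrative, not part of the original source): an I/O
 * command failing with plain AC_ERR_DEV while the current transfer mode is
 * still unverified (ATA_EFLAG_DUBIOUS_XFER set, so *xfer_ok stays 0)
 * categorizes as base ATA_ECAT_DUBIOUS_NONE (4) + ATA_ECAT_UNK_DEV (3) =
 * ATA_ECAT_DUBIOUS_UNK_DEV (7), matching the enum at the top of this file.
 */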
/**
 *	ata_eh_speed_down_verdict - Determine speed down verdict
 *	@dev: Device of interest
 *
 *	This function examines error ring of @dev and determines
 *	whether NCQ needs to be turned off, transfer speed should be
 *	stepped down, or falling back to PIO is necessary.
 *
 *	ECAT_ATA_BUS	: ATA_BUS error for any command
 *
 *	ECAT_TOUT_HSM	: TIMEOUT for any command or HSM violation for
 *			  IO commands
 *
 *	ECAT_UNK_DEV	: Unknown DEV error for IO commands
 *
 *	ECAT_DUBIOUS_*	: Identical to above three but occurred while
 *			  data transfer hasn't been verified.
 *
 *	Verdicts are
 *
 *	NCQ_OFF		: Turn off NCQ.
 *
 *	SPEED_DOWN	: Speed down transfer speed but don't fall back
 *			  to PIO.
 *
 *	FALLBACK_TO_PIO	: Fall back to PIO.
 *
 *	Even if multiple verdicts are returned, only one action is
 *	taken per error.  An action triggered by non-DUBIOUS errors
 *	clears ering, while one triggered by DUBIOUS_* errors doesn't.
 *	This is to expedite speed down decisions right after device is
 *	initially configured.
 *
 *	The following are speed down rules.  #1 and #2 deal with
 *	DUBIOUS errors.
 *
 *	1. If more than one DUBIOUS_ATA_BUS or DUBIOUS_TOUT_HSM errors
 *	   occurred during last 5 mins, SPEED_DOWN and FALLBACK_TO_PIO.
 *
 *	2. If more than one DUBIOUS_TOUT_HSM or DUBIOUS_UNK_DEV errors
 *	   occurred during last 5 mins, NCQ_OFF.
 *
 *	3. If more than 6 ATA_BUS, TOUT_HSM or UNK_DEV errors
 *	   occurred during last 5 mins, FALLBACK_TO_PIO.
 *
 *	4. If more than 3 TOUT_HSM or UNK_DEV errors occurred
 *	   during last 10 mins, NCQ_OFF.
 *
 *	5. If more than 3 ATA_BUS or TOUT_HSM errors, or more than 6
 *	   UNK_DEV errors occurred during last 10 mins, SPEED_DOWN.
 *
 *	LOCKING:
 *	Inherited from caller.
 *
 *	RETURNS:
 *	OR of ATA_EH_SPDN_* flags.
 */
static unsigned int ata_eh_speed_down_verdict(struct ata_device *dev)
{
	const u64 j5mins = 5LLU * 60 * HZ, j10mins = 10LLU * 60 * HZ;
	u64 j64 = get_jiffies_64();
	struct speed_down_verdict_arg arg;
	unsigned int verdict = 0;

	/* scan past 5 mins of error history */
	memset(&arg, 0, sizeof(arg));
	arg.since = j64 - min(j64, j5mins);
	ata_ering_map(&dev->ering, speed_down_verdict_cb, &arg);

	if (arg.nr_errors[ATA_ECAT_DUBIOUS_ATA_BUS] +
	    arg.nr_errors[ATA_ECAT_DUBIOUS_TOUT_HSM] > 1)
		verdict |= ATA_EH_SPDN_SPEED_DOWN |
			ATA_EH_SPDN_FALLBACK_TO_PIO | ATA_EH_SPDN_KEEP_ERRORS;

	if (arg.nr_errors[ATA_ECAT_DUBIOUS_TOUT_HSM] +
	    arg.nr_errors[ATA_ECAT_DUBIOUS_UNK_DEV] > 1)
		verdict |= ATA_EH_SPDN_NCQ_OFF | ATA_EH_SPDN_KEEP_ERRORS;

	if (arg.nr_errors[ATA_ECAT_ATA_BUS] +
	    arg.nr_errors[ATA_ECAT_TOUT_HSM] +
	    arg.nr_errors[ATA_ECAT_UNK_DEV] > 6)
		verdict |= ATA_EH_SPDN_FALLBACK_TO_PIO;

	/* scan past 10 mins of error history */
	memset(&arg, 0, sizeof(arg));
	arg.since = j64 - min(j64, j10mins);
	ata_ering_map(&dev->ering, speed_down_verdict_cb, &arg);

	if (arg.nr_errors[ATA_ECAT_TOUT_HSM] +
	    arg.nr_errors[ATA_ECAT_UNK_DEV] > 3)
		verdict |= ATA_EH_SPDN_NCQ_OFF;

	if (arg.nr_errors[ATA_ECAT_ATA_BUS] +
	    arg.nr_errors[ATA_ECAT_TOUT_HSM] > 3 ||
	    arg.nr_errors[ATA_ECAT_UNK_DEV] > 6)
		verdict |= ATA_EH_SPDN_SPEED_DOWN;

	return verdict;
}
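
/*
 * Worked example (illustrative, not part of the original source): four
 * timeouts (ATA_ECAT_TOUT_HSM) on a verified-transfer device within the
 * last ten minutes give nr_errors[ATA_ECAT_TOUT_HSM] == 4 on the second
 * scan, so rule #4 (> 3) sets NCQ_OFF and rule #5 (> 3) adds SPEED_DOWN.
 * Neither is a DUBIOUS rule, so KEEP_ERRORS stays clear and
 * ata_eh_speed_down() below wipes the ring once it acts on the verdict.
 */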
/**
 *	ata_eh_speed_down - record error and speed down if necessary
 *	@dev: Failed device
 *	@eflags: mask of ATA_EFLAG_* flags
 *	@err_mask: err_mask of the error
 *
 *	Record error and examine error history to determine whether
 *	adjusting transmission speed is necessary.  It also sets
 *	transmission limits appropriately if such adjustment is
 *	necessary.
 *
 *	LOCKING:
 *	Kernel thread context (may sleep).
 *
 *	RETURNS:
 *	Determined recovery action.
 */
static unsigned int ata_eh_speed_down(struct ata_device *dev,
				unsigned int eflags, unsigned int err_mask)
{
	struct ata_link *link = ata_dev_phys_link(dev);
	int xfer_ok = 0;
	unsigned int verdict;
	unsigned int action = 0;

	/* don't bother if Cat-0 error */
	if (ata_eh_categorize_error(eflags, err_mask, &xfer_ok) == 0)
		return 0;

	/* record error and determine whether speed down is necessary */
	ata_ering_record(&dev->ering, eflags, err_mask);
	verdict = ata_eh_speed_down_verdict(dev);

	/* turn off NCQ? */
	if ((verdict & ATA_EH_SPDN_NCQ_OFF) &&
	    (dev->flags & (ATA_DFLAG_PIO | ATA_DFLAG_NCQ |
			   ATA_DFLAG_NCQ_OFF)) == ATA_DFLAG_NCQ) {
		dev->flags |= ATA_DFLAG_NCQ_OFF;
		ata_dev_warn(dev, "NCQ disabled due to excessive errors\n");
		goto done;
	}

	/* speed down? */
	if (verdict & ATA_EH_SPDN_SPEED_DOWN) {
		/* speed down SATA link speed if possible */
		if (sata_down_spd_limit(link, 0) == 0) {
			action |= ATA_EH_RESET;
			goto done;
		}

		/* lower transfer mode */
		if (dev->spdn_cnt < 2) {
			static const int dma_dnxfer_sel[] =
				{ ATA_DNXFER_DMA, ATA_DNXFER_40C };
			static const int pio_dnxfer_sel[] =
				{ ATA_DNXFER_PIO, ATA_DNXFER_FORCE_PIO0 };
			int sel;

			if (dev->xfer_shift != ATA_SHIFT_PIO)
				sel = dma_dnxfer_sel[dev->spdn_cnt];
			else
				sel = pio_dnxfer_sel[dev->spdn_cnt];

			dev->spdn_cnt++;

			if (ata_down_xfermask_limit(dev, sel) == 0) {
				action |= ATA_EH_RESET;
				goto done;
			}
		}
	}

	/* Fall back to PIO?  Slowing down to PIO is meaningless for
	 * SATA ATA devices.  Consider it only for PATA and SATAPI.
	 */
	if ((verdict & ATA_EH_SPDN_FALLBACK_TO_PIO) && (dev->spdn_cnt >= 2) &&
	    (link->ap->cbl != ATA_CBL_SATA || dev->class == ATA_DEV_ATAPI) &&
	    (dev->xfer_shift != ATA_SHIFT_PIO)) {
		if (ata_down_xfermask_limit(dev, ATA_DNXFER_FORCE_PIO) == 0) {
			dev->spdn_cnt = 0;
			action |= ATA_EH_RESET;
			goto done;
		}
	}

	return 0;
 done:
	/* device has been slowed down, blow error history */
	if (!(verdict & ATA_EH_SPDN_KEEP_ERRORS))
		ata_ering_clear(&dev->ering);
	return action;
}
/**
 *	ata_eh_worth_retry - analyze error and decide whether to retry
 *	@qc: qc to possibly retry
 *
 *	Look at the cause of the error and decide if a retry
 *	might be useful or not.  We don't want to retry media errors
 *	because the drive itself has probably already taken 10-30 seconds
 *	doing its own internal retries before reporting the failure.
 */
static inline int ata_eh_worth_retry(struct ata_queued_cmd *qc)
{
	if (qc->err_mask & AC_ERR_MEDIA)
		return 0;	/* don't retry media errors */
	if (qc->flags & ATA_QCFLAG_IO)
		return 1;	/* otherwise retry anything from fs stack */
	if (qc->err_mask & AC_ERR_INVALID)
		return 0;	/* don't retry these */
	return qc->err_mask != AC_ERR_DEV;  /* retry if not dev error */
}
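
/*
 * Illustrative outcomes of the policy above (not part of the original
 * source):
 *
 *	AC_ERR_MEDIA on any command	-> 0, the drive already retried
 *	AC_ERR_DEV on an fs-stack I/O	-> 1, ATA_QCFLAG_IO wins
 *	AC_ERR_INVALID on a passthrough	-> 0, it would fail the same way
 *	pure AC_ERR_DEV elsewhere	-> 0, device rejected the command
 *	AC_ERR_TIMEOUT on non-I/O	-> 1, err_mask != AC_ERR_DEV
 */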
/**
 *	ata_eh_quiet - check if we need to be quiet about a command error
 *	@qc: qc to check
 *
 *	Look at the qc flags and its scsi command request flags to determine
 *	if we need to be quiet about the command failure.
 */
static inline bool ata_eh_quiet(struct ata_queued_cmd *qc)
{
	if (qc->scsicmd && scsi_cmd_to_rq(qc->scsicmd)->rq_flags & RQF_QUIET)
		qc->flags |= ATA_QCFLAG_QUIET;
	return qc->flags & ATA_QCFLAG_QUIET;
}
/**
 *	ata_eh_link_autopsy - analyze error and determine recovery action
 *	@link: host link to perform autopsy on
 *
 *	Analyze why @link failed and determine which recovery actions
 *	are needed.  This function also sets more detailed AC_ERR_*
 *	values and fills sense data for ATAPI CHECK SENSE.
 *
 *	LOCKING:
 *	Kernel thread context (may sleep).
 */
static void ata_eh_link_autopsy(struct ata_link *link)
{
	struct ata_port *ap = link->ap;
	struct ata_eh_context *ehc = &link->eh_context;
	struct ata_queued_cmd *qc;
	struct ata_device *dev;
	unsigned int all_err_mask = 0, eflags = 0;
	int tag, nr_failed = 0, nr_quiet = 0;
	u32 serror;
	int rc;

	if (ehc->i.flags & ATA_EHI_NO_AUTOPSY)
		return;

	/* obtain and analyze SError */
	rc = sata_scr_read(link, SCR_ERROR, &serror);
	if (rc == 0) {
		ehc->i.serror |= serror;
		ata_eh_analyze_serror(link);
	} else if (rc != -EOPNOTSUPP) {
		/* SError read failed, force reset and probing */
		ehc->i.probe_mask |= ATA_ALL_DEVICES;
		ehc->i.action |= ATA_EH_RESET;
		ehc->i.err_mask |= AC_ERR_OTHER;
	}

	/* analyze NCQ failure */
	ata_eh_analyze_ncq_error(link);

	/* any real error trumps AC_ERR_OTHER */
	if (ehc->i.err_mask & ~AC_ERR_OTHER)
		ehc->i.err_mask &= ~AC_ERR_OTHER;

	all_err_mask |= ehc->i.err_mask;

	ata_qc_for_each_raw(ap, qc, tag) {
		if (!(qc->flags & ATA_QCFLAG_FAILED) ||
		    ata_dev_phys_link(qc->dev) != link)
			continue;

		/* inherit upper level err_mask */
		qc->err_mask |= ehc->i.err_mask;

		/* analyze TF */
		ehc->i.action |= ata_eh_analyze_tf(qc, &qc->result_tf);

		/* DEV errors are probably spurious in case of ATA_BUS error */
		if (qc->err_mask & AC_ERR_ATA_BUS)
			qc->err_mask &= ~(AC_ERR_DEV | AC_ERR_MEDIA |
					  AC_ERR_INVALID);

		/* any real error trumps unknown error */
		if (qc->err_mask & ~AC_ERR_OTHER)
			qc->err_mask &= ~AC_ERR_OTHER;

		/*
		 * SENSE_VALID trumps dev/unknown error and revalidation. Upper
		 * layers will determine whether the command is worth retrying
		 * based on the sense data and device class/type. Otherwise,
		 * determine directly if the command is worth retrying using its
		 * error mask and flags.
		 */
		if (qc->flags & ATA_QCFLAG_SENSE_VALID)
			qc->err_mask &= ~(AC_ERR_DEV | AC_ERR_OTHER);
		else if (ata_eh_worth_retry(qc))
			qc->flags |= ATA_QCFLAG_RETRY;

		/* accumulate error info */
		ehc->i.dev = qc->dev;
		all_err_mask |= qc->err_mask;
		if (qc->flags & ATA_QCFLAG_IO)
			eflags |= ATA_EFLAG_IS_IO;
		trace_ata_eh_link_autopsy_qc(qc);

		/* Count quiet errors */
		if (ata_eh_quiet(qc))
			nr_quiet++;
		nr_failed++;
	}

	/* If all failed commands requested silence, then be quiet */
	if (nr_quiet == nr_failed)
		ehc->i.flags |= ATA_EHI_QUIET;

	/* enforce default EH actions */
	if (ap->pflags & ATA_PFLAG_FROZEN ||
	    all_err_mask & (AC_ERR_HSM | AC_ERR_TIMEOUT))
		ehc->i.action |= ATA_EH_RESET;
	else if (((eflags & ATA_EFLAG_IS_IO) && all_err_mask) ||
		 (!(eflags & ATA_EFLAG_IS_IO) && (all_err_mask & ~AC_ERR_DEV)))
		ehc->i.action |= ATA_EH_REVALIDATE;

	/* If we have offending qcs and the associated failed device,
	 * perform per-dev EH action only on the offending device.
	 */
	if (ehc->i.dev) {
		ehc->i.dev_action[ehc->i.dev->devno] |=
			ehc->i.action & ATA_EH_PERDEV_MASK;
		ehc->i.action &= ~ATA_EH_PERDEV_MASK;
	}

	/* propagate timeout to host link */
	if ((all_err_mask & AC_ERR_TIMEOUT) && !ata_is_host_link(link))
		ap->link.eh_context.i.err_mask |= AC_ERR_TIMEOUT;

	/* record error and consider speeding down */
	dev = ehc->i.dev;
	if (!dev && ((ata_link_max_devices(link) == 1 &&
		      ata_dev_enabled(link->device))))
		dev = link->device;

	if (dev) {
		if (dev->flags & ATA_DFLAG_DUBIOUS_XFER)
			eflags |= ATA_EFLAG_DUBIOUS_XFER;
		ehc->i.action |= ata_eh_speed_down(dev, eflags, all_err_mask);
		trace_ata_eh_link_autopsy(dev, ehc->i.action, all_err_mask);
	}
}
/**
 *	ata_eh_autopsy - analyze error and determine recovery action
 *	@ap: host port to perform autopsy on
 *
 *	Analyze all links of @ap and determine why they failed and
 *	which recovery actions are needed.
 *
 *	LOCKING:
 *	Kernel thread context (may sleep).
 */
void ata_eh_autopsy(struct ata_port *ap)
{
	struct ata_link *link;

	ata_for_each_link(link, ap, EDGE)
		ata_eh_link_autopsy(link);

	/* Handle the frigging slave link.  Autopsy is done similarly
	 * but actions and flags are transferred over to the master
	 * link and handled from there.
	 */
	if (ap->slave_link) {
		struct ata_eh_context *mehc = &ap->link.eh_context;
		struct ata_eh_context *sehc = &ap->slave_link->eh_context;

		/* transfer control flags from master to slave */
		sehc->i.flags |= mehc->i.flags & ATA_EHI_TO_SLAVE_MASK;

		/* perform autopsy on the slave link */
		ata_eh_link_autopsy(ap->slave_link);

		/* transfer actions from slave to master and clear slave */
		ata_eh_about_to_do(ap->slave_link, NULL, ATA_EH_ALL_ACTIONS);
		mehc->i.action		|= sehc->i.action;
		mehc->i.dev_action[1]	|= sehc->i.dev_action[1];
		mehc->i.flags		|= sehc->i.flags;
		ata_eh_done(ap->slave_link, NULL, ATA_EH_ALL_ACTIONS);
	}

	/* Autopsy of fanout ports can affect host link autopsy.
	 * Perform host link autopsy last.
	 */
	if (sata_pmp_attached(ap))
		ata_eh_link_autopsy(&ap->link);
}
/**
 *	ata_get_cmd_name - get name for ATA command
 *	@command: ATA command code to get name for
 *
 *	Return a textual name of the given command or "unknown"
 *
 *	LOCKING:
 *	None
 */
const char *ata_get_cmd_name(u8 command)
{
#ifdef CONFIG_ATA_VERBOSE_ERROR
	static const struct
	{
		u8 command;
		const char *text;
	} cmd_descr[] = {
		{ ATA_CMD_DEV_RESET,		"DEVICE RESET" },
		{ ATA_CMD_CHK_POWER,		"CHECK POWER MODE" },
		{ ATA_CMD_STANDBY,		"STANDBY" },
		{ ATA_CMD_IDLE,			"IDLE" },
		{ ATA_CMD_EDD,			"EXECUTE DEVICE DIAGNOSTIC" },
		{ ATA_CMD_DOWNLOAD_MICRO,	"DOWNLOAD MICROCODE" },
		{ ATA_CMD_DOWNLOAD_MICRO_DMA,	"DOWNLOAD MICROCODE DMA" },
		{ ATA_CMD_NOP,			"NOP" },
		{ ATA_CMD_FLUSH,		"FLUSH CACHE" },
		{ ATA_CMD_FLUSH_EXT,		"FLUSH CACHE EXT" },
		{ ATA_CMD_ID_ATA,		"IDENTIFY DEVICE" },
		{ ATA_CMD_ID_ATAPI,		"IDENTIFY PACKET DEVICE" },
		{ ATA_CMD_SERVICE,		"SERVICE" },
		{ ATA_CMD_READ,			"READ DMA" },
		{ ATA_CMD_READ_EXT,		"READ DMA EXT" },
		{ ATA_CMD_READ_QUEUED,		"READ DMA QUEUED" },
		{ ATA_CMD_READ_STREAM_EXT,	"READ STREAM EXT" },
		{ ATA_CMD_READ_STREAM_DMA_EXT,	"READ STREAM DMA EXT" },
		{ ATA_CMD_WRITE,		"WRITE DMA" },
		{ ATA_CMD_WRITE_EXT,		"WRITE DMA EXT" },
		{ ATA_CMD_WRITE_QUEUED,		"WRITE DMA QUEUED EXT" },
		{ ATA_CMD_WRITE_STREAM_EXT,	"WRITE STREAM EXT" },
		{ ATA_CMD_WRITE_STREAM_DMA_EXT,	"WRITE STREAM DMA EXT" },
		{ ATA_CMD_WRITE_FUA_EXT,	"WRITE DMA FUA EXT" },
		{ ATA_CMD_WRITE_QUEUED_FUA_EXT,	"WRITE DMA QUEUED FUA EXT" },
		{ ATA_CMD_FPDMA_READ,		"READ FPDMA QUEUED" },
		{ ATA_CMD_FPDMA_WRITE,		"WRITE FPDMA QUEUED" },
		{ ATA_CMD_NCQ_NON_DATA,		"NCQ NON-DATA" },
		{ ATA_CMD_FPDMA_SEND,		"SEND FPDMA QUEUED" },
		{ ATA_CMD_FPDMA_RECV,		"RECEIVE FPDMA QUEUED" },
		{ ATA_CMD_PIO_READ,		"READ SECTOR(S)" },
		{ ATA_CMD_PIO_READ_EXT,		"READ SECTOR(S) EXT" },
		{ ATA_CMD_PIO_WRITE,		"WRITE SECTOR(S)" },
		{ ATA_CMD_PIO_WRITE_EXT,	"WRITE SECTOR(S) EXT" },
		{ ATA_CMD_READ_MULTI,		"READ MULTIPLE" },
		{ ATA_CMD_READ_MULTI_EXT,	"READ MULTIPLE EXT" },
		{ ATA_CMD_WRITE_MULTI,		"WRITE MULTIPLE" },
		{ ATA_CMD_WRITE_MULTI_EXT,	"WRITE MULTIPLE EXT" },
		{ ATA_CMD_WRITE_MULTI_FUA_EXT,	"WRITE MULTIPLE FUA EXT" },
		{ ATA_CMD_SET_FEATURES,		"SET FEATURES" },
		{ ATA_CMD_SET_MULTI,		"SET MULTIPLE MODE" },
		{ ATA_CMD_VERIFY,		"READ VERIFY SECTOR(S)" },
		{ ATA_CMD_VERIFY_EXT,		"READ VERIFY SECTOR(S) EXT" },
		{ ATA_CMD_WRITE_UNCORR_EXT,	"WRITE UNCORRECTABLE EXT" },
		{ ATA_CMD_STANDBYNOW1,		"STANDBY IMMEDIATE" },
		{ ATA_CMD_IDLEIMMEDIATE,	"IDLE IMMEDIATE" },
		{ ATA_CMD_SLEEP,		"SLEEP" },
		{ ATA_CMD_INIT_DEV_PARAMS,	"INITIALIZE DEVICE PARAMETERS" },
		{ ATA_CMD_READ_NATIVE_MAX,	"READ NATIVE MAX ADDRESS" },
		{ ATA_CMD_READ_NATIVE_MAX_EXT,	"READ NATIVE MAX ADDRESS EXT" },
		{ ATA_CMD_SET_MAX,		"SET MAX ADDRESS" },
		{ ATA_CMD_SET_MAX_EXT,		"SET MAX ADDRESS EXT" },
		{ ATA_CMD_READ_LOG_EXT,		"READ LOG EXT" },
		{ ATA_CMD_WRITE_LOG_EXT,	"WRITE LOG EXT" },
		{ ATA_CMD_READ_LOG_DMA_EXT,	"READ LOG DMA EXT" },
		{ ATA_CMD_WRITE_LOG_DMA_EXT,	"WRITE LOG DMA EXT" },
		{ ATA_CMD_TRUSTED_NONDATA,	"TRUSTED NON-DATA" },
		{ ATA_CMD_TRUSTED_RCV,		"TRUSTED RECEIVE" },
		{ ATA_CMD_TRUSTED_RCV_DMA,	"TRUSTED RECEIVE DMA" },
		{ ATA_CMD_TRUSTED_SND,		"TRUSTED SEND" },
		{ ATA_CMD_TRUSTED_SND_DMA,	"TRUSTED SEND DMA" },
		{ ATA_CMD_PMP_READ,		"READ BUFFER" },
		{ ATA_CMD_PMP_READ_DMA,		"READ BUFFER DMA" },
		{ ATA_CMD_PMP_WRITE,		"WRITE BUFFER" },
		{ ATA_CMD_PMP_WRITE_DMA,	"WRITE BUFFER DMA" },
		{ ATA_CMD_CONF_OVERLAY,		"DEVICE CONFIGURATION OVERLAY" },
		{ ATA_CMD_SEC_SET_PASS,		"SECURITY SET PASSWORD" },
		{ ATA_CMD_SEC_UNLOCK,		"SECURITY UNLOCK" },
		{ ATA_CMD_SEC_ERASE_PREP,	"SECURITY ERASE PREPARE" },
		{ ATA_CMD_SEC_ERASE_UNIT,	"SECURITY ERASE UNIT" },
		{ ATA_CMD_SEC_FREEZE_LOCK,	"SECURITY FREEZE LOCK" },
		{ ATA_CMD_SEC_DISABLE_PASS,	"SECURITY DISABLE PASSWORD" },
		{ ATA_CMD_CONFIG_STREAM,	"CONFIGURE STREAM" },
		{ ATA_CMD_SMART,		"SMART" },
		{ ATA_CMD_MEDIA_LOCK,		"DOOR LOCK" },
		{ ATA_CMD_MEDIA_UNLOCK,		"DOOR UNLOCK" },
		{ ATA_CMD_DSM,			"DATA SET MANAGEMENT" },
		{ ATA_CMD_CHK_MED_CRD_TYP,	"CHECK MEDIA CARD TYPE" },
		{ ATA_CMD_CFA_REQ_EXT_ERR,	"CFA REQUEST EXTENDED ERROR" },
		{ ATA_CMD_CFA_WRITE_NE,		"CFA WRITE SECTORS WITHOUT ERASE" },
		{ ATA_CMD_CFA_TRANS_SECT,	"CFA TRANSLATE SECTOR" },
		{ ATA_CMD_CFA_ERASE,		"CFA ERASE SECTORS" },
		{ ATA_CMD_CFA_WRITE_MULT_NE,	"CFA WRITE MULTIPLE WITHOUT ERASE" },
		{ ATA_CMD_REQ_SENSE_DATA,	"REQUEST SENSE DATA EXT" },
		{ ATA_CMD_SANITIZE_DEVICE,	"SANITIZE DEVICE" },
		{ ATA_CMD_ZAC_MGMT_IN,		"ZAC MANAGEMENT IN" },
		{ ATA_CMD_ZAC_MGMT_OUT,		"ZAC MANAGEMENT OUT" },
		{ ATA_CMD_READ_LONG,		"READ LONG (with retries)" },
		{ ATA_CMD_READ_LONG_ONCE,	"READ LONG (without retries)" },
		{ ATA_CMD_WRITE_LONG,		"WRITE LONG (with retries)" },
		{ ATA_CMD_WRITE_LONG_ONCE,	"WRITE LONG (without retries)" },
		{ ATA_CMD_RESTORE,		"RECALIBRATE" },
		{ 0,				NULL } /* terminate list */
	};

	unsigned int i;

	for (i = 0; cmd_descr[i].text; i++)
		if (cmd_descr[i].command == command)
			return cmd_descr[i].text;
#endif

	return "unknown";
}
EXPORT_SYMBOL_GPL(ata_get_cmd_name);
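
/*
 * Illustrative sketch (not part of the original file): a caller can use
 * ata_get_cmd_name() to decorate its own diagnostics.  The "foo_" name
 * below is hypothetical; the helper simply maps the taskfile command
 * byte back to its textual name.
 */
#if 0	/* example only */
static void foo_dump_qc(struct ata_queued_cmd *qc)
{
	/* prints, e.g., "timed out: READ FPDMA QUEUED (0x60)" */
	ata_dev_warn(qc->dev, "timed out: %s (0x%02x)\n",
		     ata_get_cmd_name(qc->tf.command), qc->tf.command);
}
#endif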
/**
 *	ata_eh_link_report - report error handling to user
 *	@link: ATA link EH is going on
 *
 *	Report EH to user.
 *
 *	LOCKING:
 *	None.
 */
static void ata_eh_link_report(struct ata_link *link)
{
	struct ata_port *ap = link->ap;
	struct ata_eh_context *ehc = &link->eh_context;
	struct ata_queued_cmd *qc;
	const char *frozen, *desc;
	char tries_buf[6] = "";
	int tag, nr_failed = 0;

	if (ehc->i.flags & ATA_EHI_QUIET)
		return;

	desc = NULL;
	if (ehc->i.desc[0] != '\0')
		desc = ehc->i.desc;

	ata_qc_for_each_raw(ap, qc, tag) {
		if (!(qc->flags & ATA_QCFLAG_FAILED) ||
		    ata_dev_phys_link(qc->dev) != link ||
		    ((qc->flags & ATA_QCFLAG_QUIET) &&
		     qc->err_mask == AC_ERR_DEV))
			continue;
		if (qc->flags & ATA_QCFLAG_SENSE_VALID && !qc->err_mask)
			continue;

		nr_failed++;
	}

	if (!nr_failed && !ehc->i.err_mask)
		return;

	frozen = "";
	if (ap->pflags & ATA_PFLAG_FROZEN)
		frozen = " frozen";

	if (ap->eh_tries < ATA_EH_MAX_TRIES)
		snprintf(tries_buf, sizeof(tries_buf), " t%d",
			 ap->eh_tries);

	if (ehc->i.dev) {
		ata_dev_err(ehc->i.dev, "exception Emask 0x%x "
			    "SAct 0x%x SErr 0x%x action 0x%x%s%s\n",
			    ehc->i.err_mask, link->sactive, ehc->i.serror,
			    ehc->i.action, frozen, tries_buf);
		if (desc)
			ata_dev_err(ehc->i.dev, "%s\n", desc);
	} else {
		ata_link_err(link, "exception Emask 0x%x "
			     "SAct 0x%x SErr 0x%x action 0x%x%s%s\n",
			     ehc->i.err_mask, link->sactive, ehc->i.serror,
			     ehc->i.action, frozen, tries_buf);
		if (desc)
			ata_link_err(link, "%s\n", desc);
	}

#ifdef CONFIG_ATA_VERBOSE_ERROR
	if (ehc->i.serror)
		ata_link_err(link,
		  "SError: { %s%s%s%s%s%s%s%s%s%s%s%s%s%s%s%s%s}\n",
		  ehc->i.serror & SERR_DATA_RECOVERED ? "RecovData " : "",
		  ehc->i.serror & SERR_COMM_RECOVERED ? "RecovComm " : "",
		  ehc->i.serror & SERR_DATA ? "UnrecovData " : "",
		  ehc->i.serror & SERR_PERSISTENT ? "Persist " : "",
		  ehc->i.serror & SERR_PROTOCOL ? "Proto " : "",
		  ehc->i.serror & SERR_INTERNAL ? "HostInt " : "",
		  ehc->i.serror & SERR_PHYRDY_CHG ? "PHYRdyChg " : "",
		  ehc->i.serror & SERR_PHY_INT_ERR ? "PHYInt " : "",
		  ehc->i.serror & SERR_COMM_WAKE ? "CommWake " : "",
		  ehc->i.serror & SERR_10B_8B_ERR ? "10B8B " : "",
		  ehc->i.serror & SERR_DISPARITY ? "Dispar " : "",
		  ehc->i.serror & SERR_CRC ? "BadCRC " : "",
		  ehc->i.serror & SERR_HANDSHAKE ? "Handshk " : "",
		  ehc->i.serror & SERR_LINK_SEQ_ERR ? "LinkSeq " : "",
		  ehc->i.serror & SERR_TRANS_ST_ERROR ? "TrStaTrns " : "",
		  ehc->i.serror & SERR_UNRECOG_FIS ? "UnrecFIS " : "",
		  ehc->i.serror & SERR_DEV_XCHG ? "DevExch " : "");
#endif

	ata_qc_for_each_raw(ap, qc, tag) {
		struct ata_taskfile *cmd = &qc->tf, *res = &qc->result_tf;
		char data_buf[20] = "";
		char cdb_buf[70] = "";

		if (!(qc->flags & ATA_QCFLAG_FAILED) ||
		    ata_dev_phys_link(qc->dev) != link || !qc->err_mask)
			continue;

		if (qc->dma_dir != DMA_NONE) {
			static const char *dma_str[] = {
				[DMA_BIDIRECTIONAL]	= "bidi",
				[DMA_TO_DEVICE]		= "out",
				[DMA_FROM_DEVICE]	= "in",
			};
			const char *prot_str = NULL;

			switch (qc->tf.protocol) {
			case ATA_PROT_UNKNOWN:
				prot_str = "unknown";
				break;
			case ATA_PROT_NODATA:
				prot_str = "nodata";
				break;
			case ATA_PROT_PIO:
				prot_str = "pio";
				break;
			case ATA_PROT_DMA:
				prot_str = "dma";
				break;
			case ATA_PROT_NCQ:
				prot_str = "ncq dma";
				break;
			case ATA_PROT_NCQ_NODATA:
				prot_str = "ncq nodata";
				break;
			case ATAPI_PROT_NODATA:
				prot_str = "nodata";
				break;
			case ATAPI_PROT_PIO:
				prot_str = "pio";
				break;
			case ATAPI_PROT_DMA:
				prot_str = "dma";
				break;
			}
			snprintf(data_buf, sizeof(data_buf), " %s %u %s",
				 prot_str, qc->nbytes, dma_str[qc->dma_dir]);
		}

		if (ata_is_atapi(qc->tf.protocol)) {
			const u8 *cdb = qc->cdb;
			size_t cdb_len = qc->dev->cdb_len;

			if (qc->scsicmd) {
				cdb = qc->scsicmd->cmnd;
				cdb_len = qc->scsicmd->cmd_len;
			}
			__scsi_format_command(cdb_buf, sizeof(cdb_buf),
					      cdb, cdb_len);
		} else {
			ata_dev_err(qc->dev, "failed command: %s\n",
				    ata_get_cmd_name(cmd->command));
		}

		ata_dev_err(qc->dev,
			"cmd %02x/%02x:%02x:%02x:%02x:%02x/%02x:%02x:%02x:%02x:%02x/%02x "
			"tag %d%s%s\n         "
			"res %02x/%02x:%02x:%02x:%02x:%02x/%02x:%02x:%02x:%02x:%02x/%02x "
			"Emask 0x%x (%s)%s\n",
			cmd->command, cmd->feature, cmd->nsect,
			cmd->lbal, cmd->lbam, cmd->lbah,
			cmd->hob_feature, cmd->hob_nsect,
			cmd->hob_lbal, cmd->hob_lbam, cmd->hob_lbah,
			cmd->device, qc->tag, data_buf, cdb_buf,
			res->status, res->error, res->nsect,
			res->lbal, res->lbam, res->lbah,
			res->hob_feature, res->hob_nsect,
			res->hob_lbal, res->hob_lbam, res->hob_lbah,
			res->device, qc->err_mask, ata_err_string(qc->err_mask),
			qc->err_mask & AC_ERR_NCQ ? " <F>" : "");

#ifdef CONFIG_ATA_VERBOSE_ERROR
		if (res->status & (ATA_BUSY | ATA_DRDY | ATA_DF | ATA_DRQ |
				   ATA_SENSE | ATA_ERR)) {
			if (res->status & ATA_BUSY)
				ata_dev_err(qc->dev, "status: { Busy }\n");
			else
				ata_dev_err(qc->dev, "status: { %s%s%s%s%s}\n",
				  res->status & ATA_DRDY ? "DRDY " : "",
				  res->status & ATA_DF ? "DF " : "",
				  res->status & ATA_DRQ ? "DRQ " : "",
				  res->status & ATA_SENSE ? "SENSE " : "",
				  res->status & ATA_ERR ? "ERR " : "");
		}

		if (cmd->command != ATA_CMD_PACKET &&
		    (res->error & (ATA_ICRC | ATA_UNC | ATA_AMNF | ATA_IDNF |
				   ATA_ABORTED)))
			ata_dev_err(qc->dev, "error: { %s%s%s%s%s}\n",
				    res->error & ATA_ICRC ? "ICRC " : "",
				    res->error & ATA_UNC ? "UNC " : "",
				    res->error & ATA_AMNF ? "AMNF " : "",
				    res->error & ATA_IDNF ? "IDNF " : "",
				    res->error & ATA_ABORTED ? "ABRT " : "");
#endif
	}
}
/**
 *	ata_eh_report - report error handling to user
 *	@ap: ATA port to report EH about
 *
 *	Report EH to user.
 *
 *	LOCKING:
 *	None.
 */
void ata_eh_report(struct ata_port *ap)
{
	struct ata_link *link;

	ata_for_each_link(link, ap, HOST_FIRST)
		ata_eh_link_report(link);
}
static int ata_do_reset(struct ata_link *link, ata_reset_fn_t reset,
			unsigned int *classes, unsigned long deadline,
			bool clear_classes)
{
	struct ata_device *dev;

	if (clear_classes)
		ata_for_each_dev(dev, link, ALL)
			classes[dev->devno] = ATA_DEV_UNKNOWN;

	return reset(link, classes, deadline);
}
static int ata_eh_followup_srst_needed(struct ata_link *link, int rc)
{
	if ((link->flags & ATA_LFLAG_NO_SRST) || ata_link_offline(link))
		return 0;
	if (rc == -EAGAIN)
		return 1;
	if (sata_pmp_supported(link->ap) && ata_is_host_link(link))
		return 1;
	return 0;
}
int ata_eh_reset(struct ata_link *link, int classify,
		 ata_prereset_fn_t prereset, ata_reset_fn_t softreset,
		 ata_reset_fn_t hardreset, ata_postreset_fn_t postreset)
{
	struct ata_port *ap = link->ap;
	struct ata_link *slave = ap->slave_link;
	struct ata_eh_context *ehc = &link->eh_context;
	struct ata_eh_context *sehc = slave ? &slave->eh_context : NULL;
	unsigned int *classes = ehc->classes;
	unsigned int lflags = link->flags;
	int verbose = !(ehc->i.flags & ATA_EHI_QUIET);
	int max_tries = 0, try = 0;
	struct ata_link *failed_link;
	struct ata_device *dev;
	unsigned long deadline, now;
	ata_reset_fn_t reset;
	unsigned long flags;
	u32 sstatus;
	int nr_unknown, rc;

	/*
	 * Prepare to reset
	 */
	while (ata_eh_reset_timeouts[max_tries] != ULONG_MAX)
		max_tries++;
	if (link->flags & ATA_LFLAG_RST_ONCE)
		max_tries = 1;
	if (link->flags & ATA_LFLAG_NO_HRST)
		hardreset = NULL;
	if (link->flags & ATA_LFLAG_NO_SRST)
		softreset = NULL;

	/* make sure each reset attempt is at least COOL_DOWN apart */
	if (ehc->i.flags & ATA_EHI_DID_RESET) {
		now = jiffies;
		WARN_ON(time_after(ehc->last_reset, now));
		deadline = ata_deadline(ehc->last_reset,
					ATA_EH_RESET_COOL_DOWN);
		if (time_before(now, deadline))
			schedule_timeout_uninterruptible(deadline - now);
	}

	spin_lock_irqsave(ap->lock, flags);
	ap->pflags |= ATA_PFLAG_RESETTING;
	spin_unlock_irqrestore(ap->lock, flags);

	ata_eh_about_to_do(link, NULL, ATA_EH_RESET);

	ata_for_each_dev(dev, link, ALL) {
		/* If we issue an SRST then an ATA drive (not ATAPI)
		 * may change configuration and be in PIO0 timing. If
		 * we do a hard reset (or are coming from power on)
		 * this is true for ATA or ATAPI. Until we've set a
		 * suitable controller mode we should not touch the
		 * bus as we may be talking too fast.
		 */
		dev->pio_mode = XFER_PIO_0;
		dev->dma_mode = 0xff;

		/* If the controller has a pio mode setup function
		 * then use it to set the chipset to rights. Don't
		 * touch the DMA setup as that will be dealt with when
		 * configuring devices.
		 */
		if (ap->ops->set_piomode)
			ap->ops->set_piomode(ap, dev);
	}

	/* prefer hardreset */
	reset = NULL;
	ehc->i.action &= ~ATA_EH_RESET;
	if (hardreset) {
		reset = hardreset;
		ehc->i.action |= ATA_EH_HARDRESET;
	} else if (softreset) {
		reset = softreset;
		ehc->i.action |= ATA_EH_SOFTRESET;
	}

	if (prereset) {
		unsigned long deadline = ata_deadline(jiffies,
						      ATA_EH_PRERESET_TIMEOUT);

		if (slave) {
			sehc->i.action &= ~ATA_EH_RESET;
			sehc->i.action |= ehc->i.action;
		}

		rc = prereset(link, deadline);

		/* If present, do prereset on slave link too. Reset
		 * is skipped iff both master and slave links report
		 * -ENOENT or clear ATA_EH_RESET.
		 */
		if (slave && (rc == 0 || rc == -ENOENT)) {
			int tmp;

			tmp = prereset(slave, deadline);
			if (tmp != -ENOENT)
				rc = tmp;

			ehc->i.action |= sehc->i.action;
		}

		if (rc) {
			if (rc == -ENOENT) {
				ata_link_dbg(link, "port disabled--ignoring\n");
				ehc->i.action &= ~ATA_EH_RESET;

				ata_for_each_dev(dev, link, ALL)
					classes[dev->devno] = ATA_DEV_NONE;

				rc = 0;
			} else
				ata_link_err(link,
					     "prereset failed (errno=%d)\n",
					     rc);
			goto out;
		}
	}

	/* prereset() might have cleared ATA_EH_RESET. If so,
	 * bang classes, thaw and return.
	 */
	if (reset && !(ehc->i.action & ATA_EH_RESET)) {
		ata_for_each_dev(dev, link, ALL)
			classes[dev->devno] = ATA_DEV_NONE;
		if ((ap->pflags & ATA_PFLAG_FROZEN) &&
		    ata_is_host_link(link))
			ata_eh_thaw_port(ap);
		rc = 0;
		goto out;
	}

 retry:
	/*
	 * Perform reset
	 */
	if (ata_is_host_link(link))
		ata_eh_freeze_port(ap);

	deadline = ata_deadline(jiffies, ata_eh_reset_timeouts[try++]);

	if (reset) {
		if (verbose)
			ata_link_info(link, "%s resetting link\n",
				      reset == softreset ? "soft" : "hard");

		/* mark that this EH session started with reset */
		ehc->last_reset = jiffies;
		if (reset == hardreset) {
			ehc->i.flags |= ATA_EHI_DID_HARDRESET;
			trace_ata_link_hardreset_begin(link, classes, deadline);
		} else {
			ehc->i.flags |= ATA_EHI_DID_SOFTRESET;
			trace_ata_link_softreset_begin(link, classes, deadline);
		}

		rc = ata_do_reset(link, reset, classes, deadline, true);
		if (reset == hardreset)
			trace_ata_link_hardreset_end(link, classes, rc);
		else
			trace_ata_link_softreset_end(link, classes, rc);
		if (rc && rc != -EAGAIN) {
			failed_link = link;
			goto fail;
		}

		/* hardreset slave link if existent */
		if (slave && reset == hardreset) {
			int tmp;

			if (verbose)
				ata_link_info(slave, "hard resetting link\n");

			ata_eh_about_to_do(slave, NULL, ATA_EH_RESET);
			trace_ata_slave_hardreset_begin(slave, classes,
							deadline);
			tmp = ata_do_reset(slave, reset, classes, deadline,
					   false);
			trace_ata_slave_hardreset_end(slave, classes, tmp);
			switch (tmp) {
			case -EAGAIN:
				rc = -EAGAIN;
				break;
			case 0:
				break;
			default:
				failed_link = slave;
				rc = tmp;
				goto fail;
			}
		}

		/* perform follow-up SRST if necessary */
		if (reset == hardreset &&
		    ata_eh_followup_srst_needed(link, rc)) {
			reset = softreset;

			if (!reset) {
				ata_link_err(link,
					     "follow-up softreset required but no softreset available\n");
				failed_link = link;
				rc = -EINVAL;
				goto fail;
			}

			ata_eh_about_to_do(link, NULL, ATA_EH_RESET);
			trace_ata_link_softreset_begin(link, classes, deadline);
			rc = ata_do_reset(link, reset, classes, deadline, true);
			trace_ata_link_softreset_end(link, classes, rc);
			if (rc) {
				failed_link = link;
				goto fail;
			}
		}
	} else {
		if (verbose)
			ata_link_info(link,
				      "no reset method available, skipping reset\n");
		if (!(lflags & ATA_LFLAG_ASSUME_CLASS))
			lflags |= ATA_LFLAG_ASSUME_ATA;
	}

	/*
	 * Post-reset processing
	 */
	ata_for_each_dev(dev, link, ALL) {
		/* After the reset, the device state is PIO 0 and the
		 * controller state is undefined. Reset also wakes up
		 * drives from sleeping mode.
		 */
		dev->pio_mode = XFER_PIO_0;
		dev->flags &= ~ATA_DFLAG_SLEEPING;

		if (ata_phys_link_offline(ata_dev_phys_link(dev)))
			continue;

		/* apply class override */
		if (lflags & ATA_LFLAG_ASSUME_ATA)
			classes[dev->devno] = ATA_DEV_ATA;
		else if (lflags & ATA_LFLAG_ASSUME_SEMB)
			classes[dev->devno] = ATA_DEV_SEMB_UNSUP;
	}

	/* record current link speed */
	if (sata_scr_read(link, SCR_STATUS, &sstatus) == 0)
		link->sata_spd = (sstatus >> 4) & 0xf;
	if (slave && sata_scr_read(slave, SCR_STATUS, &sstatus) == 0)
		slave->sata_spd = (sstatus >> 4) & 0xf;

	/* thaw the port */
	if (ata_is_host_link(link))
		ata_eh_thaw_port(ap);

	/* postreset() should clear hardware SError. Although SError
	 * is cleared during link resume, clearing SError here is
	 * necessary as some PHYs raise hotplug events after SRST.
	 * This introduces a race condition where hotplug occurs between
	 * reset and here. This race is mediated by cross checking
	 * link onlineness and classification result later.
	 */
	if (postreset) {
		postreset(link, classes);
		trace_ata_link_postreset(link, classes, rc);
		if (slave) {
			postreset(slave, classes);
			trace_ata_slave_postreset(slave, classes, rc);
		}
	}

	/*
	 * Some controllers can't be frozen very well and may set spurious
	 * error conditions during reset. Clear accumulated error
	 * information and re-thaw the port if frozen. As reset is the
	 * final recovery action and we cross check link onlineness against
	 * device classification later, no hotplug event is lost by this.
	 */
	spin_lock_irqsave(link->ap->lock, flags);
	memset(&link->eh_info, 0, sizeof(link->eh_info));
	if (slave)
		memset(&slave->eh_info, 0, sizeof(link->eh_info));
	ap->pflags &= ~ATA_PFLAG_EH_PENDING;
	spin_unlock_irqrestore(link->ap->lock, flags);

	if (ap->pflags & ATA_PFLAG_FROZEN)
		ata_eh_thaw_port(ap);

	/*
	 * Make sure onlineness and classification result correspond.
	 * Hotplug could have happened during reset and some
	 * controllers fail to wait while a drive is spinning up after
	 * being hotplugged causing misdetection. By cross checking
	 * link on/offlineness and classification result, those
	 * conditions can be reliably detected and retried.
	 */
	nr_unknown = 0;
	ata_for_each_dev(dev, link, ALL) {
		if (ata_phys_link_online(ata_dev_phys_link(dev))) {
			if (classes[dev->devno] == ATA_DEV_UNKNOWN) {
				ata_dev_dbg(dev, "link online but device misclassified\n");
				classes[dev->devno] = ATA_DEV_NONE;
				nr_unknown++;
			}
		} else if (ata_phys_link_offline(ata_dev_phys_link(dev))) {
			if (ata_class_enabled(classes[dev->devno]))
				ata_dev_dbg(dev,
					    "link offline, clearing class %d to NONE\n",
					    classes[dev->devno]);
			classes[dev->devno] = ATA_DEV_NONE;
		} else if (classes[dev->devno] == ATA_DEV_UNKNOWN) {
			ata_dev_dbg(dev,
				    "link status unknown, clearing UNKNOWN to NONE\n");
			classes[dev->devno] = ATA_DEV_NONE;
		}
	}

	if (classify && nr_unknown) {
		if (try < max_tries) {
			ata_link_warn(link,
				      "link online but %d devices misclassified, retrying\n",
				      nr_unknown);
			failed_link = link;
			rc = -EAGAIN;
			goto fail;
		}
		ata_link_warn(link,
			      "link online but %d devices misclassified, "
			      "device detection might fail\n", nr_unknown);
	}

	/* reset successful, schedule revalidation */
	ata_eh_done(link, NULL, ATA_EH_RESET);
	if (slave)
		ata_eh_done(slave, NULL, ATA_EH_RESET);
	ehc->last_reset = jiffies;		/* update to completion time */
	ehc->i.action |= ATA_EH_REVALIDATE;
	link->lpm_policy = ATA_LPM_UNKNOWN;	/* reset LPM state */

	rc = 0;
 out:
	/* clear hotplug flag */
	ehc->i.flags &= ~ATA_EHI_HOTPLUGGED;
	if (slave)
		sehc->i.flags &= ~ATA_EHI_HOTPLUGGED;

	spin_lock_irqsave(ap->lock, flags);
	ap->pflags &= ~ATA_PFLAG_RESETTING;
	spin_unlock_irqrestore(ap->lock, flags);

	return rc;

 fail:
	/* if SCR isn't accessible on a fan-out port, PMP needs to be reset */
	if (!ata_is_host_link(link) &&
	    sata_scr_read(link, SCR_STATUS, &sstatus))
		rc = -ERESTART;

	if (try >= max_tries) {
		/*
		 * Thaw host port even if reset failed, so that the port
		 * can be retried on the next phy event. This risks
		 * repeated EH runs but seems to be a better tradeoff than
		 * shutting down a port after a botched hotplug attempt.
		 */
		if (ata_is_host_link(link))
			ata_eh_thaw_port(ap);
		goto out;
	}

	now = jiffies;
	if (time_before(now, deadline)) {
		unsigned long delta = deadline - now;

		ata_link_warn(failed_link,
			"reset failed (errno=%d), retrying in %u secs\n",
			rc, DIV_ROUND_UP(jiffies_to_msecs(delta), 1000));

		ata_eh_release(ap);
		while (delta)
			delta = schedule_timeout_uninterruptible(delta);
		ata_eh_acquire(ap);
	}

	/*
	 * While disks spin up behind a PMP, some controllers fail sending SRST.
	 * They need to be reset - as well as the PMP - before retrying.
	 */
	if (rc == -ERESTART) {
		if (ata_is_host_link(link))
			ata_eh_thaw_port(ap);
		goto out;
	}

	if (try == max_tries - 1) {
		sata_down_spd_limit(link, 0);
		if (slave)
			sata_down_spd_limit(slave, 0);
	} else if (rc == -EPIPE)
		sata_down_spd_limit(failed_link, 0);

	if (hardreset)
		reset = hardreset;
	goto retry;
}
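
/*
 * Illustrative sketch (not part of the original file): ata_eh_reset()
 * is normally reached through ata_do_eh() with whatever reset methods a
 * low-level driver placed in its ata_port_operations.  A SATA driver
 * with working SCR access can often rely on the standard methods; the
 * "foo_ops" name below is hypothetical.
 */
#if 0	/* example only */
static struct ata_port_operations foo_ops = {
	.inherits	= &sata_port_ops,
	.prereset	= ata_std_prereset,	/* wait for link readiness */
	.hardreset	= sata_std_hardreset,	/* COMRESET via SCR_CONTROL */
	.postreset	= ata_std_postreset,	/* clear SError, print class */
};
#endif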
static inline void ata_eh_pull_park_action(struct ata_port *ap)
{
	struct ata_link *link;
	struct ata_device *dev;
	unsigned long flags;

	/*
	 * This function can be thought of as an extended version of
	 * ata_eh_about_to_do() specially crafted to accommodate the
	 * requirements of ATA_EH_PARK handling. Since the EH thread
	 * does not leave the do {} while () loop in ata_eh_recover as
	 * long as the timeout for a park request to *one* device on
	 * the port has not expired, and since we still want to pick
	 * up park requests to other devices on the same port or
	 * timeout updates for the same device, we have to pull
	 * ATA_EH_PARK actions from eh_info into eh_context.i
	 * ourselves at the beginning of each pass over the loop.
	 *
	 * Additionally, all write accesses to &ap->park_req_pending
	 * through reinit_completion() (see below) or complete_all()
	 * (see ata_scsi_park_store()) are protected by the host lock.
	 * As a result we have that park_req_pending.done is zero on
	 * exit from this function, i.e. when ATA_EH_PARK actions for
	 * *all* devices on port ap have been pulled into the
	 * respective eh_context structs. If, and only if,
	 * park_req_pending.done is non-zero by the time we reach
	 * wait_for_completion_timeout(), another ATA_EH_PARK action
	 * has been scheduled for at least one of the devices on port
	 * ap and we have to cycle over the do {} while () loop in
	 * ata_eh_recover() again.
	 */

	spin_lock_irqsave(ap->lock, flags);
	reinit_completion(&ap->park_req_pending);
	ata_for_each_link(link, ap, EDGE) {
		ata_for_each_dev(dev, link, ALL) {
			struct ata_eh_info *ehi = &link->eh_info;

			link->eh_context.i.dev_action[dev->devno] |=
				ehi->dev_action[dev->devno] & ATA_EH_PARK;
			ata_eh_clear_action(link, dev, ehi, ATA_EH_PARK);
		}
	}
	spin_unlock_irqrestore(ap->lock, flags);
}
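
/*
 * Illustrative note (not part of the original file): the ATA_EH_PARK
 * requests consumed above originate from userspace writing a timeout in
 * milliseconds to the per-device sysfs attribute, e.g.:
 *
 *	echo 2000 > /sys/block/sda/device/unload_heads
 *
 * which asks EH to park the heads for roughly two seconds; see
 * ata_scsi_park_store() and
 * Documentation/admin-guide/laptops/disk-shock-protection.rst.
 */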
static void ata_eh_park_issue_cmd(struct ata_device *dev, int park)
{
	struct ata_eh_context *ehc = &dev->link->eh_context;
	struct ata_taskfile tf;
	unsigned int err_mask;

	ata_tf_init(dev, &tf);
	if (park) {
		/* IDLE IMMEDIATE with UNLOAD feature: per the ATA spec,
		 * FEATURE 0x44 plus the 0x55:0x4e:0x4c ("UNL") signature
		 * in the LBA fields requests a head unload; on success
		 * the device returns 0xc4 in LBA low (checked below).
		 */
		ehc->unloaded_mask |= 1 << dev->devno;
		tf.command = ATA_CMD_IDLEIMMEDIATE;
		tf.feature = 0x44;
		tf.lbal = 0x4c;
		tf.lbam = 0x4e;
		tf.lbah = 0x55;
	} else {
		ehc->unloaded_mask &= ~(1 << dev->devno);
		tf.command = ATA_CMD_CHK_POWER;
	}

	tf.flags |= ATA_TFLAG_DEVICE | ATA_TFLAG_ISADDR;
	tf.protocol = ATA_PROT_NODATA;
	err_mask = ata_exec_internal(dev, &tf, NULL, DMA_NONE, NULL, 0, 0);
	if (park && (err_mask || tf.lbal != 0xc4)) {
		ata_dev_err(dev, "head unload failed!\n");
		ehc->unloaded_mask &= ~(1 << dev->devno);
	}
}
static int ata_eh_revalidate_and_attach(struct ata_link *link,
					struct ata_device **r_failed_dev)
{
	struct ata_port *ap = link->ap;
	struct ata_eh_context *ehc = &link->eh_context;
	struct ata_device *dev;
	unsigned int new_mask = 0;
	unsigned long flags;
	int rc = 0;

	/* For PATA drive side cable detection to work, IDENTIFY must
	 * be done backwards such that PDIAG- is released by the slave
	 * device before the master device is identified.
	 */
	ata_for_each_dev(dev, link, ALL_REVERSE) {
		unsigned int action = ata_eh_dev_action(dev);
		unsigned int readid_flags = 0;

		if (ehc->i.flags & ATA_EHI_DID_RESET)
			readid_flags |= ATA_READID_POSTRESET;

		if ((action & ATA_EH_REVALIDATE) && ata_dev_enabled(dev)) {
			WARN_ON(dev->class == ATA_DEV_PMP);

			if (ata_phys_link_offline(ata_dev_phys_link(dev))) {
				rc = -EIO;
				goto err;
			}

			ata_eh_about_to_do(link, dev, ATA_EH_REVALIDATE);
			rc = ata_dev_revalidate(dev, ehc->classes[dev->devno],
						readid_flags);
			if (rc)
				goto err;

			ata_eh_done(link, dev, ATA_EH_REVALIDATE);

			/* Configuration may have changed, reconfigure
			 * transfer mode.
			 */
			ehc->i.flags |= ATA_EHI_SETMODE;

			/* schedule the scsi_rescan_device() here */
			schedule_work(&(ap->scsi_rescan_task));
		} else if (dev->class == ATA_DEV_UNKNOWN &&
			   ehc->tries[dev->devno] &&
			   ata_class_enabled(ehc->classes[dev->devno])) {
			/* Temporarily set dev->class, it will be
			 * permanently set once all configurations are
			 * complete. This is necessary because new
			 * device configuration is done in two
			 * separate loops.
			 */
			dev->class = ehc->classes[dev->devno];

			if (dev->class == ATA_DEV_PMP)
				rc = sata_pmp_attach(dev);
			else
				rc = ata_dev_read_id(dev, &dev->class,
						     readid_flags, dev->id);

			/* read_id might have changed class, store and reset */
			ehc->classes[dev->devno] = dev->class;
			dev->class = ATA_DEV_UNKNOWN;

			switch (rc) {
			case 0:
				/* clear error info accumulated during probe */
				ata_ering_clear(&dev->ering);
				new_mask |= 1 << dev->devno;
				break;
			case -ENOENT:
				/* IDENTIFY was issued to non-existent
				 * device. No need to reset. Just
				 * thaw and ignore the device.
				 */
				ata_eh_thaw_port(ap);
				break;
			default:
				goto err;
			}
		}
	}

	/* PDIAG- should have been released, ask cable type if post-reset */
	if ((ehc->i.flags & ATA_EHI_DID_RESET) && ata_is_host_link(link)) {
		if (ap->ops->cable_detect)
			ap->cbl = ap->ops->cable_detect(ap);
		ata_force_cbl(ap);
	}

	/* Configure new devices forward such that user doesn't see
	 * device detection messages backwards.
	 */
	ata_for_each_dev(dev, link, ALL) {
		if (!(new_mask & (1 << dev->devno)))
			continue;

		dev->class = ehc->classes[dev->devno];

		if (dev->class == ATA_DEV_PMP)
			continue;

		ehc->i.flags |= ATA_EHI_PRINTINFO;
		rc = ata_dev_configure(dev);
		ehc->i.flags &= ~ATA_EHI_PRINTINFO;
		if (rc) {
			dev->class = ATA_DEV_UNKNOWN;
			goto err;
		}

		spin_lock_irqsave(ap->lock, flags);
		ap->pflags |= ATA_PFLAG_SCSI_HOTPLUG;
		spin_unlock_irqrestore(ap->lock, flags);

		/* new device discovered, configure xfermode */
		ehc->i.flags |= ATA_EHI_SETMODE;
	}

	return 0;

 err:
	*r_failed_dev = dev;
	return rc;
}
/**
 *	ata_set_mode - Program timings and issue SET FEATURES - XFER
 *	@link: link on which timings will be programmed
 *	@r_failed_dev: out parameter for failed device
 *
 *	Set ATA device disk transfer mode (PIO3, UDMA6, etc.). If
 *	ata_set_mode() fails, pointer to the failing device is
 *	returned in @r_failed_dev.
 *
 *	LOCKING:
 *	PCI/etc. bus probe sem.
 *
 *	RETURNS:
 *	0 on success, negative errno otherwise
 */
int ata_set_mode(struct ata_link *link, struct ata_device **r_failed_dev)
{
	struct ata_port *ap = link->ap;
	struct ata_device *dev;
	int rc;

	/* if data transfer is verified, clear DUBIOUS_XFER on ering top */
	ata_for_each_dev(dev, link, ENABLED) {
		if (!(dev->flags & ATA_DFLAG_DUBIOUS_XFER)) {
			struct ata_ering_entry *ent;

			ent = ata_ering_top(&dev->ering);
			if (ent)
				ent->eflags &= ~ATA_EFLAG_DUBIOUS_XFER;
		}
	}

	/* has private set_mode? */
	if (ap->ops->set_mode)
		rc = ap->ops->set_mode(link, r_failed_dev);
	else
		rc = ata_do_set_mode(link, r_failed_dev);

	/* if transfer mode has changed, set DUBIOUS_XFER on device */
	ata_for_each_dev(dev, link, ENABLED) {
		struct ata_eh_context *ehc = &link->eh_context;
		u8 saved_xfer_mode = ehc->saved_xfer_mode[dev->devno];
		u8 saved_ncq = !!(ehc->saved_ncq_enabled & (1 << dev->devno));

		if (dev->xfer_mode != saved_xfer_mode ||
		    ata_ncq_enabled(dev) != saved_ncq)
			dev->flags |= ATA_DFLAG_DUBIOUS_XFER;
	}

	return rc;
}
/**
 *	atapi_eh_clear_ua - Clear ATAPI UNIT ATTENTION after reset
 *	@dev: ATAPI device to clear UA for
 *
 *	Resets and other operations can make an ATAPI device raise
 *	UNIT ATTENTION which causes the next operation to fail. This
 *	function clears UA.
 *
 *	LOCKING:
 *	EH context (may sleep).
 *
 *	RETURNS:
 *	0 on success, -errno on failure.
 */
static int atapi_eh_clear_ua(struct ata_device *dev)
{
	int i;

	for (i = 0; i < ATA_EH_UA_TRIES; i++) {
		u8 *sense_buffer = dev->link->ap->sector_buf;
		u8 sense_key = 0;
		unsigned int err_mask;

		err_mask = atapi_eh_tur(dev, &sense_key);
		if (err_mask != 0 && err_mask != AC_ERR_DEV) {
			ata_dev_warn(dev,
				     "TEST_UNIT_READY failed (err_mask=0x%x)\n",
				     err_mask);
			return -EIO;
		}

		if (!err_mask || sense_key != UNIT_ATTENTION)
			return 0;

		err_mask = atapi_eh_request_sense(dev, sense_buffer, sense_key);
		if (err_mask) {
			ata_dev_warn(dev, "failed to clear "
				"UNIT ATTENTION (err_mask=0x%x)\n", err_mask);
			return -EIO;
		}
	}

	ata_dev_warn(dev, "UNIT ATTENTION persists after %d tries\n",
		     ATA_EH_UA_TRIES);

	return 0;
}
/**
 *	ata_eh_maybe_retry_flush - Retry FLUSH if necessary
 *	@dev: ATA device which may need FLUSH retry
 *
 *	If @dev failed FLUSH, it needs to be reported to the upper layer
 *	immediately as it means that @dev failed to remap and already
 *	lost at least a sector and further FLUSH retrials won't make
 *	any difference to the lost sector. However, if FLUSH failed
 *	for other reasons, for example transmission error, FLUSH needs
 *	to be retried.
 *
 *	This function determines whether FLUSH failure retry is
 *	necessary and performs it if so.
 *
 *	RETURNS:
 *	0 if EH can continue, -errno if EH needs to be repeated.
 */
static int ata_eh_maybe_retry_flush(struct ata_device *dev)
{
	struct ata_link *link = dev->link;
	struct ata_port *ap = link->ap;
	struct ata_queued_cmd *qc;
	struct ata_taskfile tf;
	unsigned int err_mask;
	int rc = 0;

	/* did flush fail for this device? */
	if (!ata_tag_valid(link->active_tag))
		return 0;

	qc = __ata_qc_from_tag(ap, link->active_tag);
	if (qc->dev != dev || (qc->tf.command != ATA_CMD_FLUSH_EXT &&
			       qc->tf.command != ATA_CMD_FLUSH))
		return 0;

	/* if the device failed it, it should be reported to upper layers */
	if (qc->err_mask & AC_ERR_DEV)
		return 0;

	/* flush failed for some other reason, give it another shot */
	ata_tf_init(dev, &tf);

	tf.command = qc->tf.command;
	tf.flags |= ATA_TFLAG_DEVICE;
	tf.protocol = ATA_PROT_NODATA;

	ata_dev_warn(dev, "retrying FLUSH 0x%x Emask 0x%x\n",
		       tf.command, qc->err_mask);

	err_mask = ata_exec_internal(dev, &tf, NULL, DMA_NONE, NULL, 0, 0);
	if (!err_mask) {
		/*
		 * FLUSH is complete but there's no way to
		 * successfully complete a failed command from EH.
		 * Making sure retry is allowed at least once and
		 * retrying it should do the trick - whatever was in
		 * the cache is already on the platter and this won't
		 * cause infinite loop.
		 */
		qc->scsicmd->allowed = max(qc->scsicmd->allowed, 1);
	} else {
		ata_dev_warn(dev, "FLUSH failed Emask 0x%x\n",
			       err_mask);
		rc = -EIO;

		/* if device failed it, report it to upper layers */
		if (err_mask & AC_ERR_DEV) {
			qc->err_mask |= AC_ERR_DEV;
			qc->result_tf = tf;
			if (!(ap->pflags & ATA_PFLAG_FROZEN))
				rc = 0;
		}
	}
	return rc;
}
/**
 *	ata_eh_set_lpm - configure SATA interface power management
 *	@link: link to configure power management
 *	@policy: the link power management policy
 *	@r_failed_dev: out parameter for failed device
 *
 *	Enable SATA Interface power management. This will enable
 *	Device Interface Power Management (DIPM) for min_power and
 *	medium_power_with_dipm policies, and then call driver specific
 *	callbacks for enabling Host Initiated Power management.
 *
 *	LOCKING:
 *	EH context.
 *
 *	RETURNS:
 *	0 on success, -errno on failure.
 */
static int ata_eh_set_lpm(struct ata_link *link, enum ata_lpm_policy policy,
			  struct ata_device **r_failed_dev)
{
	struct ata_port *ap = ata_is_host_link(link) ? link->ap : NULL;
	struct ata_eh_context *ehc = &link->eh_context;
	struct ata_device *dev, *link_dev = NULL, *lpm_dev = NULL;
	enum ata_lpm_policy old_policy = link->lpm_policy;
	bool no_dipm = link->ap->flags & ATA_FLAG_NO_DIPM;
	unsigned int hints = ATA_LPM_EMPTY | ATA_LPM_HIPM;
	unsigned int err_mask;
	int rc;

	/* if the link or host doesn't do LPM, noop */
	if (!IS_ENABLED(CONFIG_SATA_HOST) ||
	    (link->flags & ATA_LFLAG_NO_LPM) || (ap && !ap->ops->set_lpm))
		return 0;

	/*
	 * DIPM is enabled only for MIN_POWER as some devices
	 * misbehave when the host NACKs transition to SLUMBER. Order
	 * device and link configurations such that the host always
	 * allows DIPM requests.
	 */
	ata_for_each_dev(dev, link, ENABLED) {
		bool hipm = ata_id_has_hipm(dev->id);
		bool dipm = ata_id_has_dipm(dev->id) && !no_dipm;

		/* find the first enabled and LPM enabled devices */
		if (!link_dev)
			link_dev = dev;

		if (!lpm_dev && (hipm || dipm))
			lpm_dev = dev;

		hints &= ~ATA_LPM_EMPTY;
		if (!hipm)
			hints &= ~ATA_LPM_HIPM;

		/* disable DIPM before changing link config */
		if (policy < ATA_LPM_MED_POWER_WITH_DIPM && dipm) {
			err_mask = ata_dev_set_feature(dev,
					SETFEATURES_SATA_DISABLE, SATA_DIPM);
			if (err_mask && err_mask != AC_ERR_DEV) {
				ata_dev_warn(dev,
					     "failed to disable DIPM, Emask 0x%x\n",
					     err_mask);
				rc = -EIO;
				goto fail;
			}
		}
	}

	if (ap) {
		rc = ap->ops->set_lpm(link, policy, hints);
		if (!rc && ap->slave_link)
			rc = ap->ops->set_lpm(ap->slave_link, policy, hints);
	} else
		rc = sata_pmp_set_lpm(link, policy, hints);

	/*
	 * Attribute link config failure to the first (LPM) enabled
	 * device on the link.
	 */
	if (rc) {
		if (rc == -EOPNOTSUPP) {
			link->flags |= ATA_LFLAG_NO_LPM;
			return 0;
		}
		dev = lpm_dev ? lpm_dev : link_dev;
		goto fail;
	}

	/*
	 * Low level driver acked the transition. Issue DIPM command
	 * with the new policy set.
	 */
	link->lpm_policy = policy;
	if (ap && ap->slave_link)
		ap->slave_link->lpm_policy = policy;

	/* host config updated, enable DIPM if transitioning to MIN_POWER */
	ata_for_each_dev(dev, link, ENABLED) {
		if (policy >= ATA_LPM_MED_POWER_WITH_DIPM && !no_dipm &&
		    ata_id_has_dipm(dev->id)) {
			err_mask = ata_dev_set_feature(dev,
					SETFEATURES_SATA_ENABLE, SATA_DIPM);
			if (err_mask && err_mask != AC_ERR_DEV) {
				ata_dev_warn(dev,
					"failed to enable DIPM, Emask 0x%x\n",
					err_mask);
				rc = -EIO;
				goto fail;
			}
		}
	}

	link->last_lpm_change = jiffies;
	link->flags |= ATA_LFLAG_CHANGED;

	return 0;

fail:
	/* restore the old policy */
	link->lpm_policy = old_policy;
	if (ap && ap->slave_link)
		ap->slave_link->lpm_policy = old_policy;

	/* if no device or only one more chance is left, disable LPM */
	if (!dev || ehc->tries[dev->devno] <= 2) {
		ata_link_warn(link, "disabling LPM on the link\n");
		link->flags |= ATA_LFLAG_NO_LPM;
	}
	if (r_failed_dev)
		*r_failed_dev = dev;
	return rc;
}
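
/*
 * Illustrative note (not part of the original file): the target policy
 * applied above normally comes from userspace via the SCSI host sysfs
 * attribute, e.g.:
 *
 *	echo med_power_with_dipm > \
 *		/sys/class/scsi_host/host0/link_power_management_policy
 *
 * Accepted values include max_performance, medium_power,
 * med_power_with_dipm and min_power.
 */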
int ata_link_nr_enabled(struct ata_link *link)
{
	struct ata_device *dev;
	int cnt = 0;

	ata_for_each_dev(dev, link, ENABLED)
		cnt++;
	return cnt;
}

static int ata_link_nr_vacant(struct ata_link *link)
{
	struct ata_device *dev;
	int cnt = 0;

	ata_for_each_dev(dev, link, ALL)
		if (dev->class == ATA_DEV_UNKNOWN)
			cnt++;
	return cnt;
}
static int ata_eh_skip_recovery(struct ata_link *link)
{
	struct ata_port *ap = link->ap;
	struct ata_eh_context *ehc = &link->eh_context;
	struct ata_device *dev;

	/* skip disabled links */
	if (link->flags & ATA_LFLAG_DISABLED)
		return 1;

	/* skip if explicitly requested */
	if (ehc->i.flags & ATA_EHI_NO_RECOVERY)
		return 1;

	/* thaw frozen port and recover failed devices */
	if ((ap->pflags & ATA_PFLAG_FROZEN) || ata_link_nr_enabled(link))
		return 0;

	/* reset at least once if reset is requested */
	if ((ehc->i.action & ATA_EH_RESET) &&
	    !(ehc->i.flags & ATA_EHI_DID_RESET))
		return 0;

	/* skip if class codes for all vacant slots are ATA_DEV_NONE */
	ata_for_each_dev(dev, link, ALL) {
		if (dev->class == ATA_DEV_UNKNOWN &&
		    ehc->classes[dev->devno] != ATA_DEV_NONE)
			return 0;
	}

	return 1;
}
static int ata_count_probe_trials_cb(struct ata_ering_entry *ent, void *void_arg)
{
	u64 interval = msecs_to_jiffies(ATA_EH_PROBE_TRIAL_INTERVAL);
	u64 now = get_jiffies_64();
	int *trials = void_arg;

	if ((ent->eflags & ATA_EFLAG_OLD_ER) ||
	    (ent->timestamp < now - min(now, interval)))
		return -1;

	(*trials)++;
	return 0;
}
static int ata_eh_schedule_probe(struct ata_device *dev)
{
	struct ata_eh_context *ehc = &dev->link->eh_context;
	struct ata_link *link = ata_dev_phys_link(dev);
	int trials = 0;

	if (!(ehc->i.probe_mask & (1 << dev->devno)) ||
	    (ehc->did_probe_mask & (1 << dev->devno)))
		return 0;

	ata_eh_detach_dev(dev);
	ata_dev_init(dev);
	ehc->did_probe_mask |= (1 << dev->devno);
	ehc->i.action |= ATA_EH_RESET;
	ehc->saved_xfer_mode[dev->devno] = 0;
	ehc->saved_ncq_enabled &= ~(1 << dev->devno);

	/* the link may be in deep sleep, wake it up */
	if (link->lpm_policy > ATA_LPM_MAX_POWER) {
		if (ata_is_host_link(link))
			link->ap->ops->set_lpm(link, ATA_LPM_MAX_POWER,
					       ATA_LPM_EMPTY);
		else
			sata_pmp_set_lpm(link, ATA_LPM_MAX_POWER,
					 ATA_LPM_EMPTY);
	}

	/* Record and count probe trials on the ering. The specific
	 * error mask used is irrelevant. Because a successful device
	 * detection clears the ering, this count accumulates only if
	 * there are consecutive failed probes.
	 *
	 * If the count is equal to or higher than ATA_EH_PROBE_TRIALS
	 * in the last ATA_EH_PROBE_TRIAL_INTERVAL, link speed is
	 * forced to 1.5Gbps.
	 *
	 * This is to work around cases where failed link speed
	 * negotiation results in device misdetection leading to
	 * infinite DEVXCHG or PHRDY CHG events.
	 */
	ata_ering_record(&dev->ering, 0, AC_ERR_OTHER);
	ata_ering_map(&dev->ering, ata_count_probe_trials_cb, &trials);

	if (trials > ATA_EH_PROBE_TRIALS)
		sata_down_spd_limit(link, 1);

	return 1;
}
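
/*
 * Worked example (not part of the original file): with
 * ATA_EH_PROBE_TRIALS == 2 and ATA_EH_PROBE_TRIAL_INTERVAL == 60000,
 * each failed probe leaves one ering entry behind; once more than two
 * such entries fall within the last minute, sata_down_spd_limit(link, 1)
 * caps the link at 1.5 Gbps so that a flaky 3/6 Gbps negotiation cannot
 * keep retriggering probe attempts forever.
 */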
static int ata_eh_handle_dev_fail(struct ata_device *dev, int err)
{
	struct ata_eh_context *ehc = &dev->link->eh_context;

	/* -EAGAIN from EH routine indicates retry without prejudice.
	 * The requester is responsible for ensuring forward progress.
	 */
	if (err != -EAGAIN)
		ehc->tries[dev->devno]--;

	switch (err) {
	case -ENODEV:
		/* device missing or wrong IDENTIFY data, schedule probing */
		ehc->i.probe_mask |= (1 << dev->devno);
		fallthrough;
	case -EINVAL:
		/* give it just one more chance */
		ehc->tries[dev->devno] = min(ehc->tries[dev->devno], 1);
		fallthrough;
	case -EIO:
		if (ehc->tries[dev->devno] == 1) {
			/* This is the last chance, better to slow
			 * down than lose it.
			 */
			sata_down_spd_limit(ata_dev_phys_link(dev), 0);
			if (dev->pio_mode > XFER_PIO_0)
				ata_down_xfermask_limit(dev, ATA_DNXFER_PIO);
		}
	}

	if (ata_dev_enabled(dev) && !ehc->tries[dev->devno]) {
		/* disable device if it has used up all its chances */
		ata_dev_disable(dev);

		/* detach if offline */
		if (ata_phys_link_offline(ata_dev_phys_link(dev)))
			ata_eh_detach_dev(dev);

		/* schedule probe if necessary */
		if (ata_eh_schedule_probe(dev)) {
			ehc->tries[dev->devno] = ATA_EH_DEV_TRIES;
			memset(ehc->cmd_timeout_idx[dev->devno], 0,
			       sizeof(ehc->cmd_timeout_idx[dev->devno]));
		}

		return 1;
	} else {
		ehc->i.action |= ATA_EH_RESET;
		return 0;
	}
}
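
/*
 * Illustrative note (assumes ATA_EH_DEV_TRIES is 3, its definition in
 * libata.h at the time of writing): a device that keeps failing with
 * -EIO is retried; before its final try the SATA link speed is lowered
 * and the transfer mode dropped toward PIO, and only once all tries are
 * consumed is the device disabled (and possibly re-probed from scratch).
 */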
/**
 *	ata_eh_recover - recover host port after error
 *	@ap: host port to recover
 *	@prereset: prereset method (can be NULL)
 *	@softreset: softreset method (can be NULL)
 *	@hardreset: hardreset method (can be NULL)
 *	@postreset: postreset method (can be NULL)
 *	@r_failed_link: out parameter for failed link
 *
 *	This is the alpha and omega, eum and yang, heart and soul of
 *	libata exception handling. On entry, actions required to
 *	recover each link and hotplug requests are recorded in the
 *	link's eh_context. This function executes all the operations
 *	with appropriate retrials and fallbacks to resurrect failed
 *	devices, detach goners and greet newcomers.
 *
 *	LOCKING:
 *	Kernel thread context (may sleep).
 *
 *	RETURNS:
 *	0 on success, -errno on failure.
 */
int ata_eh_recover(struct ata_port *ap, ata_prereset_fn_t prereset,
		   ata_reset_fn_t softreset, ata_reset_fn_t hardreset,
		   ata_postreset_fn_t postreset,
		   struct ata_link **r_failed_link)
{
	struct ata_link *link;
	struct ata_device *dev;
	int rc, nr_fails;
	unsigned long flags, deadline;

	/* prep for recovery */
	ata_for_each_link(link, ap, EDGE) {
		struct ata_eh_context *ehc = &link->eh_context;

		/* re-enable link? */
		if (ehc->i.action & ATA_EH_ENABLE_LINK) {
			ata_eh_about_to_do(link, NULL, ATA_EH_ENABLE_LINK);
			spin_lock_irqsave(ap->lock, flags);
			link->flags &= ~ATA_LFLAG_DISABLED;
			spin_unlock_irqrestore(ap->lock, flags);
			ata_eh_done(link, NULL, ATA_EH_ENABLE_LINK);
		}

		ata_for_each_dev(dev, link, ALL) {
			if (link->flags & ATA_LFLAG_NO_RETRY)
				ehc->tries[dev->devno] = 1;
			else
				ehc->tries[dev->devno] = ATA_EH_DEV_TRIES;

			/* collect port action mask recorded in dev actions */
			ehc->i.action |= ehc->i.dev_action[dev->devno] &
					 ~ATA_EH_PERDEV_MASK;
			ehc->i.dev_action[dev->devno] &= ATA_EH_PERDEV_MASK;

			/* process hotplug request */
			if (dev->flags & ATA_DFLAG_DETACH)
				ata_eh_detach_dev(dev);

			/* schedule probe if necessary */
			if (!ata_dev_enabled(dev))
				ata_eh_schedule_probe(dev);
		}
	}

 retry:
	rc = 0;

	/* if UNLOADING, finish immediately */
	if (ap->pflags & ATA_PFLAG_UNLOADING)
		goto out;

	/* prep for EH */
	ata_for_each_link(link, ap, EDGE) {
		struct ata_eh_context *ehc = &link->eh_context;

		/* skip EH if possible. */
		if (ata_eh_skip_recovery(link))
			ehc->i.action = 0;

		ata_for_each_dev(dev, link, ALL)
			ehc->classes[dev->devno] = ATA_DEV_UNKNOWN;
	}

	/* reset */
	ata_for_each_link(link, ap, EDGE) {
		struct ata_eh_context *ehc = &link->eh_context;

		if (!(ehc->i.action & ATA_EH_RESET))
			continue;

		rc = ata_eh_reset(link, ata_link_nr_vacant(link),
				  prereset, softreset, hardreset, postreset);
		if (rc) {
			ata_link_err(link, "reset failed, giving up\n");
			goto out;
		}
	}

	do {
		unsigned long now;

		/*
		 * clears ATA_EH_PARK in eh_info and resets
		 * ap->park_req_pending
		 */
		ata_eh_pull_park_action(ap);

		deadline = jiffies;
		ata_for_each_link(link, ap, EDGE) {
			ata_for_each_dev(dev, link, ALL) {
				struct ata_eh_context *ehc = &link->eh_context;
				unsigned long tmp;

				if (dev->class != ATA_DEV_ATA &&
				    dev->class != ATA_DEV_ZAC)
					continue;
				if (!(ehc->i.dev_action[dev->devno] &
				      ATA_EH_PARK))
					continue;
				tmp = dev->unpark_deadline;
				if (time_before(deadline, tmp))
					deadline = tmp;
				else if (time_before_eq(tmp, jiffies))
					continue;
				if (ehc->unloaded_mask & (1 << dev->devno))
					continue;

				ata_eh_park_issue_cmd(dev, 1);
			}
		}

		now = jiffies;
		if (time_before_eq(deadline, now))
			break;

		ata_eh_release(ap);
		deadline = wait_for_completion_timeout(&ap->park_req_pending,
						       deadline - now);
		ata_eh_acquire(ap);
	} while (deadline);
	ata_for_each_link(link, ap, EDGE) {
		ata_for_each_dev(dev, link, ALL) {
			if (!(link->eh_context.unloaded_mask &
			      (1 << dev->devno)))
				continue;

			ata_eh_park_issue_cmd(dev, 0);
			ata_eh_done(link, dev, ATA_EH_PARK);
		}
	}

	/* the rest */
	nr_fails = 0;
	ata_for_each_link(link, ap, PMP_FIRST) {
		struct ata_eh_context *ehc = &link->eh_context;

		if (sata_pmp_attached(ap) && ata_is_host_link(link))
			goto config_lpm;

		/* revalidate existing devices and attach new ones */
		rc = ata_eh_revalidate_and_attach(link, &dev);
		if (rc)
			goto rest_fail;

		/* if PMP got attached, return, pmp EH will take care of it */
		if (link->device->class == ATA_DEV_PMP) {
			ehc->i.action = 0;
			return 0;
		}

		/* configure transfer mode if necessary */
		if (ehc->i.flags & ATA_EHI_SETMODE) {
			rc = ata_set_mode(link, &dev);
			if (rc)
				goto rest_fail;
			ehc->i.flags &= ~ATA_EHI_SETMODE;
		}

		/* If reset has been issued, clear UA to avoid
		 * disrupting the current users of the device.
		 */
		if (ehc->i.flags & ATA_EHI_DID_RESET) {
			ata_for_each_dev(dev, link, ALL) {
				if (dev->class != ATA_DEV_ATAPI)
					continue;
				rc = atapi_eh_clear_ua(dev);
				if (rc)
					goto rest_fail;
				if (zpodd_dev_enabled(dev))
					zpodd_post_poweron(dev);
			}
		}

		/* retry flush if necessary */
		ata_for_each_dev(dev, link, ALL) {
			if (dev->class != ATA_DEV_ATA &&
			    dev->class != ATA_DEV_ZAC)
				continue;
			rc = ata_eh_maybe_retry_flush(dev);
			if (rc)
				goto rest_fail;
		}

	config_lpm:
		/* configure link power saving */
		if (link->lpm_policy != ap->target_lpm_policy) {
			rc = ata_eh_set_lpm(link, ap->target_lpm_policy, &dev);
			if (rc)
				goto rest_fail;
		}

		/* this link is okay now */
		ehc->i.flags = 0;
		continue;

	rest_fail:
		nr_fails++;
		if (dev)
			ata_eh_handle_dev_fail(dev, rc);

		if (ap->pflags & ATA_PFLAG_FROZEN) {
			/* PMP reset requires working host port.
			 * Can't retry if it's frozen.
			 */
			if (sata_pmp_attached(ap))
				goto out;
			break;
		}
	}

	if (nr_fails)
		goto retry;

 out:
	if (rc && r_failed_link)
		*r_failed_link = link;

	return rc;
}
/**
 *	ata_eh_finish - finish up EH
 *	@ap: host port to finish EH for
 *
 *	Recovery is complete. Clean up EH states and retry or finish
 *	failed qcs.
 *
 *	LOCKING:
 *	None.
 */
void ata_eh_finish(struct ata_port *ap)
{
	struct ata_queued_cmd *qc;
	int tag;

	/* retry or finish qcs */
	ata_qc_for_each_raw(ap, qc, tag) {
		if (!(qc->flags & ATA_QCFLAG_FAILED))
			continue;

		if (qc->err_mask) {
			/* FIXME: Once EH migration is complete,
			 * generate sense data in this function,
			 * considering both err_mask and tf.
			 */
			if (qc->flags & ATA_QCFLAG_RETRY)
				ata_eh_qc_retry(qc);
			else
				ata_eh_qc_complete(qc);
		} else {
			if (qc->flags & ATA_QCFLAG_SENSE_VALID) {
				ata_eh_qc_complete(qc);
			} else {
				/* feed zero TF to sense generation */
				memset(&qc->result_tf, 0, sizeof(qc->result_tf));
				ata_eh_qc_retry(qc);
			}
		}
	}

	/* make sure nr_active_links is zero after EH */
	WARN_ON(ap->nr_active_links);
	ap->nr_active_links = 0;
}
/**
 *	ata_do_eh - do standard error handling
 *	@ap: host port to handle error for
 *
 *	@prereset: prereset method (can be NULL)
 *	@softreset: softreset method (can be NULL)
 *	@hardreset: hardreset method (can be NULL)
 *	@postreset: postreset method (can be NULL)
 *
 *	Perform standard error handling sequence.
 *
 *	LOCKING:
 *	Kernel thread context (may sleep).
 */
void ata_do_eh(struct ata_port *ap, ata_prereset_fn_t prereset,
	       ata_reset_fn_t softreset, ata_reset_fn_t hardreset,
	       ata_postreset_fn_t postreset)
{
	struct ata_device *dev;
	int rc;

	ata_eh_autopsy(ap);
	ata_eh_report(ap);

	rc = ata_eh_recover(ap, prereset, softreset, hardreset, postreset,
			    NULL);
	if (rc) {
		ata_for_each_dev(dev, &ap->link, ALL)
			ata_dev_disable(dev);
	}

	ata_eh_finish(ap);
}
/**
 *	ata_std_error_handler - standard error handler
 *	@ap: host port to handle error for
 *
 *	Standard error handler
 *
 *	LOCKING:
 *	Kernel thread context (may sleep).
 */
void ata_std_error_handler(struct ata_port *ap)
{
	struct ata_port_operations *ops = ap->ops;
	ata_reset_fn_t hardreset = ops->hardreset;

	/* ignore built-in hardreset if SCR access is not available */
	if (hardreset == sata_std_hardreset && !sata_scr_valid(&ap->link))
		hardreset = NULL;

	ata_do_eh(ap, ops->prereset, ops->softreset, hardreset, ops->postreset);
}
EXPORT_SYMBOL_GPL(ata_std_error_handler);
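
/*
 * Illustrative note (not part of the original file): most drivers do
 * not call ata_std_error_handler() directly; they pick it up as the
 * ->error_handler member by inheriting from ata_base_port_ops (or an
 * ops structure derived from it) and only override the individual
 * reset methods they need.
 */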
#ifdef CONFIG_PM
/**
 *	ata_eh_handle_port_suspend - perform port suspend operation
 *	@ap: port to suspend
 *
 *	Suspend @ap.
 *
 *	LOCKING:
 *	Kernel thread context (may sleep).
 */
static void ata_eh_handle_port_suspend(struct ata_port *ap)
{
	unsigned long flags;
	int rc = 0;
	struct ata_device *dev;

	/* are we suspending? */
	spin_lock_irqsave(ap->lock, flags);
	if (!(ap->pflags & ATA_PFLAG_PM_PENDING) ||
	    ap->pm_mesg.event & PM_EVENT_RESUME) {
		spin_unlock_irqrestore(ap->lock, flags);
		return;
	}
	spin_unlock_irqrestore(ap->lock, flags);

	WARN_ON(ap->pflags & ATA_PFLAG_SUSPENDED);

	/*
	 * If we have a ZPODD attached, check its zero
	 * power ready status before the port is frozen.
	 * Only needed for runtime suspend.
	 */
	if (PMSG_IS_AUTO(ap->pm_mesg)) {
		ata_for_each_dev(dev, &ap->link, ENABLED) {
			if (zpodd_dev_enabled(dev))
				zpodd_on_suspend(dev);
		}
	}

	/* suspend */
	ata_eh_freeze_port(ap);

	if (ap->ops->port_suspend)
		rc = ap->ops->port_suspend(ap, ap->pm_mesg);

	ata_acpi_set_state(ap, ap->pm_mesg);

	/* update the flags */
	spin_lock_irqsave(ap->lock, flags);

	ap->pflags &= ~ATA_PFLAG_PM_PENDING;
	if (rc == 0)
		ap->pflags |= ATA_PFLAG_SUSPENDED;
	else if (ap->pflags & ATA_PFLAG_FROZEN)
		ata_port_schedule_eh(ap);

	spin_unlock_irqrestore(ap->lock, flags);
}
/**
 *	ata_eh_handle_port_resume - perform port resume operation
 *	@ap: port to resume
 *
 *	Resume @ap.
 *
 *	LOCKING:
 *	Kernel thread context (may sleep).
 */
static void ata_eh_handle_port_resume(struct ata_port *ap)
{
	struct ata_link *link;
	struct ata_device *dev;
	unsigned long flags;

	/* are we resuming? */
	spin_lock_irqsave(ap->lock, flags);
	if (!(ap->pflags & ATA_PFLAG_PM_PENDING) ||
	    !(ap->pm_mesg.event & PM_EVENT_RESUME)) {
		spin_unlock_irqrestore(ap->lock, flags);
		return;
	}
	spin_unlock_irqrestore(ap->lock, flags);

	WARN_ON(!(ap->pflags & ATA_PFLAG_SUSPENDED));

	/*
	 * Error timestamps are in jiffies which doesn't run while
	 * suspended, and PHY events during resume aren't too uncommon.
	 * When the two are combined, it can lead to unnecessary speed
	 * downs if the machine is suspended and resumed repeatedly.
	 * Clear error history.
	 */
	ata_for_each_link(link, ap, HOST_FIRST)
		ata_for_each_dev(dev, link, ALL)
			ata_ering_clear(&dev->ering);

	ata_acpi_set_state(ap, ap->pm_mesg);

	if (ap->ops->port_resume)
		ap->ops->port_resume(ap);

	/* tell ACPI that we're resuming */
	ata_acpi_on_resume(ap);

	/* update the flags */
	spin_lock_irqsave(ap->lock, flags);
	ap->pflags &= ~(ATA_PFLAG_PM_PENDING | ATA_PFLAG_SUSPENDED);
	spin_unlock_irqrestore(ap->lock, flags);
}
#endif /* CONFIG_PM */