sata_mv: fix broken DSM/TRIM support (v2)
author    Mark Lord <kernel@teksavvy.com>
          Fri, 20 Aug 2010 01:40:44 +0000 (21:40 -0400)
committer Paul Gortmaker <paul.gortmaker@windriver.com>
          Thu, 6 Jan 2011 23:07:39 +0000 (18:07 -0500)
commit 44b733809a5aba7f6b15a548d31a56d25bf3851c upstream.

Fix DSM/TRIM commands in sata_mv (v2).
These need to be issued using old-school "BM DMA",
rather than via the EDMA host queue.
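
In outline (condensed from the mv_qc_prep() and mv_qc_prep_iie()
hunks below), DSM commands now bypass EDMA request-block setup
entirely and get issued through the BM DMA path instead:

	/* in mv_qc_prep() / mv_qc_prep_iie() */
	if (tf->command == ATA_CMD_DSM)
		return;		/* no EDMA CRQB; issue via BM DMA instead */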

Since the chips don't have proper BM DMA status,
we need to be more careful with setting the ATA_DMA_INTR bit:
DSM/TRIM often has a long delay between "DMA complete"
and "command complete".

GEN_I chips don't have BM DMA, so no TRIM for them.
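
That case is rejected up front in mv_qc_issue() (hunk below):

	if (qc->tf.command == ATA_CMD_DSM) {
		if (!ap->ops->bmdma_setup)	/* no bmdma on GEN_I */
			return AC_ERR_OTHER;
		break;				/* use bmdma for this */
	}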

Signed-off-by: Mark Lord <mlord@pobox.com>
Signed-off-by: Jeff Garzik <jgarzik@redhat.com>
Signed-off-by: Paul Gortmaker <paul.gortmaker@windriver.com>
drivers/ata/sata_mv.c

diff --git a/drivers/ata/sata_mv.c b/drivers/ata/sata_mv.c
index 71cc0d42f9e1c015ba90d27d4a457d5e247fddc5..39af57a73e741b3267a94c8ae13a3fde12ba2868 100644
--- a/drivers/ata/sata_mv.c
+++ b/drivers/ata/sata_mv.c
@@ -1886,19 +1886,25 @@ static void mv_bmdma_start(struct ata_queued_cmd *qc)
  *     LOCKING:
  *     Inherited from caller.
  */
-static void mv_bmdma_stop(struct ata_queued_cmd *qc)
+static void mv_bmdma_stop_ap(struct ata_port *ap)
 {
-       struct ata_port *ap = qc->ap;
        void __iomem *port_mmio = mv_ap_base(ap);
        u32 cmd;
 
        /* clear start/stop bit */
        cmd = readl(port_mmio + BMDMA_CMD);
-       cmd &= ~ATA_DMA_START;
-       writelfl(cmd, port_mmio + BMDMA_CMD);
+       if (cmd & ATA_DMA_START) {
+               cmd &= ~ATA_DMA_START;
+               writelfl(cmd, port_mmio + BMDMA_CMD);
+
+               /* one-PIO-cycle guaranteed wait, per spec, for HDMA1:0 transition */
+               ata_sff_dma_pause(ap);
+       }
+}
 
-       /* one-PIO-cycle guaranteed wait, per spec, for HDMA1:0 transition */
-       ata_sff_dma_pause(ap);
+static void mv_bmdma_stop(struct ata_queued_cmd *qc)
+{
+       mv_bmdma_stop_ap(qc->ap);
 }
 
 /**
@@ -1922,8 +1928,21 @@ static u8 mv_bmdma_status(struct ata_port *ap)
        reg = readl(port_mmio + BMDMA_STATUS);
        if (reg & ATA_DMA_ACTIVE)
                status = ATA_DMA_ACTIVE;
-       else
+       else if (reg & ATA_DMA_ERR)
                status = (reg & ATA_DMA_ERR) | ATA_DMA_INTR;
+       else {
+               /*
+                * Just because DMA_ACTIVE is 0 (DMA completed),
+                * this does _not_ mean the device is "done".
+                * So we should not yet be signalling ATA_DMA_INTR
+                * in some cases.  Eg. DSM/TRIM, and perhaps others.
+                */
+               mv_bmdma_stop_ap(ap);
+               if (ioread8(ap->ioaddr.altstatus_addr) & ATA_BUSY)
+                       status = 0;
+               else
+                       status = ATA_DMA_INTR;
+       }
        return status;
 }
 
@@ -1983,6 +2002,9 @@ static void mv_qc_prep(struct ata_queued_cmd *qc)
 
        switch (tf->protocol) {
        case ATA_PROT_DMA:
+               if (tf->command == ATA_CMD_DSM)
+                       return;
+               /* fall-thru */
        case ATA_PROT_NCQ:
                break;  /* continue below */
        case ATA_PROT_PIO:
@@ -2082,6 +2104,8 @@ static void mv_qc_prep_iie(struct ata_queued_cmd *qc)
        if ((tf->protocol != ATA_PROT_DMA) &&
            (tf->protocol != ATA_PROT_NCQ))
                return;
+       if (tf->command == ATA_CMD_DSM)
+               return;  /* use bmdma for this */
 
        /* Fill in Gen IIE command request block */
        if (!(tf->flags & ATA_TFLAG_WRITE))
@@ -2277,6 +2301,12 @@ static unsigned int mv_qc_issue(struct ata_queued_cmd *qc)
 
        switch (qc->tf.protocol) {
        case ATA_PROT_DMA:
+               if (qc->tf.command == ATA_CMD_DSM) {
+                       if (!ap->ops->bmdma_setup)  /* no bmdma on GEN_I */
+                               return AC_ERR_OTHER;
+                       break;  /* use bmdma for this */
+               }
+               /* fall thru */
        case ATA_PROT_NCQ:
                mv_start_edma(ap, port_mmio, pp, qc->tf.protocol);
                pp->req_idx = (pp->req_idx + 1) & MV_MAX_Q_DEPTH_MASK;