X-Git-Url: https://err.no/cgi-bin/gitweb.cgi?a=blobdiff_plain;f=drivers%2Fata%2Fsata_mv.c;h=d15caf32045efcdb698661210168acf551e052f1;hb=f273827e2aadcf2f74a7bdc9ad715a1b20ea7dda;hp=f117f6a01676bc4af2d35fc256fe354b2e74855e;hpb=0c58912e192fc3a4835d772aafa40b72552b819f;p=linux-2.6 diff --git a/drivers/ata/sata_mv.c b/drivers/ata/sata_mv.c index f117f6a016..d15caf3204 100644 --- a/drivers/ata/sata_mv.c +++ b/drivers/ata/sata_mv.c @@ -331,6 +331,7 @@ enum { /* Port private flags (pp_flags) */ MV_PP_FLAG_EDMA_EN = (1 << 0), /* is EDMA engine enabled? */ + MV_PP_FLAG_NCQ_EN = (1 << 1), /* is EDMA set up for NCQ? */ MV_PP_FLAG_HAD_A_RESET = (1 << 2), /* 1st hard reset complete? */ }; @@ -448,6 +449,7 @@ static void mv_error_handler(struct ata_port *ap); static void mv_post_int_cmd(struct ata_queued_cmd *qc); static void mv_eh_freeze(struct ata_port *ap); static void mv_eh_thaw(struct ata_port *ap); +static void mv6_dev_config(struct ata_device *dev); static int mv_init_one(struct pci_dev *pdev, const struct pci_device_id *ent); static void mv5_phy_errata(struct mv_host_priv *hpriv, void __iomem *mmio, @@ -471,8 +473,9 @@ static void mv6_reset_flash(struct mv_host_priv *hpriv, void __iomem *mmio); static void mv_reset_pci_bus(struct pci_dev *pdev, void __iomem *mmio); static void mv_channel_reset(struct mv_host_priv *hpriv, void __iomem *mmio, unsigned int port_no); -static void mv_edma_cfg(struct ata_port *ap, struct mv_host_priv *hpriv, - void __iomem *port_mmio); +static void mv_edma_cfg(struct mv_port_priv *pp, struct mv_host_priv *hpriv, + void __iomem *port_mmio, int want_ncq); +static int __mv_stop_dma(struct ata_port *ap); static struct scsi_host_template mv5_sht = { .module = THIS_MODULE, @@ -539,6 +542,7 @@ static const struct ata_port_operations mv5_ops = { }; static const struct ata_port_operations mv6_ops = { + .dev_config = mv6_dev_config, .tf_load = ata_tf_load, .tf_read = ata_tf_read, .check_status = ata_check_status, @@ -838,8 +842,15 @@ static void mv_set_edma_ptrs(void __iomem *port_mmio, * Inherited from caller. */ static void mv_start_dma(struct ata_port *ap, void __iomem *port_mmio, - struct mv_port_priv *pp) + struct mv_port_priv *pp, u8 protocol) { + int want_ncq = (protocol == ATA_PROT_NCQ); + + if (pp->pp_flags & MV_PP_FLAG_EDMA_EN) { + int using_ncq = ((pp->pp_flags & MV_PP_FLAG_NCQ_EN) != 0); + if (want_ncq != using_ncq) + __mv_stop_dma(ap); + } if (!(pp->pp_flags & MV_PP_FLAG_EDMA_EN)) { struct mv_host_priv *hpriv = ap->host->private_data; int hard_port = mv_hardport_from_port(ap->port_no); @@ -859,7 +870,7 @@ static void mv_start_dma(struct ata_port *ap, void __iomem *port_mmio, hc_mmio + HC_IRQ_CAUSE_OFS); } - mv_edma_cfg(ap, hpriv, port_mmio); + mv_edma_cfg(pp, hpriv, port_mmio, want_ncq); /* clear FIS IRQ Cause */ writelfl(0, port_mmio + SATA_FIS_IRQ_CAUSE_OFS); @@ -1045,8 +1056,19 @@ static int mv_scr_write(struct ata_port *ap, unsigned int sc_reg_in, u32 val) return -EINVAL; } -static void mv_edma_cfg(struct ata_port *ap, struct mv_host_priv *hpriv, - void __iomem *port_mmio) +static void mv6_dev_config(struct ata_device *adev) +{ + /* + * We don't have hob_nsect when doing NCQ commands on Gen-II. + * See mv_qc_prep() for more info. 
+ */ + if (adev->flags & ATA_DFLAG_NCQ) + if (adev->max_sectors > ATA_MAX_SECTORS) + adev->max_sectors = ATA_MAX_SECTORS; +} + +static void mv_edma_cfg(struct mv_port_priv *pp, struct mv_host_priv *hpriv, + void __iomem *port_mmio, int want_ncq) { u32 cfg; @@ -1066,6 +1088,12 @@ static void mv_edma_cfg(struct ata_port *ap, struct mv_host_priv *hpriv, cfg |= (1 << 17); /* enab cut-through (dis stor&forwrd) */ } + if (want_ncq) { + cfg |= EDMA_CFG_NCQ; + pp->pp_flags |= MV_PP_FLAG_NCQ_EN; + } else + pp->pp_flags &= ~MV_PP_FLAG_NCQ_EN; + writelfl(cfg, port_mmio + EDMA_CFG_OFS); } @@ -1128,7 +1156,7 @@ static int mv_port_start(struct ata_port *ap) spin_lock_irqsave(&ap->host->lock, flags); - mv_edma_cfg(ap, hpriv, port_mmio); + mv_edma_cfg(pp, hpriv, port_mmio, 0); mv_set_edma_ptrs(port_mmio, hpriv, pp); @@ -1237,7 +1265,6 @@ static void mv_qc_prep(struct ata_queued_cmd *qc) flags |= CRQB_FLAG_READ; WARN_ON(MV_MAX_Q_DEPTH <= qc->tag); flags |= qc->tag << CRQB_TAG_SHIFT; - flags |= qc->tag << CRQB_IOID_SHIFT; /* 50xx appears to ignore this*/ /* get current queue index from software */ in_index = pp->req_idx & MV_MAX_Q_DEPTH_MASK; @@ -1330,8 +1357,7 @@ static void mv_qc_prep_iie(struct ata_queued_cmd *qc) WARN_ON(MV_MAX_Q_DEPTH <= qc->tag); flags |= qc->tag << CRQB_TAG_SHIFT; - flags |= qc->tag << CRQB_IOID_SHIFT; /* "I/O Id" is -really- - what we use as our tag */ + flags |= qc->tag << CRQB_HOSTQ_SHIFT; /* get current queue index from software */ in_index = pp->req_idx & MV_MAX_Q_DEPTH_MASK; @@ -1396,7 +1422,7 @@ static unsigned int mv_qc_issue(struct ata_queued_cmd *qc) return ata_qc_issue_prot(qc); } - mv_start_dma(ap, port_mmio, pp); + mv_start_dma(ap, port_mmio, pp, qc->tf.protocol); in_index = pp->req_idx & MV_MAX_Q_DEPTH_MASK; @@ -1572,23 +1598,17 @@ static void mv_intr_edma(struct ata_port *ap) * support for queueing. this works transparently for * queued and non-queued modes. */ - else if (IS_GEN_II(hpriv)) - tag = (le16_to_cpu(pp->crpb[out_index].id) - >> CRPB_IOID_SHIFT_6) & 0x3f; - - else /* IS_GEN_IIE */ - tag = (le16_to_cpu(pp->crpb[out_index].id) - >> CRPB_IOID_SHIFT_7) & 0x3f; + else + tag = le16_to_cpu(pp->crpb[out_index].id) & 0x1f; qc = ata_qc_from_tag(ap, tag); - /* lower 8 bits of status are EDMA_ERR_IRQ_CAUSE_OFS - * bits (WARNING: might not necessarily be associated - * with this command), which -should- be clear - * if all is well + /* For non-NCQ mode, the lower 8 bits of status + * are from EDMA_ERR_IRQ_CAUSE_OFS, + * which should be zero if all went well. */ status = le16_to_cpu(pp->crpb[out_index].flags); - if (unlikely(status & 0xff)) { + if ((status & 0xff) && !(pp->pp_flags & MV_PP_FLAG_NCQ_EN)) { mv_err_intr(ap, qc); return; }
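
For readers tracing the logic, the heart of this patch is the new decision in mv_start_dma(): if EDMA is already running but was programmed for the wrong mode (NCQ vs. non-NCQ) for the command about to be issued, stop it so it can be reconfigured with the correct want_ncq setting. The standalone sketch below (not part of the patch) models only that flag bookkeeping outside the driver. The flag values mirror MV_PP_FLAG_EDMA_EN / MV_PP_FLAG_NCQ_EN from the diff, but port_state, edma_stop(), edma_configure() and start_dma() are hypothetical stand-ins for the driver's struct mv_port_priv, __mv_stop_dma(), mv_edma_cfg() and mv_start_dma(), not real kernel APIs.

#include <stdbool.h>
#include <stdio.h>

/* Flag bits mirroring pp_flags in the patch (same values as sata_mv.c). */
#define PP_FLAG_EDMA_EN		(1 << 0)	/* EDMA engine currently enabled */
#define PP_FLAG_NCQ_EN		(1 << 1)	/* EDMA currently set up for NCQ */

struct port_state {
	unsigned int pp_flags;
};

/* Hypothetical stand-in for __mv_stop_dma(): engine off, mode forgotten. */
static void edma_stop(struct port_state *p)
{
	p->pp_flags &= ~(PP_FLAG_EDMA_EN | PP_FLAG_NCQ_EN);
	puts("  EDMA stopped");
}

/* Hypothetical stand-in for mv_edma_cfg() followed by enabling EDMA. */
static void edma_configure(struct port_state *p, bool want_ncq)
{
	if (want_ncq)
		p->pp_flags |= PP_FLAG_NCQ_EN;
	else
		p->pp_flags &= ~PP_FLAG_NCQ_EN;
	p->pp_flags |= PP_FLAG_EDMA_EN;
	printf("  EDMA started, NCQ %s\n", want_ncq ? "on" : "off");
}

/* Same decision the patched mv_start_dma() makes before issuing a command. */
static void start_dma(struct port_state *p, bool want_ncq)
{
	if (p->pp_flags & PP_FLAG_EDMA_EN) {
		bool using_ncq = (p->pp_flags & PP_FLAG_NCQ_EN) != 0;

		/* Mode mismatch: stop so EDMA gets reprogrammed below. */
		if (want_ncq != using_ncq)
			edma_stop(p);
	}
	if (!(p->pp_flags & PP_FLAG_EDMA_EN))
		edma_configure(p, want_ncq);
}

int main(void)
{
	struct port_state port = { 0 };

	puts("non-NCQ command:");
	start_dma(&port, false);	/* first use: configure for non-NCQ */
	puts("NCQ command:");
	start_dma(&port, true);		/* mismatch: stop, then reconfigure */
	puts("another NCQ command:");
	start_dma(&port, true);		/* mode already matches: nothing to do */
	return 0;
}

The design point this models: rather than reprogramming EDMA on every command, the patch caches the current NCQ state in pp_flags (set or cleared by mv_edma_cfg() when it writes EDMA_CFG_NCQ) and only pays for a stop/reconfigure cycle when the protocol of the incoming command actually differs from how the engine was last set up.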