X-Git-Url: https://err.no/cgi-bin/gitweb.cgi?a=blobdiff_plain;f=drivers%2Fata%2Fsata_mv.c;h=d15caf32045efcdb698661210168acf551e052f1;hb=f273827e2aadcf2f74a7bdc9ad715a1b20ea7dda;hp=7f1b13e89cf74c5cb75cd8f67b2a19abf816d88c;hpb=0b776eb5426752d4e53354ac89e3710d857e09a7;p=linux-2.6 diff --git a/drivers/ata/sata_mv.c b/drivers/ata/sata_mv.c index 7f1b13e89c..d15caf3204 100644 --- a/drivers/ata/sata_mv.c +++ b/drivers/ata/sata_mv.c @@ -164,10 +164,14 @@ enum { MV_PCI_ERR_ATTRIBUTE = 0x1d48, MV_PCI_ERR_COMMAND = 0x1d50, - PCI_IRQ_CAUSE_OFS = 0x1d58, - PCI_IRQ_MASK_OFS = 0x1d5c, + PCI_IRQ_CAUSE_OFS = 0x1d58, + PCI_IRQ_MASK_OFS = 0x1d5c, PCI_UNMASK_ALL_IRQS = 0x7fffff, /* bits 22-0 */ + PCIE_IRQ_CAUSE_OFS = 0x1900, + PCIE_IRQ_MASK_OFS = 0x1910, + PCIE_UNMASK_ALL_IRQS = 0x40a, /* assorted bits */ + HC_MAIN_IRQ_CAUSE_OFS = 0x1d60, HC_MAIN_IRQ_MASK_OFS = 0x1d64, PORT0_ERR = (1 << 0), /* shift by port # */ @@ -206,6 +210,7 @@ enum { /* SATA registers */ SATA_STATUS_OFS = 0x300, /* ctrl, err regs follow status */ SATA_ACTIVE_OFS = 0x350, + SATA_FIS_IRQ_CAUSE_OFS = 0x364, PHY_MODE3 = 0x310, PHY_MODE4 = 0x314, PHY_MODE2 = 0x330, @@ -218,11 +223,11 @@ enum { /* Port registers */ EDMA_CFG_OFS = 0, - EDMA_CFG_Q_DEPTH = 0, /* queueing disabled */ - EDMA_CFG_NCQ = (1 << 5), - EDMA_CFG_NCQ_GO_ON_ERR = (1 << 14), /* continue on error */ - EDMA_CFG_RD_BRST_EXT = (1 << 11), /* read burst 512B */ - EDMA_CFG_WR_BUFF_LEN = (1 << 13), /* write buffer 512B */ + EDMA_CFG_Q_DEPTH = 0x1f, /* max device queue depth */ + EDMA_CFG_NCQ = (1 << 5), /* for R/W FPDMA queued */ + EDMA_CFG_NCQ_GO_ON_ERR = (1 << 14), /* continue on error */ + EDMA_CFG_RD_BRST_EXT = (1 << 11), /* read burst 512B */ + EDMA_CFG_WR_BUFF_LEN = (1 << 13), /* write buffer 512B */ EDMA_ERR_IRQ_CAUSE_OFS = 0x8, EDMA_ERR_IRQ_MASK_OFS = 0xc, @@ -240,14 +245,33 @@ enum { EDMA_ERR_CRPB_PAR = (1 << 10), /* CRPB parity error */ EDMA_ERR_INTRL_PAR = (1 << 11), /* internal parity error */ EDMA_ERR_IORDY = (1 << 12), /* IORdy timeout */ + EDMA_ERR_LNK_CTRL_RX = (0xf << 13), /* link ctrl rx error */ - EDMA_ERR_LNK_CTRL_RX_2 = (1 << 15), + EDMA_ERR_LNK_CTRL_RX_0 = (1 << 13), /* transient: CRC err */ + EDMA_ERR_LNK_CTRL_RX_1 = (1 << 14), /* transient: FIFO err */ + EDMA_ERR_LNK_CTRL_RX_2 = (1 << 15), /* fatal: caught SYNC */ + EDMA_ERR_LNK_CTRL_RX_3 = (1 << 16), /* transient: FIS rx err */ + EDMA_ERR_LNK_DATA_RX = (0xf << 17), /* link data rx error */ + EDMA_ERR_LNK_CTRL_TX = (0x1f << 21), /* link ctrl tx error */ + EDMA_ERR_LNK_CTRL_TX_0 = (1 << 21), /* transient: CRC err */ + EDMA_ERR_LNK_CTRL_TX_1 = (1 << 22), /* transient: FIFO err */ + EDMA_ERR_LNK_CTRL_TX_2 = (1 << 23), /* transient: caught SYNC */ + EDMA_ERR_LNK_CTRL_TX_3 = (1 << 24), /* transient: caught DMAT */ + EDMA_ERR_LNK_CTRL_TX_4 = (1 << 25), /* transient: FIS collision */ + EDMA_ERR_LNK_DATA_TX = (0x1f << 26), /* link data tx error */ + EDMA_ERR_TRANS_PROTO = (1 << 31), /* transport protocol error */ EDMA_ERR_OVERRUN_5 = (1 << 5), EDMA_ERR_UNDERRUN_5 = (1 << 6), + + EDMA_ERR_IRQ_TRANSIENT = EDMA_ERR_LNK_CTRL_RX_0 | + EDMA_ERR_LNK_CTRL_RX_1 | + EDMA_ERR_LNK_CTRL_RX_3 | + EDMA_ERR_LNK_CTRL_TX, + EDMA_EH_FREEZE = EDMA_ERR_D_PAR | EDMA_ERR_PRD_PAR | EDMA_ERR_DEV_DCON | @@ -303,9 +327,11 @@ enum { MV_HP_GEN_I = (1 << 6), /* Generation I: 50xx */ MV_HP_GEN_II = (1 << 7), /* Generation II: 60xx */ MV_HP_GEN_IIE = (1 << 8), /* Generation IIE: 6042/7042 */ + MV_HP_PCIE = (1 << 9), /* PCIe bus/regs: 7042 */ /* Port private flags (pp_flags) */ MV_PP_FLAG_EDMA_EN = (1 << 0), /* is EDMA engine enabled? 
*/ + MV_PP_FLAG_NCQ_EN = (1 << 1), /* is EDMA set up for NCQ? */ MV_PP_FLAG_HAD_A_RESET = (1 << 2), /* 1st hard reset complete? */ }; @@ -388,7 +414,15 @@ struct mv_port_signal { u32 pre; }; -struct mv_host_priv; +struct mv_host_priv { + u32 hp_flags; + struct mv_port_signal signal[8]; + const struct mv_hw_ops *ops; + u32 irq_cause_ofs; + u32 irq_mask_ofs; + u32 unmask_all_irqs; +}; + struct mv_hw_ops { void (*phy_errata)(struct mv_host_priv *hpriv, void __iomem *mmio, unsigned int port); @@ -401,12 +435,6 @@ struct mv_hw_ops { void (*reset_bus)(struct pci_dev *pdev, void __iomem *mmio); }; -struct mv_host_priv { - u32 hp_flags; - struct mv_port_signal signal[8]; - const struct mv_hw_ops *ops; -}; - static void mv_irq_clear(struct ata_port *ap); static int mv_scr_read(struct ata_port *ap, unsigned int sc_reg_in, u32 *val); static int mv_scr_write(struct ata_port *ap, unsigned int sc_reg_in, u32 val); @@ -421,6 +449,7 @@ static void mv_error_handler(struct ata_port *ap); static void mv_post_int_cmd(struct ata_queued_cmd *qc); static void mv_eh_freeze(struct ata_port *ap); static void mv_eh_thaw(struct ata_port *ap); +static void mv6_dev_config(struct ata_device *dev); static int mv_init_one(struct pci_dev *pdev, const struct pci_device_id *ent); static void mv5_phy_errata(struct mv_host_priv *hpriv, void __iomem *mmio, @@ -444,6 +473,9 @@ static void mv6_reset_flash(struct mv_host_priv *hpriv, void __iomem *mmio); static void mv_reset_pci_bus(struct pci_dev *pdev, void __iomem *mmio); static void mv_channel_reset(struct mv_host_priv *hpriv, void __iomem *mmio, unsigned int port_no); +static void mv_edma_cfg(struct mv_port_priv *pp, struct mv_host_priv *hpriv, + void __iomem *port_mmio, int want_ncq); +static int __mv_stop_dma(struct ata_port *ap); static struct scsi_host_template mv5_sht = { .module = THIS_MODULE, @@ -510,6 +542,7 @@ static const struct ata_port_operations mv5_ops = { }; static const struct ata_port_operations mv6_ops = { + .dev_config = mv6_dev_config, .tf_load = ata_tf_load, .tf_read = ata_tf_read, .check_status = ata_check_status, @@ -631,11 +664,13 @@ static const struct pci_device_id mv_pci_tbl[] = { /* Adaptec 1430SA */ { PCI_VDEVICE(ADAPTEC2, 0x0243), chip_7042 }, - { PCI_VDEVICE(TTI, 0x2310), chip_7042 }, - - /* add Marvell 7042 support */ + /* Marvell 7042 support */ { PCI_VDEVICE(MARVELL, 0x7042), chip_7042 }, + /* Highpoint RocketRAID PCIe series */ + { PCI_VDEVICE(TTI, 0x2300), chip_7042 }, + { PCI_VDEVICE(TTI, 0x2310), chip_7042 }, + { } /* terminate list */ }; @@ -806,19 +841,46 @@ static void mv_set_edma_ptrs(void __iomem *port_mmio, * LOCKING: * Inherited from caller. 
*/ -static void mv_start_dma(void __iomem *base, struct mv_host_priv *hpriv, - struct mv_port_priv *pp) +static void mv_start_dma(struct ata_port *ap, void __iomem *port_mmio, + struct mv_port_priv *pp, u8 protocol) { + int want_ncq = (protocol == ATA_PROT_NCQ); + + if (pp->pp_flags & MV_PP_FLAG_EDMA_EN) { + int using_ncq = ((pp->pp_flags & MV_PP_FLAG_NCQ_EN) != 0); + if (want_ncq != using_ncq) + __mv_stop_dma(ap); + } if (!(pp->pp_flags & MV_PP_FLAG_EDMA_EN)) { + struct mv_host_priv *hpriv = ap->host->private_data; + int hard_port = mv_hardport_from_port(ap->port_no); + void __iomem *hc_mmio = mv_hc_base_from_port( + ap->host->iomap[MV_PRIMARY_BAR], hard_port); + u32 hc_irq_cause, ipending; + /* clear EDMA event indicators, if any */ - writelfl(0, base + EDMA_ERR_IRQ_CAUSE_OFS); + writelfl(0, port_mmio + EDMA_ERR_IRQ_CAUSE_OFS); + + /* clear EDMA interrupt indicator, if any */ + hc_irq_cause = readl(hc_mmio + HC_IRQ_CAUSE_OFS); + ipending = (DEV_IRQ << hard_port) | + (CRPB_DMA_DONE << hard_port); + if (hc_irq_cause & ipending) { + writelfl(hc_irq_cause & ~ipending, + hc_mmio + HC_IRQ_CAUSE_OFS); + } - mv_set_edma_ptrs(base, hpriv, pp); + mv_edma_cfg(pp, hpriv, port_mmio, want_ncq); - writelfl(EDMA_EN, base + EDMA_CMD_OFS); + /* clear FIS IRQ Cause */ + writelfl(0, port_mmio + SATA_FIS_IRQ_CAUSE_OFS); + + mv_set_edma_ptrs(port_mmio, hpriv, pp); + + writelfl(EDMA_EN, port_mmio + EDMA_CMD_OFS); pp->pp_flags |= MV_PP_FLAG_EDMA_EN; } - WARN_ON(!(EDMA_EN & readl(base + EDMA_CMD_OFS))); + WARN_ON(!(EDMA_EN & readl(port_mmio + EDMA_CMD_OFS))); } /** @@ -845,7 +907,7 @@ static int __mv_stop_dma(struct ata_port *ap) pp->pp_flags &= ~MV_PP_FLAG_EDMA_EN; } else { WARN_ON(EDMA_EN & readl(port_mmio + EDMA_CMD_OFS)); - } + } /* now properly wait for the eDMA to stop */ for (i = 1000; i > 0; i--) { @@ -883,7 +945,7 @@ static void mv_dump_mem(void __iomem *start, unsigned bytes) for (b = 0; b < bytes; ) { DPRINTK("%p: ", start + b); for (w = 0; b < bytes && w < 4; w++) { - printk("%08x ",readl(start + b)); + printk("%08x ", readl(start + b)); b += sizeof(u32); } printk("\n"); @@ -899,8 +961,8 @@ static void mv_dump_pci_cfg(struct pci_dev *pdev, unsigned bytes) for (b = 0; b < bytes; ) { DPRINTK("%02x: ", b); for (w = 0; b < bytes && w < 4; w++) { - (void) pci_read_config_dword(pdev,b,&dw); - printk("%08x ",dw); + (void) pci_read_config_dword(pdev, b, &dw); + printk("%08x ", dw); b += sizeof(u32); } printk("\n"); @@ -944,9 +1006,9 @@ static void mv_dump_all_regs(void __iomem *mmio_base, int port, } for (p = start_port; p < start_port + num_ports; p++) { port_base = mv_port_base(mmio_base, p); - DPRINTK("EDMA regs (port %i):\n",p); + DPRINTK("EDMA regs (port %i):\n", p); mv_dump_mem(port_base, 0x54); - DPRINTK("SATA regs (port %i):\n",p); + DPRINTK("SATA regs (port %i):\n", p); mv_dump_mem(port_base+0x300, 0x60); } #endif @@ -994,35 +1056,44 @@ static int mv_scr_write(struct ata_port *ap, unsigned int sc_reg_in, u32 val) return -EINVAL; } -static void mv_edma_cfg(struct ata_port *ap, struct mv_host_priv *hpriv, - void __iomem *port_mmio) +static void mv6_dev_config(struct ata_device *adev) +{ + /* + * We don't have hob_nsect when doing NCQ commands on Gen-II. + * See mv_qc_prep() for more info. 
+ */ + if (adev->flags & ATA_DFLAG_NCQ) + if (adev->max_sectors > ATA_MAX_SECTORS) + adev->max_sectors = ATA_MAX_SECTORS; +} + +static void mv_edma_cfg(struct mv_port_priv *pp, struct mv_host_priv *hpriv, + void __iomem *port_mmio, int want_ncq) { - u32 cfg = readl(port_mmio + EDMA_CFG_OFS); + u32 cfg; /* set up non-NCQ EDMA configuration */ - cfg &= ~(1 << 9); /* disable eQue */ + cfg = EDMA_CFG_Q_DEPTH; /* always 0x1f for *all* chips */ - if (IS_GEN_I(hpriv)) { - cfg &= ~0x1f; /* clear queue depth */ + if (IS_GEN_I(hpriv)) cfg |= (1 << 8); /* enab config burst size mask */ - } - else if (IS_GEN_II(hpriv)) { - cfg &= ~0x1f; /* clear queue depth */ + else if (IS_GEN_II(hpriv)) cfg |= EDMA_CFG_RD_BRST_EXT | EDMA_CFG_WR_BUFF_LEN; - cfg &= ~(EDMA_CFG_NCQ | EDMA_CFG_NCQ_GO_ON_ERR); /* clear NCQ */ - } else if (IS_GEN_IIE(hpriv)) { cfg |= (1 << 23); /* do not mask PM field in rx'd FIS */ cfg |= (1 << 22); /* enab 4-entry host queue cache */ - cfg &= ~(1 << 19); /* dis 128-entry queue (for now?) */ cfg |= (1 << 18); /* enab early completion */ cfg |= (1 << 17); /* enab cut-through (dis stor&forwrd) */ - cfg &= ~(1 << 16); /* dis FIS-based switching (for now) */ - cfg &= ~(EDMA_CFG_NCQ); /* clear NCQ */ } + if (want_ncq) { + cfg |= EDMA_CFG_NCQ; + pp->pp_flags |= MV_PP_FLAG_NCQ_EN; + } else + pp->pp_flags &= ~MV_PP_FLAG_NCQ_EN; + writelfl(cfg, port_mmio + EDMA_CFG_OFS); } @@ -1085,7 +1156,7 @@ static int mv_port_start(struct ata_port *ap) spin_lock_irqsave(&ap->host->lock, flags); - mv_edma_cfg(ap, hpriv, port_mmio); + mv_edma_cfg(pp, hpriv, port_mmio, 0); mv_set_edma_ptrs(port_mmio, hpriv, pp); @@ -1127,9 +1198,10 @@ static void mv_fill_sg(struct ata_queued_cmd *qc) struct mv_port_priv *pp = qc->ap->private_data; struct scatterlist *sg; struct mv_sg *mv_sg, *last_sg = NULL; + unsigned int si; mv_sg = pp->sg_tbl; - ata_for_each_sg(sg, qc) { + for_each_sg(qc->sg, sg, qc->n_elem, si) { dma_addr_t addr = sg_dma_address(sg); u32 sg_len = sg_dma_len(sg); @@ -1156,7 +1228,7 @@ static void mv_fill_sg(struct ata_queued_cmd *qc) last_sg->flags_size |= cpu_to_le32(EPRD_FLAG_END_OF_TBL); } -static inline void mv_crqb_pack_cmd(__le16 *cmdw, u8 data, u8 addr, unsigned last) +static void mv_crqb_pack_cmd(__le16 *cmdw, u8 data, u8 addr, unsigned last) { u16 tmp = data | (addr << CRQB_CMD_ADDR_SHIFT) | CRQB_CMD_CS | (last ? 
CRQB_CMD_LAST : 0); @@ -1184,7 +1256,7 @@ static void mv_qc_prep(struct ata_queued_cmd *qc) u16 flags = 0; unsigned in_index; - if (qc->tf.protocol != ATA_PROT_DMA) + if (qc->tf.protocol != ATA_PROT_DMA) return; /* Fill in command request block @@ -1193,7 +1265,6 @@ static void mv_qc_prep(struct ata_queued_cmd *qc) flags |= CRQB_FLAG_READ; WARN_ON(MV_MAX_Q_DEPTH <= qc->tag); flags |= qc->tag << CRQB_TAG_SHIFT; - flags |= qc->tag << CRQB_IOID_SHIFT; /* 50xx appears to ignore this*/ /* get current queue index from software */ in_index = pp->req_idx & MV_MAX_Q_DEPTH_MASK; @@ -1276,7 +1347,7 @@ static void mv_qc_prep_iie(struct ata_queued_cmd *qc) unsigned in_index; u32 flags = 0; - if (qc->tf.protocol != ATA_PROT_DMA) + if (qc->tf.protocol != ATA_PROT_DMA) return; /* Fill in Gen IIE command request block @@ -1286,8 +1357,7 @@ static void mv_qc_prep_iie(struct ata_queued_cmd *qc) WARN_ON(MV_MAX_Q_DEPTH <= qc->tag); flags |= qc->tag << CRQB_TAG_SHIFT; - flags |= qc->tag << CRQB_IOID_SHIFT; /* "I/O Id" is -really- - what we use as our tag */ + flags |= qc->tag << CRQB_HOSTQ_SHIFT; /* get current queue index from software */ in_index = pp->req_idx & MV_MAX_Q_DEPTH_MASK; @@ -1341,7 +1411,6 @@ static unsigned int mv_qc_issue(struct ata_queued_cmd *qc) struct ata_port *ap = qc->ap; void __iomem *port_mmio = mv_ap_base(ap); struct mv_port_priv *pp = ap->private_data; - struct mv_host_priv *hpriv = ap->host->private_data; u32 in_index; if (qc->tf.protocol != ATA_PROT_DMA) { @@ -1353,7 +1422,7 @@ static unsigned int mv_qc_issue(struct ata_queued_cmd *qc) return ata_qc_issue_prot(qc); } - mv_start_dma(port_mmio, hpriv, pp); + mv_start_dma(ap, port_mmio, pp, qc->tf.protocol); in_index = pp->req_idx & MV_MAX_Q_DEPTH_MASK; @@ -1427,6 +1496,7 @@ static void mv_err_intr(struct ata_port *ap, struct ata_queued_cmd *qc) ata_ehi_hotplugged(ehi); ata_ehi_push_desc(ehi, edma_err_cause & EDMA_ERR_DEV_DCON ? "dev disconnect" : "dev connect"); + action |= ATA_EH_HARDRESET; } if (IS_GEN_I(hpriv)) { @@ -1455,7 +1525,7 @@ static void mv_err_intr(struct ata_port *ap, struct ata_queued_cmd *qc) } /* Clear EDMA now that SERR cleanup done */ - writelfl(0, port_mmio + EDMA_ERR_IRQ_CAUSE_OFS); + writelfl(~edma_err_cause, port_mmio + EDMA_ERR_IRQ_CAUSE_OFS); if (!err_mask) { err_mask = AC_ERR_OTHER; @@ -1528,23 +1598,17 @@ static void mv_intr_edma(struct ata_port *ap) * support for queueing. this works transparently for * queued and non-queued modes. */ - else if (IS_GEN_II(hpriv)) - tag = (le16_to_cpu(pp->crpb[out_index].id) - >> CRPB_IOID_SHIFT_6) & 0x3f; - - else /* IS_GEN_IIE */ - tag = (le16_to_cpu(pp->crpb[out_index].id) - >> CRPB_IOID_SHIFT_7) & 0x3f; + else + tag = le16_to_cpu(pp->crpb[out_index].id) & 0x1f; qc = ata_qc_from_tag(ap, tag); - /* lower 8 bits of status are EDMA_ERR_IRQ_CAUSE_OFS - * bits (WARNING: might not necessarily be associated - * with this command), which -should- be clear - * if all is well + /* For non-NCQ mode, the lower 8 bits of status + * are from EDMA_ERR_IRQ_CAUSE_OFS, + * which should be zero if all went well. 
*/ status = le16_to_cpu(pp->crpb[out_index].flags); - if (unlikely(status & 0xff)) { + if ((status & 0xff) && !(pp->pp_flags & MV_PP_FLAG_NCQ_EN)) { mv_err_intr(ap, qc); return; } @@ -1606,7 +1670,7 @@ static void mv_host_intr(struct ata_host *host, u32 relevant, unsigned int hc) writelfl(~hc_irq_cause, hc_mmio + HC_IRQ_CAUSE_OFS); VPRINTK("ENTER, hc%u relevant=0x%08x HC IRQ cause=0x%08x\n", - hc,relevant,hc_irq_cause); + hc, relevant, hc_irq_cause); for (port = port0; port < port0 + MV_PORTS_PER_HC; port++) { struct ata_port *ap = host->ports[port]; @@ -1648,13 +1712,14 @@ static void mv_host_intr(struct ata_host *host, u32 relevant, unsigned int hc) static void mv_pci_error(struct ata_host *host, void __iomem *mmio) { + struct mv_host_priv *hpriv = host->private_data; struct ata_port *ap; struct ata_queued_cmd *qc; struct ata_eh_info *ehi; unsigned int i, err_mask, printed = 0; u32 err_cause; - err_cause = readl(mmio + PCI_IRQ_CAUSE_OFS); + err_cause = readl(mmio + hpriv->irq_cause_ofs); dev_printk(KERN_ERR, host->dev, "PCI ERROR; PCI IRQ cause=0x%08x\n", err_cause); @@ -1662,7 +1727,7 @@ static void mv_pci_error(struct ata_host *host, void __iomem *mmio) DPRINTK("All regs @ PCI error\n"); mv_dump_all_regs(mmio, -1, to_pci_dev(host->dev)); - writelfl(0, mmio + PCI_IRQ_CAUSE_OFS); + writelfl(0, mmio + hpriv->irq_cause_ofs); for (i = 0; i < host->n_ports; i++) { ap = host->ports[i]; @@ -1704,18 +1769,19 @@ static irqreturn_t mv_interrupt(int irq, void *dev_instance) struct ata_host *host = dev_instance; unsigned int hc, handled = 0, n_hcs; void __iomem *mmio = host->iomap[MV_PRIMARY_BAR]; - u32 irq_stat; + u32 irq_stat, irq_mask; + spin_lock(&host->lock); irq_stat = readl(mmio + HC_MAIN_IRQ_CAUSE_OFS); + irq_mask = readl(mmio + HC_MAIN_IRQ_MASK_OFS); /* check the cases where we either have nothing pending or have read * a bogus register value which can indicate HW removal or PCI fault */ - if (!irq_stat || (0xffffffffU == irq_stat)) - return IRQ_NONE; + if (!(irq_stat & irq_mask) || (0xffffffffU == irq_stat)) + goto out_unlock; n_hcs = mv_get_hc_count(host->ports[0]->flags); - spin_lock(&host->lock); if (unlikely(irq_stat & PCI_ERR)) { mv_pci_error(host, mmio); @@ -1926,6 +1992,8 @@ static int mv5_reset_hc(struct mv_host_priv *hpriv, void __iomem *mmio, #define ZERO(reg) writel(0, mmio + (reg)) static void mv_reset_pci_bus(struct pci_dev *pdev, void __iomem *mmio) { + struct ata_host *host = dev_get_drvdata(&pdev->dev); + struct mv_host_priv *hpriv = host->private_data; u32 tmp; tmp = readl(mmio + MV_PCI_MODE); @@ -1937,8 +2005,8 @@ static void mv_reset_pci_bus(struct pci_dev *pdev, void __iomem *mmio) writel(0x000100ff, mmio + MV_PCI_XBAR_TMOUT); ZERO(HC_MAIN_IRQ_MASK_OFS); ZERO(MV_PCI_SERR_MASK); - ZERO(PCI_IRQ_CAUSE_OFS); - ZERO(PCI_IRQ_MASK_OFS); + ZERO(hpriv->irq_cause_ofs); + ZERO(hpriv->irq_mask_ofs); ZERO(MV_PCI_ERR_LOW_ADDRESS); ZERO(MV_PCI_ERR_HIGH_ADDRESS); ZERO(MV_PCI_ERR_ATTRIBUTE); @@ -1983,9 +2051,8 @@ static int mv6_reset_hc(struct mv_host_priv *hpriv, void __iomem *mmio, for (i = 0; i < 1000; i++) { udelay(1); t = readl(reg); - if (PCI_MASTER_EMPTY & t) { + if (PCI_MASTER_EMPTY & t) break; - } } if (!(PCI_MASTER_EMPTY & t)) { printk(KERN_ERR DRV_NAME ": PCI master won't flush\n"); @@ -2171,7 +2238,7 @@ static void mv_phy_reset(struct ata_port *ap, unsigned int *class, mv_scr_read(ap, SCR_ERROR, &serror); mv_scr_read(ap, SCR_CONTROL, &scontrol); DPRINTK("S-regs after ATA_RST: SStat 0x%08x SErr 0x%08x " - "SCtrl 0x%08x\n", status, serror, scontrol); + "SCtrl 0x%08x\n", 
sstatus, serror, scontrol); } #endif @@ -2415,8 +2482,8 @@ static void mv_port_init(struct ata_ioports *port, void __iomem *port_mmio) writelfl(readl(port_mmio + serr_ofs), port_mmio + serr_ofs); writelfl(0, port_mmio + EDMA_ERR_IRQ_CAUSE_OFS); - /* unmask all EDMA error interrupts */ - writelfl(~0, port_mmio + EDMA_ERR_IRQ_MASK_OFS); + /* unmask all non-transient EDMA error interrupts */ + writelfl(~EDMA_ERR_IRQ_TRANSIENT, port_mmio + EDMA_ERR_IRQ_MASK_OFS); VPRINTK("EDMA cfg=0x%08x EDMA IRQ err cause/mask=0x%08x/0x%08x\n", readl(port_mmio + EDMA_CFG_OFS), @@ -2430,7 +2497,7 @@ static int mv_chip_id(struct ata_host *host, unsigned int board_idx) struct mv_host_priv *hpriv = host->private_data; u32 hp_flags = hpriv->hp_flags; - switch(board_idx) { + switch (board_idx) { case chip_5080: hpriv->ops = &mv5xxx_ops; hp_flags |= MV_HP_GEN_I; @@ -2491,6 +2558,36 @@ static int mv_chip_id(struct ata_host *host, unsigned int board_idx) break; case chip_7042: + hp_flags |= MV_HP_PCIE; + if (pdev->vendor == PCI_VENDOR_ID_TTI && + (pdev->device == 0x2300 || pdev->device == 0x2310)) + { + /* + * Highpoint RocketRAID PCIe 23xx series cards: + * + * Unconfigured drives are treated as "Legacy" + * by the BIOS, and it overwrites sector 8 with + * a "Lgcy" metadata block prior to Linux boot. + * + * Configured drives (RAID or JBOD) leave sector 8 + * alone, but instead overwrite a high numbered + * sector for the RAID metadata. This sector can + * be determined exactly, by truncating the physical + * drive capacity to a nice even GB value. + * + * RAID metadata is at: (dev->n_sectors & ~0xfffff) + * + * Warn the user, lest they think we're just buggy. + */ + printk(KERN_WARNING DRV_NAME ": Highpoint RocketRAID" + " BIOS CORRUPTS DATA on all attached drives," + " regardless of if/how they are configured." 
+ " BEWARE!\n"); + printk(KERN_WARNING DRV_NAME ": For data safety, do not" + " use sectors 8-9 on \"Legacy\" drives," + " and avoid the final two gigabytes on" + " all RocketRAID BIOS initialized drives.\n"); + } case chip_6042: hpriv->ops = &mv6xxx_ops; hp_flags |= MV_HP_GEN_IIE; @@ -2511,11 +2608,21 @@ static int mv_chip_id(struct ata_host *host, unsigned int board_idx) break; default: - printk(KERN_ERR DRV_NAME ": BUG: invalid board index %u\n", board_idx); + dev_printk(KERN_ERR, &pdev->dev, + "BUG: invalid board index %u\n", board_idx); return 1; } hpriv->hp_flags = hp_flags; + if (hp_flags & MV_HP_PCIE) { + hpriv->irq_cause_ofs = PCIE_IRQ_CAUSE_OFS; + hpriv->irq_mask_ofs = PCIE_IRQ_MASK_OFS; + hpriv->unmask_all_irqs = PCIE_UNMASK_ALL_IRQS; + } else { + hpriv->irq_cause_ofs = PCI_IRQ_CAUSE_OFS; + hpriv->irq_mask_ofs = PCI_IRQ_MASK_OFS; + hpriv->unmask_all_irqs = PCI_UNMASK_ALL_IRQS; + } return 0; } @@ -2595,10 +2702,10 @@ static int mv_init_host(struct ata_host *host, unsigned int board_idx) } /* Clear any currently outstanding host interrupt conditions */ - writelfl(0, mmio + PCI_IRQ_CAUSE_OFS); + writelfl(0, mmio + hpriv->irq_cause_ofs); /* and unmask interrupt generation for host regs */ - writelfl(PCI_UNMASK_ALL_IRQS, mmio + PCI_IRQ_MASK_OFS); + writelfl(hpriv->unmask_all_irqs, mmio + hpriv->irq_mask_ofs); if (IS_GEN_I(hpriv)) writelfl(~HC_MAIN_MASKED_IRQS_5, mmio + HC_MAIN_IRQ_MASK_OFS); @@ -2609,8 +2716,8 @@ static int mv_init_host(struct ata_host *host, unsigned int board_idx) "PCI int cause/mask=0x%08x/0x%08x\n", readl(mmio + HC_MAIN_IRQ_CAUSE_OFS), readl(mmio + HC_MAIN_IRQ_MASK_OFS), - readl(mmio + PCI_IRQ_CAUSE_OFS), - readl(mmio + PCI_IRQ_MASK_OFS)); + readl(mmio + hpriv->irq_cause_ofs), + readl(mmio + hpriv->irq_mask_ofs)); done: return rc; @@ -2668,7 +2775,7 @@ static void mv_print_info(struct ata_host *host) */ static int mv_init_one(struct pci_dev *pdev, const struct pci_device_id *ent) { - static int printed_version = 0; + static int printed_version; unsigned int board_idx = (unsigned int)ent->driver_data; const struct ata_port_info *ppi[] = { &mv_port_info[board_idx], NULL }; struct ata_host *host;