/*
 * sata_mv.c - Marvell SATA support
 *
 * Copyright 2008: Marvell Corporation, all rights reserved.
 * Copyright 2005: EMC Corporation, all rights reserved.
 * Copyright 2005 Red Hat, Inc.  All rights reserved.
 *
 * Please ALWAYS copy linux-ide@vger.kernel.org on emails.
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License as published by
 * the Free Software Foundation; version 2 of the License.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program; if not, write to the Free Software
 * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA  02111-1307  USA
 */
/*
 * sata_mv TODO:
 *
 * 1) Needs a full errata audit for all chipsets.  I implemented most
 *    of the errata workarounds found in the Marvell vendor driver, but
 *    I distinctly remember a couple workarounds (one related to PCI-X)
 *    are still needed.
 *
 * 2) Improve/fix IRQ and error handling sequences.
 *
 * 3) ATAPI support (Marvell claims the 60xx/70xx chips can do it).
 *
 * 4) Think about TCQ support here, and for libata in general
 *    with controllers that support it via host-queuing hardware
 *    (a software-only implementation could be a nightmare).
 *
 * 5) Investigate problems with PCI Message Signalled Interrupts (MSI).
 *
 * 6) Add port multiplier support (intermediate).
 *
 * 8) Develop a low-power-consumption strategy, and implement it.
 *
 * 9) [Experiment, low priority] See if ATAPI can be supported using
 *    "unknown FIS" or "vendor-specific FIS" support, or something creative
 *    like that.
 *
 * 10) [Experiment, low priority] Investigate interrupt coalescing.
 *     Quite often, especially with PCI Message Signalled Interrupts (MSI),
 *     the overhead reduced by interrupt mitigation is not worth the
 *     added latency.
 *
 * 11) [Experiment, Marvell value added] Is it possible to use target
 *     mode to cross-connect two Linux boxes with Marvell cards?  If so,
 *     creating LibATA target mode support would be very interesting.
 *
 *     Target mode, for those without docs, is the ability to directly
 *     connect two SATA controllers.
 */
#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/pci.h>
#include <linux/init.h>
#include <linux/blkdev.h>
#include <linux/delay.h>
#include <linux/interrupt.h>
#include <linux/dmapool.h>
#include <linux/dma-mapping.h>
#include <linux/device.h>
#include <linux/platform_device.h>
#include <linux/ata_platform.h>
#include <scsi/scsi_host.h>
#include <scsi/scsi_cmnd.h>
#include <scsi/scsi_device.h>
#include <linux/libata.h>

#define DRV_NAME	"sata_mv"
#define DRV_VERSION	"1.20"
enum {
	/* BARs are enumerated in terms of pci_resource_start() terms */
	MV_PRIMARY_BAR = 0,	/* offset 0x10: memory space */
	MV_IO_BAR = 2,		/* offset 0x18: IO space */
	MV_MISC_BAR = 3,	/* offset 0x1c: FLASH, NVRAM, SRAM */

	MV_MAJOR_REG_AREA_SZ = 0x10000,	/* 64KB */
	MV_MINOR_REG_AREA_SZ = 0x2000,	/* 8KB */

	MV_IRQ_COAL_REG_BASE = 0x18000,	/* 6xxx part only */
	MV_IRQ_COAL_CAUSE = (MV_IRQ_COAL_REG_BASE + 0x08),
	MV_IRQ_COAL_CAUSE_LO = (MV_IRQ_COAL_REG_BASE + 0x88),
	MV_IRQ_COAL_CAUSE_HI = (MV_IRQ_COAL_REG_BASE + 0x8c),
	MV_IRQ_COAL_THRESHOLD = (MV_IRQ_COAL_REG_BASE + 0xcc),
	MV_IRQ_COAL_TIME_THRESHOLD = (MV_IRQ_COAL_REG_BASE + 0xd0),

	MV_SATAHC0_REG_BASE = 0x20000,
	MV_FLASH_CTL = 0x1046c,
	MV_GPIO_PORT_CTL = 0x104f0,
	MV_RESET_CFG = 0x180d8,

	MV_PCI_REG_SZ = MV_MAJOR_REG_AREA_SZ,
	MV_SATAHC_REG_SZ = MV_MAJOR_REG_AREA_SZ,
	MV_SATAHC_ARBTR_REG_SZ = MV_MINOR_REG_AREA_SZ,	/* arbiter */
	MV_PORT_REG_SZ = MV_MINOR_REG_AREA_SZ,

	MV_MAX_Q_DEPTH = 32,
	MV_MAX_Q_DEPTH_MASK = MV_MAX_Q_DEPTH - 1,

	/* CRQB needs alignment on a 1KB boundary. Size == 1KB
	 * CRPB needs alignment on a 256B boundary. Size == 256B
	 * ePRD (SG) entries need alignment on a 16B boundary. Size == 16B
	 */
	MV_CRQB_Q_SZ = (32 * MV_MAX_Q_DEPTH),
	MV_CRPB_Q_SZ = (8 * MV_MAX_Q_DEPTH),
	MV_MAX_SG_CT = 256,
	MV_SG_TBL_SZ = (16 * MV_MAX_SG_CT),
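	/*
	 * Worked sizing example (illustrative, not from the original
	 * source): with MV_MAX_Q_DEPTH = 32, the request ring is
	 * 32 B * 32 = 1KB, the response ring is 8 B * 32 = 256B, and
	 * each per-tag SG table is 16 B * 256 = 4KB -- matching the
	 * alignment/size comment above.
	 */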
	MV_PORTS_PER_HC = 4,
	/* == (port / MV_PORTS_PER_HC) to determine HC from 0-7 port */
	MV_PORT_HC_SHIFT = 2,
	/* == (port % MV_PORTS_PER_HC) to determine hard port from 0-7 port */
	MV_PORT_MASK = 3,

	/* Host Flags */
	MV_FLAG_DUAL_HC = (1 << 30),		/* two SATA Host Controllers */
	MV_FLAG_IRQ_COALESCE = (1 << 29),	/* IRQ coalescing capability */
	/* SoC integrated controllers, no PCI interface */
	MV_FLAG_SOC = (1 << 28),

	MV_COMMON_FLAGS = ATA_FLAG_SATA | ATA_FLAG_NO_LEGACY |
			  ATA_FLAG_MMIO | ATA_FLAG_NO_ATAPI |
			  ATA_FLAG_PIO_POLLING,
	MV_6XXX_FLAGS = MV_FLAG_IRQ_COALESCE,

	CRQB_FLAG_READ = (1 << 0),
	CRQB_TAG_SHIFT = 1,
	CRQB_IOID_SHIFT = 6,	/* CRQB Gen-II/IIE IO Id shift */
	CRQB_PMP_SHIFT = 12,	/* CRQB Gen-II/IIE PMP shift */
	CRQB_HOSTQ_SHIFT = 17,	/* CRQB Gen-II/IIE HostQueTag shift */
	CRQB_CMD_ADDR_SHIFT = 8,
	CRQB_CMD_CS = (0x2 << 11),
	CRQB_CMD_LAST = (1 << 15),

	CRPB_FLAG_STATUS_SHIFT = 8,
	CRPB_IOID_SHIFT_6 = 5,	/* CRPB Gen-II IO Id shift */
	CRPB_IOID_SHIFT_7 = 7,	/* CRPB Gen-IIE IO Id shift */

	EPRD_FLAG_END_OF_TBL = (1 << 31),

	/* PCI interface registers */

	PCI_COMMAND_OFS = 0xc00,

	PCI_MAIN_CMD_STS_OFS = 0xd30,
	STOP_PCI_MASTER = (1 << 2),
	PCI_MASTER_EMPTY = (1 << 3),
	GLOB_SFT_RST = (1 << 4),

	MV_PCI_MODE = 0xd00,
	MV_PCI_EXP_ROM_BAR_CTL = 0xd2c,
	MV_PCI_DISC_TIMER = 0xd04,
	MV_PCI_MSI_TRIGGER = 0xc38,
	MV_PCI_SERR_MASK = 0xc28,
	MV_PCI_XBAR_TMOUT = 0x1d04,
	MV_PCI_ERR_LOW_ADDRESS = 0x1d40,
	MV_PCI_ERR_HIGH_ADDRESS = 0x1d44,
	MV_PCI_ERR_ATTRIBUTE = 0x1d48,
	MV_PCI_ERR_COMMAND = 0x1d50,

	PCI_IRQ_CAUSE_OFS = 0x1d58,
	PCI_IRQ_MASK_OFS = 0x1d5c,
	PCI_UNMASK_ALL_IRQS = 0x7fffff,	/* bits 22-0 */

	PCIE_IRQ_CAUSE_OFS = 0x1900,
	PCIE_IRQ_MASK_OFS = 0x1910,
	PCIE_UNMASK_ALL_IRQS = 0x40a,	/* assorted bits */

	HC_MAIN_IRQ_CAUSE_OFS = 0x1d60,
	HC_MAIN_IRQ_MASK_OFS = 0x1d64,
	HC_SOC_MAIN_IRQ_CAUSE_OFS = 0x20020,
	HC_SOC_MAIN_IRQ_MASK_OFS = 0x20024,
	PORT0_ERR = (1 << 0),	/* shift by port # */
	PORT0_DONE = (1 << 1),	/* shift by port # */
	HC0_IRQ_PEND = 0x1ff,	/* bits 0-8 = HC0's ports */
	HC_SHIFT = 9,		/* bits 9-17 = HC1's ports */
	PCI_ERR = (1 << 18),
	TRAN_LO_DONE = (1 << 19),	/* 6xxx: IRQ coalescing */
	TRAN_HI_DONE = (1 << 20),	/* 6xxx: IRQ coalescing */
	PORTS_0_3_COAL_DONE = (1 << 8),
	PORTS_4_7_COAL_DONE = (1 << 17),
	PORTS_0_7_COAL_DONE = (1 << 21),	/* 6xxx: IRQ coalescing */
	GPIO_INT = (1 << 22),
	SELF_INT = (1 << 23),
	TWSI_INT = (1 << 24),
	HC_MAIN_RSVD = (0x7f << 25),		/* bits 31-25 */
	HC_MAIN_RSVD_5 = (0x1fff << 19),	/* bits 31-19 */
	HC_MAIN_RSVD_SOC = (0x3fffffb << 6),	/* bits 31-9, 7-6 */
	HC_MAIN_MASKED_IRQS = (TRAN_LO_DONE | TRAN_HI_DONE |
			       PORTS_0_7_COAL_DONE | GPIO_INT | TWSI_INT |
			       HC_MAIN_RSVD),
	HC_MAIN_MASKED_IRQS_5 = (PORTS_0_3_COAL_DONE | PORTS_4_7_COAL_DONE |
				 HC_MAIN_RSVD_5),
	HC_MAIN_MASKED_IRQS_SOC = (PORTS_0_3_COAL_DONE | HC_MAIN_RSVD_SOC),
	/* SATAHC registers */
	HC_CFG_OFS = 0,

	HC_IRQ_CAUSE_OFS = 0x14,
	CRPB_DMA_DONE = (1 << 0),	/* shift by port # */
	HC_IRQ_COAL = (1 << 4),		/* IRQ coalescing */
	DEV_IRQ = (1 << 8),		/* shift by port # */

	/* Shadow block registers */
	SHD_BLK_OFS = 0x100,
	SHD_CTL_AST_OFS = 0x20,		/* ofs from SHD_BLK_OFS */

	/* SATA registers */
	SATA_STATUS_OFS = 0x300,	/* ctrl, err regs follow status */
	SATA_ACTIVE_OFS = 0x350,
	SATA_FIS_IRQ_CAUSE_OFS = 0x364,

	SATA_IFCTL_OFS = 0x344,
	SATA_IFSTAT_OFS = 0x34c,
	VENDOR_UNIQUE_FIS_OFS = 0x35c,

	PHY_MODE3 = 0x310,
	PHY_MODE4 = 0x314,
	PHY_MODE2 = 0x330,
	MV5_PHY_MODE = 0x74,
	MV5_LT_MODE = 0x30,
	MV5_PHY_CTL = 0x0C,
	SATA_INTERFACE_CFG = 0x050,

	MV_M2_PREAMP_MASK = 0x7e0,

	/* Port registers */
	EDMA_CFG_OFS = 0,
	EDMA_CFG_Q_DEPTH = 0x1f,		/* max device queue depth */
	EDMA_CFG_NCQ = (1 << 5),		/* for R/W FPDMA queued */
	EDMA_CFG_NCQ_GO_ON_ERR = (1 << 14),	/* continue on error */
	EDMA_CFG_RD_BRST_EXT = (1 << 11),	/* read burst 512B */
	EDMA_CFG_WR_BUFF_LEN = (1 << 13),	/* write buffer 512B */
	EDMA_CFG_EDMA_FBS = (1 << 16),		/* EDMA FIS-Based Switching */
	EDMA_CFG_FBS = (1 << 26),		/* FIS-Based Switching */

	EDMA_ERR_IRQ_CAUSE_OFS = 0x8,
	EDMA_ERR_IRQ_MASK_OFS = 0xc,
	EDMA_ERR_D_PAR = (1 << 0),	/* UDMA data parity err */
	EDMA_ERR_PRD_PAR = (1 << 1),	/* UDMA PRD parity err */
	EDMA_ERR_DEV = (1 << 2),	/* device error */
	EDMA_ERR_DEV_DCON = (1 << 3),	/* device disconnect */
	EDMA_ERR_DEV_CON = (1 << 4),	/* device connected */
	EDMA_ERR_SERR = (1 << 5),	/* SError bits [WBDST] raised */
	EDMA_ERR_SELF_DIS = (1 << 7),	/* Gen II/IIE self-disable */
	EDMA_ERR_SELF_DIS_5 = (1 << 8),	/* Gen I self-disable */
	EDMA_ERR_BIST_ASYNC = (1 << 8),	/* BIST FIS or Async Notify */
	EDMA_ERR_TRANS_IRQ_7 = (1 << 8),	/* Gen IIE transport layer irq */
	EDMA_ERR_CRQB_PAR = (1 << 9),	/* CRQB parity error */
	EDMA_ERR_CRPB_PAR = (1 << 10),	/* CRPB parity error */
	EDMA_ERR_INTRL_PAR = (1 << 11),	/* internal parity error */
	EDMA_ERR_IORDY = (1 << 12),	/* IORdy timeout */

	EDMA_ERR_LNK_CTRL_RX = (0xf << 13),	/* link ctrl rx error */
	EDMA_ERR_LNK_CTRL_RX_0 = (1 << 13),	/* transient: CRC err */
	EDMA_ERR_LNK_CTRL_RX_1 = (1 << 14),	/* transient: FIFO err */
	EDMA_ERR_LNK_CTRL_RX_2 = (1 << 15),	/* fatal: caught SYNC */
	EDMA_ERR_LNK_CTRL_RX_3 = (1 << 16),	/* transient: FIS rx err */

	EDMA_ERR_LNK_DATA_RX = (0xf << 17),	/* link data rx error */

	EDMA_ERR_LNK_CTRL_TX = (0x1f << 21),	/* link ctrl tx error */
	EDMA_ERR_LNK_CTRL_TX_0 = (1 << 21),	/* transient: CRC err */
	EDMA_ERR_LNK_CTRL_TX_1 = (1 << 22),	/* transient: FIFO err */
	EDMA_ERR_LNK_CTRL_TX_2 = (1 << 23),	/* transient: caught SYNC */
	EDMA_ERR_LNK_CTRL_TX_3 = (1 << 24),	/* transient: caught DMAT */
	EDMA_ERR_LNK_CTRL_TX_4 = (1 << 25),	/* transient: FIS collision */

	EDMA_ERR_LNK_DATA_TX = (0x1f << 26),	/* link data tx error */

	EDMA_ERR_TRANS_PROTO = (1 << 31),	/* transport protocol error */
	EDMA_ERR_OVERRUN_5 = (1 << 5),
	EDMA_ERR_UNDERRUN_5 = (1 << 6),

	EDMA_ERR_IRQ_TRANSIENT = EDMA_ERR_LNK_CTRL_RX_0 |
				 EDMA_ERR_LNK_CTRL_RX_1 |
				 EDMA_ERR_LNK_CTRL_RX_3 |
				 EDMA_ERR_LNK_CTRL_TX,

	EDMA_EH_FREEZE = EDMA_ERR_D_PAR |
			 EDMA_ERR_LNK_CTRL_RX_2 |
			 EDMA_ERR_LNK_DATA_RX |
			 EDMA_ERR_LNK_DATA_TX |
			 EDMA_ERR_TRANS_PROTO,

	EDMA_EH_FREEZE_5 = EDMA_ERR_D_PAR |
			   EDMA_ERR_UNDERRUN_5 |
			   EDMA_ERR_SELF_DIS_5 |
			   EDMA_ERR_CRQB_PAR |
			   EDMA_ERR_CRPB_PAR |
			   EDMA_ERR_INTRL_PAR |
			   EDMA_ERR_IORDY,

	EDMA_REQ_Q_BASE_HI_OFS = 0x10,
	EDMA_REQ_Q_IN_PTR_OFS = 0x14,	/* also contains BASE_LO */

	EDMA_REQ_Q_OUT_PTR_OFS = 0x18,
	EDMA_REQ_Q_PTR_SHIFT = 5,

	EDMA_RSP_Q_BASE_HI_OFS = 0x1c,
	EDMA_RSP_Q_IN_PTR_OFS = 0x20,
	EDMA_RSP_Q_OUT_PTR_OFS = 0x24,	/* also contains BASE_LO */
	EDMA_RSP_Q_PTR_SHIFT = 3,

	EDMA_CMD_OFS = 0x28,	/* EDMA command register */
	EDMA_EN = (1 << 0),	/* enable EDMA */
	EDMA_DS = (1 << 1),	/* disable EDMA; self-negated */
	ATA_RST = (1 << 2),	/* reset trans/link/phy */

	EDMA_IORDY_TMOUT = 0x34,

	/* Host private flags (hp_flags) */
	MV_HP_FLAG_MSI = (1 << 0),
	MV_HP_ERRATA_50XXB0 = (1 << 1),
	MV_HP_ERRATA_50XXB2 = (1 << 2),
	MV_HP_ERRATA_60X1B2 = (1 << 3),
	MV_HP_ERRATA_60X1C0 = (1 << 4),
	MV_HP_ERRATA_XX42A0 = (1 << 5),
	MV_HP_GEN_I = (1 << 6),		/* Generation I: 50xx */
	MV_HP_GEN_II = (1 << 7),	/* Generation II: 60xx */
	MV_HP_GEN_IIE = (1 << 8),	/* Generation IIE: 6042/7042 */
	MV_HP_PCIE = (1 << 9),		/* PCIe bus/regs: 7042 */

	/* Port private flags (pp_flags) */
	MV_PP_FLAG_EDMA_EN = (1 << 0),	/* is EDMA engine enabled? */
	MV_PP_FLAG_NCQ_EN = (1 << 1),	/* is EDMA set up for NCQ? */
};

#define IS_GEN_I(hpriv) ((hpriv)->hp_flags & MV_HP_GEN_I)
#define IS_GEN_II(hpriv) ((hpriv)->hp_flags & MV_HP_GEN_II)
#define IS_GEN_IIE(hpriv) ((hpriv)->hp_flags & MV_HP_GEN_IIE)
#define HAS_PCI(host) (!((host)->ports[0]->flags & MV_FLAG_SOC))

enum {
	/* DMA boundary 0xffff is required by the s/g splitting
	 * we need on /length/ in mv_fill_sg().
	 */
	MV_DMA_BOUNDARY = 0xffffU,

	/* mask of register bits containing lower 32 bits
	 * of EDMA request queue DMA address
	 */
	EDMA_REQ_Q_BASE_LO_MASK = 0xfffffc00U,

	/* ditto, for response queue */
	EDMA_RSP_Q_BASE_LO_MASK = 0xffffff00U,
};
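/*
 * Illustrative derivation (not from the original source): the CRQB ring is
 * 1KB-aligned, so the low 10 bits of its DMA base are zero and
 * EDMA_REQ_Q_BASE_LO_MASK (0xfffffc00) keeps exactly the base bits; the
 * 256B-aligned CRPB ring likewise pairs with 0xffffff00, leaving the low
 * bits of each queue-pointer register free to carry the ring index.
 */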
/* Command ReQuest Block: 32B */
struct mv_crqb {
	__le32 sg_addr;
	__le32 sg_addr_hi;
	__le16 ctrl_flags;
	__le16 ata_cmd[11];
};

struct mv_crqb_iie {
	__le32 addr;
	__le32 addr_hi;
	__le32 flags;
	__le32 len;
	__le32 ata_cmd[4];
};

/* Command ResPonse Block: 8B */
struct mv_crpb {
	__le16 id;
	__le16 flags;
	__le32 tmstmp;
};

/* EDMA Physical Region Descriptor (ePRD); A.K.A. SG */
struct mv_sg {
	__le32 addr;
	__le32 flags_size;
	__le32 addr_hi;
	__le32 reserved;
};

struct mv_port_priv {
	struct mv_crqb *crqb;
	dma_addr_t crqb_dma;
	struct mv_crpb *crpb;
	dma_addr_t crpb_dma;
	struct mv_sg *sg_tbl[MV_MAX_Q_DEPTH];
	dma_addr_t sg_tbl_dma[MV_MAX_Q_DEPTH];

	unsigned int req_idx;
	unsigned int resp_idx;

	u32 pp_flags;
};

struct mv_port_signal {
	u32 amps;
	u32 pre;
};

struct mv_host_priv {
	u32 hp_flags;
	struct mv_port_signal signal[8];
	const struct mv_hw_ops *ops;
	int n_ports;
	void __iomem *base;
	void __iomem *main_cause_reg_addr;
	void __iomem *main_mask_reg_addr;
	u32 irq_cause_ofs;
	u32 irq_mask_ofs;
	u32 unmask_all_irqs;
	/*
	 * These consistent DMA memory pools give us guaranteed
	 * alignment for hardware-accessed data structures,
	 * and less memory waste in accomplishing the alignment.
	 */
	struct dma_pool *crqb_pool;
	struct dma_pool *crpb_pool;
	struct dma_pool *sg_tbl_pool;
};

struct mv_hw_ops {
	void (*phy_errata)(struct mv_host_priv *hpriv, void __iomem *mmio,
			   unsigned int port);
	void (*enable_leds)(struct mv_host_priv *hpriv, void __iomem *mmio);
	void (*read_preamp)(struct mv_host_priv *hpriv, int idx,
			    void __iomem *mmio);
	int (*reset_hc)(struct mv_host_priv *hpriv, void __iomem *mmio,
			unsigned int n_hc);
	void (*reset_flash)(struct mv_host_priv *hpriv, void __iomem *mmio);
	void (*reset_bus)(struct ata_host *host, void __iomem *mmio);
};
static int mv_scr_read(struct ata_port *ap, unsigned int sc_reg_in, u32 *val);
static int mv_scr_write(struct ata_port *ap, unsigned int sc_reg_in, u32 val);
static int mv5_scr_read(struct ata_port *ap, unsigned int sc_reg_in, u32 *val);
static int mv5_scr_write(struct ata_port *ap, unsigned int sc_reg_in, u32 val);
static int mv_port_start(struct ata_port *ap);
static void mv_port_stop(struct ata_port *ap);
static void mv_qc_prep(struct ata_queued_cmd *qc);
static void mv_qc_prep_iie(struct ata_queued_cmd *qc);
static unsigned int mv_qc_issue(struct ata_queued_cmd *qc);
static int mv_prereset(struct ata_link *link, unsigned long deadline);
static int mv_hardreset(struct ata_link *link, unsigned int *class,
			unsigned long deadline);
static void mv_postreset(struct ata_link *link, unsigned int *classes);
static void mv_eh_freeze(struct ata_port *ap);
static void mv_eh_thaw(struct ata_port *ap);
static void mv6_dev_config(struct ata_device *dev);

static void mv5_phy_errata(struct mv_host_priv *hpriv, void __iomem *mmio,
			   unsigned int port);
static void mv5_enable_leds(struct mv_host_priv *hpriv, void __iomem *mmio);
static void mv5_read_preamp(struct mv_host_priv *hpriv, int idx,
			    void __iomem *mmio);
static int mv5_reset_hc(struct mv_host_priv *hpriv, void __iomem *mmio,
			unsigned int n_hc);
static void mv5_reset_flash(struct mv_host_priv *hpriv, void __iomem *mmio);
static void mv5_reset_bus(struct ata_host *host, void __iomem *mmio);

static void mv6_phy_errata(struct mv_host_priv *hpriv, void __iomem *mmio,
			   unsigned int port);
static void mv6_enable_leds(struct mv_host_priv *hpriv, void __iomem *mmio);
static void mv6_read_preamp(struct mv_host_priv *hpriv, int idx,
			    void __iomem *mmio);
static int mv6_reset_hc(struct mv_host_priv *hpriv, void __iomem *mmio,
			unsigned int n_hc);
static void mv6_reset_flash(struct mv_host_priv *hpriv, void __iomem *mmio);

static void mv_soc_enable_leds(struct mv_host_priv *hpriv,
			       void __iomem *mmio);
static void mv_soc_read_preamp(struct mv_host_priv *hpriv, int idx,
			       void __iomem *mmio);
static int mv_soc_reset_hc(struct mv_host_priv *hpriv,
			   void __iomem *mmio, unsigned int n_hc);
static void mv_soc_reset_flash(struct mv_host_priv *hpriv,
			       void __iomem *mmio);
static void mv_soc_reset_bus(struct ata_host *host, void __iomem *mmio);
static void mv_reset_pci_bus(struct ata_host *host, void __iomem *mmio);
static void mv_reset_channel(struct mv_host_priv *hpriv, void __iomem *mmio,
			     unsigned int port_no);
static int mv_stop_edma(struct ata_port *ap);
static int mv_stop_edma_engine(struct ata_port *ap);
static void mv_edma_cfg(struct ata_port *ap, int want_ncq);
/* .sg_tablesize is (MV_MAX_SG_CT / 2) in the structures below
 * because we have to allow room for worst case splitting of
 * PRDs for 64K boundaries in mv_fill_sg().
 */
static struct scsi_host_template mv5_sht = {
	ATA_BASE_SHT(DRV_NAME),
	.sg_tablesize = MV_MAX_SG_CT / 2,
	.dma_boundary = MV_DMA_BOUNDARY,
};

static struct scsi_host_template mv6_sht = {
	ATA_NCQ_SHT(DRV_NAME),
	.can_queue = MV_MAX_Q_DEPTH - 1,
	.sg_tablesize = MV_MAX_SG_CT / 2,
	.dma_boundary = MV_DMA_BOUNDARY,
};
static struct ata_port_operations mv5_ops = {
	.inherits = &ata_sff_port_ops,

	.qc_prep = mv_qc_prep,
	.qc_issue = mv_qc_issue,

	.freeze = mv_eh_freeze,
	.thaw = mv_eh_thaw,
	.prereset = mv_prereset,
	.hardreset = mv_hardreset,
	.postreset = mv_postreset,
	.error_handler = ata_std_error_handler, /* avoid SFF EH */
	.post_internal_cmd = ATA_OP_NULL,

	.scr_read = mv5_scr_read,
	.scr_write = mv5_scr_write,

	.port_start = mv_port_start,
	.port_stop = mv_port_stop,
};

static struct ata_port_operations mv6_ops = {
	.inherits = &mv5_ops,
	.qc_defer = ata_std_qc_defer,
	.dev_config = mv6_dev_config,
	.scr_read = mv_scr_read,
	.scr_write = mv_scr_write,
};

static struct ata_port_operations mv_iie_ops = {
	.inherits = &mv6_ops,
	.dev_config = ATA_OP_NULL,
	.qc_prep = mv_qc_prep_iie,
};

static const struct ata_port_info mv_port_info[] = {
	{  /* chip_504x */
		.flags = MV_COMMON_FLAGS,
		.pio_mask = 0x1f,	/* pio0-4 */
		.udma_mask = ATA_UDMA6,
		.port_ops = &mv5_ops,
	},
	{  /* chip_508x */
		.flags = MV_COMMON_FLAGS | MV_FLAG_DUAL_HC,
		.pio_mask = 0x1f,	/* pio0-4 */
		.udma_mask = ATA_UDMA6,
		.port_ops = &mv5_ops,
	},
	{  /* chip_5080 */
		.flags = MV_COMMON_FLAGS | MV_FLAG_DUAL_HC,
		.pio_mask = 0x1f,	/* pio0-4 */
		.udma_mask = ATA_UDMA6,
		.port_ops = &mv5_ops,
	},
	{  /* chip_604x */
		.flags = MV_COMMON_FLAGS | MV_6XXX_FLAGS |
			 ATA_FLAG_NCQ,
		.pio_mask = 0x1f,	/* pio0-4 */
		.udma_mask = ATA_UDMA6,
		.port_ops = &mv6_ops,
	},
	{  /* chip_608x */
		.flags = MV_COMMON_FLAGS | MV_6XXX_FLAGS |
			 ATA_FLAG_NCQ | MV_FLAG_DUAL_HC,
		.pio_mask = 0x1f,	/* pio0-4 */
		.udma_mask = ATA_UDMA6,
		.port_ops = &mv6_ops,
	},
	{  /* chip_6042 */
		.flags = MV_COMMON_FLAGS | MV_6XXX_FLAGS |
			 ATA_FLAG_NCQ,
		.pio_mask = 0x1f,	/* pio0-4 */
		.udma_mask = ATA_UDMA6,
		.port_ops = &mv_iie_ops,
	},
	{  /* chip_7042 */
		.flags = MV_COMMON_FLAGS | MV_6XXX_FLAGS |
			 ATA_FLAG_NCQ,
		.pio_mask = 0x1f,	/* pio0-4 */
		.udma_mask = ATA_UDMA6,
		.port_ops = &mv_iie_ops,
	},
	{  /* chip_soc */
		.flags = MV_COMMON_FLAGS | MV_FLAG_SOC,
		.pio_mask = 0x1f,	/* pio0-4 */
		.udma_mask = ATA_UDMA6,
		.port_ops = &mv_iie_ops,
	},
};
static const struct pci_device_id mv_pci_tbl[] = {
	{ PCI_VDEVICE(MARVELL, 0x5040), chip_504x },
	{ PCI_VDEVICE(MARVELL, 0x5041), chip_504x },
	{ PCI_VDEVICE(MARVELL, 0x5080), chip_5080 },
	{ PCI_VDEVICE(MARVELL, 0x5081), chip_508x },
	/* RocketRAID 1740/174x have different identifiers */
	{ PCI_VDEVICE(TTI, 0x1740), chip_508x },
	{ PCI_VDEVICE(TTI, 0x1742), chip_508x },

	{ PCI_VDEVICE(MARVELL, 0x6040), chip_604x },
	{ PCI_VDEVICE(MARVELL, 0x6041), chip_604x },
	{ PCI_VDEVICE(MARVELL, 0x6042), chip_6042 },
	{ PCI_VDEVICE(MARVELL, 0x6080), chip_608x },
	{ PCI_VDEVICE(MARVELL, 0x6081), chip_608x },

	{ PCI_VDEVICE(ADAPTEC2, 0x0241), chip_604x },

	{ PCI_VDEVICE(ADAPTEC2, 0x0243), chip_7042 },

	/* Marvell 7042 support */
	{ PCI_VDEVICE(MARVELL, 0x7042), chip_7042 },

	/* Highpoint RocketRAID PCIe series */
	{ PCI_VDEVICE(TTI, 0x2300), chip_7042 },
	{ PCI_VDEVICE(TTI, 0x2310), chip_7042 },

	{ }			/* terminate list */
};

static const struct mv_hw_ops mv5xxx_ops = {
	.phy_errata = mv5_phy_errata,
	.enable_leds = mv5_enable_leds,
	.read_preamp = mv5_read_preamp,
	.reset_hc = mv5_reset_hc,
	.reset_flash = mv5_reset_flash,
	.reset_bus = mv5_reset_bus,
};

static const struct mv_hw_ops mv6xxx_ops = {
	.phy_errata = mv6_phy_errata,
	.enable_leds = mv6_enable_leds,
	.read_preamp = mv6_read_preamp,
	.reset_hc = mv6_reset_hc,
	.reset_flash = mv6_reset_flash,
	.reset_bus = mv_reset_pci_bus,
};

static const struct mv_hw_ops mv_soc_ops = {
	.phy_errata = mv6_phy_errata,
	.enable_leds = mv_soc_enable_leds,
	.read_preamp = mv_soc_read_preamp,
	.reset_hc = mv_soc_reset_hc,
	.reset_flash = mv_soc_reset_flash,
	.reset_bus = mv_soc_reset_bus,
};
static inline void writelfl(unsigned long data, void __iomem *addr)
{
	writel(data, addr);
	(void) readl(addr);	/* flush to avoid PCI posted write */
}

static inline void __iomem *mv_hc_base(void __iomem *base, unsigned int hc)
{
	return (base + MV_SATAHC0_REG_BASE + (hc * MV_SATAHC_REG_SZ));
}

static inline unsigned int mv_hc_from_port(unsigned int port)
{
	return port >> MV_PORT_HC_SHIFT;
}

static inline unsigned int mv_hardport_from_port(unsigned int port)
{
	return port & MV_PORT_MASK;
}
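/*
 * Worked example (illustrative, not from the original source): on a
 * dual-HC (8-port) chip, system-wide port 6 maps to
 * hc = 6 >> MV_PORT_HC_SHIFT = 1 and hardport = 6 & MV_PORT_MASK = 2,
 * i.e. the third port of the second SATAHC.
 */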
static inline void __iomem *mv_hc_base_from_port(void __iomem *base,
						 unsigned int port)
{
	return mv_hc_base(base, mv_hc_from_port(port));
}

static inline void __iomem *mv_port_base(void __iomem *base, unsigned int port)
{
	return mv_hc_base_from_port(base, port) +
		MV_SATAHC_ARBTR_REG_SZ +
		(mv_hardport_from_port(port) * MV_PORT_REG_SZ);
}
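/*
 * Worked example (illustrative, not from the original source): for port 5,
 * mv_port_base() computes
 *	base + 0x20000 + (1 * 0x10000)	(SATAHC 1)
 *	     + 0x2000			(arbiter block)
 *	     + (1 * 0x2000)		(hard port 1)
 *	= base + 0x34000.
 */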
static void __iomem *mv5_phy_base(void __iomem *mmio, unsigned int port)
{
	void __iomem *hc_mmio = mv_hc_base_from_port(mmio, port);
	unsigned long ofs = (mv_hardport_from_port(port) + 1) * 0x100UL;

	return hc_mmio + ofs;
}

static inline void __iomem *mv_host_base(struct ata_host *host)
{
	struct mv_host_priv *hpriv = host->private_data;
	return hpriv->base;
}

static inline void __iomem *mv_ap_base(struct ata_port *ap)
{
	return mv_port_base(mv_host_base(ap->host), ap->port_no);
}

static inline int mv_get_hc_count(unsigned long port_flags)
{
	return ((port_flags & MV_FLAG_DUAL_HC) ? 2 : 1);
}
static void mv_set_edma_ptrs(void __iomem *port_mmio,
			     struct mv_host_priv *hpriv,
			     struct mv_port_priv *pp)
{
	u32 index;

	/*
	 * initialize request queue
	 */
	index = (pp->req_idx & MV_MAX_Q_DEPTH_MASK) << EDMA_REQ_Q_PTR_SHIFT;

	WARN_ON(pp->crqb_dma & 0x3ff);
	writel((pp->crqb_dma >> 16) >> 16, port_mmio + EDMA_REQ_Q_BASE_HI_OFS);
	writelfl((pp->crqb_dma & EDMA_REQ_Q_BASE_LO_MASK) | index,
		 port_mmio + EDMA_REQ_Q_IN_PTR_OFS);

	if (hpriv->hp_flags & MV_HP_ERRATA_XX42A0)
		writelfl((pp->crqb_dma & 0xffffffff) | index,
			 port_mmio + EDMA_REQ_Q_OUT_PTR_OFS);
	else
		writelfl(index, port_mmio + EDMA_REQ_Q_OUT_PTR_OFS);

	/*
	 * initialize response queue
	 */
	index = (pp->resp_idx & MV_MAX_Q_DEPTH_MASK) << EDMA_RSP_Q_PTR_SHIFT;

	WARN_ON(pp->crpb_dma & 0xff);
	writel((pp->crpb_dma >> 16) >> 16, port_mmio + EDMA_RSP_Q_BASE_HI_OFS);

	if (hpriv->hp_flags & MV_HP_ERRATA_XX42A0)
		writelfl((pp->crpb_dma & 0xffffffff) | index,
			 port_mmio + EDMA_RSP_Q_IN_PTR_OFS);
	else
		writelfl(index, port_mmio + EDMA_RSP_Q_IN_PTR_OFS);

	writelfl((pp->crpb_dma & EDMA_RSP_Q_BASE_LO_MASK) | index,
		 port_mmio + EDMA_RSP_Q_OUT_PTR_OFS);
}
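/*
 * Illustrative layout note (not from the original source): in the
 * request-queue pointer registers, bits 31:10 hold the 1KB-aligned ring
 * base (EDMA_REQ_Q_BASE_LO_MASK = 0xfffffc00) and bits 9:5 hold the
 * 32-entry ring index.  E.g. req_idx = 3 gives index = 3 << 5 = 0x60,
 * which merges cleanly with the base via bitwise OR.
 */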
/**
 *	mv_start_dma - Enable eDMA engine
 *	@base: port base address
 *	@pp: port private data
 *
 *	Verify the local cache of the eDMA state is accurate with a
 *	WARN_ON.
 *
 *	LOCKING:
 *	Inherited from caller.
 */
static void mv_start_dma(struct ata_port *ap, void __iomem *port_mmio,
			 struct mv_port_priv *pp, u8 protocol)
{
	int want_ncq = (protocol == ATA_PROT_NCQ);

	if (pp->pp_flags & MV_PP_FLAG_EDMA_EN) {
		int using_ncq = ((pp->pp_flags & MV_PP_FLAG_NCQ_EN) != 0);
		if (want_ncq != using_ncq)
			mv_stop_edma_engine(ap);
	}
	if (!(pp->pp_flags & MV_PP_FLAG_EDMA_EN)) {
		struct mv_host_priv *hpriv = ap->host->private_data;
		int hard_port = mv_hardport_from_port(ap->port_no);
		void __iomem *hc_mmio = mv_hc_base_from_port(
					mv_host_base(ap->host), hard_port);
		u32 hc_irq_cause, ipending;

		/* clear EDMA event indicators, if any */
		writelfl(0, port_mmio + EDMA_ERR_IRQ_CAUSE_OFS);

		/* clear EDMA interrupt indicator, if any */
		hc_irq_cause = readl(hc_mmio + HC_IRQ_CAUSE_OFS);
		ipending = (DEV_IRQ << hard_port) |
			   (CRPB_DMA_DONE << hard_port);
		if (hc_irq_cause & ipending) {
			writelfl(hc_irq_cause & ~ipending,
				 hc_mmio + HC_IRQ_CAUSE_OFS);
		}

		mv_edma_cfg(ap, want_ncq);

		/* clear FIS IRQ Cause */
		writelfl(0, port_mmio + SATA_FIS_IRQ_CAUSE_OFS);

		mv_set_edma_ptrs(port_mmio, hpriv, pp);

		writelfl(EDMA_EN, port_mmio + EDMA_CMD_OFS);
		pp->pp_flags |= MV_PP_FLAG_EDMA_EN;
	}
	WARN_ON(!(EDMA_EN & readl(port_mmio + EDMA_CMD_OFS)));
}

/**
 *	mv_stop_edma_engine - Disable eDMA engine
 *	@ap: ATA channel to manipulate
 *
 *	Verify the local cache of the eDMA state is accurate with a
 *	WARN_ON.
 *
 *	LOCKING:
 *	Inherited from caller.
 */
static int mv_stop_edma_engine(struct ata_port *ap)
{
	void __iomem *port_mmio = mv_ap_base(ap);
	struct mv_port_priv *pp = ap->private_data;
	u32 reg;
	int i;

	if (pp->pp_flags & MV_PP_FLAG_EDMA_EN) {
		/* Disable EDMA if active.  The disable bit auto clears. */
		writelfl(EDMA_DS, port_mmio + EDMA_CMD_OFS);
		pp->pp_flags &= ~MV_PP_FLAG_EDMA_EN;
	} else {
		WARN_ON(EDMA_EN & readl(port_mmio + EDMA_CMD_OFS));
	}

	/* now properly wait for the eDMA to stop */
	for (i = 1000; i > 0; i--) {
		reg = readl(port_mmio + EDMA_CMD_OFS);
		if (!(reg & EDMA_EN))
			break;

		udelay(100);
	}

	if (reg & EDMA_EN) {
		ata_port_printk(ap, KERN_ERR, "Unable to stop eDMA\n");
		return -EIO;
	}

	return 0;
}

static int mv_stop_edma(struct ata_port *ap)
{
	unsigned long flags;
	int rc;

	spin_lock_irqsave(&ap->host->lock, flags);
	rc = mv_stop_edma_engine(ap);
	spin_unlock_irqrestore(&ap->host->lock, flags);

	return rc;
}
static void mv_dump_mem(void __iomem *start, unsigned bytes)
{
	int b, w;
	for (b = 0; b < bytes; ) {
		DPRINTK("%p: ", start + b);
		for (w = 0; b < bytes && w < 4; w++) {
			printk("%08x ", readl(start + b));
			b += sizeof(u32);
		}
		printk("\n");
	}
}

static void mv_dump_pci_cfg(struct pci_dev *pdev, unsigned bytes)
{
#ifdef ATA_DEBUG
	int b, w;
	u32 dw;
	for (b = 0; b < bytes; ) {
		DPRINTK("%02x: ", b);
		for (w = 0; b < bytes && w < 4; w++) {
			(void) pci_read_config_dword(pdev, b, &dw);
			printk("%08x ", dw);
			b += sizeof(u32);
		}
		printk("\n");
	}
#endif
}

static void mv_dump_all_regs(void __iomem *mmio_base, int port,
			     struct pci_dev *pdev)
{
#ifdef ATA_DEBUG
	void __iomem *hc_base = mv_hc_base(mmio_base,
					   port >> MV_PORT_HC_SHIFT);
	void __iomem *port_base;
	int start_port, num_ports, p, start_hc, num_hcs, hc;

	if (0 > port) {
		start_hc = start_port = 0;
		num_ports = 8;		/* should be benign for 4 port devs */
		num_hcs = 2;
	} else {
		start_hc = port >> MV_PORT_HC_SHIFT;
		start_port = port;
		num_ports = num_hcs = 1;
	}
	DPRINTK("All registers for port(s) %u-%u:\n", start_port,
		num_ports > 1 ? num_ports - 1 : start_port);

	if (NULL != pdev) {
		DPRINTK("PCI config space regs:\n");
		mv_dump_pci_cfg(pdev, 0x68);
	}
	DPRINTK("PCI regs:\n");
	mv_dump_mem(mmio_base+0xc00, 0x3c);
	mv_dump_mem(mmio_base+0xd00, 0x34);
	mv_dump_mem(mmio_base+0xf00, 0x4);
	mv_dump_mem(mmio_base+0x1d00, 0x6c);
	for (hc = start_hc; hc < start_hc + num_hcs; hc++) {
		hc_base = mv_hc_base(mmio_base, hc);
		DPRINTK("HC regs (HC %i):\n", hc);
		mv_dump_mem(hc_base, 0x1c);
	}
	for (p = start_port; p < start_port + num_ports; p++) {
		port_base = mv_port_base(mmio_base, p);
		DPRINTK("EDMA regs (port %i):\n", p);
		mv_dump_mem(port_base, 0x54);
		DPRINTK("SATA regs (port %i):\n", p);
		mv_dump_mem(port_base+0x300, 0x60);
	}
#endif
}
static unsigned int mv_scr_offset(unsigned int sc_reg_in)
{
	unsigned int ofs;

	switch (sc_reg_in) {
	case SCR_STATUS:
	case SCR_CONTROL:
	case SCR_ERROR:
		ofs = SATA_STATUS_OFS + (sc_reg_in * sizeof(u32));
		break;
	case SCR_ACTIVE:
		ofs = SATA_ACTIVE_OFS;	/* active is not with the others */
		break;
	default:
		ofs = 0xffffffffU;
		break;
	}
	return ofs;
}

static int mv_scr_read(struct ata_port *ap, unsigned int sc_reg_in, u32 *val)
{
	unsigned int ofs = mv_scr_offset(sc_reg_in);

	if (ofs != 0xffffffffU) {
		*val = readl(mv_ap_base(ap) + ofs);
		return 0;
	} else
		return -EINVAL;
}

static int mv_scr_write(struct ata_port *ap, unsigned int sc_reg_in, u32 val)
{
	unsigned int ofs = mv_scr_offset(sc_reg_in);

	if (ofs != 0xffffffffU) {
		writelfl(val, mv_ap_base(ap) + ofs);
		return 0;
	} else
		return -EINVAL;
}
static void mv6_dev_config(struct ata_device *adev)
{
	/*
	 * We don't have hob_nsect when doing NCQ commands on Gen-II.
	 * See mv_qc_prep() for more info.
	 */
	if (adev->flags & ATA_DFLAG_NCQ)
		if (adev->max_sectors > ATA_MAX_SECTORS)
			adev->max_sectors = ATA_MAX_SECTORS;
}
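/*
 * Illustrative note (not from the original source): without hob_nsect,
 * the sector count for Gen-II NCQ is limited to its low 8 bits, i.e. at
 * most ATA_MAX_SECTORS (256) sectors per command, hence the clamp above.
 */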
static void mv_edma_cfg(struct ata_port *ap, int want_ncq)
{
	u32 cfg;
	struct mv_port_priv *pp = ap->private_data;
	struct mv_host_priv *hpriv = ap->host->private_data;
	void __iomem *port_mmio = mv_ap_base(ap);

	/* set up non-NCQ EDMA configuration */
	cfg = EDMA_CFG_Q_DEPTH;		/* always 0x1f for *all* chips */

	if (IS_GEN_I(hpriv))
		cfg |= (1 << 8);	/* enab config burst size mask */

	else if (IS_GEN_II(hpriv))
		cfg |= EDMA_CFG_RD_BRST_EXT | EDMA_CFG_WR_BUFF_LEN;

	else if (IS_GEN_IIE(hpriv)) {
		cfg |= (1 << 23);	/* do not mask PM field in rx'd FIS */
		cfg |= (1 << 22);	/* enab 4-entry host queue cache */
		cfg |= (1 << 18);	/* enab early completion */
		cfg |= (1 << 17);	/* enab cut-through (dis stor&forwrd) */
	}

	if (want_ncq) {
		cfg |= EDMA_CFG_NCQ;
		pp->pp_flags |= MV_PP_FLAG_NCQ_EN;
	} else
		pp->pp_flags &= ~MV_PP_FLAG_NCQ_EN;

	writelfl(cfg, port_mmio + EDMA_CFG_OFS);
}

static void mv_port_free_dma_mem(struct ata_port *ap)
{
	struct mv_host_priv *hpriv = ap->host->private_data;
	struct mv_port_priv *pp = ap->private_data;
	int tag;

	if (pp->crqb) {
		dma_pool_free(hpriv->crqb_pool, pp->crqb, pp->crqb_dma);
		pp->crqb = NULL;
	}
	if (pp->crpb) {
		dma_pool_free(hpriv->crpb_pool, pp->crpb, pp->crpb_dma);
		pp->crpb = NULL;
	}
	/*
	 * For GEN_I, there's no NCQ, so we have only a single sg_tbl.
	 * For later hardware, we have one unique sg_tbl per NCQ tag.
	 */
	for (tag = 0; tag < MV_MAX_Q_DEPTH; ++tag) {
		if (pp->sg_tbl[tag]) {
			if (tag == 0 || !IS_GEN_I(hpriv))
				dma_pool_free(hpriv->sg_tbl_pool,
					      pp->sg_tbl[tag],
					      pp->sg_tbl_dma[tag]);
			pp->sg_tbl[tag] = NULL;
		}
	}
}
/**
 *	mv_port_start - Port specific init/start routine.
 *	@ap: ATA channel to manipulate
 *
 *	Allocate and point to DMA memory, init port private memory,
 *	zero indices.
 *
 *	LOCKING:
 *	Inherited from caller.
 */
static int mv_port_start(struct ata_port *ap)
{
	struct device *dev = ap->host->dev;
	struct mv_host_priv *hpriv = ap->host->private_data;
	struct mv_port_priv *pp;
	void __iomem *port_mmio = mv_ap_base(ap);
	unsigned long flags;
	int tag;

	pp = devm_kzalloc(dev, sizeof(*pp), GFP_KERNEL);
	if (!pp)
		return -ENOMEM;
	ap->private_data = pp;

	pp->crqb = dma_pool_alloc(hpriv->crqb_pool, GFP_KERNEL, &pp->crqb_dma);
	if (!pp->crqb)
		return -ENOMEM;
	memset(pp->crqb, 0, MV_CRQB_Q_SZ);

	pp->crpb = dma_pool_alloc(hpriv->crpb_pool, GFP_KERNEL, &pp->crpb_dma);
	if (!pp->crpb)
		goto out_port_free_dma_mem;
	memset(pp->crpb, 0, MV_CRPB_Q_SZ);

	/*
	 * For GEN_I, there's no NCQ, so we only allocate a single sg_tbl.
	 * For later hardware, we need one unique sg_tbl per NCQ tag.
	 */
	for (tag = 0; tag < MV_MAX_Q_DEPTH; ++tag) {
		if (tag == 0 || !IS_GEN_I(hpriv)) {
			pp->sg_tbl[tag] = dma_pool_alloc(hpriv->sg_tbl_pool,
					      GFP_KERNEL, &pp->sg_tbl_dma[tag]);
			if (!pp->sg_tbl[tag])
				goto out_port_free_dma_mem;
		} else {
			pp->sg_tbl[tag] = pp->sg_tbl[0];
			pp->sg_tbl_dma[tag] = pp->sg_tbl_dma[0];
		}
	}

	spin_lock_irqsave(&ap->host->lock, flags);

	mv_edma_cfg(ap, 0);
	mv_set_edma_ptrs(port_mmio, hpriv, pp);

	spin_unlock_irqrestore(&ap->host->lock, flags);

	/* Don't turn on EDMA here...do it before DMA commands only.  Else
	 * we'll be unable to send non-data, PIO, etc due to restricted access
	 * to shadow regs.
	 */
	return 0;

out_port_free_dma_mem:
	mv_port_free_dma_mem(ap);
	return -ENOMEM;
}

/**
 *	mv_port_stop - Port specific cleanup/stop routine.
 *	@ap: ATA channel to manipulate
 *
 *	Stop DMA, cleanup port memory.
 *
 *	LOCKING:
 *	This routine uses the host lock to protect the DMA stop.
 */
static void mv_port_stop(struct ata_port *ap)
{
	mv_stop_edma(ap);
	mv_port_free_dma_mem(ap);
}
/**
 *	mv_fill_sg - Fill out the Marvell ePRD (scatter gather) entries
 *	@qc: queued command whose SG list to source from
 *
 *	Populate the SG list and mark the last entry.
 *
 *	LOCKING:
 *	Inherited from caller.
 */
static void mv_fill_sg(struct ata_queued_cmd *qc)
{
	struct mv_port_priv *pp = qc->ap->private_data;
	struct scatterlist *sg;
	struct mv_sg *mv_sg, *last_sg = NULL;
	unsigned int si;

	mv_sg = pp->sg_tbl[qc->tag];
	for_each_sg(qc->sg, sg, qc->n_elem, si) {
		dma_addr_t addr = sg_dma_address(sg);
		u32 sg_len = sg_dma_len(sg);

		while (sg_len) {
			u32 offset = addr & 0xffff;
			u32 len = sg_len;

			if ((offset + sg_len > 0x10000))
				len = 0x10000 - offset;

			mv_sg->addr = cpu_to_le32(addr & 0xffffffff);
			mv_sg->addr_hi = cpu_to_le32((addr >> 16) >> 16);
			mv_sg->flags_size = cpu_to_le32(len & 0xffff);

			sg_len -= len;
			addr += len;

			last_sg = mv_sg;
			mv_sg++;
		}
	}

	if (likely(last_sg))
		last_sg->flags_size |= cpu_to_le32(EPRD_FLAG_END_OF_TBL);
}
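/*
 * Worked example (illustrative, not from the original source): a 24KB
 * buffer mapped at DMA address 0x1f000 has offset = 0x1f000 & 0xffff =
 * 0xf000, so the first ePRD is clipped to 0x10000 - 0xf000 = 0x1000
 * bytes and the remaining 20KB becomes a second entry.  This worst-case
 * doubling is why .sg_tablesize above is MV_MAX_SG_CT / 2.
 */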
static void mv_crqb_pack_cmd(__le16 *cmdw, u8 data, u8 addr, unsigned last)
{
	u16 tmp = data | (addr << CRQB_CMD_ADDR_SHIFT) | CRQB_CMD_CS |
		(last ? CRQB_CMD_LAST : 0);
	*cmdw = cpu_to_le16(tmp);
}
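/*
 * Illustrative layout (not from the original source): each packed
 * halfword carries the register byte in bits 7:0, the ATA register
 * address in bits 10:8 (addr << CRQB_CMD_ADDR_SHIFT), the CRQB_CMD_CS
 * field (0x2 << 11) above that, and CRQB_CMD_LAST in bit 15 to
 * terminate the command list.
 */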
/**
 *	mv_qc_prep - Host specific command preparation.
 *	@qc: queued command to prepare
 *
 *	This routine simply redirects to the general purpose routine
 *	if command is not DMA.  Else, it handles prep of the CRQB
 *	(command request block), does some sanity checking, and calls
 *	the SG load routine.
 *
 *	LOCKING:
 *	Inherited from caller.
 */
static void mv_qc_prep(struct ata_queued_cmd *qc)
{
	struct ata_port *ap = qc->ap;
	struct mv_port_priv *pp = ap->private_data;
	__le16 *cw;
	struct ata_taskfile *tf;
	u16 flags = 0;
	unsigned in_index;

	if ((qc->tf.protocol != ATA_PROT_DMA) &&
	    (qc->tf.protocol != ATA_PROT_NCQ))
		return;

	/* Fill in command request block
	 */
	if (!(qc->tf.flags & ATA_TFLAG_WRITE))
		flags |= CRQB_FLAG_READ;
	WARN_ON(MV_MAX_Q_DEPTH <= qc->tag);
	flags |= qc->tag << CRQB_TAG_SHIFT;

	/* get current queue index from software */
	in_index = pp->req_idx & MV_MAX_Q_DEPTH_MASK;

	pp->crqb[in_index].sg_addr =
		cpu_to_le32(pp->sg_tbl_dma[qc->tag] & 0xffffffff);
	pp->crqb[in_index].sg_addr_hi =
		cpu_to_le32((pp->sg_tbl_dma[qc->tag] >> 16) >> 16);
	pp->crqb[in_index].ctrl_flags = cpu_to_le16(flags);

	cw = &pp->crqb[in_index].ata_cmd[0];
	tf = &qc->tf;

	/* Sadly, the CRQB cannot accommodate all registers--there are
	 * only 11 bytes...so we must pick and choose required
	 * registers based on the command.  So, we drop feature and
	 * hob_feature for [RW] DMA commands, but they are needed for
	 * NCQ.  NCQ will drop hob_nsect.
	 */
	switch (tf->command) {
	case ATA_CMD_READ:
	case ATA_CMD_READ_EXT:
	case ATA_CMD_WRITE:
	case ATA_CMD_WRITE_EXT:
	case ATA_CMD_WRITE_FUA_EXT:
		mv_crqb_pack_cmd(cw++, tf->hob_nsect, ATA_REG_NSECT, 0);
		break;
	case ATA_CMD_FPDMA_READ:
	case ATA_CMD_FPDMA_WRITE:
		mv_crqb_pack_cmd(cw++, tf->hob_feature, ATA_REG_FEATURE, 0);
		mv_crqb_pack_cmd(cw++, tf->feature, ATA_REG_FEATURE, 0);
		break;
	default:
		/* The only other commands EDMA supports in non-queued and
		 * non-NCQ mode are: [RW] STREAM DMA and W DMA FUA EXT, none
		 * of which are defined/used by Linux.  If we get here, this
		 * driver needs work.
		 *
		 * FIXME: modify libata to give qc_prep a return value and
		 * return error here.
		 */
		BUG_ON(tf->command);
		break;
	}
	mv_crqb_pack_cmd(cw++, tf->nsect, ATA_REG_NSECT, 0);
	mv_crqb_pack_cmd(cw++, tf->hob_lbal, ATA_REG_LBAL, 0);
	mv_crqb_pack_cmd(cw++, tf->lbal, ATA_REG_LBAL, 0);
	mv_crqb_pack_cmd(cw++, tf->hob_lbam, ATA_REG_LBAM, 0);
	mv_crqb_pack_cmd(cw++, tf->lbam, ATA_REG_LBAM, 0);
	mv_crqb_pack_cmd(cw++, tf->hob_lbah, ATA_REG_LBAH, 0);
	mv_crqb_pack_cmd(cw++, tf->lbah, ATA_REG_LBAH, 0);
	mv_crqb_pack_cmd(cw++, tf->device, ATA_REG_DEVICE, 0);
	mv_crqb_pack_cmd(cw++, tf->command, ATA_REG_CMD, 1);	/* last */

	if (!(qc->flags & ATA_QCFLAG_DMAMAP))
		return;
	mv_fill_sg(qc);
}
/**
 *	mv_qc_prep_iie - Host specific command preparation.
 *	@qc: queued command to prepare
 *
 *	This routine simply redirects to the general purpose routine
 *	if command is not DMA.  Else, it handles prep of the CRQB
 *	(command request block), does some sanity checking, and calls
 *	the SG load routine.
 *
 *	LOCKING:
 *	Inherited from caller.
 */
static void mv_qc_prep_iie(struct ata_queued_cmd *qc)
{
	struct ata_port *ap = qc->ap;
	struct mv_port_priv *pp = ap->private_data;
	struct mv_crqb_iie *crqb;
	struct ata_taskfile *tf;
	unsigned in_index;
	u32 flags = 0;

	if ((qc->tf.protocol != ATA_PROT_DMA) &&
	    (qc->tf.protocol != ATA_PROT_NCQ))
		return;

	/* Fill in Gen IIE command request block */
	if (!(qc->tf.flags & ATA_TFLAG_WRITE))
		flags |= CRQB_FLAG_READ;

	WARN_ON(MV_MAX_Q_DEPTH <= qc->tag);
	flags |= qc->tag << CRQB_TAG_SHIFT;
	flags |= qc->tag << CRQB_HOSTQ_SHIFT;

	/* get current queue index from software */
	in_index = pp->req_idx & MV_MAX_Q_DEPTH_MASK;

	crqb = (struct mv_crqb_iie *) &pp->crqb[in_index];
	crqb->addr = cpu_to_le32(pp->sg_tbl_dma[qc->tag] & 0xffffffff);
	crqb->addr_hi = cpu_to_le32((pp->sg_tbl_dma[qc->tag] >> 16) >> 16);
	crqb->flags = cpu_to_le32(flags);

	tf = &qc->tf;
	crqb->ata_cmd[0] = cpu_to_le32(
			(tf->command << 16) |
			(tf->feature << 24)
		);
	crqb->ata_cmd[1] = cpu_to_le32(
			(tf->lbal << 0) |
			(tf->lbam << 8) |
			(tf->lbah << 16) |
			(tf->device << 24)
		);
	crqb->ata_cmd[2] = cpu_to_le32(
			(tf->hob_lbal << 0) |
			(tf->hob_lbam << 8) |
			(tf->hob_lbah << 16) |
			(tf->hob_feature << 24)
		);
	crqb->ata_cmd[3] = cpu_to_le32(
			(tf->nsect << 0) |
			(tf->hob_nsect << 8)
		);

	if (!(qc->flags & ATA_QCFLAG_DMAMAP))
		return;
	mv_fill_sg(qc);
}
/**
 *	mv_qc_issue - Initiate a command to the host
 *	@qc: queued command to start
 *
 *	This routine simply redirects to the general purpose routine
 *	if command is not DMA.  Else, it sanity checks our local
 *	caches of the request producer/consumer indices then enables
 *	DMA and bumps the request producer index.
 *
 *	LOCKING:
 *	Inherited from caller.
 */
static unsigned int mv_qc_issue(struct ata_queued_cmd *qc)
{
	struct ata_port *ap = qc->ap;
	void __iomem *port_mmio = mv_ap_base(ap);
	struct mv_port_priv *pp = ap->private_data;
	unsigned in_index;

	if ((qc->tf.protocol != ATA_PROT_DMA) &&
	    (qc->tf.protocol != ATA_PROT_NCQ)) {
		/* We're about to send a non-EDMA capable command to the
		 * port.  Turn off EDMA so there won't be problems accessing
		 * shadow block, etc registers.
		 */
		mv_stop_edma_engine(ap);
		return ata_qc_issue_prot(qc);
	}

	mv_start_dma(ap, port_mmio, pp, qc->tf.protocol);

	pp->req_idx++;

	in_index = (pp->req_idx & MV_MAX_Q_DEPTH_MASK) << EDMA_REQ_Q_PTR_SHIFT;

	/* and write the request in pointer to kick the EDMA to life */
	writelfl((pp->crqb_dma & EDMA_REQ_Q_BASE_LO_MASK) | in_index,
		 port_mmio + EDMA_REQ_Q_IN_PTR_OFS);

	return 0;
}
/**
 *	mv_err_intr - Handle error interrupts on the port
 *	@ap: ATA channel to manipulate
 *	@qc: affected command, or NULL
 *
 *	In most cases, just clear the interrupt and move on.  However,
 *	some cases require an eDMA reset, which also performs a COMRESET.
 *	The SERR case requires a clear of pending errors in the SATA
 *	SERROR register.  Finally, if the port disabled DMA,
 *	update our cached copy to match.
 *
 *	LOCKING:
 *	Inherited from caller.
 */
static void mv_err_intr(struct ata_port *ap, struct ata_queued_cmd *qc)
{
	void __iomem *port_mmio = mv_ap_base(ap);
	u32 edma_err_cause, eh_freeze_mask, serr = 0;
	struct mv_port_priv *pp = ap->private_data;
	struct mv_host_priv *hpriv = ap->host->private_data;
	unsigned int edma_enabled = (pp->pp_flags & MV_PP_FLAG_EDMA_EN);
	unsigned int action = 0, err_mask = 0;
	struct ata_eh_info *ehi = &ap->link.eh_info;

	ata_ehi_clear_desc(ehi);

	if (!edma_enabled) {
		/* just a guess: do we need to do this? should we
		 * expand this, and do it in all cases?
		 */
		sata_scr_read(&ap->link, SCR_ERROR, &serr);
		sata_scr_write_flush(&ap->link, SCR_ERROR, serr);
	}

	edma_err_cause = readl(port_mmio + EDMA_ERR_IRQ_CAUSE_OFS);

	ata_ehi_push_desc(ehi, "edma_err 0x%08x", edma_err_cause);

	/*
	 * all generations share these EDMA error cause bits
	 */
	if (edma_err_cause & EDMA_ERR_DEV)
		err_mask |= AC_ERR_DEV;
	if (edma_err_cause & (EDMA_ERR_D_PAR | EDMA_ERR_PRD_PAR |
			EDMA_ERR_CRQB_PAR | EDMA_ERR_CRPB_PAR |
			EDMA_ERR_INTRL_PAR)) {
		err_mask |= AC_ERR_ATA_BUS;
		action |= ATA_EH_RESET;
		ata_ehi_push_desc(ehi, "parity error");
	}
	if (edma_err_cause & (EDMA_ERR_DEV_DCON | EDMA_ERR_DEV_CON)) {
		ata_ehi_hotplugged(ehi);
		ata_ehi_push_desc(ehi, edma_err_cause & EDMA_ERR_DEV_DCON ?
			"dev disconnect" : "dev connect");
		action |= ATA_EH_RESET;
	}

	if (IS_GEN_I(hpriv)) {
		eh_freeze_mask = EDMA_EH_FREEZE_5;

		if (edma_err_cause & EDMA_ERR_SELF_DIS_5) {
			pp = ap->private_data;
			pp->pp_flags &= ~MV_PP_FLAG_EDMA_EN;
			ata_ehi_push_desc(ehi, "EDMA self-disable");
		}
	} else {
		eh_freeze_mask = EDMA_EH_FREEZE;

		if (edma_err_cause & EDMA_ERR_SELF_DIS) {
			pp = ap->private_data;
			pp->pp_flags &= ~MV_PP_FLAG_EDMA_EN;
			ata_ehi_push_desc(ehi, "EDMA self-disable");
		}

		if (edma_err_cause & EDMA_ERR_SERR) {
			sata_scr_read(&ap->link, SCR_ERROR, &serr);
			sata_scr_write_flush(&ap->link, SCR_ERROR, serr);
			err_mask = AC_ERR_ATA_BUS;
			action |= ATA_EH_RESET;
		}
	}

	/* Clear EDMA now that SERR cleanup done */
	writelfl(~edma_err_cause, port_mmio + EDMA_ERR_IRQ_CAUSE_OFS);

	if (!err_mask) {
		err_mask = AC_ERR_OTHER;
		action |= ATA_EH_RESET;
	}

	ehi->serror |= serr;
	ehi->action |= action;

	if (qc)
		qc->err_mask |= err_mask;
	else
		ehi->err_mask |= err_mask;

	if (edma_err_cause & eh_freeze_mask)
		ata_port_freeze(ap);
	else
		ata_port_abort(ap);
}
static void mv_intr_pio(struct ata_port *ap)
{
	struct ata_queued_cmd *qc;
	u8 ata_status;

	/* ignore spurious intr if drive still BUSY */
	ata_status = readb(ap->ioaddr.status_addr);
	if (unlikely(ata_status & ATA_BUSY))
		return;

	/* get active ATA command */
	qc = ata_qc_from_tag(ap, ap->link.active_tag);
	if (unlikely(!qc))			/* no active tag */
		return;
	if (qc->tf.flags & ATA_TFLAG_POLLING)	/* polling; we don't own qc */
		return;

	/* and finally, complete the ATA command */
	qc->err_mask |= ac_err_mask(ata_status);
	ata_qc_complete(qc);
}

static void mv_intr_edma(struct ata_port *ap)
{
	void __iomem *port_mmio = mv_ap_base(ap);
	struct mv_host_priv *hpriv = ap->host->private_data;
	struct mv_port_priv *pp = ap->private_data;
	struct ata_queued_cmd *qc;
	u32 out_index, in_index;
	bool work_done = false;

	/* get h/w response queue pointer */
	in_index = (readl(port_mmio + EDMA_RSP_Q_IN_PTR_OFS)
			>> EDMA_RSP_Q_PTR_SHIFT) & MV_MAX_Q_DEPTH_MASK;

	while (1) {
		u16 status;
		unsigned int tag;

		/* get s/w response queue last-read pointer, and compare */
		out_index = pp->resp_idx & MV_MAX_Q_DEPTH_MASK;
		if (in_index == out_index)
			break;

		/* 50xx: get active ATA command */
		if (IS_GEN_I(hpriv))
			tag = ap->link.active_tag;
		else {
			/* Gen II/IIE: get active ATA command via tag, to enable
			 * support for queueing.  this works transparently for
			 * queued and non-queued modes.
			 */
			tag = le16_to_cpu(pp->crpb[out_index].id) & 0x1f;
		}

		qc = ata_qc_from_tag(ap, tag);

		/* For non-NCQ mode, the lower 8 bits of status
		 * are from EDMA_ERR_IRQ_CAUSE_OFS,
		 * which should be zero if all went well.
		 */
		status = le16_to_cpu(pp->crpb[out_index].flags);
		if ((status & 0xff) && !(pp->pp_flags & MV_PP_FLAG_NCQ_EN)) {
			mv_err_intr(ap, qc);
			return;
		}

		/* and finally, complete the ATA command */
		if (qc) {
			qc->err_mask |=
				ac_err_mask(status >> CRPB_FLAG_STATUS_SHIFT);
			ata_qc_complete(qc);
		}

		/* advance software response queue pointer, to
		 * indicate (after the loop completes) to hardware
		 * that we have consumed a response queue entry.
		 */
		work_done = true;
		pp->resp_idx++;
	}

	if (work_done)
		writelfl((pp->crpb_dma & EDMA_RSP_Q_BASE_LO_MASK) |
			 (out_index << EDMA_RSP_Q_PTR_SHIFT),
			 port_mmio + EDMA_RSP_Q_OUT_PTR_OFS);
}
/**
 *	mv_host_intr - Handle all interrupts on the given host controller
 *	@host: host specific structure
 *	@relevant: port error bits relevant to this host controller
 *	@hc: which host controller we're to look at
 *
 *	Read then write clear the HC interrupt status then walk each
 *	port connected to the HC and see if it needs servicing.  Port
 *	success ints are reported in the HC interrupt status reg, the
 *	port error ints are reported in the higher level main
 *	interrupt status register and thus are passed in via the
 *	'relevant' argument.
 *
 *	LOCKING:
 *	Inherited from caller.
 */
static void mv_host_intr(struct ata_host *host, u32 relevant, unsigned int hc)
{
	struct mv_host_priv *hpriv = host->private_data;
	void __iomem *mmio = hpriv->base;
	void __iomem *hc_mmio = mv_hc_base(mmio, hc);
	u32 hc_irq_cause;
	int port, port0, last_port;

	if (hc == 0)
		port0 = 0;
	else
		port0 = MV_PORTS_PER_HC;

	if (HAS_PCI(host))
		last_port = port0 + MV_PORTS_PER_HC;
	else
		last_port = port0 + hpriv->n_ports;
	/* we'll need the HC success int register in most cases */
	hc_irq_cause = readl(hc_mmio + HC_IRQ_CAUSE_OFS);
	if (!hc_irq_cause)
		return;

	writelfl(~hc_irq_cause, hc_mmio + HC_IRQ_CAUSE_OFS);

	VPRINTK("ENTER, hc%u relevant=0x%08x HC IRQ cause=0x%08x\n",
		hc, relevant, hc_irq_cause);

	for (port = port0; port < last_port; port++) {
		struct ata_port *ap = host->ports[port];
		struct mv_port_priv *pp;
		int have_err_bits, hard_port, shift;

		if ((!ap) || (ap->flags & ATA_FLAG_DISABLED))
			continue;

		pp = ap->private_data;

		shift = port << 1;		/* (port * 2) */
		if (port >= MV_PORTS_PER_HC)
			shift++;	/* skip bit 8 in the HC Main IRQ reg */

		have_err_bits = ((PORT0_ERR << shift) & relevant);
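		/*
		 * Worked example (illustrative, not from the original
		 * source): for port 5 (HC1's second port),
		 * shift = (5 << 1) + 1 = 11, the "+1" skipping bit 8
		 * (HC0's coalescing bit), so PORT0_ERR << 11 selects
		 * port 5's error bit in the main cause register.
		 */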
		if (unlikely(have_err_bits)) {
			struct ata_queued_cmd *qc;

			qc = ata_qc_from_tag(ap, ap->link.active_tag);
			if (qc && (qc->tf.flags & ATA_TFLAG_POLLING))
				continue;

			mv_err_intr(ap, qc);
			continue;
		}

		hard_port = mv_hardport_from_port(port); /* range 0..3 */

		if (pp->pp_flags & MV_PP_FLAG_EDMA_EN) {
			if ((CRPB_DMA_DONE << hard_port) & hc_irq_cause)
				mv_intr_edma(ap);
		} else {
			if ((DEV_IRQ << hard_port) & hc_irq_cause)
				mv_intr_pio(ap);
		}
	}
	VPRINTK("EXIT\n");
}
static void mv_pci_error(struct ata_host *host, void __iomem *mmio)
{
	struct mv_host_priv *hpriv = host->private_data;
	struct ata_port *ap;
	struct ata_queued_cmd *qc;
	struct ata_eh_info *ehi;
	unsigned int i, err_mask, printed = 0;
	u32 err_cause;

	err_cause = readl(mmio + hpriv->irq_cause_ofs);

	dev_printk(KERN_ERR, host->dev, "PCI ERROR; PCI IRQ cause=0x%08x\n",
		   err_cause);

	DPRINTK("All regs @ PCI error\n");
	mv_dump_all_regs(mmio, -1, to_pci_dev(host->dev));

	writelfl(0, mmio + hpriv->irq_cause_ofs);

	for (i = 0; i < host->n_ports; i++) {
		ap = host->ports[i];
		if (!ata_link_offline(&ap->link)) {
			ehi = &ap->link.eh_info;
			ata_ehi_clear_desc(ehi);
			if (!printed++)
				ata_ehi_push_desc(ehi,
					"PCI err cause 0x%08x", err_cause);
			err_mask = AC_ERR_HOST_BUS;
			ehi->action = ATA_EH_RESET;
			qc = ata_qc_from_tag(ap, ap->link.active_tag);
			if (qc)
				qc->err_mask |= err_mask;
			else
				ehi->err_mask |= err_mask;

			ata_port_freeze(ap);
		}
	}
}
/**
 *	mv_interrupt - Main interrupt event handler
 *	@irq: unused
 *	@dev_instance: private data; in this case the host structure
 *
 *	Read the read only register to determine if any host
 *	controllers have pending interrupts.  If so, call lower level
 *	routine to handle.  Also check for PCI errors which are only
 *	reported here.
 *
 *	LOCKING:
 *	This routine holds the host lock while processing pending
 *	interrupts.
 */
static irqreturn_t mv_interrupt(int irq, void *dev_instance)
{
	struct ata_host *host = dev_instance;
	struct mv_host_priv *hpriv = host->private_data;
	unsigned int hc, handled = 0, n_hcs;
	void __iomem *mmio = hpriv->base;
	u32 irq_stat, irq_mask;

	/* Note to self: &host->lock == &ap->host->lock == ap->lock */
	spin_lock(&host->lock);

	irq_stat = readl(hpriv->main_cause_reg_addr);
	irq_mask = readl(hpriv->main_mask_reg_addr);

	/* check the cases where we either have nothing pending or have read
	 * a bogus register value which can indicate HW removal or PCI fault
	 */
	if (!(irq_stat & irq_mask) || (0xffffffffU == irq_stat))
		goto out_unlock;

	n_hcs = mv_get_hc_count(host->ports[0]->flags);

	if (unlikely((irq_stat & PCI_ERR) && HAS_PCI(host))) {
		mv_pci_error(host, mmio);
		handled = 1;
		goto out_unlock;	/* skip all other HC irq handling */
	}

	for (hc = 0; hc < n_hcs; hc++) {
		u32 relevant = irq_stat & (HC0_IRQ_PEND << (hc * HC_SHIFT));
		if (relevant) {
			mv_host_intr(host, relevant, hc);
			handled = 1;
		}
	}

out_unlock:
	spin_unlock(&host->lock);

	return IRQ_RETVAL(handled);
}
static unsigned int mv5_scr_offset(unsigned int sc_reg_in)
{
	unsigned int ofs;

	switch (sc_reg_in) {
	case SCR_STATUS:
	case SCR_ERROR:
	case SCR_CONTROL:
		ofs = sc_reg_in * sizeof(u32);
		break;
	default:
		ofs = 0xffffffffU;
		break;
	}
	return ofs;
}

static int mv5_scr_read(struct ata_port *ap, unsigned int sc_reg_in, u32 *val)
{
	struct mv_host_priv *hpriv = ap->host->private_data;
	void __iomem *mmio = hpriv->base;
	void __iomem *addr = mv5_phy_base(mmio, ap->port_no);
	unsigned int ofs = mv5_scr_offset(sc_reg_in);

	if (ofs != 0xffffffffU) {
		*val = readl(addr + ofs);
		return 0;
	} else
		return -EINVAL;
}

static int mv5_scr_write(struct ata_port *ap, unsigned int sc_reg_in, u32 val)
{
	struct mv_host_priv *hpriv = ap->host->private_data;
	void __iomem *mmio = hpriv->base;
	void __iomem *addr = mv5_phy_base(mmio, ap->port_no);
	unsigned int ofs = mv5_scr_offset(sc_reg_in);

	if (ofs != 0xffffffffU) {
		writelfl(val, addr + ofs);
		return 0;
	} else
		return -EINVAL;
}
static void mv5_reset_bus(struct ata_host *host, void __iomem *mmio)
{
	struct pci_dev *pdev = to_pci_dev(host->dev);
	int early_5080;

	early_5080 = (pdev->device == 0x5080) && (pdev->revision == 0);

	if (!early_5080) {
		u32 tmp = readl(mmio + MV_PCI_EXP_ROM_BAR_CTL);
		tmp |= (1 << 0);
		writel(tmp, mmio + MV_PCI_EXP_ROM_BAR_CTL);
	}

	mv_reset_pci_bus(host, mmio);
}

static void mv5_reset_flash(struct mv_host_priv *hpriv, void __iomem *mmio)
{
	writel(0x0fcfffff, mmio + MV_FLASH_CTL);
}

static void mv5_read_preamp(struct mv_host_priv *hpriv, int idx,
			    void __iomem *mmio)
{
	void __iomem *phy_mmio = mv5_phy_base(mmio, idx);
	u32 tmp;

	tmp = readl(phy_mmio + MV5_PHY_MODE);

	hpriv->signal[idx].pre = tmp & 0x1800;	/* bits 12:11 */
	hpriv->signal[idx].amps = tmp & 0xe0;	/* bits 7:5 */
}

static void mv5_enable_leds(struct mv_host_priv *hpriv, void __iomem *mmio)
{
	u32 tmp;

	writel(0, mmio + MV_GPIO_PORT_CTL);

	/* FIXME: handle MV_HP_ERRATA_50XXB2 errata */

	tmp = readl(mmio + MV_PCI_EXP_ROM_BAR_CTL);
	tmp |= ~(1 << 0);
	writel(tmp, mmio + MV_PCI_EXP_ROM_BAR_CTL);
}

static void mv5_phy_errata(struct mv_host_priv *hpriv, void __iomem *mmio,
			   unsigned int port)
{
	void __iomem *phy_mmio = mv5_phy_base(mmio, port);
	const u32 mask = (1<<12) | (1<<11) | (1<<7) | (1<<6) | (1<<5);
	u32 tmp;
	int fix_apm_sq = (hpriv->hp_flags & MV_HP_ERRATA_50XXB0);

	if (fix_apm_sq) {
		tmp = readl(phy_mmio + MV5_LT_MODE);
		tmp |= (1 << 19);
		writel(tmp, phy_mmio + MV5_LT_MODE);

		tmp = readl(phy_mmio + MV5_PHY_CTL);
		tmp &= ~0x3;
		tmp |= 0x1;
		writel(tmp, phy_mmio + MV5_PHY_CTL);
	}

	tmp = readl(phy_mmio + MV5_PHY_MODE);
	tmp &= ~mask;
	tmp |= hpriv->signal[port].pre;
	tmp |= hpriv->signal[port].amps;
	writel(tmp, phy_mmio + MV5_PHY_MODE);
}
#define ZERO(reg) writel(0, port_mmio + (reg))
static void mv5_reset_hc_port(struct mv_host_priv *hpriv, void __iomem *mmio,
			      unsigned int port)
{
	void __iomem *port_mmio = mv_port_base(mmio, port);

	writelfl(EDMA_DS, port_mmio + EDMA_CMD_OFS);

	mv_reset_channel(hpriv, mmio, port);

	ZERO(0x028);	/* command */
	writel(0x11f, port_mmio + EDMA_CFG_OFS);
	ZERO(0x004);	/* timer */
	ZERO(0x008);	/* irq err cause */
	ZERO(0x00c);	/* irq err mask */
	ZERO(0x010);	/* rq bah */
	ZERO(0x014);	/* rq inp */
	ZERO(0x018);	/* rq outp */
	ZERO(0x01c);	/* respq bah */
	ZERO(0x024);	/* respq outp */
	ZERO(0x020);	/* respq inp */
	ZERO(0x02c);	/* test control */
	writel(0xbc, port_mmio + EDMA_IORDY_TMOUT);
}

#undef ZERO
#define ZERO(reg) writel(0, hc_mmio + (reg))
static void mv5_reset_one_hc(struct mv_host_priv *hpriv, void __iomem *mmio,
			     unsigned int hc)
{
	void __iomem *hc_mmio = mv_hc_base(mmio, hc);
	u32 tmp;

	ZERO(0x00c);
	ZERO(0x010);
	ZERO(0x014);
	ZERO(0x018);

	tmp = readl(hc_mmio + 0x20);
	tmp &= 0x1c1c1c1c;
	tmp |= 0x03030303;
	writel(tmp, hc_mmio + 0x20);
}
#undef ZERO

static int mv5_reset_hc(struct mv_host_priv *hpriv, void __iomem *mmio,
			unsigned int n_hc)
{
	unsigned int hc, port;

	for (hc = 0; hc < n_hc; hc++) {
		for (port = 0; port < MV_PORTS_PER_HC; port++)
			mv5_reset_hc_port(hpriv, mmio,
					  (hc * MV_PORTS_PER_HC) + port);

		mv5_reset_one_hc(hpriv, mmio, hc);
	}

	return 0;
}
#define ZERO(reg) writel(0, mmio + (reg))
static void mv_reset_pci_bus(struct ata_host *host, void __iomem *mmio)
{
	struct mv_host_priv *hpriv = host->private_data;
	u32 tmp;

	tmp = readl(mmio + MV_PCI_MODE);
	tmp &= 0xff00ffff;
	writel(tmp, mmio + MV_PCI_MODE);

	ZERO(MV_PCI_DISC_TIMER);
	ZERO(MV_PCI_MSI_TRIGGER);
	writel(0x000100ff, mmio + MV_PCI_XBAR_TMOUT);
	ZERO(HC_MAIN_IRQ_MASK_OFS);
	ZERO(MV_PCI_SERR_MASK);
	ZERO(hpriv->irq_cause_ofs);
	ZERO(hpriv->irq_mask_ofs);
	ZERO(MV_PCI_ERR_LOW_ADDRESS);
	ZERO(MV_PCI_ERR_HIGH_ADDRESS);
	ZERO(MV_PCI_ERR_ATTRIBUTE);
	ZERO(MV_PCI_ERR_COMMAND);
}
#undef ZERO

static void mv6_reset_flash(struct mv_host_priv *hpriv, void __iomem *mmio)
{
	u32 tmp;

	mv5_reset_flash(hpriv, mmio);

	tmp = readl(mmio + MV_GPIO_PORT_CTL);
	tmp &= 0x3;
	tmp |= (1 << 5) | (1 << 6);
	writel(tmp, mmio + MV_GPIO_PORT_CTL);
}
/**
 *	mv6_reset_hc - Perform the 6xxx global soft reset
 *	@mmio: base address of the HBA
 *
 *	This routine only applies to 6xxx parts.
 *
 *	LOCKING:
 *	Inherited from caller.
 */
static int mv6_reset_hc(struct mv_host_priv *hpriv, void __iomem *mmio,
			unsigned int n_hc)
{
	void __iomem *reg = mmio + PCI_MAIN_CMD_STS_OFS;
	int i, rc = 0;
	u32 t;

	/* Following procedure defined in PCI "main command and status
	 * register" table.
	 */
	t = readl(reg);
	writel(t | STOP_PCI_MASTER, reg);

	for (i = 0; i < 1000; i++) {
		udelay(1);
		t = readl(reg);
		if (PCI_MASTER_EMPTY & t)
			break;
	}
	if (!(PCI_MASTER_EMPTY & t)) {
		printk(KERN_ERR DRV_NAME ": PCI master won't flush\n");
		rc = 1;
		goto done;
	}

	/* set reset */
	i = 5;
	do {
		writel(t | GLOB_SFT_RST, reg);
		t = readl(reg);
		udelay(1);
	} while (!(GLOB_SFT_RST & t) && (i-- > 0));

	if (!(GLOB_SFT_RST & t)) {
		printk(KERN_ERR DRV_NAME ": can't set global reset\n");
		rc = 1;
		goto done;
	}

	/* clear reset and *reenable the PCI master* (not mentioned in spec) */
	i = 5;
	do {
		writel(t & ~(GLOB_SFT_RST | STOP_PCI_MASTER), reg);
		t = readl(reg);
		udelay(1);
	} while ((GLOB_SFT_RST & t) && (i-- > 0));

	if (GLOB_SFT_RST & t) {
		printk(KERN_ERR DRV_NAME ": can't clear global reset\n");
		rc = 1;
	}
done:
	return rc;
}
static void mv6_read_preamp(struct mv_host_priv *hpriv, int idx,
			    void __iomem *mmio)
{
	void __iomem *port_mmio;
	u32 tmp;

	tmp = readl(mmio + MV_RESET_CFG);
	if ((tmp & (1 << 0)) == 0) {
		hpriv->signal[idx].amps = 0x7 << 8;
		hpriv->signal[idx].pre = 0x1 << 5;
		return;
	}

	port_mmio = mv_port_base(mmio, idx);
	tmp = readl(port_mmio + PHY_MODE2);

	hpriv->signal[idx].amps = tmp & 0x700;	/* bits 10:8 */
	hpriv->signal[idx].pre = tmp & 0xe0;	/* bits 7:5 */
}

static void mv6_enable_leds(struct mv_host_priv *hpriv, void __iomem *mmio)
{
	writel(0x00000060, mmio + MV_GPIO_PORT_CTL);
}
static void mv6_phy_errata(struct mv_host_priv *hpriv, void __iomem *mmio,
			   unsigned int port)
{
	void __iomem *port_mmio = mv_port_base(mmio, port);

	u32 hp_flags = hpriv->hp_flags;
	int fix_phy_mode2 =
		hp_flags & (MV_HP_ERRATA_60X1B2 | MV_HP_ERRATA_60X1C0);
	int fix_phy_mode4 =
		hp_flags & (MV_HP_ERRATA_60X1B2 | MV_HP_ERRATA_60X1C0);
	u32 m2, tmp;

	if (fix_phy_mode2) {
		m2 = readl(port_mmio + PHY_MODE2);
		m2 &= ~(1 << 16);
		m2 |= (1 << 31);
		writel(m2, port_mmio + PHY_MODE2);

		udelay(200);

		m2 = readl(port_mmio + PHY_MODE2);
		m2 &= ~((1 << 16) | (1 << 31));
		writel(m2, port_mmio + PHY_MODE2);

		udelay(200);
	}

	/* who knows what this magic does */
	tmp = readl(port_mmio + PHY_MODE3);
	tmp &= ~0x7F800000;
	tmp |= 0x2A800000;
	writel(tmp, port_mmio + PHY_MODE3);

	if (fix_phy_mode4) {
		u32 m4;

		m4 = readl(port_mmio + PHY_MODE4);

		if (hp_flags & MV_HP_ERRATA_60X1B2)
			tmp = readl(port_mmio + PHY_MODE3);

		/* workaround for errata FEr SATA#10 (part 1) */
		m4 = (m4 & ~(1 << 1)) | (1 << 0);

		writel(m4, port_mmio + PHY_MODE4);

		if (hp_flags & MV_HP_ERRATA_60X1B2)
			writel(tmp, port_mmio + PHY_MODE3);
	}

	/* Revert values of pre-emphasis and signal amps to the saved ones */
	m2 = readl(port_mmio + PHY_MODE2);

	m2 &= ~MV_M2_PREAMP_MASK;
	m2 |= hpriv->signal[port].amps;
	m2 |= hpriv->signal[port].pre;
	m2 &= ~(1 << 16);

	/* according to mvSata 3.6.1, some IIE values are fixed */
	if (IS_GEN_IIE(hpriv)) {
		m2 &= ~0xC30FF01F;
		m2 |= 0x0000900F;
	}

	writel(m2, port_mmio + PHY_MODE2);
}
/* TODO: use the generic LED interface to configure the SATA Presence */
/* & Activity LEDs on the board */
static void mv_soc_enable_leds(struct mv_host_priv *hpriv,
			       void __iomem *mmio)
{
	return;
}

static void mv_soc_read_preamp(struct mv_host_priv *hpriv, int idx,
			       void __iomem *mmio)
{
	void __iomem *port_mmio;
	u32 tmp;

	port_mmio = mv_port_base(mmio, idx);
	tmp = readl(port_mmio + PHY_MODE2);

	hpriv->signal[idx].amps = tmp & 0x700;	/* bits 10:8 */
	hpriv->signal[idx].pre = tmp & 0xe0;	/* bits 7:5 */
}
2188 #define ZERO(reg) writel(0, port_mmio + (reg))
2189 static void mv_soc_reset_hc_port(struct mv_host_priv *hpriv,
2190 void __iomem *mmio, unsigned int port)
2192 void __iomem *port_mmio = mv_port_base(mmio, port);
2194 writelfl(EDMA_DS, port_mmio + EDMA_CMD_OFS);
2196 mv_reset_channel(hpriv, mmio, port);
2198 ZERO(0x028); /* command */
2199 writel(0x101f, port_mmio + EDMA_CFG_OFS);
2200 ZERO(0x004); /* timer */
2201 ZERO(0x008); /* irq err cause */
2202 ZERO(0x00c); /* irq err mask */
2203 ZERO(0x010); /* rq bah */
2204 ZERO(0x014); /* rq inp */
2205 ZERO(0x018); /* rq outp */
2206 ZERO(0x01c); /* respq bah */
2207 ZERO(0x024); /* respq outp */
2208 ZERO(0x020); /* respq inp */
2209 ZERO(0x02c); /* test control */
2210 writel(0xbc, port_mmio + EDMA_IORDY_TMOUT);
2215 #define ZERO(reg) writel(0, hc_mmio + (reg))
2216 static void mv_soc_reset_one_hc(struct mv_host_priv *hpriv,
2219 void __iomem *hc_mmio = mv_hc_base(mmio, 0);
2229 static int mv_soc_reset_hc(struct mv_host_priv *hpriv,
2230 void __iomem *mmio, unsigned int n_hc)
2234 for (port = 0; port < hpriv->n_ports; port++)
2235 mv_soc_reset_hc_port(hpriv, mmio, port);
2237 mv_soc_reset_one_hc(hpriv, mmio);
static void mv_soc_reset_flash(struct mv_host_priv *hpriv,
			       void __iomem *mmio)
{
	return;
}

static void mv_soc_reset_bus(struct ata_host *host, void __iomem *mmio)
{
	return;
}
static void mv_reset_channel(struct mv_host_priv *hpriv, void __iomem *mmio,
			     unsigned int port_no)
{
	void __iomem *port_mmio = mv_port_base(mmio, port_no);

	writelfl(ATA_RST, port_mmio + EDMA_CMD_OFS);

	if (IS_GEN_II(hpriv)) {
		u32 ifctl = readl(port_mmio + SATA_INTERFACE_CFG);
		ifctl |= (1 << 7);		/* enable gen2i speed */
		ifctl = (ifctl & 0xfff) | 0x9b1000; /* from chip spec */
		writelfl(ifctl, port_mmio + SATA_INTERFACE_CFG);
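		/*
		 * Only the low 12 bits of SATA_INTERFACE_CFG survive; the
		 * upper bits are forced to the chip-spec value, with bit 7
		 * selecting gen2i PHY speed.  mv_init_host() applies the
		 * same fixup to every Gen-II port at init time.
		 */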
	}

	udelay(25);		/* allow reset propagation */

	/* Spec never mentions clearing the bit.  Marvell's driver does
	 * clear the bit, however.
	 */
	writelfl(0, port_mmio + EDMA_CMD_OFS);

	hpriv->ops->phy_errata(hpriv, mmio, port_no);

	if (IS_GEN_I(hpriv))
		mdelay(1);
}
/**
 *	mv_phy_reset - Perform eDMA reset followed by COMRESET
 *	@ap: ATA channel to manipulate
 *
 *	Part of this is taken from __sata_phy_reset and was originally
 *	modified not to sleep, for use from interrupt level.
 *
 *	LOCKING:
 *	Inherited from caller.  Note that the current implementation
 *	does sleep (via msleep), so it must not be called at interrupt
 *	level.
 */
static void mv_phy_reset(struct ata_port *ap, unsigned int *class,
			 unsigned long deadline)
{
	struct mv_port_priv *pp = ap->private_data;
	struct mv_host_priv *hpriv = ap->host->private_data;
	void __iomem *port_mmio = mv_ap_base(ap);
	int retry = 5;
	u32 sstatus;

	VPRINTK("ENTER, port %u, mmio 0x%p\n", ap->port_no, port_mmio);

#ifdef DEBUG
	{
		u32 sstatus, serror, scontrol;

		mv_scr_read(ap, SCR_STATUS, &sstatus);
		mv_scr_read(ap, SCR_ERROR, &serror);
		mv_scr_read(ap, SCR_CONTROL, &scontrol);
		DPRINTK("S-regs after ATA_RST: SStat 0x%08x SErr 0x%08x "
			"SCtrl 0x%08x\n", sstatus, serror, scontrol);
	}
#endif

	/* Issue COMRESET via SControl */
comreset_retry:
	sata_scr_write_flush(&ap->link, SCR_CONTROL, 0x301);
	msleep(1);

	sata_scr_write_flush(&ap->link, SCR_CONTROL, 0x300);
	msleep(20);
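	/*
	 * In SControl (per the SATA spec), DET occupies bits 3:0 and IPM
	 * bits 11:8: writing 0x301 asserts DET=1 (start interface
	 * initialization) with partial/slumber power states disabled, and
	 * 0x300 releases DET so the link can renegotiate.  The loop below
	 * then polls the SStatus DET field until it settles at 0 (no
	 * device) or 3 (device present, PHY communication established).
	 */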
	do {
		sata_scr_read(&ap->link, SCR_STATUS, &sstatus);
		if (((sstatus & 0x3) == 3) || ((sstatus & 0x3) == 0))
			break;

		msleep(1);
	} while (time_before(jiffies, deadline));

	/* work around errata */
	if (IS_GEN_II(hpriv) &&
	    (sstatus != 0x0) && (sstatus != 0x113) && (sstatus != 0x123) &&
	    (retry-- > 0))
		goto comreset_retry;

#ifdef DEBUG
	{
		u32 sstatus, serror, scontrol;

		mv_scr_read(ap, SCR_STATUS, &sstatus);
		mv_scr_read(ap, SCR_ERROR, &serror);
		mv_scr_read(ap, SCR_CONTROL, &scontrol);
		DPRINTK("S-regs after PHY wake: SStat 0x%08x SErr 0x%08x "
			"SCtrl 0x%08x\n", sstatus, serror, scontrol);
	}
#endif

	if (ata_link_offline(&ap->link)) {
		*class = ATA_DEV_NONE;
		return;
	}

	/* even after SStatus reflects that device is ready,
	 * it seems to take a while for link to be fully
	 * established (and thus Status no longer 0x80/0x7F),
	 * so we poll a bit for that, here.
	 */
	retry = 20;
	while (1) {
		u8 drv_stat = ata_check_status(ap);
		if ((drv_stat != 0x80) && (drv_stat != 0x7f))
			break;
		msleep(500);
		if (retry-- <= 0)
			break;
		if (time_after(jiffies, deadline))
			break;
	}

	/* FIXME: if we passed the deadline, the following
	 * code probably produces an invalid result
	 */

	/* finally, read device signature from TF registers */
	*class = ata_dev_try_classify(ap->link.device, 1, NULL);

	writelfl(0, port_mmio + EDMA_ERR_IRQ_CAUSE_OFS);

	WARN_ON(pp->pp_flags & MV_PP_FLAG_EDMA_EN);

	VPRINTK("EXIT\n");
}
static int mv_prereset(struct ata_link *link, unsigned long deadline)
{
	mv_stop_edma(link->ap);
	return 0;
}
static int mv_hardreset(struct ata_link *link, unsigned int *class,
			unsigned long deadline)
{
	struct ata_port *ap = link->ap;
	struct mv_host_priv *hpriv = ap->host->private_data;
	void __iomem *mmio = hpriv->base;

	mv_stop_edma(ap);
	mv_reset_channel(hpriv, mmio, ap->port_no);
	mv_phy_reset(ap, class, deadline);

	return 0;
}
static void mv_postreset(struct ata_link *link, unsigned int *classes)
{
	struct ata_port *ap = link->ap;
	u32 serr;

	/* print link status */
	sata_print_link_status(link);

	/* clear SError */
	sata_scr_read(link, SCR_ERROR, &serr);
	sata_scr_write_flush(link, SCR_ERROR, serr);

	/* bail out if no device is present */
	if (classes[0] == ATA_DEV_NONE && classes[1] == ATA_DEV_NONE) {
		DPRINTK("EXIT, no device\n");
		return;
	}

	/* set up device control */
	iowrite8(ap->ctl, ap->ioaddr.ctl_addr);
}
static void mv_eh_freeze(struct ata_port *ap)
{
	struct mv_host_priv *hpriv = ap->host->private_data;
	unsigned int hc = (ap->port_no > 3) ? 1 : 0;
	u32 tmp, mask;
	unsigned int shift;

	/* FIXME: handle coalescing completion events properly */

	shift = ap->port_no * 2;
	if (hc > 0)
		shift++;

	mask = 0x3 << shift;

	/* disable assertion of portN err, done events */
	tmp = readl(hpriv->main_mask_reg_addr);
	writelfl(tmp & ~mask, hpriv->main_mask_reg_addr);
}
static void mv_eh_thaw(struct ata_port *ap)
{
	struct mv_host_priv *hpriv = ap->host->private_data;
	void __iomem *mmio = hpriv->base;
	unsigned int hc = (ap->port_no > 3) ? 1 : 0;
	void __iomem *hc_mmio = mv_hc_base(mmio, hc);
	void __iomem *port_mmio = mv_ap_base(ap);
	u32 tmp, mask, hc_irq_cause;
	unsigned int shift, hc_port_no = ap->port_no;

	/* FIXME: handle coalescing completion events properly */

	shift = ap->port_no * 2;
	if (hc > 0) {
		shift++;
		hc_port_no -= 4;
	}

	mask = 0x3 << shift;

	/* clear EDMA errors on this port */
	writel(0, port_mmio + EDMA_ERR_IRQ_CAUSE_OFS);

	/* clear pending irq events */
	hc_irq_cause = readl(hc_mmio + HC_IRQ_CAUSE_OFS);
	hc_irq_cause &= ~(1 << hc_port_no);	/* clear CRPB-done */
	hc_irq_cause &= ~(1 << (hc_port_no + 8)); /* clear Device int */
	writel(hc_irq_cause, hc_mmio + HC_IRQ_CAUSE_OFS);

	/* enable assertion of portN err, done events */
	tmp = readl(hpriv->main_mask_reg_addr);
	writelfl(tmp | mask, hpriv->main_mask_reg_addr);
}
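/*
 * Note the freeze/thaw asymmetry: mv_eh_freeze() only masks the two
 * per-port bits (error + done) in the main mask register, while
 * mv_eh_thaw() must additionally clear any EDMA error causes and stale
 * CRPB-done/device interrupt events accumulated while frozen, so that
 * unmasking does not immediately re-raise them.
 */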
/**
 *	mv_port_init - Perform some early initialization on a single port.
 *	@port: libata data structure storing shadow register addresses
 *	@port_mmio: base address of the port
 *
 *	Initialize shadow register mmio addresses, clear outstanding
 *	interrupts on the port, and unmask interrupts for the future
 *	start of the port.
 *
 *	LOCKING:
 *	Inherited from caller.
 */
static void mv_port_init(struct ata_ioports *port, void __iomem *port_mmio)
{
	void __iomem *shd_base = port_mmio + SHD_BLK_OFS;
	unsigned serr_ofs;

	/* PIO related setup
	 */
	port->data_addr = shd_base + (sizeof(u32) * ATA_REG_DATA);
	port->error_addr =
		port->feature_addr = shd_base + (sizeof(u32) * ATA_REG_ERR);
	port->nsect_addr = shd_base + (sizeof(u32) * ATA_REG_NSECT);
	port->lbal_addr = shd_base + (sizeof(u32) * ATA_REG_LBAL);
	port->lbam_addr = shd_base + (sizeof(u32) * ATA_REG_LBAM);
	port->lbah_addr = shd_base + (sizeof(u32) * ATA_REG_LBAH);
	port->device_addr = shd_base + (sizeof(u32) * ATA_REG_DEVICE);
	port->status_addr =
		port->command_addr = shd_base + (sizeof(u32) * ATA_REG_STATUS);
	/* special case: control/altstatus doesn't have ATA_REG_ address */
	port->altstatus_addr = port->ctl_addr = shd_base + SHD_CTL_AST_OFS;

	/* unused: */
	port->cmd_addr = port->bmdma_addr = port->scr_addr = NULL;

	/* Clear any currently outstanding port interrupt conditions */
	serr_ofs = mv_scr_offset(SCR_ERROR);
	writelfl(readl(port_mmio + serr_ofs), port_mmio + serr_ofs);
	writelfl(0, port_mmio + EDMA_ERR_IRQ_CAUSE_OFS);

	/* unmask all non-transient EDMA error interrupts */
	writelfl(~EDMA_ERR_IRQ_TRANSIENT, port_mmio + EDMA_ERR_IRQ_MASK_OFS);

	VPRINTK("EDMA cfg=0x%08x EDMA IRQ err cause/mask=0x%08x/0x%08x\n",
		readl(port_mmio + EDMA_CFG_OFS),
		readl(port_mmio + EDMA_ERR_IRQ_CAUSE_OFS),
		readl(port_mmio + EDMA_ERR_IRQ_MASK_OFS));
}
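/*
 * The shadow register block exposes the ATA taskfile registers at u32
 * strides, which is why each address above is shd_base plus
 * sizeof(u32) * ATA_REG_<x>; control/altstatus is the one exception,
 * living at its own SHD_CTL_AST_OFS offset within the port window.
 */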
static int mv_chip_id(struct ata_host *host, unsigned int board_idx)
{
	struct pci_dev *pdev = to_pci_dev(host->dev);
	struct mv_host_priv *hpriv = host->private_data;
	u32 hp_flags = hpriv->hp_flags;

	switch (board_idx) {
	case chip_5080:
		hpriv->ops = &mv5xxx_ops;
		hp_flags |= MV_HP_GEN_I;

		switch (pdev->revision) {
		case 0x1:
			hp_flags |= MV_HP_ERRATA_50XXB0;
			break;
		case 0x3:
			hp_flags |= MV_HP_ERRATA_50XXB2;
			break;
		default:
			dev_printk(KERN_WARNING, &pdev->dev,
			   "Applying 50XXB2 workarounds to unknown rev\n");
			hp_flags |= MV_HP_ERRATA_50XXB2;
			break;
		}
		break;

	case chip_504x:
	case chip_508x:
		hpriv->ops = &mv5xxx_ops;
		hp_flags |= MV_HP_GEN_I;

		switch (pdev->revision) {
		case 0x0:
			hp_flags |= MV_HP_ERRATA_50XXB0;
			break;
		case 0x3:
			hp_flags |= MV_HP_ERRATA_50XXB2;
			break;
		default:
			dev_printk(KERN_WARNING, &pdev->dev,
			   "Applying B2 workarounds to unknown rev\n");
			hp_flags |= MV_HP_ERRATA_50XXB2;
			break;
		}
		break;

	case chip_604x:
	case chip_608x:
		hpriv->ops = &mv6xxx_ops;
		hp_flags |= MV_HP_GEN_II;

		switch (pdev->revision) {
		case 0x7:
			hp_flags |= MV_HP_ERRATA_60X1B2;
			break;
		case 0x9:
			hp_flags |= MV_HP_ERRATA_60X1C0;
			break;
		default:
			dev_printk(KERN_WARNING, &pdev->dev,
				   "Applying B2 workarounds to unknown rev\n");
			hp_flags |= MV_HP_ERRATA_60X1B2;
			break;
		}
		break;

	case chip_7042:
		hp_flags |= MV_HP_PCIE;
		if (pdev->vendor == PCI_VENDOR_ID_TTI &&
		    (pdev->device == 0x2300 || pdev->device == 0x2310)) {
			/*
			 * Highpoint RocketRAID PCIe 23xx series cards:
			 *
			 * Unconfigured drives are treated as "Legacy"
			 * by the BIOS, and it overwrites sector 8 with
			 * a "Lgcy" metadata block prior to Linux boot.
			 *
			 * Configured drives (RAID or JBOD) leave sector 8
			 * alone, but instead overwrite a high numbered
			 * sector for the RAID metadata.  This sector can
			 * be determined exactly, by truncating the physical
			 * drive capacity to a nice even GB value.
			 *
			 * RAID metadata is at: (dev->n_sectors & ~0xfffff)
			 *
			 * Warn the user, lest they think we're just buggy.
			 */
			printk(KERN_WARNING DRV_NAME ": Highpoint RocketRAID"
				" BIOS CORRUPTS DATA on all attached drives,"
				" regardless of if/how they are configured."
				" BEWARE!\n");
			printk(KERN_WARNING DRV_NAME ": For data safety, do not"
				" use sectors 8-9 on \"Legacy\" drives,"
				" and avoid the final two gigabytes on"
				" all RocketRAID BIOS initialized drives.\n");
		}
		/* fall through: chip_7042 shares the Gen-IIE setup below */
	case chip_6042:
		hpriv->ops = &mv6xxx_ops;
		hp_flags |= MV_HP_GEN_IIE;

		switch (pdev->revision) {
		case 0x0:
			hp_flags |= MV_HP_ERRATA_XX42A0;
			break;
		case 0x1:
			hp_flags |= MV_HP_ERRATA_60X1C0;
			break;
		default:
			dev_printk(KERN_WARNING, &pdev->dev,
			   "Applying 60X1C0 workarounds to unknown rev\n");
			hp_flags |= MV_HP_ERRATA_60X1C0;
			break;
		}
		break;

	case chip_soc:
		hpriv->ops = &mv_soc_ops;
		hp_flags |= MV_HP_ERRATA_60X1C0;
		break;

	default:
		dev_printk(KERN_ERR, host->dev,
			   "BUG: invalid board index %u\n", board_idx);
		return 1;
	}

	hpriv->hp_flags = hp_flags;
	if (hp_flags & MV_HP_PCIE) {
		hpriv->irq_cause_ofs	= PCIE_IRQ_CAUSE_OFS;
		hpriv->irq_mask_ofs	= PCIE_IRQ_MASK_OFS;
		hpriv->unmask_all_irqs	= PCIE_UNMASK_ALL_IRQS;
	} else {
		hpriv->irq_cause_ofs	= PCI_IRQ_CAUSE_OFS;
		hpriv->irq_mask_ofs	= PCI_IRQ_MASK_OFS;
		hpriv->unmask_all_irqs	= PCI_UNMASK_ALL_IRQS;
	}

	return 0;
}
/**
 *	mv_init_host - Perform some early initialization of the host.
 *	@host: ATA host to initialize
 *	@board_idx: controller index
 *
 *	If possible, do an early global reset of the host.  Then do
 *	our port init and clear/unmask all/relevant host interrupts.
 *
 *	LOCKING:
 *	Inherited from caller.
 */
static int mv_init_host(struct ata_host *host, unsigned int board_idx)
{
	int rc = 0, n_hc, port, hc;
	struct mv_host_priv *hpriv = host->private_data;
	void __iomem *mmio = hpriv->base;

	rc = mv_chip_id(host, board_idx);
	if (rc)
		goto done;

	if (HAS_PCI(host)) {
		hpriv->main_cause_reg_addr = hpriv->base +
		  HC_MAIN_IRQ_CAUSE_OFS;
		hpriv->main_mask_reg_addr = hpriv->base + HC_MAIN_IRQ_MASK_OFS;
	} else {
		hpriv->main_cause_reg_addr = hpriv->base +
		  HC_SOC_MAIN_IRQ_CAUSE_OFS;
		hpriv->main_mask_reg_addr = hpriv->base +
		  HC_SOC_MAIN_IRQ_MASK_OFS;
	}
	/* global interrupt mask */
	writel(0, hpriv->main_mask_reg_addr);
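	/*
	 * Everything stays masked from here on: the per-port resets and
	 * PHY errata fixups below must not be able to raise interrupts
	 * mid-init.  The relevant bits are unmasked again at the bottom
	 * of this function.
	 */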
	n_hc = mv_get_hc_count(host->ports[0]->flags);

	for (port = 0; port < host->n_ports; port++)
		hpriv->ops->read_preamp(hpriv, port, mmio);

	rc = hpriv->ops->reset_hc(hpriv, mmio, n_hc);
	if (rc)
		goto done;

	hpriv->ops->reset_flash(hpriv, mmio);
	hpriv->ops->reset_bus(host, mmio);
	hpriv->ops->enable_leds(hpriv, mmio);

	for (port = 0; port < host->n_ports; port++) {
		if (IS_GEN_II(hpriv)) {
			void __iomem *port_mmio = mv_port_base(mmio, port);

			u32 ifctl = readl(port_mmio + SATA_INTERFACE_CFG);
			ifctl |= (1 << 7);		/* enable gen2i speed */
			ifctl = (ifctl & 0xfff) | 0x9b1000; /* from chip spec */
			writelfl(ifctl, port_mmio + SATA_INTERFACE_CFG);
		}

		hpriv->ops->phy_errata(hpriv, mmio, port);
	}

	for (port = 0; port < host->n_ports; port++) {
		struct ata_port *ap = host->ports[port];
		void __iomem *port_mmio = mv_port_base(mmio, port);

		mv_port_init(&ap->ioaddr, port_mmio);

#ifdef CONFIG_PCI
		if (HAS_PCI(host)) {
			unsigned int offset = port_mmio - mmio;
			ata_port_pbar_desc(ap, MV_PRIMARY_BAR, -1, "mmio");
			ata_port_pbar_desc(ap, MV_PRIMARY_BAR, offset, "port");
		}
#endif
	}

	for (hc = 0; hc < n_hc; hc++) {
		void __iomem *hc_mmio = mv_hc_base(mmio, hc);

		VPRINTK("HC%i: HC config=0x%08x HC IRQ cause "
			"(before clear)=0x%08x\n", hc,
			readl(hc_mmio + HC_CFG_OFS),
			readl(hc_mmio + HC_IRQ_CAUSE_OFS));

		/* Clear any currently outstanding hc interrupt conditions */
		writelfl(0, hc_mmio + HC_IRQ_CAUSE_OFS);
	}

	if (HAS_PCI(host)) {
		/* Clear any currently outstanding host interrupt conditions */
		writelfl(0, mmio + hpriv->irq_cause_ofs);

		/* and unmask interrupt generation for host regs */
		writelfl(hpriv->unmask_all_irqs, mmio + hpriv->irq_mask_ofs);
		if (IS_GEN_I(hpriv))
			writelfl(~HC_MAIN_MASKED_IRQS_5,
				 hpriv->main_mask_reg_addr);
		else
			writelfl(~HC_MAIN_MASKED_IRQS,
				 hpriv->main_mask_reg_addr);

		VPRINTK("HC MAIN IRQ cause/mask=0x%08x/0x%08x "
			"PCI int cause/mask=0x%08x/0x%08x\n",
			readl(hpriv->main_cause_reg_addr),
			readl(hpriv->main_mask_reg_addr),
			readl(mmio + hpriv->irq_cause_ofs),
			readl(mmio + hpriv->irq_mask_ofs));
	} else {
		writelfl(~HC_MAIN_MASKED_IRQS_SOC,
			 hpriv->main_mask_reg_addr);
		VPRINTK("HC MAIN IRQ cause/mask=0x%08x/0x%08x\n",
			readl(hpriv->main_cause_reg_addr),
			readl(hpriv->main_mask_reg_addr));
	}
done:
	return rc;
}
static int mv_create_dma_pools(struct mv_host_priv *hpriv, struct device *dev)
{
	hpriv->crqb_pool = dmam_pool_create("crqb_q", dev, MV_CRQB_Q_SZ,
					    MV_CRQB_Q_SZ, 0);
	if (!hpriv->crqb_pool)
		return -ENOMEM;

	hpriv->crpb_pool = dmam_pool_create("crpb_q", dev, MV_CRPB_Q_SZ,
					    MV_CRPB_Q_SZ, 0);
	if (!hpriv->crpb_pool)
		return -ENOMEM;

	hpriv->sg_tbl_pool = dmam_pool_create("sg_tbl", dev, MV_SG_TBL_SZ,
					      MV_SG_TBL_SZ, 0);
	if (!hpriv->sg_tbl_pool)
		return -ENOMEM;

	return 0;
}
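/*
 * dmam_pool_create() is the device-managed variant, so the pools are
 * released automatically when the device goes away and the error paths
 * above need no explicit destroy calls.  Each pool's alignment argument
 * equals its element size, which (it appears) keeps every CRQB/CRPB
 * queue and SG table within a single naturally-aligned block.
 */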
/**
 *	mv_platform_probe - handle a positive probe of an SoC Marvell host
 *	@pdev: platform device found
 *
 *	LOCKING:
 *	Inherited from caller.
 */
static int mv_platform_probe(struct platform_device *pdev)
{
	static int printed_version;
	const struct mv_sata_platform_data *mv_platform_data;
	const struct ata_port_info *ppi[] =
	    { &mv_port_info[chip_soc], NULL };
	struct ata_host *host;
	struct mv_host_priv *hpriv;
	struct resource *res;
	int n_ports, rc;

	if (!printed_version++)
		dev_printk(KERN_INFO, &pdev->dev, "version " DRV_VERSION "\n");

	/*
	 * Simple resource validation ..
	 */
	if (unlikely(pdev->num_resources != 2)) {
		dev_err(&pdev->dev, "invalid number of resources\n");
		return -EINVAL;
	}

	/*
	 * Get the register base first
	 */
	res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
	if (res == NULL)
		return -EINVAL;

	/* allocate host */
	mv_platform_data = pdev->dev.platform_data;
	n_ports = mv_platform_data->n_ports;

	host = ata_host_alloc_pinfo(&pdev->dev, ppi, n_ports);
	hpriv = devm_kzalloc(&pdev->dev, sizeof(*hpriv), GFP_KERNEL);

	if (!host || !hpriv)
		return -ENOMEM;
	host->private_data = hpriv;
	hpriv->n_ports = n_ports;

	host->iomap = NULL;
	hpriv->base = devm_ioremap(&pdev->dev, res->start,
				   res->end - res->start + 1);
	hpriv->base -= MV_SATAHC0_REG_BASE;
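	/*
	 * The platform resource maps only the SATAHC0 register block,
	 * while the driver's register offsets are relative to the start
	 * of the chip's SATA region; biasing the base downward by
	 * MV_SATAHC0_REG_BASE lets the shared code use the same offsets
	 * on both PCI and SoC hosts.
	 */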
	rc = mv_create_dma_pools(hpriv, &pdev->dev);
	if (rc)
		return rc;

	/* initialize adapter */
	rc = mv_init_host(host, chip_soc);
	if (rc)
		return rc;

	dev_printk(KERN_INFO, &pdev->dev,
		   "slots %u ports %d\n", (unsigned)MV_MAX_Q_DEPTH,
		   host->n_ports);

	return ata_host_activate(host, platform_get_irq(pdev, 0), mv_interrupt,
				 IRQF_SHARED, &mv6_sht);
}
/**
 *	mv_platform_remove - unplug a platform interface
 *	@pdev: platform device
 *
 *	A platform bus SATA device has been unplugged.  Perform the needed
 *	cleanup.  Also called on module unload for any active devices.
 */
static int __devexit mv_platform_remove(struct platform_device *pdev)
{
	struct device *dev = &pdev->dev;
	struct ata_host *host = dev_get_drvdata(dev);

	ata_host_detach(host);
	return 0;
}
static struct platform_driver mv_platform_driver = {
	.probe		= mv_platform_probe,
	.remove		= __devexit_p(mv_platform_remove),
	.driver		= {
		.name	= DRV_NAME,
		.owner	= THIS_MODULE,
	},
};
#ifdef CONFIG_PCI
static int mv_pci_init_one(struct pci_dev *pdev,
			   const struct pci_device_id *ent);

static struct pci_driver mv_pci_driver = {
	.name		= DRV_NAME,
	.id_table	= mv_pci_tbl,
	.probe		= mv_pci_init_one,
	.remove		= ata_pci_remove_one,
};

/*
 * module options
 */
static int msi;	/* Use PCI msi; either zero (off, default) or non-zero */

/* move to PCI layer or libata core? */
static int pci_go_64(struct pci_dev *pdev)
{
	int rc;

	if (!pci_set_dma_mask(pdev, DMA_64BIT_MASK)) {
		rc = pci_set_consistent_dma_mask(pdev, DMA_64BIT_MASK);
		if (rc) {
			rc = pci_set_consistent_dma_mask(pdev, DMA_32BIT_MASK);
			if (rc) {
				dev_printk(KERN_ERR, &pdev->dev,
					   "64-bit DMA enable failed\n");
				return rc;
			}
		}
	} else {
		rc = pci_set_dma_mask(pdev, DMA_32BIT_MASK);
		if (rc) {
			dev_printk(KERN_ERR, &pdev->dev,
				   "32-bit DMA enable failed\n");
			return rc;
		}
		rc = pci_set_consistent_dma_mask(pdev, DMA_32BIT_MASK);
		if (rc) {
			dev_printk(KERN_ERR, &pdev->dev,
				   "32-bit consistent DMA enable failed\n");
			return rc;
		}
	}

	return rc;
}
/**
 *	mv_print_info - Dump key info to kernel log for perusal.
 *	@host: ATA host to print info about
 *
 *	FIXME: complete this.
 *
 *	LOCKING:
 *	Inherited from caller.
 */
static void mv_print_info(struct ata_host *host)
{
	struct pci_dev *pdev = to_pci_dev(host->dev);
	struct mv_host_priv *hpriv = host->private_data;
	u8 scc;
	const char *scc_s, *gen;

	/* Read the PCI class code: it distinguishes boards operating in
	 * SCSI mode from those operating in RAID mode.
	 */
	pci_read_config_byte(pdev, PCI_CLASS_DEVICE, &scc);
	if (scc == 0)
		scc_s = "SCSI";
	else if (scc == 0x01)
		scc_s = "RAID";
	else
		scc_s = "?";

	if (IS_GEN_I(hpriv))
		gen = "I";
	else if (IS_GEN_II(hpriv))
		gen = "II";
	else if (IS_GEN_IIE(hpriv))
		gen = "IIE";
	else
		gen = "?";

	dev_printk(KERN_INFO, &pdev->dev,
		   "Gen-%s %u slots %u ports %s mode IRQ via %s\n",
		   gen, (unsigned)MV_MAX_Q_DEPTH, host->n_ports,
		   scc_s, (MV_HP_FLAG_MSI & hpriv->hp_flags) ? "MSI" : "INTx");
}
/**
 *	mv_pci_init_one - handle a positive probe of a PCI Marvell host
 *	@pdev: PCI device found
 *	@ent: PCI device ID entry for the matched host
 *
 *	LOCKING:
 *	Inherited from caller.
 */
static int mv_pci_init_one(struct pci_dev *pdev,
			   const struct pci_device_id *ent)
{
	static int printed_version;
	unsigned int board_idx = (unsigned int)ent->driver_data;
	const struct ata_port_info *ppi[] = { &mv_port_info[board_idx], NULL };
	struct ata_host *host;
	struct mv_host_priv *hpriv;
	int n_ports, rc;

	if (!printed_version++)
		dev_printk(KERN_INFO, &pdev->dev, "version " DRV_VERSION "\n");

	/* allocate host */
	n_ports = mv_get_hc_count(ppi[0]->flags) * MV_PORTS_PER_HC;

	host = ata_host_alloc_pinfo(&pdev->dev, ppi, n_ports);
	hpriv = devm_kzalloc(&pdev->dev, sizeof(*hpriv), GFP_KERNEL);
	if (!host || !hpriv)
		return -ENOMEM;
	host->private_data = hpriv;
	hpriv->n_ports = n_ports;

	/* acquire resources */
	rc = pcim_enable_device(pdev);
	if (rc)
		return rc;

	rc = pcim_iomap_regions(pdev, 1 << MV_PRIMARY_BAR, DRV_NAME);
	if (rc == -EBUSY)
		pcim_pin_device(pdev);
	if (rc)
		return rc;
	host->iomap = pcim_iomap_table(pdev);
	hpriv->base = host->iomap[MV_PRIMARY_BAR];

	rc = pci_go_64(pdev);
	if (rc)
		return rc;

	rc = mv_create_dma_pools(hpriv, &pdev->dev);
	if (rc)
		return rc;

	/* initialize adapter */
	rc = mv_init_host(host, board_idx);
	if (rc)
		return rc;

	/* Enable interrupts */
	if (msi && pci_enable_msi(pdev))
		pci_intx(pdev, 1);

	mv_dump_pci_cfg(pdev, 0x68);
	mv_print_info(host);

	pci_set_master(pdev);
	pci_try_set_mwi(pdev);
	return ata_host_activate(host, pdev->irq, mv_interrupt, IRQF_SHARED,
				 IS_GEN_I(hpriv) ? &mv5_sht : &mv6_sht);
}
#endif
static int mv_platform_probe(struct platform_device *pdev);
static int __devexit mv_platform_remove(struct platform_device *pdev);

static int __init mv_init(void)
{
	int rc = -ENODEV;
#ifdef CONFIG_PCI
	rc = pci_register_driver(&mv_pci_driver);
	if (rc < 0)
		return rc;
#endif
	rc = platform_driver_register(&mv_platform_driver);

#ifdef CONFIG_PCI
	if (rc < 0)
		pci_unregister_driver(&mv_pci_driver);
#endif
	return rc;
}
static void __exit mv_exit(void)
{
#ifdef CONFIG_PCI
	pci_unregister_driver(&mv_pci_driver);
#endif
	platform_driver_unregister(&mv_platform_driver);
}
MODULE_AUTHOR("Brett Russ");
MODULE_DESCRIPTION("SCSI low-level driver for Marvell SATA controllers");
MODULE_LICENSE("GPL");
MODULE_DEVICE_TABLE(pci, mv_pci_tbl);
MODULE_VERSION(DRV_VERSION);
MODULE_ALIAS("platform:sata_mv");

#ifdef CONFIG_PCI
module_param(msi, int, 0444);
MODULE_PARM_DESC(msi, "Enable use of PCI MSI (0=off, 1=on)");
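/*
 * Usage note: msi is read-only at runtime (mode 0444), so request MSI
 * at load time, e.g. "modprobe sata_mv msi=1".  If pci_enable_msi()
 * fails, mv_pci_init_one() falls back to legacy INTx interrupts.
 */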
#endif

module_init(mv_init);
module_exit(mv_exit);