 * sata_mv.c - Marvell SATA support
 *
 * Copyright 2008: Marvell Corporation, all rights reserved.
 * Copyright 2005: EMC Corporation, all rights reserved.
 * Copyright 2005 Red Hat, Inc.  All rights reserved.
 *
 * Please ALWAYS copy linux-ide@vger.kernel.org on emails.
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License as published by
 * the Free Software Foundation; version 2 of the License.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program; if not, write to the Free Software
 * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA  02111-1307  USA
  1) Needs a full errata audit for all chipsets.  I implemented most
  of the errata workarounds found in the Marvell vendor driver, but
  I distinctly remember a couple workarounds (one related to PCI-X)
  are still needed.

  2) Improve/fix IRQ and error handling sequences.

  3) ATAPI support (Marvell claims the 60xx/70xx chips can do it).

  4) Think about TCQ support here, and for libata in general
  with controllers that support it via host-queuing hardware
  (a software-only implementation could be a nightmare).

  5) Investigate problems with PCI Message Signalled Interrupts (MSI).

  6) Cache frequently-accessed registers in mv_port_priv to reduce overhead.

  7) Fix/reenable hot plug/unplug (should happen as a side-effect of (2) above).

  8) Develop a low-power-consumption strategy, and implement it.

  9) [Experiment, low priority] See if ATAPI can be supported using
  "unknown FIS" or "vendor-specific FIS" support, or something creative.

  10) [Experiment, low priority] Investigate interrupt coalescing.
  Quite often, especially with PCI Message Signalled Interrupts (MSI),
  the overhead reduced by interrupt mitigation is not worth the
  latency cost.

  11) [Experiment, Marvell value added] Is it possible to use target
  mode to cross-connect two Linux boxes with Marvell cards?  If so,
  creating LibATA target mode support would be very interesting.

  Target mode, for those without docs, is the ability to directly
  connect two SATA controllers.
#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/pci.h>
#include <linux/init.h>
#include <linux/blkdev.h>
#include <linux/delay.h>
#include <linux/interrupt.h>
#include <linux/dmapool.h>
#include <linux/dma-mapping.h>
#include <linux/device.h>
#include <linux/platform_device.h>
#include <linux/ata_platform.h>
#include <linux/mbus.h>
#include <scsi/scsi_host.h>
#include <scsi/scsi_cmnd.h>
#include <scsi/scsi_device.h>
#include <linux/libata.h>

#define DRV_NAME	"sata_mv"
#define DRV_VERSION	"1.20"
89 /* BAR's are enumerated in terms of pci_resource_start() terms */
90 MV_PRIMARY_BAR = 0, /* offset 0x10: memory space */
91 MV_IO_BAR = 2, /* offset 0x18: IO space */
92 MV_MISC_BAR = 3, /* offset 0x1c: FLASH, NVRAM, SRAM */
94 MV_MAJOR_REG_AREA_SZ = 0x10000, /* 64KB */
95 MV_MINOR_REG_AREA_SZ = 0x2000, /* 8KB */
98 MV_IRQ_COAL_REG_BASE = 0x18000, /* 6xxx part only */
99 MV_IRQ_COAL_CAUSE = (MV_IRQ_COAL_REG_BASE + 0x08),
100 MV_IRQ_COAL_CAUSE_LO = (MV_IRQ_COAL_REG_BASE + 0x88),
101 MV_IRQ_COAL_CAUSE_HI = (MV_IRQ_COAL_REG_BASE + 0x8c),
102 MV_IRQ_COAL_THRESHOLD = (MV_IRQ_COAL_REG_BASE + 0xcc),
103 MV_IRQ_COAL_TIME_THRESHOLD = (MV_IRQ_COAL_REG_BASE + 0xd0),
105 MV_SATAHC0_REG_BASE = 0x20000,
106 MV_FLASH_CTL = 0x1046c,
107 MV_GPIO_PORT_CTL = 0x104f0,
108 MV_RESET_CFG = 0x180d8,
110 MV_PCI_REG_SZ = MV_MAJOR_REG_AREA_SZ,
111 MV_SATAHC_REG_SZ = MV_MAJOR_REG_AREA_SZ,
112 MV_SATAHC_ARBTR_REG_SZ = MV_MINOR_REG_AREA_SZ, /* arbiter */
113 MV_PORT_REG_SZ = MV_MINOR_REG_AREA_SZ,
116 MV_MAX_Q_DEPTH_MASK = MV_MAX_Q_DEPTH - 1,
	/* CRQB needs alignment on a 1KB boundary. Size == 1KB
	 * CRPB needs alignment on a 256B boundary. Size == 256B
	 * ePRD (SG) entries need alignment on a 16B boundary. Size == 16B
	 */
	MV_CRQB_Q_SZ		= (32 * MV_MAX_Q_DEPTH),
	MV_CRPB_Q_SZ		= (8 * MV_MAX_Q_DEPTH),
	MV_SG_TBL_SZ		= (16 * MV_MAX_SG_CT),
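	/* Each CRQB is 32B and each CRPB is 8B, so the 1KB/256B ring sizes
	 * noted above correspond to a queue depth of 32 (MV_MAX_Q_DEPTH).
	 */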
	/* == (port / MV_PORTS_PER_HC) to determine HC from 0-7 port */
	MV_PORT_HC_SHIFT	= 2,
	/* == (port % MV_PORTS_PER_HC) to determine hard port from 0-7 port */
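	/* e.g. port 6: HC == 6 >> MV_PORT_HC_SHIFT == 1, hard port == 6 % 4 == 2 */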
134 MV_FLAG_DUAL_HC = (1 << 30), /* two SATA Host Controllers */
135 MV_FLAG_IRQ_COALESCE = (1 << 29), /* IRQ coalescing capability */
136 /* SoC integrated controllers, no PCI interface */
137 MV_FLAG_SOC = (1 << 28),
139 MV_COMMON_FLAGS = ATA_FLAG_SATA | ATA_FLAG_NO_LEGACY |
140 ATA_FLAG_MMIO | ATA_FLAG_NO_ATAPI |
141 ATA_FLAG_PIO_POLLING,
142 MV_6XXX_FLAGS = MV_FLAG_IRQ_COALESCE,
144 CRQB_FLAG_READ = (1 << 0),
146 CRQB_IOID_SHIFT = 6, /* CRQB Gen-II/IIE IO Id shift */
147 CRQB_PMP_SHIFT = 12, /* CRQB Gen-II/IIE PMP shift */
148 CRQB_HOSTQ_SHIFT = 17, /* CRQB Gen-II/IIE HostQueTag shift */
149 CRQB_CMD_ADDR_SHIFT = 8,
150 CRQB_CMD_CS = (0x2 << 11),
151 CRQB_CMD_LAST = (1 << 15),
153 CRPB_FLAG_STATUS_SHIFT = 8,
154 CRPB_IOID_SHIFT_6 = 5, /* CRPB Gen-II IO Id shift */
155 CRPB_IOID_SHIFT_7 = 7, /* CRPB Gen-IIE IO Id shift */
157 EPRD_FLAG_END_OF_TBL = (1 << 31),
159 /* PCI interface registers */
161 PCI_COMMAND_OFS = 0xc00,
163 PCI_MAIN_CMD_STS_OFS = 0xd30,
164 STOP_PCI_MASTER = (1 << 2),
165 PCI_MASTER_EMPTY = (1 << 3),
166 GLOB_SFT_RST = (1 << 4),
169 MV_PCI_EXP_ROM_BAR_CTL = 0xd2c,
170 MV_PCI_DISC_TIMER = 0xd04,
171 MV_PCI_MSI_TRIGGER = 0xc38,
172 MV_PCI_SERR_MASK = 0xc28,
173 MV_PCI_XBAR_TMOUT = 0x1d04,
174 MV_PCI_ERR_LOW_ADDRESS = 0x1d40,
175 MV_PCI_ERR_HIGH_ADDRESS = 0x1d44,
176 MV_PCI_ERR_ATTRIBUTE = 0x1d48,
177 MV_PCI_ERR_COMMAND = 0x1d50,
179 PCI_IRQ_CAUSE_OFS = 0x1d58,
180 PCI_IRQ_MASK_OFS = 0x1d5c,
181 PCI_UNMASK_ALL_IRQS = 0x7fffff, /* bits 22-0 */
183 PCIE_IRQ_CAUSE_OFS = 0x1900,
184 PCIE_IRQ_MASK_OFS = 0x1910,
185 PCIE_UNMASK_ALL_IRQS = 0x40a, /* assorted bits */
187 HC_MAIN_IRQ_CAUSE_OFS = 0x1d60,
188 HC_MAIN_IRQ_MASK_OFS = 0x1d64,
189 HC_SOC_MAIN_IRQ_CAUSE_OFS = 0x20020,
190 HC_SOC_MAIN_IRQ_MASK_OFS = 0x20024,
191 PORT0_ERR = (1 << 0), /* shift by port # */
192 PORT0_DONE = (1 << 1), /* shift by port # */
193 HC0_IRQ_PEND = 0x1ff, /* bits 0-8 = HC0's ports */
194 HC_SHIFT = 9, /* bits 9-17 = HC1's ports */
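	/* i.e. each HC owns a 9-bit field of the main cause register:
	 * an err/done bit pair for each of its 4 ports plus one
	 * coalescing bit (bits 8 and 17 below).
	 */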
196 TRAN_LO_DONE = (1 << 19), /* 6xxx: IRQ coalescing */
197 TRAN_HI_DONE = (1 << 20), /* 6xxx: IRQ coalescing */
198 PORTS_0_3_COAL_DONE = (1 << 8),
199 PORTS_4_7_COAL_DONE = (1 << 17),
200 PORTS_0_7_COAL_DONE = (1 << 21), /* 6xxx: IRQ coalescing */
201 GPIO_INT = (1 << 22),
202 SELF_INT = (1 << 23),
203 TWSI_INT = (1 << 24),
204 HC_MAIN_RSVD = (0x7f << 25), /* bits 31-25 */
205 HC_MAIN_RSVD_5 = (0x1fff << 19), /* bits 31-19 */
206 HC_MAIN_RSVD_SOC = (0x3fffffb << 6), /* bits 31-9, 7-6 */
207 HC_MAIN_MASKED_IRQS = (TRAN_LO_DONE | TRAN_HI_DONE |
208 PORTS_0_7_COAL_DONE | GPIO_INT | TWSI_INT |
210 HC_MAIN_MASKED_IRQS_5 = (PORTS_0_3_COAL_DONE | PORTS_4_7_COAL_DONE |
212 HC_MAIN_MASKED_IRQS_SOC = (PORTS_0_3_COAL_DONE | HC_MAIN_RSVD_SOC),
214 /* SATAHC registers */
217 HC_IRQ_CAUSE_OFS = 0x14,
218 CRPB_DMA_DONE = (1 << 0), /* shift by port # */
219 HC_IRQ_COAL = (1 << 4), /* IRQ coalescing */
220 DEV_IRQ = (1 << 8), /* shift by port # */
222 /* Shadow block registers */
224 SHD_CTL_AST_OFS = 0x20, /* ofs from SHD_BLK_OFS */
227 SATA_STATUS_OFS = 0x300, /* ctrl, err regs follow status */
228 SATA_ACTIVE_OFS = 0x350,
229 SATA_FIS_IRQ_CAUSE_OFS = 0x364,
232 LTMODE_BIT8 = (1 << 8), /* unknown, but necessary */
237 SATA_IFCTL_OFS = 0x344,
238 SATA_IFSTAT_OFS = 0x34c,
239 VENDOR_UNIQUE_FIS_OFS = 0x35c,
242 FIS_CFG_SINGLE_SYNC = (1 << 16), /* SYNC on DMA activation */
247 SATA_INTERFACE_CFG = 0x050,
249 MV_M2_PREAMP_MASK = 0x7e0,
253 EDMA_CFG_Q_DEPTH = 0x1f, /* max device queue depth */
254 EDMA_CFG_NCQ = (1 << 5), /* for R/W FPDMA queued */
255 EDMA_CFG_NCQ_GO_ON_ERR = (1 << 14), /* continue on error */
256 EDMA_CFG_RD_BRST_EXT = (1 << 11), /* read burst 512B */
257 EDMA_CFG_WR_BUFF_LEN = (1 << 13), /* write buffer 512B */
258 EDMA_CFG_EDMA_FBS = (1 << 16), /* EDMA FIS-Based Switching */
259 EDMA_CFG_FBS = (1 << 26), /* FIS-Based Switching */
261 EDMA_ERR_IRQ_CAUSE_OFS = 0x8,
262 EDMA_ERR_IRQ_MASK_OFS = 0xc,
263 EDMA_ERR_D_PAR = (1 << 0), /* UDMA data parity err */
264 EDMA_ERR_PRD_PAR = (1 << 1), /* UDMA PRD parity err */
265 EDMA_ERR_DEV = (1 << 2), /* device error */
266 EDMA_ERR_DEV_DCON = (1 << 3), /* device disconnect */
267 EDMA_ERR_DEV_CON = (1 << 4), /* device connected */
268 EDMA_ERR_SERR = (1 << 5), /* SError bits [WBDST] raised */
269 EDMA_ERR_SELF_DIS = (1 << 7), /* Gen II/IIE self-disable */
270 EDMA_ERR_SELF_DIS_5 = (1 << 8), /* Gen I self-disable */
271 EDMA_ERR_BIST_ASYNC = (1 << 8), /* BIST FIS or Async Notify */
272 EDMA_ERR_TRANS_IRQ_7 = (1 << 8), /* Gen IIE transprt layer irq */
273 EDMA_ERR_CRQB_PAR = (1 << 9), /* CRQB parity error */
274 EDMA_ERR_CRPB_PAR = (1 << 10), /* CRPB parity error */
275 EDMA_ERR_INTRL_PAR = (1 << 11), /* internal parity error */
276 EDMA_ERR_IORDY = (1 << 12), /* IORdy timeout */
278 EDMA_ERR_LNK_CTRL_RX = (0xf << 13), /* link ctrl rx error */
279 EDMA_ERR_LNK_CTRL_RX_0 = (1 << 13), /* transient: CRC err */
280 EDMA_ERR_LNK_CTRL_RX_1 = (1 << 14), /* transient: FIFO err */
281 EDMA_ERR_LNK_CTRL_RX_2 = (1 << 15), /* fatal: caught SYNC */
282 EDMA_ERR_LNK_CTRL_RX_3 = (1 << 16), /* transient: FIS rx err */
284 EDMA_ERR_LNK_DATA_RX = (0xf << 17), /* link data rx error */
286 EDMA_ERR_LNK_CTRL_TX = (0x1f << 21), /* link ctrl tx error */
287 EDMA_ERR_LNK_CTRL_TX_0 = (1 << 21), /* transient: CRC err */
288 EDMA_ERR_LNK_CTRL_TX_1 = (1 << 22), /* transient: FIFO err */
289 EDMA_ERR_LNK_CTRL_TX_2 = (1 << 23), /* transient: caught SYNC */
290 EDMA_ERR_LNK_CTRL_TX_3 = (1 << 24), /* transient: caught DMAT */
291 EDMA_ERR_LNK_CTRL_TX_4 = (1 << 25), /* transient: FIS collision */
293 EDMA_ERR_LNK_DATA_TX = (0x1f << 26), /* link data tx error */
295 EDMA_ERR_TRANS_PROTO = (1 << 31), /* transport protocol error */
296 EDMA_ERR_OVERRUN_5 = (1 << 5),
297 EDMA_ERR_UNDERRUN_5 = (1 << 6),
299 EDMA_ERR_IRQ_TRANSIENT = EDMA_ERR_LNK_CTRL_RX_0 |
300 EDMA_ERR_LNK_CTRL_RX_1 |
301 EDMA_ERR_LNK_CTRL_RX_3 |
302 EDMA_ERR_LNK_CTRL_TX |
303 /* temporary, until we fix hotplug: */
304 (EDMA_ERR_DEV_DCON | EDMA_ERR_DEV_CON),
306 EDMA_EH_FREEZE = EDMA_ERR_D_PAR |
316 EDMA_ERR_LNK_CTRL_RX_2 |
317 EDMA_ERR_LNK_DATA_RX |
318 EDMA_ERR_LNK_DATA_TX |
319 EDMA_ERR_TRANS_PROTO,
321 EDMA_EH_FREEZE_5 = EDMA_ERR_D_PAR |
326 EDMA_ERR_UNDERRUN_5 |
327 EDMA_ERR_SELF_DIS_5 |
333 EDMA_REQ_Q_BASE_HI_OFS = 0x10,
334 EDMA_REQ_Q_IN_PTR_OFS = 0x14, /* also contains BASE_LO */
336 EDMA_REQ_Q_OUT_PTR_OFS = 0x18,
337 EDMA_REQ_Q_PTR_SHIFT = 5,
339 EDMA_RSP_Q_BASE_HI_OFS = 0x1c,
340 EDMA_RSP_Q_IN_PTR_OFS = 0x20,
341 EDMA_RSP_Q_OUT_PTR_OFS = 0x24, /* also contains BASE_LO */
342 EDMA_RSP_Q_PTR_SHIFT = 3,
344 EDMA_CMD_OFS = 0x28, /* EDMA command register */
345 EDMA_EN = (1 << 0), /* enable EDMA */
346 EDMA_DS = (1 << 1), /* disable EDMA; self-negated */
347 ATA_RST = (1 << 2), /* reset trans/link/phy */
349 EDMA_IORDY_TMOUT = 0x34,
352 /* Host private flags (hp_flags) */
353 MV_HP_FLAG_MSI = (1 << 0),
354 MV_HP_ERRATA_50XXB0 = (1 << 1),
355 MV_HP_ERRATA_50XXB2 = (1 << 2),
356 MV_HP_ERRATA_60X1B2 = (1 << 3),
357 MV_HP_ERRATA_60X1C0 = (1 << 4),
358 MV_HP_ERRATA_XX42A0 = (1 << 5),
359 MV_HP_GEN_I = (1 << 6), /* Generation I: 50xx */
360 MV_HP_GEN_II = (1 << 7), /* Generation II: 60xx */
361 MV_HP_GEN_IIE = (1 << 8), /* Generation IIE: 6042/7042 */
362 MV_HP_PCIE = (1 << 9), /* PCIe bus/regs: 7042 */
364 /* Port private flags (pp_flags) */
365 MV_PP_FLAG_EDMA_EN = (1 << 0), /* is EDMA engine enabled? */
366 MV_PP_FLAG_NCQ_EN = (1 << 1), /* is EDMA set up for NCQ? */
369 #define IS_GEN_I(hpriv) ((hpriv)->hp_flags & MV_HP_GEN_I)
370 #define IS_GEN_II(hpriv) ((hpriv)->hp_flags & MV_HP_GEN_II)
371 #define IS_GEN_IIE(hpriv) ((hpriv)->hp_flags & MV_HP_GEN_IIE)
372 #define HAS_PCI(host) (!((host)->ports[0]->flags & MV_FLAG_SOC))
374 #define WINDOW_CTRL(i) (0x20030 + ((i) << 4))
375 #define WINDOW_BASE(i) (0x20034 + ((i) << 4))
	/* DMA boundary 0xffff is required by the s/g splitting
	 * we need on /length/ in mv_fill_sg().
	 */
	MV_DMA_BOUNDARY		= 0xffffU,

	/* mask of register bits containing lower 32 bits
	 * of EDMA request queue DMA address
	 */
	EDMA_REQ_Q_BASE_LO_MASK	= 0xfffffc00U,

	/* ditto, for response queue */
	EDMA_RSP_Q_BASE_LO_MASK	= 0xffffff00U,
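	/* These masks follow from the ring alignment above: a 1KB-aligned
	 * CRQB ring leaves address bits 9:0 free and a 256B-aligned CRPB
	 * ring leaves bits 7:0 free, which is where mv_set_edma_ptrs()
	 * folds in the (shifted) ring index.
	 */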
403 /* Command ReQuest Block: 32B */
419 /* Command ResPonse Block: 8B */
426 /* EDMA Physical Region Descriptor (ePRD); A.K.A. SG */
434 struct mv_port_priv {
435 struct mv_crqb *crqb;
437 struct mv_crpb *crpb;
439 struct mv_sg *sg_tbl[MV_MAX_Q_DEPTH];
440 dma_addr_t sg_tbl_dma[MV_MAX_Q_DEPTH];
442 unsigned int req_idx;
443 unsigned int resp_idx;
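	/* req_idx/resp_idx are free-running ring indices; they are reduced
	 * with MV_MAX_Q_DEPTH_MASK whenever they are folded into the EDMA
	 * queue pointer registers (see mv_set_edma_ptrs()).
	 */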
448 struct mv_port_signal {
453 struct mv_host_priv {
455 struct mv_port_signal signal[8];
456 const struct mv_hw_ops *ops;
459 void __iomem *main_cause_reg_addr;
460 void __iomem *main_mask_reg_addr;
465 * These consistent DMA memory pools give us guaranteed
466 * alignment for hardware-accessed data structures,
467 * and less memory waste in accomplishing the alignment.
469 struct dma_pool *crqb_pool;
470 struct dma_pool *crpb_pool;
471 struct dma_pool *sg_tbl_pool;
475 void (*phy_errata)(struct mv_host_priv *hpriv, void __iomem *mmio,
477 void (*enable_leds)(struct mv_host_priv *hpriv, void __iomem *mmio);
478 void (*read_preamp)(struct mv_host_priv *hpriv, int idx,
480 int (*reset_hc)(struct mv_host_priv *hpriv, void __iomem *mmio,
482 void (*reset_flash)(struct mv_host_priv *hpriv, void __iomem *mmio);
483 void (*reset_bus)(struct ata_host *host, void __iomem *mmio);
486 static int mv_scr_read(struct ata_port *ap, unsigned int sc_reg_in, u32 *val);
487 static int mv_scr_write(struct ata_port *ap, unsigned int sc_reg_in, u32 val);
488 static int mv5_scr_read(struct ata_port *ap, unsigned int sc_reg_in, u32 *val);
489 static int mv5_scr_write(struct ata_port *ap, unsigned int sc_reg_in, u32 val);
490 static int mv_port_start(struct ata_port *ap);
491 static void mv_port_stop(struct ata_port *ap);
492 static void mv_qc_prep(struct ata_queued_cmd *qc);
493 static void mv_qc_prep_iie(struct ata_queued_cmd *qc);
494 static unsigned int mv_qc_issue(struct ata_queued_cmd *qc);
495 static int mv_hardreset(struct ata_link *link, unsigned int *class,
496 unsigned long deadline);
497 static void mv_eh_freeze(struct ata_port *ap);
498 static void mv_eh_thaw(struct ata_port *ap);
499 static void mv6_dev_config(struct ata_device *dev);
501 static void mv5_phy_errata(struct mv_host_priv *hpriv, void __iomem *mmio,
503 static void mv5_enable_leds(struct mv_host_priv *hpriv, void __iomem *mmio);
504 static void mv5_read_preamp(struct mv_host_priv *hpriv, int idx,
506 static int mv5_reset_hc(struct mv_host_priv *hpriv, void __iomem *mmio,
508 static void mv5_reset_flash(struct mv_host_priv *hpriv, void __iomem *mmio);
509 static void mv5_reset_bus(struct ata_host *host, void __iomem *mmio);
511 static void mv6_phy_errata(struct mv_host_priv *hpriv, void __iomem *mmio,
513 static void mv6_enable_leds(struct mv_host_priv *hpriv, void __iomem *mmio);
514 static void mv6_read_preamp(struct mv_host_priv *hpriv, int idx,
516 static int mv6_reset_hc(struct mv_host_priv *hpriv, void __iomem *mmio,
518 static void mv6_reset_flash(struct mv_host_priv *hpriv, void __iomem *mmio);
519 static void mv_soc_enable_leds(struct mv_host_priv *hpriv,
521 static void mv_soc_read_preamp(struct mv_host_priv *hpriv, int idx,
523 static int mv_soc_reset_hc(struct mv_host_priv *hpriv,
524 void __iomem *mmio, unsigned int n_hc);
525 static void mv_soc_reset_flash(struct mv_host_priv *hpriv,
527 static void mv_soc_reset_bus(struct ata_host *host, void __iomem *mmio);
528 static void mv_reset_pci_bus(struct ata_host *host, void __iomem *mmio);
529 static void mv_reset_channel(struct mv_host_priv *hpriv, void __iomem *mmio,
530 unsigned int port_no);
531 static int mv_stop_edma(struct ata_port *ap);
532 static int mv_stop_edma_engine(void __iomem *port_mmio);
533 static void mv_edma_cfg(struct ata_port *ap, int want_ncq);
535 static void mv_pmp_select(struct ata_port *ap, int pmp);
536 static int mv_pmp_hardreset(struct ata_link *link, unsigned int *class,
537 unsigned long deadline);
538 static int mv_softreset(struct ata_link *link, unsigned int *class,
539 unsigned long deadline);
/* .sg_tablesize is (MV_MAX_SG_CT / 2) in the structures below
 * because we have to allow room for worst case splitting of
 * PRDs for 64K boundaries in mv_fill_sg().
 */
545 static struct scsi_host_template mv5_sht = {
546 ATA_BASE_SHT(DRV_NAME),
547 .sg_tablesize = MV_MAX_SG_CT / 2,
548 .dma_boundary = MV_DMA_BOUNDARY,
551 static struct scsi_host_template mv6_sht = {
552 ATA_NCQ_SHT(DRV_NAME),
553 .can_queue = MV_MAX_Q_DEPTH - 1,
554 .sg_tablesize = MV_MAX_SG_CT / 2,
555 .dma_boundary = MV_DMA_BOUNDARY,
558 static struct ata_port_operations mv5_ops = {
559 .inherits = &ata_sff_port_ops,
561 .qc_prep = mv_qc_prep,
562 .qc_issue = mv_qc_issue,
564 .freeze = mv_eh_freeze,
566 .hardreset = mv_hardreset,
567 .error_handler = ata_std_error_handler, /* avoid SFF EH */
568 .post_internal_cmd = ATA_OP_NULL,
570 .scr_read = mv5_scr_read,
571 .scr_write = mv5_scr_write,
573 .port_start = mv_port_start,
574 .port_stop = mv_port_stop,
577 static struct ata_port_operations mv6_ops = {
578 .inherits = &mv5_ops,
579 .qc_defer = sata_pmp_qc_defer_cmd_switch,
580 .dev_config = mv6_dev_config,
581 .scr_read = mv_scr_read,
582 .scr_write = mv_scr_write,
584 .pmp_hardreset = mv_pmp_hardreset,
585 .pmp_softreset = mv_softreset,
586 .softreset = mv_softreset,
587 .error_handler = sata_pmp_error_handler,
590 static struct ata_port_operations mv_iie_ops = {
591 .inherits = &mv6_ops,
592 .qc_defer = ata_std_qc_defer, /* FIS-based switching */
593 .dev_config = ATA_OP_NULL,
594 .qc_prep = mv_qc_prep_iie,
597 static const struct ata_port_info mv_port_info[] = {
599 .flags = MV_COMMON_FLAGS,
600 .pio_mask = 0x1f, /* pio0-4 */
601 .udma_mask = ATA_UDMA6,
602 .port_ops = &mv5_ops,
605 .flags = MV_COMMON_FLAGS | MV_FLAG_DUAL_HC,
606 .pio_mask = 0x1f, /* pio0-4 */
607 .udma_mask = ATA_UDMA6,
608 .port_ops = &mv5_ops,
611 .flags = MV_COMMON_FLAGS | MV_FLAG_DUAL_HC,
612 .pio_mask = 0x1f, /* pio0-4 */
613 .udma_mask = ATA_UDMA6,
614 .port_ops = &mv5_ops,
617 .flags = MV_COMMON_FLAGS | MV_6XXX_FLAGS |
618 ATA_FLAG_PMP | ATA_FLAG_ACPI_SATA |
620 .pio_mask = 0x1f, /* pio0-4 */
621 .udma_mask = ATA_UDMA6,
622 .port_ops = &mv6_ops,
625 .flags = MV_COMMON_FLAGS | MV_6XXX_FLAGS |
626 ATA_FLAG_PMP | ATA_FLAG_ACPI_SATA |
627 ATA_FLAG_NCQ | MV_FLAG_DUAL_HC,
628 .pio_mask = 0x1f, /* pio0-4 */
629 .udma_mask = ATA_UDMA6,
630 .port_ops = &mv6_ops,
633 .flags = MV_COMMON_FLAGS | MV_6XXX_FLAGS |
634 ATA_FLAG_PMP | ATA_FLAG_ACPI_SATA |
636 .pio_mask = 0x1f, /* pio0-4 */
637 .udma_mask = ATA_UDMA6,
638 .port_ops = &mv_iie_ops,
641 .flags = MV_COMMON_FLAGS | MV_6XXX_FLAGS |
642 ATA_FLAG_PMP | ATA_FLAG_ACPI_SATA |
644 .pio_mask = 0x1f, /* pio0-4 */
645 .udma_mask = ATA_UDMA6,
646 .port_ops = &mv_iie_ops,
649 .flags = MV_COMMON_FLAGS | MV_6XXX_FLAGS |
650 ATA_FLAG_PMP | ATA_FLAG_ACPI_SATA |
651 ATA_FLAG_NCQ | MV_FLAG_SOC,
652 .pio_mask = 0x1f, /* pio0-4 */
653 .udma_mask = ATA_UDMA6,
654 .port_ops = &mv_iie_ops,
658 static const struct pci_device_id mv_pci_tbl[] = {
659 { PCI_VDEVICE(MARVELL, 0x5040), chip_504x },
660 { PCI_VDEVICE(MARVELL, 0x5041), chip_504x },
661 { PCI_VDEVICE(MARVELL, 0x5080), chip_5080 },
662 { PCI_VDEVICE(MARVELL, 0x5081), chip_508x },
663 /* RocketRAID 1740/174x have different identifiers */
664 { PCI_VDEVICE(TTI, 0x1740), chip_508x },
665 { PCI_VDEVICE(TTI, 0x1742), chip_508x },
667 { PCI_VDEVICE(MARVELL, 0x6040), chip_604x },
668 { PCI_VDEVICE(MARVELL, 0x6041), chip_604x },
669 { PCI_VDEVICE(MARVELL, 0x6042), chip_6042 },
670 { PCI_VDEVICE(MARVELL, 0x6080), chip_608x },
671 { PCI_VDEVICE(MARVELL, 0x6081), chip_608x },
673 { PCI_VDEVICE(ADAPTEC2, 0x0241), chip_604x },
676 { PCI_VDEVICE(ADAPTEC2, 0x0243), chip_7042 },
678 /* Marvell 7042 support */
679 { PCI_VDEVICE(MARVELL, 0x7042), chip_7042 },
681 /* Highpoint RocketRAID PCIe series */
682 { PCI_VDEVICE(TTI, 0x2300), chip_7042 },
683 { PCI_VDEVICE(TTI, 0x2310), chip_7042 },
685 { } /* terminate list */
688 static const struct mv_hw_ops mv5xxx_ops = {
689 .phy_errata = mv5_phy_errata,
690 .enable_leds = mv5_enable_leds,
691 .read_preamp = mv5_read_preamp,
692 .reset_hc = mv5_reset_hc,
693 .reset_flash = mv5_reset_flash,
694 .reset_bus = mv5_reset_bus,
697 static const struct mv_hw_ops mv6xxx_ops = {
698 .phy_errata = mv6_phy_errata,
699 .enable_leds = mv6_enable_leds,
700 .read_preamp = mv6_read_preamp,
701 .reset_hc = mv6_reset_hc,
702 .reset_flash = mv6_reset_flash,
703 .reset_bus = mv_reset_pci_bus,
706 static const struct mv_hw_ops mv_soc_ops = {
707 .phy_errata = mv6_phy_errata,
708 .enable_leds = mv_soc_enable_leds,
709 .read_preamp = mv_soc_read_preamp,
710 .reset_hc = mv_soc_reset_hc,
711 .reset_flash = mv_soc_reset_flash,
712 .reset_bus = mv_soc_reset_bus,
static inline void writelfl(unsigned long data, void __iomem *addr)
{
	writel(data, addr);
	(void) readl(addr);	/* flush to avoid PCI posted write */
}
725 static inline void __iomem *mv_hc_base(void __iomem *base, unsigned int hc)
727 return (base + MV_SATAHC0_REG_BASE + (hc * MV_SATAHC_REG_SZ));
730 static inline unsigned int mv_hc_from_port(unsigned int port)
732 return port >> MV_PORT_HC_SHIFT;
735 static inline unsigned int mv_hardport_from_port(unsigned int port)
737 return port & MV_PORT_MASK;
740 static inline void __iomem *mv_hc_base_from_port(void __iomem *base,
743 return mv_hc_base(base, mv_hc_from_port(port));
static inline void __iomem *mv_port_base(void __iomem *base, unsigned int port)
{
	return mv_hc_base_from_port(base, port) +
		MV_SATAHC_ARBTR_REG_SZ +
		(mv_hardport_from_port(port) * MV_PORT_REG_SZ);
}
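/* Worked example: port 5 is hard port 1 on HC 1, so its registers live at
 * base + MV_SATAHC0_REG_BASE + 1 * MV_SATAHC_REG_SZ + MV_SATAHC_ARBTR_REG_SZ
 * + 1 * MV_PORT_REG_SZ == base + 0x34000.
 */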
753 static void __iomem *mv5_phy_base(void __iomem *mmio, unsigned int port)
755 void __iomem *hc_mmio = mv_hc_base_from_port(mmio, port);
756 unsigned long ofs = (mv_hardport_from_port(port) + 1) * 0x100UL;
758 return hc_mmio + ofs;
761 static inline void __iomem *mv_host_base(struct ata_host *host)
763 struct mv_host_priv *hpriv = host->private_data;
767 static inline void __iomem *mv_ap_base(struct ata_port *ap)
769 return mv_port_base(mv_host_base(ap->host), ap->port_no);
772 static inline int mv_get_hc_count(unsigned long port_flags)
774 return ((port_flags & MV_FLAG_DUAL_HC) ? 2 : 1);
777 static void mv_set_edma_ptrs(void __iomem *port_mmio,
778 struct mv_host_priv *hpriv,
779 struct mv_port_priv *pp)
784 * initialize request queue
786 index = (pp->req_idx & MV_MAX_Q_DEPTH_MASK) << EDMA_REQ_Q_PTR_SHIFT;
788 WARN_ON(pp->crqb_dma & 0x3ff);
789 writel((pp->crqb_dma >> 16) >> 16, port_mmio + EDMA_REQ_Q_BASE_HI_OFS);
790 writelfl((pp->crqb_dma & EDMA_REQ_Q_BASE_LO_MASK) | index,
791 port_mmio + EDMA_REQ_Q_IN_PTR_OFS);
793 if (hpriv->hp_flags & MV_HP_ERRATA_XX42A0)
794 writelfl((pp->crqb_dma & 0xffffffff) | index,
795 port_mmio + EDMA_REQ_Q_OUT_PTR_OFS);
797 writelfl(index, port_mmio + EDMA_REQ_Q_OUT_PTR_OFS);
800 * initialize response queue
802 index = (pp->resp_idx & MV_MAX_Q_DEPTH_MASK) << EDMA_RSP_Q_PTR_SHIFT;
804 WARN_ON(pp->crpb_dma & 0xff);
805 writel((pp->crpb_dma >> 16) >> 16, port_mmio + EDMA_RSP_Q_BASE_HI_OFS);
807 if (hpriv->hp_flags & MV_HP_ERRATA_XX42A0)
808 writelfl((pp->crpb_dma & 0xffffffff) | index,
809 port_mmio + EDMA_RSP_Q_IN_PTR_OFS);
811 writelfl(index, port_mmio + EDMA_RSP_Q_IN_PTR_OFS);
813 writelfl((pp->crpb_dma & EDMA_RSP_Q_BASE_LO_MASK) | index,
814 port_mmio + EDMA_RSP_Q_OUT_PTR_OFS);
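/* Note: per the "also contains BASE_LO" register comments above, the
 * request-queue IN pointer and response-queue OUT pointer hold the ring
 * base-address low bits together with the shifted index, which is why the
 * writes here (and later in mv_qc_issue()/mv_intr_edma()) OR the masked
 * DMA address with the index.
 */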
/**
 *	mv_start_dma - Enable eDMA engine
 *	@base: port base address
 *	@pp: port private data
 *
 *	Verify the local cache of the eDMA state is accurate with a
 *	sanity check.
 *
 *	LOCKING:
 *	Inherited from caller.
 */
828 static void mv_start_dma(struct ata_port *ap, void __iomem *port_mmio,
829 struct mv_port_priv *pp, u8 protocol)
831 int want_ncq = (protocol == ATA_PROT_NCQ);
833 if (pp->pp_flags & MV_PP_FLAG_EDMA_EN) {
834 int using_ncq = ((pp->pp_flags & MV_PP_FLAG_NCQ_EN) != 0);
835 if (want_ncq != using_ncq)
838 if (!(pp->pp_flags & MV_PP_FLAG_EDMA_EN)) {
839 struct mv_host_priv *hpriv = ap->host->private_data;
840 int hard_port = mv_hardport_from_port(ap->port_no);
841 void __iomem *hc_mmio = mv_hc_base_from_port(
842 mv_host_base(ap->host), hard_port);
843 u32 hc_irq_cause, ipending;
845 /* clear EDMA event indicators, if any */
846 writelfl(0, port_mmio + EDMA_ERR_IRQ_CAUSE_OFS);
848 /* clear EDMA interrupt indicator, if any */
849 hc_irq_cause = readl(hc_mmio + HC_IRQ_CAUSE_OFS);
850 ipending = (DEV_IRQ << hard_port) |
851 (CRPB_DMA_DONE << hard_port);
852 if (hc_irq_cause & ipending) {
853 writelfl(hc_irq_cause & ~ipending,
854 hc_mmio + HC_IRQ_CAUSE_OFS);
857 mv_edma_cfg(ap, want_ncq);
859 /* clear FIS IRQ Cause */
860 writelfl(0, port_mmio + SATA_FIS_IRQ_CAUSE_OFS);
862 mv_set_edma_ptrs(port_mmio, hpriv, pp);
864 writelfl(EDMA_EN, port_mmio + EDMA_CMD_OFS);
865 pp->pp_flags |= MV_PP_FLAG_EDMA_EN;
867 WARN_ON(!(EDMA_EN & readl(port_mmio + EDMA_CMD_OFS)));
871 * mv_stop_edma_engine - Disable eDMA engine
872 * @port_mmio: io base address
875 * Inherited from caller.
877 static int mv_stop_edma_engine(void __iomem *port_mmio)
881 /* Disable eDMA. The disable bit auto clears. */
882 writelfl(EDMA_DS, port_mmio + EDMA_CMD_OFS);
884 /* Wait for the chip to confirm eDMA is off. */
885 for (i = 10000; i > 0; i--) {
886 u32 reg = readl(port_mmio + EDMA_CMD_OFS);
887 if (!(reg & EDMA_EN))
894 static int mv_stop_edma(struct ata_port *ap)
896 void __iomem *port_mmio = mv_ap_base(ap);
897 struct mv_port_priv *pp = ap->private_data;
899 if (!(pp->pp_flags & MV_PP_FLAG_EDMA_EN))
901 pp->pp_flags &= ~MV_PP_FLAG_EDMA_EN;
902 if (mv_stop_edma_engine(port_mmio)) {
903 ata_port_printk(ap, KERN_ERR, "Unable to stop eDMA\n");
910 static void mv_dump_mem(void __iomem *start, unsigned bytes)
913 for (b = 0; b < bytes; ) {
914 DPRINTK("%p: ", start + b);
915 for (w = 0; b < bytes && w < 4; w++) {
916 printk("%08x ", readl(start + b));
924 static void mv_dump_pci_cfg(struct pci_dev *pdev, unsigned bytes)
929 for (b = 0; b < bytes; ) {
930 DPRINTK("%02x: ", b);
931 for (w = 0; b < bytes && w < 4; w++) {
932 (void) pci_read_config_dword(pdev, b, &dw);
940 static void mv_dump_all_regs(void __iomem *mmio_base, int port,
941 struct pci_dev *pdev)
944 void __iomem *hc_base = mv_hc_base(mmio_base,
945 port >> MV_PORT_HC_SHIFT);
946 void __iomem *port_base;
947 int start_port, num_ports, p, start_hc, num_hcs, hc;
950 start_hc = start_port = 0;
		num_ports = 8;		/* should be benign for 4 port devs */
954 start_hc = port >> MV_PORT_HC_SHIFT;
956 num_ports = num_hcs = 1;
958 DPRINTK("All registers for port(s) %u-%u:\n", start_port,
959 num_ports > 1 ? num_ports - 1 : start_port);
962 DPRINTK("PCI config space regs:\n");
963 mv_dump_pci_cfg(pdev, 0x68);
965 DPRINTK("PCI regs:\n");
966 mv_dump_mem(mmio_base+0xc00, 0x3c);
967 mv_dump_mem(mmio_base+0xd00, 0x34);
968 mv_dump_mem(mmio_base+0xf00, 0x4);
969 mv_dump_mem(mmio_base+0x1d00, 0x6c);
970 for (hc = start_hc; hc < start_hc + num_hcs; hc++) {
971 hc_base = mv_hc_base(mmio_base, hc);
972 DPRINTK("HC regs (HC %i):\n", hc);
973 mv_dump_mem(hc_base, 0x1c);
975 for (p = start_port; p < start_port + num_ports; p++) {
976 port_base = mv_port_base(mmio_base, p);
977 DPRINTK("EDMA regs (port %i):\n", p);
978 mv_dump_mem(port_base, 0x54);
979 DPRINTK("SATA regs (port %i):\n", p);
980 mv_dump_mem(port_base+0x300, 0x60);
985 static unsigned int mv_scr_offset(unsigned int sc_reg_in)
993 ofs = SATA_STATUS_OFS + (sc_reg_in * sizeof(u32));
996 ofs = SATA_ACTIVE_OFS; /* active is not with the others */
1005 static int mv_scr_read(struct ata_port *ap, unsigned int sc_reg_in, u32 *val)
1007 unsigned int ofs = mv_scr_offset(sc_reg_in);
1009 if (ofs != 0xffffffffU) {
1010 *val = readl(mv_ap_base(ap) + ofs);
1016 static int mv_scr_write(struct ata_port *ap, unsigned int sc_reg_in, u32 val)
1018 unsigned int ofs = mv_scr_offset(sc_reg_in);
1020 if (ofs != 0xffffffffU) {
1021 writelfl(val, mv_ap_base(ap) + ofs);
1027 static void mv6_dev_config(struct ata_device *adev)
1030 * Deal with Gen-II ("mv6") hardware quirks/restrictions:
1032 * Gen-II does not support NCQ over a port multiplier
1033 * (no FIS-based switching).
1035 * We don't have hob_nsect when doing NCQ commands on Gen-II.
1036 * See mv_qc_prep() for more info.
1038 if (adev->flags & ATA_DFLAG_NCQ) {
1039 if (sata_pmp_attached(adev->link->ap))
1040 adev->flags &= ~ATA_DFLAG_NCQ;
1041 else if (adev->max_sectors > ATA_MAX_SECTORS)
1042 adev->max_sectors = ATA_MAX_SECTORS;
1046 static void mv_config_fbs(void __iomem *port_mmio, int enable_fbs)
1048 u32 old_fcfg, new_fcfg, old_ltmode, new_ltmode;
1050 * Various bit settings required for operation
1051 * in FIS-based switching (fbs) mode on GenIIe:
1053 old_fcfg = readl(port_mmio + FIS_CFG_OFS);
1054 old_ltmode = readl(port_mmio + LTMODE_OFS);
1056 new_fcfg = old_fcfg | FIS_CFG_SINGLE_SYNC;
1057 new_ltmode = old_ltmode | LTMODE_BIT8;
1058 } else { /* disable fbs */
1059 new_fcfg = old_fcfg & ~FIS_CFG_SINGLE_SYNC;
1060 new_ltmode = old_ltmode & ~LTMODE_BIT8;
1062 if (new_fcfg != old_fcfg)
1063 writelfl(new_fcfg, port_mmio + FIS_CFG_OFS);
1064 if (new_ltmode != old_ltmode)
1065 writelfl(new_ltmode, port_mmio + LTMODE_OFS);
1068 static void mv_edma_cfg(struct ata_port *ap, int want_ncq)
1071 struct mv_port_priv *pp = ap->private_data;
1072 struct mv_host_priv *hpriv = ap->host->private_data;
1073 void __iomem *port_mmio = mv_ap_base(ap);
1075 /* set up non-NCQ EDMA configuration */
1076 cfg = EDMA_CFG_Q_DEPTH; /* always 0x1f for *all* chips */
1078 if (IS_GEN_I(hpriv))
1079 cfg |= (1 << 8); /* enab config burst size mask */
1081 else if (IS_GEN_II(hpriv))
1082 cfg |= EDMA_CFG_RD_BRST_EXT | EDMA_CFG_WR_BUFF_LEN;
1084 else if (IS_GEN_IIE(hpriv)) {
1085 cfg |= (1 << 23); /* do not mask PM field in rx'd FIS */
1086 cfg |= (1 << 22); /* enab 4-entry host queue cache */
1087 cfg |= (1 << 18); /* enab early completion */
1088 cfg |= (1 << 17); /* enab cut-through (dis stor&forwrd) */
1090 if (want_ncq && sata_pmp_attached(ap)) {
1091 cfg |= EDMA_CFG_EDMA_FBS; /* FIS-based switching */
1092 mv_config_fbs(port_mmio, 1);
1094 mv_config_fbs(port_mmio, 0);
1099 cfg |= EDMA_CFG_NCQ;
1100 pp->pp_flags |= MV_PP_FLAG_NCQ_EN;
1102 pp->pp_flags &= ~MV_PP_FLAG_NCQ_EN;
1104 writelfl(cfg, port_mmio + EDMA_CFG_OFS);
1107 static void mv_port_free_dma_mem(struct ata_port *ap)
1109 struct mv_host_priv *hpriv = ap->host->private_data;
1110 struct mv_port_priv *pp = ap->private_data;
1114 dma_pool_free(hpriv->crqb_pool, pp->crqb, pp->crqb_dma);
1118 dma_pool_free(hpriv->crpb_pool, pp->crpb, pp->crpb_dma);
1122 * For GEN_I, there's no NCQ, so we have only a single sg_tbl.
1123 * For later hardware, we have one unique sg_tbl per NCQ tag.
1125 for (tag = 0; tag < MV_MAX_Q_DEPTH; ++tag) {
1126 if (pp->sg_tbl[tag]) {
1127 if (tag == 0 || !IS_GEN_I(hpriv))
1128 dma_pool_free(hpriv->sg_tbl_pool,
1130 pp->sg_tbl_dma[tag]);
1131 pp->sg_tbl[tag] = NULL;
1137 * mv_port_start - Port specific init/start routine.
1138 * @ap: ATA channel to manipulate
1140 * Allocate and point to DMA memory, init port private memory,
1144 * Inherited from caller.
1146 static int mv_port_start(struct ata_port *ap)
1148 struct device *dev = ap->host->dev;
1149 struct mv_host_priv *hpriv = ap->host->private_data;
1150 struct mv_port_priv *pp;
1153 pp = devm_kzalloc(dev, sizeof(*pp), GFP_KERNEL);
1156 ap->private_data = pp;
1158 pp->crqb = dma_pool_alloc(hpriv->crqb_pool, GFP_KERNEL, &pp->crqb_dma);
1161 memset(pp->crqb, 0, MV_CRQB_Q_SZ);
1163 pp->crpb = dma_pool_alloc(hpriv->crpb_pool, GFP_KERNEL, &pp->crpb_dma);
1165 goto out_port_free_dma_mem;
1166 memset(pp->crpb, 0, MV_CRPB_Q_SZ);
1169 * For GEN_I, there's no NCQ, so we only allocate a single sg_tbl.
1170 * For later hardware, we need one unique sg_tbl per NCQ tag.
1172 for (tag = 0; tag < MV_MAX_Q_DEPTH; ++tag) {
1173 if (tag == 0 || !IS_GEN_I(hpriv)) {
1174 pp->sg_tbl[tag] = dma_pool_alloc(hpriv->sg_tbl_pool,
1175 GFP_KERNEL, &pp->sg_tbl_dma[tag]);
1176 if (!pp->sg_tbl[tag])
1177 goto out_port_free_dma_mem;
1179 pp->sg_tbl[tag] = pp->sg_tbl[0];
1180 pp->sg_tbl_dma[tag] = pp->sg_tbl_dma[0];
1185 out_port_free_dma_mem:
1186 mv_port_free_dma_mem(ap);
1191 * mv_port_stop - Port specific cleanup/stop routine.
1192 * @ap: ATA channel to manipulate
1194 * Stop DMA, cleanup port memory.
1197 * This routine uses the host lock to protect the DMA stop.
1199 static void mv_port_stop(struct ata_port *ap)
1202 mv_port_free_dma_mem(ap);
1206 * mv_fill_sg - Fill out the Marvell ePRD (scatter gather) entries
1207 * @qc: queued command whose SG list to source from
1209 * Populate the SG list and mark the last entry.
1212 * Inherited from caller.
1214 static void mv_fill_sg(struct ata_queued_cmd *qc)
1216 struct mv_port_priv *pp = qc->ap->private_data;
1217 struct scatterlist *sg;
1218 struct mv_sg *mv_sg, *last_sg = NULL;
1221 mv_sg = pp->sg_tbl[qc->tag];
1222 for_each_sg(qc->sg, sg, qc->n_elem, si) {
1223 dma_addr_t addr = sg_dma_address(sg);
1224 u32 sg_len = sg_dma_len(sg);
1227 u32 offset = addr & 0xffff;
1230 if ((offset + sg_len > 0x10000))
1231 len = 0x10000 - offset;
1233 mv_sg->addr = cpu_to_le32(addr & 0xffffffff);
1234 mv_sg->addr_hi = cpu_to_le32((addr >> 16) >> 16);
1235 mv_sg->flags_size = cpu_to_le32(len & 0xffff);
1245 if (likely(last_sg))
1246 last_sg->flags_size |= cpu_to_le32(EPRD_FLAG_END_OF_TBL);
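/* Example of the 64KB splitting above: an S/G entry whose address sits at
 * offset 0xfe00 within a 64KB window, with length 0x8000, is emitted as a
 * 0x200-byte ePRD followed by a 0x7e00-byte one.  This potential doubling
 * is why .sg_tablesize is only MV_MAX_SG_CT / 2.
 */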
static void mv_crqb_pack_cmd(__le16 *cmdw, u8 data, u8 addr, unsigned last)
{
	u16 tmp = data | (addr << CRQB_CMD_ADDR_SHIFT) | CRQB_CMD_CS |
		(last ? CRQB_CMD_LAST : 0);
	*cmdw = cpu_to_le16(tmp);
}
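/* Each halfword packed above is one (register, value) pair: the value in
 * the low byte, the ATA register address at CRQB_CMD_ADDR_SHIFT, and
 * CRQB_CMD_LAST marking the final pair of the request (see mv_qc_prep()).
 */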
1257 * mv_qc_prep - Host specific command preparation.
1258 * @qc: queued command to prepare
1260 * This routine simply redirects to the general purpose routine
1261 * if command is not DMA. Else, it handles prep of the CRQB
1262 * (command request block), does some sanity checking, and calls
1263 * the SG load routine.
1266 * Inherited from caller.
1268 static void mv_qc_prep(struct ata_queued_cmd *qc)
1270 struct ata_port *ap = qc->ap;
1271 struct mv_port_priv *pp = ap->private_data;
1273 struct ata_taskfile *tf;
1277 if ((qc->tf.protocol != ATA_PROT_DMA) &&
1278 (qc->tf.protocol != ATA_PROT_NCQ))
1281 /* Fill in command request block
1283 if (!(qc->tf.flags & ATA_TFLAG_WRITE))
1284 flags |= CRQB_FLAG_READ;
1285 WARN_ON(MV_MAX_Q_DEPTH <= qc->tag);
1286 flags |= qc->tag << CRQB_TAG_SHIFT;
1287 flags |= (qc->dev->link->pmp & 0xf) << CRQB_PMP_SHIFT;
1289 /* get current queue index from software */
1290 in_index = pp->req_idx & MV_MAX_Q_DEPTH_MASK;
1292 pp->crqb[in_index].sg_addr =
1293 cpu_to_le32(pp->sg_tbl_dma[qc->tag] & 0xffffffff);
1294 pp->crqb[in_index].sg_addr_hi =
1295 cpu_to_le32((pp->sg_tbl_dma[qc->tag] >> 16) >> 16);
1296 pp->crqb[in_index].ctrl_flags = cpu_to_le16(flags);
1298 cw = &pp->crqb[in_index].ata_cmd[0];
	/* Sadly, the CRQB cannot accommodate all registers--there are
	 * only 11 bytes...so we must pick and choose required
	 * registers based on the command.  So, we drop feature and
	 * hob_feature for [RW] DMA commands, but they are needed for
	 * NCQ.  NCQ will drop hob_nsect.
	 */
1307 switch (tf->command) {
1309 case ATA_CMD_READ_EXT:
1311 case ATA_CMD_WRITE_EXT:
1312 case ATA_CMD_WRITE_FUA_EXT:
1313 mv_crqb_pack_cmd(cw++, tf->hob_nsect, ATA_REG_NSECT, 0);
1315 case ATA_CMD_FPDMA_READ:
1316 case ATA_CMD_FPDMA_WRITE:
1317 mv_crqb_pack_cmd(cw++, tf->hob_feature, ATA_REG_FEATURE, 0);
1318 mv_crqb_pack_cmd(cw++, tf->feature, ATA_REG_FEATURE, 0);
1321 /* The only other commands EDMA supports in non-queued and
1322 * non-NCQ mode are: [RW] STREAM DMA and W DMA FUA EXT, none
1323 * of which are defined/used by Linux. If we get here, this
1324 * driver needs work.
1326 * FIXME: modify libata to give qc_prep a return value and
1327 * return error here.
1329 BUG_ON(tf->command);
1332 mv_crqb_pack_cmd(cw++, tf->nsect, ATA_REG_NSECT, 0);
1333 mv_crqb_pack_cmd(cw++, tf->hob_lbal, ATA_REG_LBAL, 0);
1334 mv_crqb_pack_cmd(cw++, tf->lbal, ATA_REG_LBAL, 0);
1335 mv_crqb_pack_cmd(cw++, tf->hob_lbam, ATA_REG_LBAM, 0);
1336 mv_crqb_pack_cmd(cw++, tf->lbam, ATA_REG_LBAM, 0);
1337 mv_crqb_pack_cmd(cw++, tf->hob_lbah, ATA_REG_LBAH, 0);
1338 mv_crqb_pack_cmd(cw++, tf->lbah, ATA_REG_LBAH, 0);
1339 mv_crqb_pack_cmd(cw++, tf->device, ATA_REG_DEVICE, 0);
1340 mv_crqb_pack_cmd(cw++, tf->command, ATA_REG_CMD, 1); /* last */
1342 if (!(qc->flags & ATA_QCFLAG_DMAMAP))
1348 * mv_qc_prep_iie - Host specific command preparation.
1349 * @qc: queued command to prepare
1351 * This routine simply redirects to the general purpose routine
1352 * if command is not DMA. Else, it handles prep of the CRQB
1353 * (command request block), does some sanity checking, and calls
1354 * the SG load routine.
1357 * Inherited from caller.
1359 static void mv_qc_prep_iie(struct ata_queued_cmd *qc)
1361 struct ata_port *ap = qc->ap;
1362 struct mv_port_priv *pp = ap->private_data;
1363 struct mv_crqb_iie *crqb;
1364 struct ata_taskfile *tf;
1368 if ((qc->tf.protocol != ATA_PROT_DMA) &&
1369 (qc->tf.protocol != ATA_PROT_NCQ))
1372 /* Fill in Gen IIE command request block */
1373 if (!(qc->tf.flags & ATA_TFLAG_WRITE))
1374 flags |= CRQB_FLAG_READ;
1376 WARN_ON(MV_MAX_Q_DEPTH <= qc->tag);
1377 flags |= qc->tag << CRQB_TAG_SHIFT;
1378 flags |= qc->tag << CRQB_HOSTQ_SHIFT;
1379 flags |= (qc->dev->link->pmp & 0xf) << CRQB_PMP_SHIFT;
1381 /* get current queue index from software */
1382 in_index = pp->req_idx & MV_MAX_Q_DEPTH_MASK;
1384 crqb = (struct mv_crqb_iie *) &pp->crqb[in_index];
1385 crqb->addr = cpu_to_le32(pp->sg_tbl_dma[qc->tag] & 0xffffffff);
1386 crqb->addr_hi = cpu_to_le32((pp->sg_tbl_dma[qc->tag] >> 16) >> 16);
1387 crqb->flags = cpu_to_le32(flags);
1390 crqb->ata_cmd[0] = cpu_to_le32(
1391 (tf->command << 16) |
1394 crqb->ata_cmd[1] = cpu_to_le32(
1400 crqb->ata_cmd[2] = cpu_to_le32(
1401 (tf->hob_lbal << 0) |
1402 (tf->hob_lbam << 8) |
1403 (tf->hob_lbah << 16) |
1404 (tf->hob_feature << 24)
1406 crqb->ata_cmd[3] = cpu_to_le32(
1408 (tf->hob_nsect << 8)
1411 if (!(qc->flags & ATA_QCFLAG_DMAMAP))
1417 * mv_qc_issue - Initiate a command to the host
1418 * @qc: queued command to start
1420 * This routine simply redirects to the general purpose routine
1421 * if command is not DMA. Else, it sanity checks our local
1422 * caches of the request producer/consumer indices then enables
1423 * DMA and bumps the request producer index.
1426 * Inherited from caller.
1428 static unsigned int mv_qc_issue(struct ata_queued_cmd *qc)
1430 struct ata_port *ap = qc->ap;
1431 void __iomem *port_mmio = mv_ap_base(ap);
1432 struct mv_port_priv *pp = ap->private_data;
1435 if ((qc->tf.protocol != ATA_PROT_DMA) &&
1436 (qc->tf.protocol != ATA_PROT_NCQ)) {
1438 * We're about to send a non-EDMA capable command to the
1439 * port. Turn off EDMA so there won't be problems accessing
1440 * shadow block, etc registers.
1443 mv_pmp_select(ap, qc->dev->link->pmp);
1444 return ata_sff_qc_issue(qc);
1447 mv_start_dma(ap, port_mmio, pp, qc->tf.protocol);
1451 in_index = (pp->req_idx & MV_MAX_Q_DEPTH_MASK) << EDMA_REQ_Q_PTR_SHIFT;
1453 /* and write the request in pointer to kick the EDMA to life */
1454 writelfl((pp->crqb_dma & EDMA_REQ_Q_BASE_LO_MASK) | in_index,
1455 port_mmio + EDMA_REQ_Q_IN_PTR_OFS);
1461 * mv_err_intr - Handle error interrupts on the port
1462 * @ap: ATA channel to manipulate
1463 * @reset_allowed: bool: 0 == don't trigger from reset here
1465 * In most cases, just clear the interrupt and move on. However,
1466 * some cases require an eDMA reset, which also performs a COMRESET.
1467 * The SERR case requires a clear of pending errors in the SATA
1468 * SERROR register. Finally, if the port disabled DMA,
1469 * update our cached copy to match.
1472 * Inherited from caller.
1474 static void mv_err_intr(struct ata_port *ap, struct ata_queued_cmd *qc)
1476 void __iomem *port_mmio = mv_ap_base(ap);
1477 u32 edma_err_cause, eh_freeze_mask, serr = 0;
1478 struct mv_port_priv *pp = ap->private_data;
1479 struct mv_host_priv *hpriv = ap->host->private_data;
1480 unsigned int edma_enabled = (pp->pp_flags & MV_PP_FLAG_EDMA_EN);
1481 unsigned int action = 0, err_mask = 0;
1482 struct ata_eh_info *ehi = &ap->link.eh_info;
1484 ata_ehi_clear_desc(ehi);
1486 if (!edma_enabled) {
		/* just a guess: do we need to do this? should we
		 * expand this, and do it in all cases?
		 */
1490 sata_scr_read(&ap->link, SCR_ERROR, &serr);
1491 sata_scr_write_flush(&ap->link, SCR_ERROR, serr);
1494 edma_err_cause = readl(port_mmio + EDMA_ERR_IRQ_CAUSE_OFS);
1496 ata_ehi_push_desc(ehi, "edma_err 0x%08x", edma_err_cause);
1499 * all generations share these EDMA error cause bits
1502 if (edma_err_cause & EDMA_ERR_DEV)
1503 err_mask |= AC_ERR_DEV;
1504 if (edma_err_cause & (EDMA_ERR_D_PAR | EDMA_ERR_PRD_PAR |
1505 EDMA_ERR_CRQB_PAR | EDMA_ERR_CRPB_PAR |
1506 EDMA_ERR_INTRL_PAR)) {
1507 err_mask |= AC_ERR_ATA_BUS;
1508 action |= ATA_EH_RESET;
1509 ata_ehi_push_desc(ehi, "parity error");
1511 if (edma_err_cause & (EDMA_ERR_DEV_DCON | EDMA_ERR_DEV_CON)) {
1512 ata_ehi_hotplugged(ehi);
1513 ata_ehi_push_desc(ehi, edma_err_cause & EDMA_ERR_DEV_DCON ?
1514 "dev disconnect" : "dev connect");
1515 action |= ATA_EH_RESET;
1518 if (IS_GEN_I(hpriv)) {
1519 eh_freeze_mask = EDMA_EH_FREEZE_5;
1521 if (edma_err_cause & EDMA_ERR_SELF_DIS_5) {
1522 pp = ap->private_data;
1523 pp->pp_flags &= ~MV_PP_FLAG_EDMA_EN;
1524 ata_ehi_push_desc(ehi, "EDMA self-disable");
1527 eh_freeze_mask = EDMA_EH_FREEZE;
1529 if (edma_err_cause & EDMA_ERR_SELF_DIS) {
1530 pp = ap->private_data;
1531 pp->pp_flags &= ~MV_PP_FLAG_EDMA_EN;
1532 ata_ehi_push_desc(ehi, "EDMA self-disable");
1535 if (edma_err_cause & EDMA_ERR_SERR) {
1536 sata_scr_read(&ap->link, SCR_ERROR, &serr);
1537 sata_scr_write_flush(&ap->link, SCR_ERROR, serr);
1538 err_mask = AC_ERR_ATA_BUS;
1539 action |= ATA_EH_RESET;
1543 /* Clear EDMA now that SERR cleanup done */
1544 writelfl(~edma_err_cause, port_mmio + EDMA_ERR_IRQ_CAUSE_OFS);
1547 err_mask = AC_ERR_OTHER;
1548 action |= ATA_EH_RESET;
1551 ehi->serror |= serr;
1552 ehi->action |= action;
1555 qc->err_mask |= err_mask;
1557 ehi->err_mask |= err_mask;
1559 if (edma_err_cause & eh_freeze_mask)
1560 ata_port_freeze(ap);
1565 static void mv_intr_pio(struct ata_port *ap)
1567 struct ata_queued_cmd *qc;
1570 /* ignore spurious intr if drive still BUSY */
1571 ata_status = readb(ap->ioaddr.status_addr);
1572 if (unlikely(ata_status & ATA_BUSY))
1575 /* get active ATA command */
1576 qc = ata_qc_from_tag(ap, ap->link.active_tag);
1577 if (unlikely(!qc)) /* no active tag */
1579 if (qc->tf.flags & ATA_TFLAG_POLLING) /* polling; we don't own qc */
1582 /* and finally, complete the ATA command */
1583 qc->err_mask |= ac_err_mask(ata_status);
1584 ata_qc_complete(qc);
1587 static void mv_intr_edma(struct ata_port *ap)
1589 void __iomem *port_mmio = mv_ap_base(ap);
1590 struct mv_host_priv *hpriv = ap->host->private_data;
1591 struct mv_port_priv *pp = ap->private_data;
1592 struct ata_queued_cmd *qc;
1593 u32 out_index, in_index;
1594 bool work_done = false;
1596 /* get h/w response queue pointer */
1597 in_index = (readl(port_mmio + EDMA_RSP_Q_IN_PTR_OFS)
1598 >> EDMA_RSP_Q_PTR_SHIFT) & MV_MAX_Q_DEPTH_MASK;
1604 /* get s/w response queue last-read pointer, and compare */
1605 out_index = pp->resp_idx & MV_MAX_Q_DEPTH_MASK;
1606 if (in_index == out_index)
1609 /* 50xx: get active ATA command */
1610 if (IS_GEN_I(hpriv))
1611 tag = ap->link.active_tag;
1613 /* Gen II/IIE: get active ATA command via tag, to enable
1614 * support for queueing. this works transparently for
1615 * queued and non-queued modes.
1618 tag = le16_to_cpu(pp->crpb[out_index].id) & 0x1f;
1620 qc = ata_qc_from_tag(ap, tag);
1622 /* For non-NCQ mode, the lower 8 bits of status
1623 * are from EDMA_ERR_IRQ_CAUSE_OFS,
1624 * which should be zero if all went well.
1626 status = le16_to_cpu(pp->crpb[out_index].flags);
1627 if ((status & 0xff) && !(pp->pp_flags & MV_PP_FLAG_NCQ_EN)) {
1628 mv_err_intr(ap, qc);
1632 /* and finally, complete the ATA command */
1635 ac_err_mask(status >> CRPB_FLAG_STATUS_SHIFT);
1636 ata_qc_complete(qc);
1639 /* advance software response queue pointer, to
1640 * indicate (after the loop completes) to hardware
1641 * that we have consumed a response queue entry.
1648 writelfl((pp->crpb_dma & EDMA_RSP_Q_BASE_LO_MASK) |
1649 (out_index << EDMA_RSP_Q_PTR_SHIFT),
1650 port_mmio + EDMA_RSP_Q_OUT_PTR_OFS);
1654 * mv_host_intr - Handle all interrupts on the given host controller
1655 * @host: host specific structure
1656 * @relevant: port error bits relevant to this host controller
1657 * @hc: which host controller we're to look at
1659 * Read then write clear the HC interrupt status then walk each
1660 * port connected to the HC and see if it needs servicing. Port
1661 * success ints are reported in the HC interrupt status reg, the
1662 * port error ints are reported in the higher level main
1663 * interrupt status register and thus are passed in via the
1664 * 'relevant' argument.
1667 * Inherited from caller.
1669 static void mv_host_intr(struct ata_host *host, u32 relevant, unsigned int hc)
1671 struct mv_host_priv *hpriv = host->private_data;
1672 void __iomem *mmio = hpriv->base;
1673 void __iomem *hc_mmio = mv_hc_base(mmio, hc);
1675 int port, port0, last_port;
1680 port0 = MV_PORTS_PER_HC;
1683 last_port = port0 + MV_PORTS_PER_HC;
1685 last_port = port0 + hpriv->n_ports;
1686 /* we'll need the HC success int register in most cases */
1687 hc_irq_cause = readl(hc_mmio + HC_IRQ_CAUSE_OFS);
1691 writelfl(~hc_irq_cause, hc_mmio + HC_IRQ_CAUSE_OFS);
1693 VPRINTK("ENTER, hc%u relevant=0x%08x HC IRQ cause=0x%08x\n",
1694 hc, relevant, hc_irq_cause);
1696 for (port = port0; port < last_port; port++) {
1697 struct ata_port *ap = host->ports[port];
1698 struct mv_port_priv *pp;
1699 int have_err_bits, hard_port, shift;
1701 if ((!ap) || (ap->flags & ATA_FLAG_DISABLED))
1704 pp = ap->private_data;
1706 shift = port << 1; /* (port * 2) */
1707 if (port >= MV_PORTS_PER_HC)
1708 shift++; /* skip bit 8 in the HC Main IRQ reg */
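		/* e.g. port 5 ends up with shift == 11, so its err/done bits
		 * are main-cause bits 11/12; bit 8 is PORTS_0_3_COAL_DONE,
		 * not a per-port bit, hence the skip above.
		 */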
1710 have_err_bits = ((PORT0_ERR << shift) & relevant);
1712 if (unlikely(have_err_bits)) {
1713 struct ata_queued_cmd *qc;
1715 qc = ata_qc_from_tag(ap, ap->link.active_tag);
1716 if (qc && (qc->tf.flags & ATA_TFLAG_POLLING))
1719 mv_err_intr(ap, qc);
1723 hard_port = mv_hardport_from_port(port); /* range 0..3 */
1725 if (pp->pp_flags & MV_PP_FLAG_EDMA_EN) {
1726 if ((CRPB_DMA_DONE << hard_port) & hc_irq_cause)
1729 if ((DEV_IRQ << hard_port) & hc_irq_cause)
1736 static void mv_pci_error(struct ata_host *host, void __iomem *mmio)
1738 struct mv_host_priv *hpriv = host->private_data;
1739 struct ata_port *ap;
1740 struct ata_queued_cmd *qc;
1741 struct ata_eh_info *ehi;
1742 unsigned int i, err_mask, printed = 0;
1745 err_cause = readl(mmio + hpriv->irq_cause_ofs);
1747 dev_printk(KERN_ERR, host->dev, "PCI ERROR; PCI IRQ cause=0x%08x\n",
1750 DPRINTK("All regs @ PCI error\n");
1751 mv_dump_all_regs(mmio, -1, to_pci_dev(host->dev));
1753 writelfl(0, mmio + hpriv->irq_cause_ofs);
1755 for (i = 0; i < host->n_ports; i++) {
1756 ap = host->ports[i];
1757 if (!ata_link_offline(&ap->link)) {
1758 ehi = &ap->link.eh_info;
1759 ata_ehi_clear_desc(ehi);
1761 ata_ehi_push_desc(ehi,
1762 "PCI err cause 0x%08x", err_cause);
1763 err_mask = AC_ERR_HOST_BUS;
1764 ehi->action = ATA_EH_RESET;
1765 qc = ata_qc_from_tag(ap, ap->link.active_tag);
1767 qc->err_mask |= err_mask;
1769 ehi->err_mask |= err_mask;
1771 ata_port_freeze(ap);
1777 * mv_interrupt - Main interrupt event handler
1779 * @dev_instance: private data; in this case the host structure
1781 * Read the read only register to determine if any host
1782 * controllers have pending interrupts. If so, call lower level
1783 * routine to handle. Also check for PCI errors which are only
1787 * This routine holds the host lock while processing pending
1790 static irqreturn_t mv_interrupt(int irq, void *dev_instance)
1792 struct ata_host *host = dev_instance;
1793 struct mv_host_priv *hpriv = host->private_data;
1794 unsigned int hc, handled = 0, n_hcs;
1795 void __iomem *mmio = hpriv->base;
1796 u32 irq_stat, irq_mask;
1798 /* Note to self: &host->lock == &ap->host->lock == ap->lock */
1799 spin_lock(&host->lock);
1801 irq_stat = readl(hpriv->main_cause_reg_addr);
1802 irq_mask = readl(hpriv->main_mask_reg_addr);
	/* check the cases where we either have nothing pending or have read
	 * a bogus register value which can indicate HW removal or PCI fault
	 */
1807 if (!(irq_stat & irq_mask) || (0xffffffffU == irq_stat))
1810 n_hcs = mv_get_hc_count(host->ports[0]->flags);
1812 if (unlikely((irq_stat & PCI_ERR) && HAS_PCI(host))) {
1813 mv_pci_error(host, mmio);
1815 goto out_unlock; /* skip all other HC irq handling */
1818 for (hc = 0; hc < n_hcs; hc++) {
1819 u32 relevant = irq_stat & (HC0_IRQ_PEND << (hc * HC_SHIFT));
1821 mv_host_intr(host, relevant, hc);
1827 spin_unlock(&host->lock);
1829 return IRQ_RETVAL(handled);
1832 static unsigned int mv5_scr_offset(unsigned int sc_reg_in)
1836 switch (sc_reg_in) {
1840 ofs = sc_reg_in * sizeof(u32);
1849 static int mv5_scr_read(struct ata_port *ap, unsigned int sc_reg_in, u32 *val)
1851 struct mv_host_priv *hpriv = ap->host->private_data;
1852 void __iomem *mmio = hpriv->base;
1853 void __iomem *addr = mv5_phy_base(mmio, ap->port_no);
1854 unsigned int ofs = mv5_scr_offset(sc_reg_in);
1856 if (ofs != 0xffffffffU) {
1857 *val = readl(addr + ofs);
1863 static int mv5_scr_write(struct ata_port *ap, unsigned int sc_reg_in, u32 val)
1865 struct mv_host_priv *hpriv = ap->host->private_data;
1866 void __iomem *mmio = hpriv->base;
1867 void __iomem *addr = mv5_phy_base(mmio, ap->port_no);
1868 unsigned int ofs = mv5_scr_offset(sc_reg_in);
1870 if (ofs != 0xffffffffU) {
1871 writelfl(val, addr + ofs);
1877 static void mv5_reset_bus(struct ata_host *host, void __iomem *mmio)
1879 struct pci_dev *pdev = to_pci_dev(host->dev);
1882 early_5080 = (pdev->device == 0x5080) && (pdev->revision == 0);
1885 u32 tmp = readl(mmio + MV_PCI_EXP_ROM_BAR_CTL);
1887 writel(tmp, mmio + MV_PCI_EXP_ROM_BAR_CTL);
1890 mv_reset_pci_bus(host, mmio);
1893 static void mv5_reset_flash(struct mv_host_priv *hpriv, void __iomem *mmio)
1895 writel(0x0fcfffff, mmio + MV_FLASH_CTL);
1898 static void mv5_read_preamp(struct mv_host_priv *hpriv, int idx,
1901 void __iomem *phy_mmio = mv5_phy_base(mmio, idx);
1904 tmp = readl(phy_mmio + MV5_PHY_MODE);
1906 hpriv->signal[idx].pre = tmp & 0x1800; /* bits 12:11 */
1907 hpriv->signal[idx].amps = tmp & 0xe0; /* bits 7:5 */
1910 static void mv5_enable_leds(struct mv_host_priv *hpriv, void __iomem *mmio)
1914 writel(0, mmio + MV_GPIO_PORT_CTL);
1916 /* FIXME: handle MV_HP_ERRATA_50XXB2 errata */
1918 tmp = readl(mmio + MV_PCI_EXP_ROM_BAR_CTL);
1920 writel(tmp, mmio + MV_PCI_EXP_ROM_BAR_CTL);
1923 static void mv5_phy_errata(struct mv_host_priv *hpriv, void __iomem *mmio,
1926 void __iomem *phy_mmio = mv5_phy_base(mmio, port);
1927 const u32 mask = (1<<12) | (1<<11) | (1<<7) | (1<<6) | (1<<5);
1929 int fix_apm_sq = (hpriv->hp_flags & MV_HP_ERRATA_50XXB0);
1932 tmp = readl(phy_mmio + MV5_LT_MODE);
1934 writel(tmp, phy_mmio + MV5_LT_MODE);
1936 tmp = readl(phy_mmio + MV5_PHY_CTL);
1939 writel(tmp, phy_mmio + MV5_PHY_CTL);
1942 tmp = readl(phy_mmio + MV5_PHY_MODE);
1944 tmp |= hpriv->signal[port].pre;
1945 tmp |= hpriv->signal[port].amps;
1946 writel(tmp, phy_mmio + MV5_PHY_MODE);
1951 #define ZERO(reg) writel(0, port_mmio + (reg))
1952 static void mv5_reset_hc_port(struct mv_host_priv *hpriv, void __iomem *mmio,
1955 void __iomem *port_mmio = mv_port_base(mmio, port);
1958 * The datasheet warns against setting ATA_RST when EDMA is active
1959 * (but doesn't say what the problem might be). So we first try
1960 * to disable the EDMA engine before doing the ATA_RST operation.
1962 mv_reset_channel(hpriv, mmio, port);
1964 ZERO(0x028); /* command */
1965 writel(0x11f, port_mmio + EDMA_CFG_OFS);
1966 ZERO(0x004); /* timer */
1967 ZERO(0x008); /* irq err cause */
1968 ZERO(0x00c); /* irq err mask */
1969 ZERO(0x010); /* rq bah */
1970 ZERO(0x014); /* rq inp */
1971 ZERO(0x018); /* rq outp */
1972 ZERO(0x01c); /* respq bah */
1973 ZERO(0x024); /* respq outp */
1974 ZERO(0x020); /* respq inp */
1975 ZERO(0x02c); /* test control */
1976 writel(0xbc, port_mmio + EDMA_IORDY_TMOUT);
1980 #define ZERO(reg) writel(0, hc_mmio + (reg))
1981 static void mv5_reset_one_hc(struct mv_host_priv *hpriv, void __iomem *mmio,
1984 void __iomem *hc_mmio = mv_hc_base(mmio, hc);
1992 tmp = readl(hc_mmio + 0x20);
1995 writel(tmp, hc_mmio + 0x20);
1999 static int mv5_reset_hc(struct mv_host_priv *hpriv, void __iomem *mmio,
2002 unsigned int hc, port;
2004 for (hc = 0; hc < n_hc; hc++) {
2005 for (port = 0; port < MV_PORTS_PER_HC; port++)
2006 mv5_reset_hc_port(hpriv, mmio,
2007 (hc * MV_PORTS_PER_HC) + port);
2009 mv5_reset_one_hc(hpriv, mmio, hc);
2016 #define ZERO(reg) writel(0, mmio + (reg))
2017 static void mv_reset_pci_bus(struct ata_host *host, void __iomem *mmio)
2019 struct mv_host_priv *hpriv = host->private_data;
2022 tmp = readl(mmio + MV_PCI_MODE);
2024 writel(tmp, mmio + MV_PCI_MODE);
2026 ZERO(MV_PCI_DISC_TIMER);
2027 ZERO(MV_PCI_MSI_TRIGGER);
2028 writel(0x000100ff, mmio + MV_PCI_XBAR_TMOUT);
2029 ZERO(HC_MAIN_IRQ_MASK_OFS);
2030 ZERO(MV_PCI_SERR_MASK);
2031 ZERO(hpriv->irq_cause_ofs);
2032 ZERO(hpriv->irq_mask_ofs);
2033 ZERO(MV_PCI_ERR_LOW_ADDRESS);
2034 ZERO(MV_PCI_ERR_HIGH_ADDRESS);
2035 ZERO(MV_PCI_ERR_ATTRIBUTE);
2036 ZERO(MV_PCI_ERR_COMMAND);
2040 static void mv6_reset_flash(struct mv_host_priv *hpriv, void __iomem *mmio)
2044 mv5_reset_flash(hpriv, mmio);
2046 tmp = readl(mmio + MV_GPIO_PORT_CTL);
2048 tmp |= (1 << 5) | (1 << 6);
2049 writel(tmp, mmio + MV_GPIO_PORT_CTL);
2053 * mv6_reset_hc - Perform the 6xxx global soft reset
2054 * @mmio: base address of the HBA
2056 * This routine only applies to 6xxx parts.
2059 * Inherited from caller.
2061 static int mv6_reset_hc(struct mv_host_priv *hpriv, void __iomem *mmio,
2064 void __iomem *reg = mmio + PCI_MAIN_CMD_STS_OFS;
	/* Following procedure defined in PCI "main command and status
	 * register" table.
	 */
2072 writel(t | STOP_PCI_MASTER, reg);
2074 for (i = 0; i < 1000; i++) {
2077 if (PCI_MASTER_EMPTY & t)
2080 if (!(PCI_MASTER_EMPTY & t)) {
2081 printk(KERN_ERR DRV_NAME ": PCI master won't flush\n");
2089 writel(t | GLOB_SFT_RST, reg);
2092 } while (!(GLOB_SFT_RST & t) && (i-- > 0));
2094 if (!(GLOB_SFT_RST & t)) {
2095 printk(KERN_ERR DRV_NAME ": can't set global reset\n");
2100 /* clear reset and *reenable the PCI master* (not mentioned in spec) */
2103 writel(t & ~(GLOB_SFT_RST | STOP_PCI_MASTER), reg);
2106 } while ((GLOB_SFT_RST & t) && (i-- > 0));
2108 if (GLOB_SFT_RST & t) {
2109 printk(KERN_ERR DRV_NAME ": can't clear global reset\n");
2113 * Temporary: wait 3 seconds before port-probing can happen,
2114 * so that we don't miss finding sleepy SilXXXX port-multipliers.
2115 * This can go away once hotplug is fully/correctly implemented.
2123 static void mv6_read_preamp(struct mv_host_priv *hpriv, int idx,
2126 void __iomem *port_mmio;
2129 tmp = readl(mmio + MV_RESET_CFG);
2130 if ((tmp & (1 << 0)) == 0) {
2131 hpriv->signal[idx].amps = 0x7 << 8;
2132 hpriv->signal[idx].pre = 0x1 << 5;
2136 port_mmio = mv_port_base(mmio, idx);
2137 tmp = readl(port_mmio + PHY_MODE2);
2139 hpriv->signal[idx].amps = tmp & 0x700; /* bits 10:8 */
2140 hpriv->signal[idx].pre = tmp & 0xe0; /* bits 7:5 */
2143 static void mv6_enable_leds(struct mv_host_priv *hpriv, void __iomem *mmio)
2145 writel(0x00000060, mmio + MV_GPIO_PORT_CTL);
2148 static void mv6_phy_errata(struct mv_host_priv *hpriv, void __iomem *mmio,
2151 void __iomem *port_mmio = mv_port_base(mmio, port);
2153 u32 hp_flags = hpriv->hp_flags;
2155 hp_flags & (MV_HP_ERRATA_60X1B2 | MV_HP_ERRATA_60X1C0);
2157 hp_flags & (MV_HP_ERRATA_60X1B2 | MV_HP_ERRATA_60X1C0);
2160 if (fix_phy_mode2) {
2161 m2 = readl(port_mmio + PHY_MODE2);
2164 writel(m2, port_mmio + PHY_MODE2);
2168 m2 = readl(port_mmio + PHY_MODE2);
2169 m2 &= ~((1 << 16) | (1 << 31));
2170 writel(m2, port_mmio + PHY_MODE2);
2175 /* who knows what this magic does */
2176 tmp = readl(port_mmio + PHY_MODE3);
2179 writel(tmp, port_mmio + PHY_MODE3);
2181 if (fix_phy_mode4) {
2184 m4 = readl(port_mmio + PHY_MODE4);
2186 if (hp_flags & MV_HP_ERRATA_60X1B2)
2187 tmp = readl(port_mmio + PHY_MODE3);
2189 /* workaround for errata FEr SATA#10 (part 1) */
2190 m4 = (m4 & ~(1 << 1)) | (1 << 0);
2192 writel(m4, port_mmio + PHY_MODE4);
2194 if (hp_flags & MV_HP_ERRATA_60X1B2)
2195 writel(tmp, port_mmio + PHY_MODE3);
2198 /* Revert values of pre-emphasis and signal amps to the saved ones */
2199 m2 = readl(port_mmio + PHY_MODE2);
2201 m2 &= ~MV_M2_PREAMP_MASK;
2202 m2 |= hpriv->signal[port].amps;
2203 m2 |= hpriv->signal[port].pre;
2206 /* according to mvSata 3.6.1, some IIE values are fixed */
2207 if (IS_GEN_IIE(hpriv)) {
2212 writel(m2, port_mmio + PHY_MODE2);
2215 /* TODO: use the generic LED interface to configure the SATA Presence */
2216 /* & Activity LEDs on the board */
2217 static void mv_soc_enable_leds(struct mv_host_priv *hpriv,
2223 static void mv_soc_read_preamp(struct mv_host_priv *hpriv, int idx,
2226 void __iomem *port_mmio;
2229 port_mmio = mv_port_base(mmio, idx);
2230 tmp = readl(port_mmio + PHY_MODE2);
2232 hpriv->signal[idx].amps = tmp & 0x700; /* bits 10:8 */
2233 hpriv->signal[idx].pre = tmp & 0xe0; /* bits 7:5 */
2237 #define ZERO(reg) writel(0, port_mmio + (reg))
2238 static void mv_soc_reset_hc_port(struct mv_host_priv *hpriv,
2239 void __iomem *mmio, unsigned int port)
2241 void __iomem *port_mmio = mv_port_base(mmio, port);
2244 * The datasheet warns against setting ATA_RST when EDMA is active
2245 * (but doesn't say what the problem might be). So we first try
2246 * to disable the EDMA engine before doing the ATA_RST operation.
2248 mv_reset_channel(hpriv, mmio, port);
2250 ZERO(0x028); /* command */
2251 writel(0x101f, port_mmio + EDMA_CFG_OFS);
2252 ZERO(0x004); /* timer */
2253 ZERO(0x008); /* irq err cause */
2254 ZERO(0x00c); /* irq err mask */
2255 ZERO(0x010); /* rq bah */
2256 ZERO(0x014); /* rq inp */
2257 ZERO(0x018); /* rq outp */
2258 ZERO(0x01c); /* respq bah */
2259 ZERO(0x024); /* respq outp */
2260 ZERO(0x020); /* respq inp */
2261 ZERO(0x02c); /* test control */
2262 writel(0xbc, port_mmio + EDMA_IORDY_TMOUT);
2267 #define ZERO(reg) writel(0, hc_mmio + (reg))
2268 static void mv_soc_reset_one_hc(struct mv_host_priv *hpriv,
2271 void __iomem *hc_mmio = mv_hc_base(mmio, 0);
2281 static int mv_soc_reset_hc(struct mv_host_priv *hpriv,
2282 void __iomem *mmio, unsigned int n_hc)
2286 for (port = 0; port < hpriv->n_ports; port++)
2287 mv_soc_reset_hc_port(hpriv, mmio, port);
2289 mv_soc_reset_one_hc(hpriv, mmio);
2294 static void mv_soc_reset_flash(struct mv_host_priv *hpriv,
2300 static void mv_soc_reset_bus(struct ata_host *host, void __iomem *mmio)
2305 static void mv_setup_ifctl(void __iomem *port_mmio, int want_gen2i)
2307 u32 ifctl = readl(port_mmio + SATA_INTERFACE_CFG);
2309 ifctl = (ifctl & 0xf7f) | 0x9b1000; /* from chip spec */
2311 ifctl |= (1 << 7); /* enable gen2i speed */
2312 writelfl(ifctl, port_mmio + SATA_INTERFACE_CFG);
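/*
 * Note: mv_reset_channel() below calls this with want_gen2i=1 to allow
 * 3.0 Gb/s operation, while mv_hardreset() calls it with want_gen2i=0
 * to force 1.5 Gb/s when link bring-up keeps failing.
 */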
2316 * Caller must ensure that EDMA is not active,
2317 * by first doing mv_stop_edma() where needed.
2319 static void mv_reset_channel(struct mv_host_priv *hpriv, void __iomem *mmio,
2320 unsigned int port_no)
2322 void __iomem *port_mmio = mv_port_base(mmio, port_no);
2324 mv_stop_edma_engine(port_mmio);
2325 writelfl(ATA_RST, port_mmio + EDMA_CMD_OFS);
2327 if (!IS_GEN_I(hpriv)) {
2328 /* Enable 3.0gb/s link speed */
2329 mv_setup_ifctl(port_mmio, 1);
2332 * Strobing ATA_RST here causes a hard reset of the SATA transport,
2333 * link, and physical layers. It resets all SATA interface registers
2334 * (except for SATA_INTERFACE_CFG), and issues a COMRESET to the dev.
2336 writelfl(ATA_RST, port_mmio + EDMA_CMD_OFS);
2337 udelay(25); /* allow reset propagation */
2338 writelfl(0, port_mmio + EDMA_CMD_OFS);
2340 hpriv->ops->phy_errata(hpriv, mmio, port_no);
2342 if (IS_GEN_I(hpriv))
2346 static void mv_pmp_select(struct ata_port *ap, int pmp)
2348 if (sata_pmp_supported(ap)) {
2349 void __iomem *port_mmio = mv_ap_base(ap);
2350 u32 reg = readl(port_mmio + SATA_IFCTL_OFS);
2351 int old = reg & 0xf;
2354 reg = (reg & ~0xf) | pmp;	/* target PMP link goes in the low nibble */
2355 writelfl(reg, port_mmio + SATA_IFCTL_OFS);
2360 static int mv_pmp_hardreset(struct ata_link *link, unsigned int *class,
2361 unsigned long deadline)
2363 mv_pmp_select(link->ap, sata_srst_pmp(link));
2364 return sata_std_hardreset(link, class, deadline);
2367 static int mv_softreset(struct ata_link *link, unsigned int *class,
2368 unsigned long deadline)
2370 mv_pmp_select(link->ap, sata_srst_pmp(link));
2371 return ata_sff_softreset(link, class, deadline);
2374 static int mv_hardreset(struct ata_link *link, unsigned int *class,
2375 unsigned long deadline)
2377 struct ata_port *ap = link->ap;
2378 struct mv_host_priv *hpriv = ap->host->private_data;
2379 struct mv_port_priv *pp = ap->private_data;
2380 void __iomem *mmio = hpriv->base;
2381 int rc, attempts = 0, extra = 0;
2385 mv_reset_channel(hpriv, mmio, ap->port_no);
2386 pp->pp_flags &= ~MV_PP_FLAG_EDMA_EN;
2388 /* Workaround for errata FEr SATA#10 (part 2) */
2390 const unsigned long *timing =
2391 sata_ehc_deb_timing(&link->eh_context);
2393 rc = sata_link_hardreset(link, timing, deadline + extra,
2397 sata_scr_read(link, SCR_STATUS, &sstatus);
2398 if (!IS_GEN_I(hpriv) && ++attempts >= 5 && sstatus == 0x121) {
2399 /* Force 1.5gb/s link speed and try again */
2400 mv_setup_ifctl(mv_ap_base(ap), 0);
2401 if (time_after(jiffies + HZ, deadline))
2402 extra = HZ; /* only extend it once, max */
2404 } while (sstatus != 0x0 && sstatus != 0x113 && sstatus != 0x123);
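/*
 * Note on the SStatus values tested above (standard SStatus DET/SPD/IPM
 * field decode): 0x000 = no device; 0x113 = device present, PHY
 * communication established, 1.5 Gb/s; 0x123 = the same at 3.0 Gb/s.
 * 0x121 means a device was detected but PHY communication never came up,
 * which is why the loop drops the port to 1.5 Gb/s and retries.
 */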
2409 static void mv_eh_freeze(struct ata_port *ap)
2411 struct mv_host_priv *hpriv = ap->host->private_data;
2412 unsigned int hc = (ap->port_no > 3) ? 1 : 0;
2416 /* FIXME: handle coalescing completion events properly */
2418 shift = ap->port_no * 2;
2422 mask = 0x3 << shift;
2424 /* disable assertion of portN err, done events */
2425 tmp = readl(hpriv->main_mask_reg_addr);
2426 writelfl(tmp & ~mask, hpriv->main_mask_reg_addr);
2429 static void mv_eh_thaw(struct ata_port *ap)
2431 struct mv_host_priv *hpriv = ap->host->private_data;
2432 void __iomem *mmio = hpriv->base;
2433 unsigned int hc = (ap->port_no > 3) ? 1 : 0;
2434 void __iomem *hc_mmio = mv_hc_base(mmio, hc);
2435 void __iomem *port_mmio = mv_ap_base(ap);
2436 u32 tmp, mask, hc_irq_cause;
2437 unsigned int shift, hc_port_no = ap->port_no;
2439 /* FIXME: handle coalescing completion events properly */
2441 shift = ap->port_no * 2;
2447 mask = 0x3 << shift;
2449 /* clear EDMA errors on this port */
2450 writel(0, port_mmio + EDMA_ERR_IRQ_CAUSE_OFS);
2452 /* clear pending irq events */
2453 hc_irq_cause = readl(hc_mmio + HC_IRQ_CAUSE_OFS);
2454 hc_irq_cause &= ~(1 << hc_port_no); /* clear CRPB-done */
2455 hc_irq_cause &= ~(1 << (hc_port_no + 8)); /* clear Device int */
2456 writel(hc_irq_cause, hc_mmio + HC_IRQ_CAUSE_OFS);
2458 /* enable assertion of portN err, done events */
2459 tmp = readl(hpriv->main_mask_reg_addr);
2460 writelfl(tmp | mask, hpriv->main_mask_reg_addr);
2464 * mv_port_init - Perform some early initialization on a single port.
2465 * @port: libata data structure storing shadow register addresses
2466 * @port_mmio: base address of the port
2468 * Initialize shadow register mmio addresses, clear outstanding
2469 * interrupts on the port, and unmask interrupts for the future
2470 * start of the port.
2473 * Inherited from caller.
2475 static void mv_port_init(struct ata_ioports *port, void __iomem *port_mmio)
2477 void __iomem *shd_base = port_mmio + SHD_BLK_OFS;
2480 /* PIO related setup
2482 port->data_addr = shd_base + (sizeof(u32) * ATA_REG_DATA);
2484 port->feature_addr = shd_base + (sizeof(u32) * ATA_REG_ERR);
2485 port->nsect_addr = shd_base + (sizeof(u32) * ATA_REG_NSECT);
2486 port->lbal_addr = shd_base + (sizeof(u32) * ATA_REG_LBAL);
2487 port->lbam_addr = shd_base + (sizeof(u32) * ATA_REG_LBAM);
2488 port->lbah_addr = shd_base + (sizeof(u32) * ATA_REG_LBAH);
2489 port->device_addr = shd_base + (sizeof(u32) * ATA_REG_DEVICE);
2491 port->command_addr = shd_base + (sizeof(u32) * ATA_REG_STATUS);
2492 /* special case: control/altstatus doesn't have ATA_REG_ address */
2493 port->altstatus_addr = port->ctl_addr = shd_base + SHD_CTL_AST_OFS;
2496 port->cmd_addr = port->bmdma_addr = port->scr_addr = NULL;
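/*
 * Layout note (assuming the usual libata ATA_REG_* indices 0..7): each
 * shadow taskfile register occupies its own 32-bit slot, so the data
 * register lands at shd_base + 0x00 and status/command at shd_base + 0x1c,
 * with control/altstatus at the separate SHD_CTL_AST_OFS offset.
 */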
2498 /* Clear any currently outstanding port interrupt conditions */
2499 serr_ofs = mv_scr_offset(SCR_ERROR);
2500 writelfl(readl(port_mmio + serr_ofs), port_mmio + serr_ofs);
2501 writelfl(0, port_mmio + EDMA_ERR_IRQ_CAUSE_OFS);
2503 /* unmask all non-transient EDMA error interrupts */
2504 writelfl(~EDMA_ERR_IRQ_TRANSIENT, port_mmio + EDMA_ERR_IRQ_MASK_OFS);
2506 VPRINTK("EDMA cfg=0x%08x EDMA IRQ err cause/mask=0x%08x/0x%08x\n",
2507 readl(port_mmio + EDMA_CFG_OFS),
2508 readl(port_mmio + EDMA_ERR_IRQ_CAUSE_OFS),
2509 readl(port_mmio + EDMA_ERR_IRQ_MASK_OFS));
2512 static int mv_chip_id(struct ata_host *host, unsigned int board_idx)
2514 struct pci_dev *pdev = to_pci_dev(host->dev);
2515 struct mv_host_priv *hpriv = host->private_data;
2516 u32 hp_flags = hpriv->hp_flags;
2518 switch (board_idx) {
2520 hpriv->ops = &mv5xxx_ops;
2521 hp_flags |= MV_HP_GEN_I;
2523 switch (pdev->revision) {
2525 hp_flags |= MV_HP_ERRATA_50XXB0;
2528 hp_flags |= MV_HP_ERRATA_50XXB2;
2531 dev_printk(KERN_WARNING, &pdev->dev,
2532 "Applying 50XXB2 workarounds to unknown rev\n");
2533 hp_flags |= MV_HP_ERRATA_50XXB2;
2540 hpriv->ops = &mv5xxx_ops;
2541 hp_flags |= MV_HP_GEN_I;
2543 switch (pdev->revision) {
2545 hp_flags |= MV_HP_ERRATA_50XXB0;
2548 hp_flags |= MV_HP_ERRATA_50XXB2;
2551 dev_printk(KERN_WARNING, &pdev->dev,
2552 "Applying B2 workarounds to unknown rev\n");
2553 hp_flags |= MV_HP_ERRATA_50XXB2;
2560 hpriv->ops = &mv6xxx_ops;
2561 hp_flags |= MV_HP_GEN_II;
2563 switch (pdev->revision) {
2565 hp_flags |= MV_HP_ERRATA_60X1B2;
2568 hp_flags |= MV_HP_ERRATA_60X1C0;
2571 dev_printk(KERN_WARNING, &pdev->dev,
2572 "Applying B2 workarounds to unknown rev\n");
2573 hp_flags |= MV_HP_ERRATA_60X1B2;
2579 hp_flags |= MV_HP_PCIE;
2580 if (pdev->vendor == PCI_VENDOR_ID_TTI &&
2581 (pdev->device == 0x2300 || pdev->device == 0x2310))
2584 * Highpoint RocketRAID PCIe 23xx series cards:
2586 * Unconfigured drives are treated as "Legacy"
2587 * by the BIOS, and it overwrites sector 8 with
2588 * a "Lgcy" metadata block prior to Linux boot.
2590 * Configured drives (RAID or JBOD) leave sector 8
2591 * alone, but instead overwrite a high numbered
2592 * sector for the RAID metadata. This sector can
2593 * be determined exactly, by truncating the physical
2594 * drive capacity to a nice even GB value.
2596 * RAID metadata is at: (dev->n_sectors & ~0xfffff)
2598 * Warn the user, lest they think we're just buggy.
2600 printk(KERN_WARNING DRV_NAME ": Highpoint RocketRAID"
2601 " BIOS CORRUPTS DATA on all attached drives,"
2602 " regardless of if/how they are configured."
2604 printk(KERN_WARNING DRV_NAME ": For data safety, do not"
2605 " use sectors 8-9 on \"Legacy\" drives,"
2606 " and avoid the final two gigabytes on"
2607 " all RocketRAID BIOS initialized drives.\n");
2610 hpriv->ops = &mv6xxx_ops;
2611 hp_flags |= MV_HP_GEN_IIE;
2613 switch (pdev->revision) {
2615 hp_flags |= MV_HP_ERRATA_XX42A0;
2618 hp_flags |= MV_HP_ERRATA_60X1C0;
2621 dev_printk(KERN_WARNING, &pdev->dev,
2622 "Applying 60X1C0 workarounds to unknown rev\n");
2623 hp_flags |= MV_HP_ERRATA_60X1C0;
2628 hpriv->ops = &mv_soc_ops;
2629 hp_flags |= MV_HP_ERRATA_60X1C0;
2633 dev_printk(KERN_ERR, host->dev,
2634 "BUG: invalid board index %u\n", board_idx);
2638 hpriv->hp_flags = hp_flags;
2639 if (hp_flags & MV_HP_PCIE) {
2640 hpriv->irq_cause_ofs = PCIE_IRQ_CAUSE_OFS;
2641 hpriv->irq_mask_ofs = PCIE_IRQ_MASK_OFS;
2642 hpriv->unmask_all_irqs = PCIE_UNMASK_ALL_IRQS;
2644 hpriv->irq_cause_ofs = PCI_IRQ_CAUSE_OFS;
2645 hpriv->irq_mask_ofs = PCI_IRQ_MASK_OFS;
2646 hpriv->unmask_all_irqs = PCI_UNMASK_ALL_IRQS;
2653 * mv_init_host - Perform some early initialization of the host.
2654 * @host: ATA host to initialize
2655 * @board_idx: controller index
2657 * If possible, do an early global reset of the host. Then do
2658 * our port init and clear/unmask all/relevant host interrupts.
2661 * Inherited from caller.
2663 static int mv_init_host(struct ata_host *host, unsigned int board_idx)
2665 int rc = 0, n_hc, port, hc;
2666 struct mv_host_priv *hpriv = host->private_data;
2667 void __iomem *mmio = hpriv->base;
2669 rc = mv_chip_id(host, board_idx);
2673 if (HAS_PCI(host)) {
2674 hpriv->main_cause_reg_addr = hpriv->base +
2675 HC_MAIN_IRQ_CAUSE_OFS;
2676 hpriv->main_mask_reg_addr = hpriv->base + HC_MAIN_IRQ_MASK_OFS;
2678 hpriv->main_cause_reg_addr = hpriv->base +
2679 HC_SOC_MAIN_IRQ_CAUSE_OFS;
2680 hpriv->main_mask_reg_addr = hpriv->base +
2681 HC_SOC_MAIN_IRQ_MASK_OFS;
2683 /* global interrupt mask */
2684 writel(0, hpriv->main_mask_reg_addr);
2686 n_hc = mv_get_hc_count(host->ports[0]->flags);
2688 for (port = 0; port < host->n_ports; port++)
2689 hpriv->ops->read_preamp(hpriv, port, mmio);
2691 rc = hpriv->ops->reset_hc(hpriv, mmio, n_hc);
2695 hpriv->ops->reset_flash(hpriv, mmio);
2696 hpriv->ops->reset_bus(host, mmio);
2697 hpriv->ops->enable_leds(hpriv, mmio);
2699 for (port = 0; port < host->n_ports; port++) {
2700 struct ata_port *ap = host->ports[port];
2701 void __iomem *port_mmio = mv_port_base(mmio, port);
2703 mv_port_init(&ap->ioaddr, port_mmio);
2706 if (HAS_PCI(host)) {
2707 unsigned int offset = port_mmio - mmio;
2708 ata_port_pbar_desc(ap, MV_PRIMARY_BAR, -1, "mmio");
2709 ata_port_pbar_desc(ap, MV_PRIMARY_BAR, offset, "port");
2714 for (hc = 0; hc < n_hc; hc++) {
2715 void __iomem *hc_mmio = mv_hc_base(mmio, hc);
2717 VPRINTK("HC%i: HC config=0x%08x HC IRQ cause "
2718 "(before clear)=0x%08x\n", hc,
2719 readl(hc_mmio + HC_CFG_OFS),
2720 readl(hc_mmio + HC_IRQ_CAUSE_OFS));
2722 /* Clear any currently outstanding hc interrupt conditions */
2723 writelfl(0, hc_mmio + HC_IRQ_CAUSE_OFS);
2726 if (HAS_PCI(host)) {
2727 /* Clear any currently outstanding host interrupt conditions */
2728 writelfl(0, mmio + hpriv->irq_cause_ofs);
2730 /* and unmask interrupt generation for host regs */
2731 writelfl(hpriv->unmask_all_irqs, mmio + hpriv->irq_mask_ofs);
2732 if (IS_GEN_I(hpriv))
2733 writelfl(~HC_MAIN_MASKED_IRQS_5,
2734 hpriv->main_mask_reg_addr);
2736 writelfl(~HC_MAIN_MASKED_IRQS,
2737 hpriv->main_mask_reg_addr);
2739 VPRINTK("HC MAIN IRQ cause/mask=0x%08x/0x%08x "
2740 "PCI int cause/mask=0x%08x/0x%08x\n",
2741 readl(hpriv->main_cause_reg_addr),
2742 readl(hpriv->main_mask_reg_addr),
2743 readl(mmio + hpriv->irq_cause_ofs),
2744 readl(mmio + hpriv->irq_mask_ofs));
2746 writelfl(~HC_MAIN_MASKED_IRQS_SOC,
2747 hpriv->main_mask_reg_addr);
2748 VPRINTK("HC MAIN IRQ cause/mask=0x%08x/0x%08x\n",
2749 readl(hpriv->main_cause_reg_addr),
2750 readl(hpriv->main_mask_reg_addr));
2756 static int mv_create_dma_pools(struct mv_host_priv *hpriv, struct device *dev)
2758 hpriv->crqb_pool = dmam_pool_create("crqb_q", dev, MV_CRQB_Q_SZ,
2760 if (!hpriv->crqb_pool)
2763 hpriv->crpb_pool = dmam_pool_create("crpb_q", dev, MV_CRPB_Q_SZ,
2765 if (!hpriv->crpb_pool)
2768 hpriv->sg_tbl_pool = dmam_pool_create("sg_tbl", dev, MV_SG_TBL_SZ,
2770 if (!hpriv->sg_tbl_pool)
2776 static void mv_conf_mbus_windows(struct mv_host_priv *hpriv,
2777 struct mbus_dram_target_info *dram)
2781 for (i = 0; i < 4; i++) {
2782 writel(0, hpriv->base + WINDOW_CTRL(i));
2783 writel(0, hpriv->base + WINDOW_BASE(i));
2786 for (i = 0; i < dram->num_cs; i++) {
2787 struct mbus_dram_window *cs = dram->cs + i;
2789 writel(((cs->size - 1) & 0xffff0000) |
2790 (cs->mbus_attr << 8) |
2791 (dram->mbus_dram_target_id << 4) | 1,
2792 hpriv->base + WINDOW_CTRL(i));
2793 writel(cs->base, hpriv->base + WINDOW_BASE(i));
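/*
 * As written above, each WINDOW_CTRL value packs the window size
 * (bits 31:16), the mbus attribute (bits 15:8), the DRAM target id
 * (bits 7:4) and an enable bit (bit 0), while WINDOW_BASE holds the
 * window's base address (field layout inferred from the writes above).
 */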
2798 * mv_platform_probe - handle a positive probe of an SoC Marvell
2800 * @pdev: platform device found
2803 * Inherited from caller.
2805 static int mv_platform_probe(struct platform_device *pdev)
2807 static int printed_version;
2808 const struct mv_sata_platform_data *mv_platform_data;
2809 const struct ata_port_info *ppi[] =
2810 { &mv_port_info[chip_soc], NULL };
2811 struct ata_host *host;
2812 struct mv_host_priv *hpriv;
2813 struct resource *res;
2816 if (!printed_version++)
2817 dev_printk(KERN_INFO, &pdev->dev, "version " DRV_VERSION "\n");
2820 * Simple resource validation ..
2822 if (unlikely(pdev->num_resources != 2)) {
2823 dev_err(&pdev->dev, "invalid number of resources\n");
2828 * Get the register base first
2830 res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
2835 mv_platform_data = pdev->dev.platform_data;
2836 n_ports = mv_platform_data->n_ports;
2838 host = ata_host_alloc_pinfo(&pdev->dev, ppi, n_ports);
2839 hpriv = devm_kzalloc(&pdev->dev, sizeof(*hpriv), GFP_KERNEL);
2841 if (!host || !hpriv)
2843 host->private_data = hpriv;
2844 hpriv->n_ports = n_ports;
2847 hpriv->base = devm_ioremap(&pdev->dev, res->start,
2848 res->end - res->start + 1);
2849 hpriv->base -= MV_SATAHC0_REG_BASE;
2852 * (Re-)program MBUS remapping windows if we are asked to.
2854 if (mv_platform_data->dram != NULL)
2855 mv_conf_mbus_windows(hpriv, mv_platform_data->dram);
2857 rc = mv_create_dma_pools(hpriv, &pdev->dev);
2861 /* initialize adapter */
2862 rc = mv_init_host(host, chip_soc);
2866 dev_printk(KERN_INFO, &pdev->dev,
2867 "slots %u ports %d\n", (unsigned)MV_MAX_Q_DEPTH,
2870 return ata_host_activate(host, platform_get_irq(pdev, 0), mv_interrupt,
2871 IRQF_SHARED, &mv6_sht);
2876 * mv_platform_remove - unplug a platform interface
2877 * @pdev: platform device
2879 * A platform bus SATA device has been unplugged. Perform the needed
2880 * cleanup. Also called on module unload for any active devices.
2882 static int __devexit mv_platform_remove(struct platform_device *pdev)
2884 struct device *dev = &pdev->dev;
2885 struct ata_host *host = dev_get_drvdata(dev);
2887 ata_host_detach(host);
2891 static struct platform_driver mv_platform_driver = {
2892 .probe = mv_platform_probe,
2893 .remove = __devexit_p(mv_platform_remove),
2896 .owner = THIS_MODULE,
2902 static int mv_pci_init_one(struct pci_dev *pdev,
2903 const struct pci_device_id *ent);
2906 static struct pci_driver mv_pci_driver = {
2908 .id_table = mv_pci_tbl,
2909 .probe = mv_pci_init_one,
2910 .remove = ata_pci_remove_one,
2916 static int msi; /* Use PCI msi; either zero (off, default) or non-zero */
2919 /* move to PCI layer or libata core? */
2920 static int pci_go_64(struct pci_dev *pdev)
2924 if (!pci_set_dma_mask(pdev, DMA_64BIT_MASK)) {
2925 rc = pci_set_consistent_dma_mask(pdev, DMA_64BIT_MASK);
2927 rc = pci_set_consistent_dma_mask(pdev, DMA_32BIT_MASK);
2929 dev_printk(KERN_ERR, &pdev->dev,
2930 "64-bit DMA enable failed\n");
2935 rc = pci_set_dma_mask(pdev, DMA_32BIT_MASK);
2937 dev_printk(KERN_ERR, &pdev->dev,
2938 "32-bit DMA enable failed\n");
2941 rc = pci_set_consistent_dma_mask(pdev, DMA_32BIT_MASK);
2943 dev_printk(KERN_ERR, &pdev->dev,
2944 "32-bit consistent DMA enable failed\n");
2953 * mv_print_info - Dump key info to kernel log for perusal.
2954 * @host: ATA host to print info about
2956 * FIXME: complete this.
2959 * Inherited from caller.
2961 static void mv_print_info(struct ata_host *host)
2963 struct pci_dev *pdev = to_pci_dev(host->dev);
2964 struct mv_host_priv *hpriv = host->private_data;
2966 const char *scc_s, *gen;
2968 /* Use this to determine the HW stepping of the chip so we know
2969 * what errata to work around
2971 pci_read_config_byte(pdev, PCI_CLASS_DEVICE, &scc);
2974 else if (scc == 0x01)
2979 if (IS_GEN_I(hpriv))
2981 else if (IS_GEN_II(hpriv))
2983 else if (IS_GEN_IIE(hpriv))
2988 dev_printk(KERN_INFO, &pdev->dev,
2989 "Gen-%s %u slots %u ports %s mode IRQ via %s\n",
2990 gen, (unsigned)MV_MAX_Q_DEPTH, host->n_ports,
2991 scc_s, (MV_HP_FLAG_MSI & hpriv->hp_flags) ? "MSI" : "INTx");
2995 * mv_pci_init_one - handle a positive probe of a PCI Marvell host
2996 * @pdev: PCI device found
2997 * @ent: PCI device ID entry for the matched host
3000 * Inherited from caller.
3002 static int mv_pci_init_one(struct pci_dev *pdev,
3003 const struct pci_device_id *ent)
3005 static int printed_version;
3006 unsigned int board_idx = (unsigned int)ent->driver_data;
3007 const struct ata_port_info *ppi[] = { &mv_port_info[board_idx], NULL };
3008 struct ata_host *host;
3009 struct mv_host_priv *hpriv;
3012 if (!printed_version++)
3013 dev_printk(KERN_INFO, &pdev->dev, "version " DRV_VERSION "\n");
3016 n_ports = mv_get_hc_count(ppi[0]->flags) * MV_PORTS_PER_HC;
3018 host = ata_host_alloc_pinfo(&pdev->dev, ppi, n_ports);
3019 hpriv = devm_kzalloc(&pdev->dev, sizeof(*hpriv), GFP_KERNEL);
3020 if (!host || !hpriv)
3022 host->private_data = hpriv;
3023 hpriv->n_ports = n_ports;
3025 /* acquire resources */
3026 rc = pcim_enable_device(pdev);
3030 rc = pcim_iomap_regions(pdev, 1 << MV_PRIMARY_BAR, DRV_NAME);
3032 pcim_pin_device(pdev);
3035 host->iomap = pcim_iomap_table(pdev);
3036 hpriv->base = host->iomap[MV_PRIMARY_BAR];
3038 rc = pci_go_64(pdev);
3042 rc = mv_create_dma_pools(hpriv, &pdev->dev);
3046 /* initialize adapter */
3047 rc = mv_init_host(host, board_idx);
3051 /* Enable interrupts */
3052 if (msi && pci_enable_msi(pdev))
3055 mv_dump_pci_cfg(pdev, 0x68);
3056 mv_print_info(host);
3058 pci_set_master(pdev);
3059 pci_try_set_mwi(pdev);
3060 return ata_host_activate(host, pdev->irq, mv_interrupt, IRQF_SHARED,
3061 IS_GEN_I(hpriv) ? &mv5_sht : &mv6_sht);
3065 static int mv_platform_probe(struct platform_device *pdev);
3066 static int __devexit mv_platform_remove(struct platform_device *pdev);
3068 static int __init mv_init(void)
3072 rc = pci_register_driver(&mv_pci_driver);
3076 rc = platform_driver_register(&mv_platform_driver);
3080 pci_unregister_driver(&mv_pci_driver);
3085 static void __exit mv_exit(void)
3088 pci_unregister_driver(&mv_pci_driver);
3090 platform_driver_unregister(&mv_platform_driver);
3093 MODULE_AUTHOR("Brett Russ");
3094 MODULE_DESCRIPTION("SCSI low-level driver for Marvell SATA controllers");
3095 MODULE_LICENSE("GPL");
3096 MODULE_DEVICE_TABLE(pci, mv_pci_tbl);
3097 MODULE_VERSION(DRV_VERSION);
3098 MODULE_ALIAS("platform:" DRV_NAME);
3101 module_param(msi, int, 0444);
3102 MODULE_PARM_DESC(msi, "Enable use of PCI MSI (0=off, 1=on)");
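/*
 * Example (illustrative): MSI can be requested at load time with
 * "modprobe sata_mv msi=1", or via "sata_mv.msi=1" on the kernel
 * command line when the driver is built in.
 */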
3105 module_init(mv_init);
3106 module_exit(mv_exit);