/*
 * sata_mv.c - Marvell SATA support
 *
 * Copyright 2005: EMC Corporation, all rights reserved.
 * Copyright 2005 Red Hat, Inc.  All rights reserved.
 *
 * Please ALWAYS copy linux-ide@vger.kernel.org on emails.
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License as published by
 * the Free Software Foundation; version 2 of the License.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program; if not, write to the Free Software
 * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA  02111-1307  USA
 */

/*
  sata_mv TODO list:

  1) Needs a full errata audit for all chipsets.  I implemented most
  of the errata workarounds found in the Marvell vendor driver, but
  I distinctly remember a couple of workarounds (one related to PCI-X)
  are still needed.

  2) Convert to LibATA new EH.  Required for hotplug, NCQ, and sane
  probing/error handling in general.  MUST HAVE.

  3) Add hotplug support (easy, once new-EH support appears)

  4) Add NCQ support (easy to intermediate, once new-EH support appears)

  5) Investigate problems with PCI Message Signalled Interrupts (MSI).

  6) Add port multiplier support (intermediate)

  7) Test and verify 3.0 Gbps support

  8) Develop a low-power-consumption strategy, and implement it.

  9) [Experiment, low priority] See if ATAPI can be supported using
  "unknown FIS" or "vendor-specific FIS" support, or something creative.

  10) [Experiment, low priority] Investigate interrupt coalescing.
  Quite often, especially with PCI Message Signalled Interrupts (MSI),
  the overhead reduced by interrupt mitigation is not worth the
  added latency cost.

  11) [Experiment, Marvell value added] Is it possible to use target
  mode to cross-connect two Linux boxes with Marvell cards?  If so,
  creating LibATA target mode support would be very interesting.

  Target mode, for those without docs, is the ability to directly
  connect two SATA controllers.

  12) Verify that the 7042 is fully supported.  I only have a 6042.
*/

#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/pci.h>
#include <linux/init.h>
#include <linux/blkdev.h>
#include <linux/delay.h>
#include <linux/interrupt.h>
#include <linux/dma-mapping.h>
#include <linux/device.h>
#include <scsi/scsi_host.h>
#include <scsi/scsi_cmnd.h>
#include <linux/libata.h>

#define DRV_NAME        "sata_mv"
#define DRV_VERSION     "0.81"

enum {
        /* BARs are enumerated in terms of pci_resource_start() */
        MV_PRIMARY_BAR          = 0,    /* offset 0x10: memory space */
        MV_IO_BAR               = 2,    /* offset 0x18: IO space */
        MV_MISC_BAR             = 3,    /* offset 0x1c: FLASH, NVRAM, SRAM */

        MV_MAJOR_REG_AREA_SZ    = 0x10000,      /* 64KB */
        MV_MINOR_REG_AREA_SZ    = 0x2000,       /* 8KB */

        MV_PCI_REG_BASE         = 0,
        MV_IRQ_COAL_REG_BASE    = 0x18000,      /* 6xxx part only */
        MV_IRQ_COAL_CAUSE               = (MV_IRQ_COAL_REG_BASE + 0x08),
        MV_IRQ_COAL_CAUSE_LO            = (MV_IRQ_COAL_REG_BASE + 0x88),
        MV_IRQ_COAL_CAUSE_HI            = (MV_IRQ_COAL_REG_BASE + 0x8c),
        MV_IRQ_COAL_THRESHOLD          = (MV_IRQ_COAL_REG_BASE + 0xcc),
        MV_IRQ_COAL_TIME_THRESHOLD     = (MV_IRQ_COAL_REG_BASE + 0xd0),

        MV_SATAHC0_REG_BASE     = 0x20000,
        MV_FLASH_CTL            = 0x1046c,
        MV_GPIO_PORT_CTL        = 0x104f0,
        MV_RESET_CFG            = 0x180d8,

        MV_PCI_REG_SZ           = MV_MAJOR_REG_AREA_SZ,
        MV_SATAHC_REG_SZ        = MV_MAJOR_REG_AREA_SZ,
        MV_SATAHC_ARBTR_REG_SZ  = MV_MINOR_REG_AREA_SZ,         /* arbiter */
        MV_PORT_REG_SZ          = MV_MINOR_REG_AREA_SZ,

        MV_USE_Q_DEPTH          = ATA_DEF_QUEUE,

        MV_MAX_Q_DEPTH          = 32,
        MV_MAX_Q_DEPTH_MASK     = MV_MAX_Q_DEPTH - 1,

        /* CRQB needs alignment on a 1KB boundary. Size == 1KB
         * CRPB needs alignment on a 256B boundary. Size == 256B
         * SG count of 176 leads to MV_PORT_PRIV_DMA_SZ == 4KB
         * ePRD (SG) entries need alignment on a 16B boundary. Size == 16B
         */
        MV_CRQB_Q_SZ            = (32 * MV_MAX_Q_DEPTH),
        MV_CRPB_Q_SZ            = (8 * MV_MAX_Q_DEPTH),
        MV_MAX_SG_CT            = 176,
        MV_SG_TBL_SZ            = (16 * MV_MAX_SG_CT),
        MV_PORT_PRIV_DMA_SZ     = (MV_CRQB_Q_SZ + MV_CRPB_Q_SZ + MV_SG_TBL_SZ),
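
        /* Layout check (illustrative): with MV_MAX_Q_DEPTH == 32 and
         * MV_MAX_SG_CT == 176, the per-port DMA chunk works out to
         * (32 * 32) + (8 * 32) + (16 * 176) = 1024 + 256 + 2816 = 4096
         * bytes, i.e. exactly the 4KB claimed in the comment above.
         */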

        MV_PORTS_PER_HC         = 4,
        /* == (port / MV_PORTS_PER_HC) to determine HC from 0-7 port */
        MV_PORT_HC_SHIFT        = 2,
        /* == (port % MV_PORTS_PER_HC) to determine hard port from 0-7 port */
        MV_PORT_MASK            = 3,
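
        /* Worked example (illustrative): port 6 lives on HC 6 >> 2 == 1
         * as hard port 6 & 3 == 2; see mv_hc_from_port() and
         * mv_hardport_from_port() below.
         */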

        /* Host Flags */
        MV_FLAG_DUAL_HC         = (1 << 30),  /* two SATA Host Controllers */
        MV_FLAG_IRQ_COALESCE    = (1 << 29),  /* IRQ coalescing capability */
        MV_COMMON_FLAGS         = (ATA_FLAG_SATA | ATA_FLAG_NO_LEGACY |
                                   ATA_FLAG_SATA_RESET | ATA_FLAG_MMIO |
                                   ATA_FLAG_NO_ATAPI | ATA_FLAG_PIO_POLLING),
        MV_6XXX_FLAGS           = MV_FLAG_IRQ_COALESCE,

        CRQB_FLAG_READ          = (1 << 0),
        CRQB_TAG_SHIFT          = 1,
        CRQB_CMD_ADDR_SHIFT     = 8,
        CRQB_CMD_CS             = (0x2 << 11),
        CRQB_CMD_LAST           = (1 << 15),

        CRPB_FLAG_STATUS_SHIFT  = 8,

        EPRD_FLAG_END_OF_TBL    = (1 << 31),

        /* PCI interface registers */

        PCI_COMMAND_OFS         = 0xc00,

        PCI_MAIN_CMD_STS_OFS    = 0xd30,
        STOP_PCI_MASTER         = (1 << 2),
        PCI_MASTER_EMPTY        = (1 << 3),
        GLOB_SFT_RST            = (1 << 4),

        MV_PCI_MODE             = 0xd00,
        MV_PCI_EXP_ROM_BAR_CTL  = 0xd2c,
        MV_PCI_DISC_TIMER       = 0xd04,
        MV_PCI_MSI_TRIGGER      = 0xc38,
        MV_PCI_SERR_MASK        = 0xc28,
        MV_PCI_XBAR_TMOUT       = 0x1d04,
        MV_PCI_ERR_LOW_ADDRESS  = 0x1d40,
        MV_PCI_ERR_HIGH_ADDRESS = 0x1d44,
        MV_PCI_ERR_ATTRIBUTE    = 0x1d48,
        MV_PCI_ERR_COMMAND      = 0x1d50,

        PCI_IRQ_CAUSE_OFS       = 0x1d58,
        PCI_IRQ_MASK_OFS        = 0x1d5c,
        PCI_UNMASK_ALL_IRQS     = 0x7fffff,     /* bits 22-0 */

        HC_MAIN_IRQ_CAUSE_OFS   = 0x1d60,
        HC_MAIN_IRQ_MASK_OFS    = 0x1d64,
        PORT0_ERR               = (1 << 0),     /* shift by port # */
        PORT0_DONE              = (1 << 1),     /* shift by port # */
        HC0_IRQ_PEND            = 0x1ff,        /* bits 0-8 = HC0's ports */
        HC_SHIFT                = 9,            /* bits 9-17 = HC1's ports */
        PCI_ERR                 = (1 << 18),
        TRAN_LO_DONE            = (1 << 19),    /* 6xxx: IRQ coalescing */
        TRAN_HI_DONE            = (1 << 20),    /* 6xxx: IRQ coalescing */
        PORTS_0_3_COAL_DONE     = (1 << 8),
        PORTS_4_7_COAL_DONE     = (1 << 17),
        PORTS_0_7_COAL_DONE     = (1 << 21),    /* 6xxx: IRQ coalescing */
        GPIO_INT                = (1 << 22),
        SELF_INT                = (1 << 23),
        TWSI_INT                = (1 << 24),
        HC_MAIN_RSVD            = (0x7f << 25), /* bits 31-25 */
        HC_MAIN_RSVD_5          = (0x1fff << 19), /* bits 31-19 */
        HC_MAIN_MASKED_IRQS     = (TRAN_LO_DONE | TRAN_HI_DONE |
                                   PORTS_0_7_COAL_DONE | GPIO_INT | TWSI_INT |
                                   HC_MAIN_RSVD),
        HC_MAIN_MASKED_IRQS_5   = (PORTS_0_3_COAL_DONE | PORTS_4_7_COAL_DONE |
                                   HC_MAIN_RSVD_5),

        /* SATAHC registers */
        HC_CFG_OFS              = 0,

        HC_IRQ_CAUSE_OFS        = 0x14,
        CRPB_DMA_DONE           = (1 << 0),     /* shift by port # */
        HC_IRQ_COAL             = (1 << 4),     /* IRQ coalescing */
        DEV_IRQ                 = (1 << 8),     /* shift by port # */

        /* Shadow block registers */
        SHD_BLK_OFS             = 0x100,
        SHD_CTL_AST_OFS         = 0x20,         /* ofs from SHD_BLK_OFS */

        /* SATA registers */
        SATA_STATUS_OFS         = 0x300,  /* ctrl, err regs follow status */
        SATA_ACTIVE_OFS         = 0x350,
        PHY_MODE3               = 0x310,
        PHY_MODE4               = 0x314,
        PHY_MODE2               = 0x330,
        MV5_PHY_MODE            = 0x74,
        MV5_LT_MODE             = 0x30,
        MV5_PHY_CTL             = 0x0C,
        SATA_INTERFACE_CTL      = 0x050,

        MV_M2_PREAMP_MASK       = 0x7e0,

        /* Port registers */
        EDMA_CFG_OFS            = 0,
        EDMA_CFG_Q_DEPTH        = 0,            /* queueing disabled */
        EDMA_CFG_NCQ            = (1 << 5),
        EDMA_CFG_NCQ_GO_ON_ERR  = (1 << 14),    /* continue on error */
        EDMA_CFG_RD_BRST_EXT    = (1 << 11),    /* read burst 512B */
        EDMA_CFG_WR_BUFF_LEN    = (1 << 13),    /* write buffer 512B */

        EDMA_ERR_IRQ_CAUSE_OFS  = 0x8,
        EDMA_ERR_IRQ_MASK_OFS   = 0xc,
        EDMA_ERR_D_PAR          = (1 << 0),
        EDMA_ERR_PRD_PAR        = (1 << 1),
        EDMA_ERR_DEV            = (1 << 2),
        EDMA_ERR_DEV_DCON       = (1 << 3),
        EDMA_ERR_DEV_CON        = (1 << 4),
        EDMA_ERR_SERR           = (1 << 5),
        EDMA_ERR_SELF_DIS       = (1 << 7),
        EDMA_ERR_BIST_ASYNC     = (1 << 8),
        EDMA_ERR_CRBQ_PAR       = (1 << 9),
        EDMA_ERR_CRPB_PAR       = (1 << 10),
        EDMA_ERR_INTRL_PAR      = (1 << 11),
        EDMA_ERR_IORDY          = (1 << 12),
        EDMA_ERR_LNK_CTRL_RX    = (0xf << 13),
        EDMA_ERR_LNK_CTRL_RX_2  = (1 << 15),
        EDMA_ERR_LNK_DATA_RX    = (0xf << 17),
        EDMA_ERR_LNK_CTRL_TX    = (0x1f << 21),
        EDMA_ERR_LNK_DATA_TX    = (0x1f << 26),
        EDMA_ERR_TRANS_PROTO    = (1 << 31),
        EDMA_ERR_FATAL          = (EDMA_ERR_D_PAR | EDMA_ERR_PRD_PAR |
                                   EDMA_ERR_DEV_DCON | EDMA_ERR_CRBQ_PAR |
                                   EDMA_ERR_CRPB_PAR | EDMA_ERR_INTRL_PAR |
                                   EDMA_ERR_IORDY | EDMA_ERR_LNK_CTRL_RX_2 |
                                   EDMA_ERR_LNK_DATA_RX |
                                   EDMA_ERR_LNK_DATA_TX |
                                   EDMA_ERR_TRANS_PROTO),

        EDMA_REQ_Q_BASE_HI_OFS  = 0x10,
        EDMA_REQ_Q_IN_PTR_OFS   = 0x14,         /* also contains BASE_LO */

        EDMA_REQ_Q_OUT_PTR_OFS  = 0x18,
        EDMA_REQ_Q_PTR_SHIFT    = 5,

        EDMA_RSP_Q_BASE_HI_OFS  = 0x1c,
        EDMA_RSP_Q_IN_PTR_OFS   = 0x20,
        EDMA_RSP_Q_OUT_PTR_OFS  = 0x24,         /* also contains BASE_LO */
        EDMA_RSP_Q_PTR_SHIFT    = 3,

        EDMA_CMD_OFS            = 0x28,
        EDMA_EN                 = (1 << 0),
        EDMA_DS                 = (1 << 1),
        ATA_RST                 = (1 << 2),

        EDMA_IORDY_TMOUT        = 0x34,

        /* Host private flags (hp_flags) */
        MV_HP_FLAG_MSI          = (1 << 0),
        MV_HP_ERRATA_50XXB0     = (1 << 1),
        MV_HP_ERRATA_50XXB2     = (1 << 2),
        MV_HP_ERRATA_60X1B2     = (1 << 3),
        MV_HP_ERRATA_60X1C0     = (1 << 4),
        MV_HP_ERRATA_XX42A0     = (1 << 5),
        MV_HP_50XX              = (1 << 6),
        MV_HP_GEN_IIE           = (1 << 7),

        /* Port private flags (pp_flags) */
        MV_PP_FLAG_EDMA_EN      = (1 << 0),
        MV_PP_FLAG_EDMA_DS_ACT  = (1 << 1),
};

#define IS_50XX(hpriv)  ((hpriv)->hp_flags & MV_HP_50XX)
#define IS_60XX(hpriv)  (((hpriv)->hp_flags & MV_HP_50XX) == 0)
#define IS_GEN_I(hpriv)         IS_50XX(hpriv)
#define IS_GEN_II(hpriv)        IS_60XX(hpriv)
#define IS_GEN_IIE(hpriv)       ((hpriv)->hp_flags & MV_HP_GEN_IIE)

enum {
        MV_DMA_BOUNDARY         = 0xffffffffU,

        EDMA_REQ_Q_BASE_LO_MASK = 0xfffffc00U,

        EDMA_RSP_Q_BASE_LO_MASK = 0xffffff00U,
};

enum chip_type {
        chip_504x,
        chip_508x,
        chip_5080,
        chip_604x,
        chip_608x,
        chip_6042,
        chip_7042,
};
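
/* Mask note (illustrative): the request queue "base lo" mask clears the
 * low 10 bits because the CRQB ring is 1KB (32 slots * 32B) and must be
 * 1KB-aligned; the response mask clears the low 8 bits for the 256B
 * (32 slots * 8B) CRPB ring.  The freed-up low bits of the same register
 * hold the ring index.
 */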

/* Command ReQuest Block: 32B */
struct mv_crqb {
        __le32                  sg_addr;
        __le32                  sg_addr_hi;
        __le16                  ctrl_flags;
        __le16                  ata_cmd[11];
};

struct mv_crqb_iie {
        __le32                  addr;
        __le32                  addr_hi;
        __le32                  flags;
        __le32                  len;
        __le32                  ata_cmd[4];
};

/* Command ResPonse Block: 8B */
struct mv_crpb {
        __le16                  id;
        __le16                  flags;
        __le32                  tmstmp;
};

/* EDMA Physical Region Descriptor (ePRD); A.K.A. SG */
struct mv_sg {
        __le32                  addr;
        __le32                  flags_size;
        __le32                  addr_hi;
        __le32                  reserved;
};

struct mv_port_priv {
        struct mv_crqb          *crqb;
        dma_addr_t              crqb_dma;
        struct mv_crpb          *crpb;
        dma_addr_t              crpb_dma;
        struct mv_sg            *sg_tbl;
        dma_addr_t              sg_tbl_dma;
        u32                     pp_flags;
};

struct mv_port_signal {
        u32                     amps;
        u32                     pre;
};

struct mv_hw_ops {
        void (*phy_errata)(struct mv_host_priv *hpriv, void __iomem *mmio,
                           unsigned int port);
        void (*enable_leds)(struct mv_host_priv *hpriv, void __iomem *mmio);
        void (*read_preamp)(struct mv_host_priv *hpriv, int idx,
                           void __iomem *mmio);
        int (*reset_hc)(struct mv_host_priv *hpriv, void __iomem *mmio,
                        unsigned int n_hc);
        void (*reset_flash)(struct mv_host_priv *hpriv, void __iomem *mmio);
        void (*reset_bus)(struct pci_dev *pdev, void __iomem *mmio);
};

struct mv_host_priv {
        u32                     hp_flags;
        struct mv_port_signal   signal[8];
        const struct mv_hw_ops  *ops;
};

static void mv_irq_clear(struct ata_port *ap);
static u32 mv_scr_read(struct ata_port *ap, unsigned int sc_reg_in);
static void mv_scr_write(struct ata_port *ap, unsigned int sc_reg_in, u32 val);
static u32 mv5_scr_read(struct ata_port *ap, unsigned int sc_reg_in);
static void mv5_scr_write(struct ata_port *ap, unsigned int sc_reg_in, u32 val);
static void mv_phy_reset(struct ata_port *ap);
static void __mv_phy_reset(struct ata_port *ap, int can_sleep);
static int mv_port_start(struct ata_port *ap);
static void mv_port_stop(struct ata_port *ap);
static void mv_qc_prep(struct ata_queued_cmd *qc);
static void mv_qc_prep_iie(struct ata_queued_cmd *qc);
static unsigned int mv_qc_issue(struct ata_queued_cmd *qc);
static void mv_eng_timeout(struct ata_port *ap);
static int mv_init_one(struct pci_dev *pdev, const struct pci_device_id *ent);

static void mv5_phy_errata(struct mv_host_priv *hpriv, void __iomem *mmio,
                           unsigned int port);
static void mv5_enable_leds(struct mv_host_priv *hpriv, void __iomem *mmio);
static void mv5_read_preamp(struct mv_host_priv *hpriv, int idx,
                           void __iomem *mmio);
static int mv5_reset_hc(struct mv_host_priv *hpriv, void __iomem *mmio,
                        unsigned int n_hc);
static void mv5_reset_flash(struct mv_host_priv *hpriv, void __iomem *mmio);
static void mv5_reset_bus(struct pci_dev *pdev, void __iomem *mmio);

static void mv6_phy_errata(struct mv_host_priv *hpriv, void __iomem *mmio,
                           unsigned int port);
static void mv6_enable_leds(struct mv_host_priv *hpriv, void __iomem *mmio);
static void mv6_read_preamp(struct mv_host_priv *hpriv, int idx,
                           void __iomem *mmio);
static int mv6_reset_hc(struct mv_host_priv *hpriv, void __iomem *mmio,
                        unsigned int n_hc);
static void mv6_reset_flash(struct mv_host_priv *hpriv, void __iomem *mmio);
static void mv_reset_pci_bus(struct pci_dev *pdev, void __iomem *mmio);
static void mv_channel_reset(struct mv_host_priv *hpriv, void __iomem *mmio,
                             unsigned int port_no);
static void mv_stop_and_reset(struct ata_port *ap);

static struct scsi_host_template mv_sht = {
        .module                 = THIS_MODULE,
        .name                   = DRV_NAME,
        .ioctl                  = ata_scsi_ioctl,
        .queuecommand           = ata_scsi_queuecmd,
        .can_queue              = MV_USE_Q_DEPTH,
        .this_id                = ATA_SHT_THIS_ID,
        .sg_tablesize           = MV_MAX_SG_CT,
        .cmd_per_lun            = ATA_SHT_CMD_PER_LUN,
        .emulated               = ATA_SHT_EMULATED,
        .use_clustering         = 1,
        .proc_name              = DRV_NAME,
        .dma_boundary           = MV_DMA_BOUNDARY,
        .slave_configure        = ata_scsi_slave_config,
        .slave_destroy          = ata_scsi_slave_destroy,
        .bios_param             = ata_std_bios_param,
};

static const struct ata_port_operations mv5_ops = {
        .port_disable           = ata_port_disable,

        .tf_load                = ata_tf_load,
        .tf_read                = ata_tf_read,
        .check_status           = ata_check_status,
        .exec_command           = ata_exec_command,
        .dev_select             = ata_std_dev_select,

        .phy_reset              = mv_phy_reset,
        .cable_detect           = ata_cable_sata,

        .qc_prep                = mv_qc_prep,
        .qc_issue               = mv_qc_issue,
        .data_xfer              = ata_data_xfer,

        .eng_timeout            = mv_eng_timeout,

        .irq_clear              = mv_irq_clear,
        .irq_on                 = ata_irq_on,
        .irq_ack                = ata_irq_ack,

        .scr_read               = mv5_scr_read,
        .scr_write              = mv5_scr_write,

        .port_start             = mv_port_start,
        .port_stop              = mv_port_stop,
};

static const struct ata_port_operations mv6_ops = {
        .port_disable           = ata_port_disable,

        .tf_load                = ata_tf_load,
        .tf_read                = ata_tf_read,
        .check_status           = ata_check_status,
        .exec_command           = ata_exec_command,
        .dev_select             = ata_std_dev_select,

        .phy_reset              = mv_phy_reset,
        .cable_detect           = ata_cable_sata,

        .qc_prep                = mv_qc_prep,
        .qc_issue               = mv_qc_issue,
        .data_xfer              = ata_data_xfer,

        .eng_timeout            = mv_eng_timeout,

        .irq_clear              = mv_irq_clear,
        .irq_on                 = ata_irq_on,
        .irq_ack                = ata_irq_ack,

        .scr_read               = mv_scr_read,
        .scr_write              = mv_scr_write,

        .port_start             = mv_port_start,
        .port_stop              = mv_port_stop,
};

static const struct ata_port_operations mv_iie_ops = {
        .port_disable           = ata_port_disable,

        .tf_load                = ata_tf_load,
        .tf_read                = ata_tf_read,
        .check_status           = ata_check_status,
        .exec_command           = ata_exec_command,
        .dev_select             = ata_std_dev_select,

        .phy_reset              = mv_phy_reset,
        .cable_detect           = ata_cable_sata,

        .qc_prep                = mv_qc_prep_iie,
        .qc_issue               = mv_qc_issue,
        .data_xfer              = ata_data_xfer,

        .eng_timeout            = mv_eng_timeout,

        .irq_clear              = mv_irq_clear,
        .irq_on                 = ata_irq_on,
        .irq_ack                = ata_irq_ack,

        .scr_read               = mv_scr_read,
        .scr_write              = mv_scr_write,

        .port_start             = mv_port_start,
        .port_stop              = mv_port_stop,
};

static const struct ata_port_info mv_port_info[] = {
        {  /* chip_504x */
                .flags          = MV_COMMON_FLAGS,
                .pio_mask       = 0x1f, /* pio0-4 */
                .udma_mask      = ATA_UDMA6,
                .port_ops       = &mv5_ops,
        },
        {  /* chip_508x */
                .flags          = (MV_COMMON_FLAGS | MV_FLAG_DUAL_HC),
                .pio_mask       = 0x1f, /* pio0-4 */
                .udma_mask      = ATA_UDMA6,
                .port_ops       = &mv5_ops,
        },
        {  /* chip_5080 */
                .flags          = (MV_COMMON_FLAGS | MV_FLAG_DUAL_HC),
                .pio_mask       = 0x1f, /* pio0-4 */
                .udma_mask      = ATA_UDMA6,
                .port_ops       = &mv5_ops,
        },
        {  /* chip_604x */
                .flags          = (MV_COMMON_FLAGS | MV_6XXX_FLAGS),
                .pio_mask       = 0x1f, /* pio0-4 */
                .udma_mask      = ATA_UDMA6,
                .port_ops       = &mv6_ops,
        },
        {  /* chip_608x */
                .flags          = (MV_COMMON_FLAGS | MV_6XXX_FLAGS |
                                   MV_FLAG_DUAL_HC),
                .pio_mask       = 0x1f, /* pio0-4 */
                .udma_mask      = ATA_UDMA6,
                .port_ops       = &mv6_ops,
        },
        {  /* chip_6042 */
                .flags          = (MV_COMMON_FLAGS | MV_6XXX_FLAGS),
                .pio_mask       = 0x1f, /* pio0-4 */
                .udma_mask      = ATA_UDMA6,
                .port_ops       = &mv_iie_ops,
        },
        {  /* chip_7042 */
                .flags          = (MV_COMMON_FLAGS | MV_6XXX_FLAGS),
                .pio_mask       = 0x1f, /* pio0-4 */
                .udma_mask      = ATA_UDMA6,
                .port_ops       = &mv_iie_ops,
        },
};

static const struct pci_device_id mv_pci_tbl[] = {
        { PCI_VDEVICE(MARVELL, 0x5040), chip_504x },
        { PCI_VDEVICE(MARVELL, 0x5041), chip_504x },
        { PCI_VDEVICE(MARVELL, 0x5080), chip_5080 },
        { PCI_VDEVICE(MARVELL, 0x5081), chip_508x },

        { PCI_VDEVICE(MARVELL, 0x6040), chip_604x },
        { PCI_VDEVICE(MARVELL, 0x6041), chip_604x },
        { PCI_VDEVICE(MARVELL, 0x6042), chip_6042 },
        { PCI_VDEVICE(MARVELL, 0x6080), chip_608x },
        { PCI_VDEVICE(MARVELL, 0x6081), chip_608x },

        { PCI_VDEVICE(ADAPTEC2, 0x0241), chip_604x },

        { PCI_VDEVICE(ADAPTEC2, 0x0243), chip_7042 },

        { PCI_VDEVICE(TTI, 0x2310), chip_7042 },

        /* add Marvell 7042 support */
        { PCI_VDEVICE(MARVELL, 0x7042), chip_7042 },

        { }                     /* terminate list */
};

static struct pci_driver mv_pci_driver = {
        .name                   = DRV_NAME,
        .id_table               = mv_pci_tbl,
        .probe                  = mv_init_one,
        .remove                 = ata_pci_remove_one,
};

static const struct mv_hw_ops mv5xxx_ops = {
        .phy_errata             = mv5_phy_errata,
        .enable_leds            = mv5_enable_leds,
        .read_preamp            = mv5_read_preamp,
        .reset_hc               = mv5_reset_hc,
        .reset_flash            = mv5_reset_flash,
        .reset_bus              = mv5_reset_bus,
};

static const struct mv_hw_ops mv6xxx_ops = {
        .phy_errata             = mv6_phy_errata,
        .enable_leds            = mv6_enable_leds,
        .read_preamp            = mv6_read_preamp,
        .reset_hc               = mv6_reset_hc,
        .reset_flash            = mv6_reset_flash,
        .reset_bus              = mv_reset_pci_bus,
};

static int msi;       /* Use PCI msi; either zero (off, default) or non-zero */


/* move to PCI layer or libata core? */
static int pci_go_64(struct pci_dev *pdev)
{
        int rc;

        if (!pci_set_dma_mask(pdev, DMA_64BIT_MASK)) {
                rc = pci_set_consistent_dma_mask(pdev, DMA_64BIT_MASK);
                if (rc) {
                        rc = pci_set_consistent_dma_mask(pdev, DMA_32BIT_MASK);
                        if (rc) {
                                dev_printk(KERN_ERR, &pdev->dev,
                                           "64-bit DMA enable failed\n");
                                return rc;
                        }
                }
        } else {
                rc = pci_set_dma_mask(pdev, DMA_32BIT_MASK);
                if (rc) {
                        dev_printk(KERN_ERR, &pdev->dev,
                                   "32-bit DMA enable failed\n");
                        return rc;
                }
                rc = pci_set_consistent_dma_mask(pdev, DMA_32BIT_MASK);
                if (rc) {
                        dev_printk(KERN_ERR, &pdev->dev,
                                   "32-bit consistent DMA enable failed\n");
                        return rc;
                }
        }

        return rc;
}

static inline void writelfl(unsigned long data, void __iomem *addr)
{
        writel(data, addr);
        (void) readl(addr);     /* flush to avoid PCI posted write */
}
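
/* Usage note (illustrative): writelfl() is used below wherever a register
 * write must reach the chip before the CPU moves on, e.g.
 *      writelfl(EDMA_EN, base + EDMA_CMD_OFS);
 * The dummy readl() forces the posted write out of the PCI bridge.
 */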

static inline void __iomem *mv_hc_base(void __iomem *base, unsigned int hc)
{
        return (base + MV_SATAHC0_REG_BASE + (hc * MV_SATAHC_REG_SZ));
}

static inline unsigned int mv_hc_from_port(unsigned int port)
{
        return port >> MV_PORT_HC_SHIFT;
}

static inline unsigned int mv_hardport_from_port(unsigned int port)
{
        return port & MV_PORT_MASK;
}

static inline void __iomem *mv_hc_base_from_port(void __iomem *base,
                                                 unsigned int port)
{
        return mv_hc_base(base, mv_hc_from_port(port));
}

static inline void __iomem *mv_port_base(void __iomem *base, unsigned int port)
{
        return mv_hc_base_from_port(base, port) +
                MV_SATAHC_ARBTR_REG_SZ +
                (mv_hardport_from_port(port) * MV_PORT_REG_SZ);
}
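
/* Address math example (illustrative): for port 5, mv_port_base() yields
 *      base + 0x20000 + (1 * 0x10000)  (HC1's register block)
 *           + 0x2000                   (skip the arbiter)
 *           + (1 * 0x2000)             (hard port 1 within HC1)
 * == base + 0x34000.
 */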

static inline void __iomem *mv_ap_base(struct ata_port *ap)
{
        return mv_port_base(ap->host->iomap[MV_PRIMARY_BAR], ap->port_no);
}

static inline int mv_get_hc_count(unsigned long port_flags)
{
        return ((port_flags & MV_FLAG_DUAL_HC) ? 2 : 1);
}

static void mv_irq_clear(struct ata_port *ap)
{
}

/**
 *      mv_start_dma - Enable eDMA engine
 *      @base: port base address
 *      @pp: port private data
 *
 *      Verify the local cache of the eDMA state is accurate with a
 *      WARN_ON.
 *
 *      LOCKING:
 *      Inherited from caller.
 */
static void mv_start_dma(void __iomem *base, struct mv_port_priv *pp)
{
        if (!(MV_PP_FLAG_EDMA_EN & pp->pp_flags)) {
                writelfl(EDMA_EN, base + EDMA_CMD_OFS);
                pp->pp_flags |= MV_PP_FLAG_EDMA_EN;
        }
        WARN_ON(!(EDMA_EN & readl(base + EDMA_CMD_OFS)));
}

/**
 *      mv_stop_dma - Disable eDMA engine
 *      @ap: ATA channel to manipulate
 *
 *      Verify the local cache of the eDMA state is accurate with a
 *      WARN_ON.
 *
 *      LOCKING:
 *      Inherited from caller.
 */
static void mv_stop_dma(struct ata_port *ap)
{
        void __iomem *port_mmio = mv_ap_base(ap);
        struct mv_port_priv *pp = ap->private_data;
        u32 reg;
        int i;

        if (MV_PP_FLAG_EDMA_EN & pp->pp_flags) {
                /* Disable EDMA if active.  The disable bit auto clears.
                 */
                writelfl(EDMA_DS, port_mmio + EDMA_CMD_OFS);
                pp->pp_flags &= ~MV_PP_FLAG_EDMA_EN;
        } else {
                WARN_ON(EDMA_EN & readl(port_mmio + EDMA_CMD_OFS));
        }

        /* now properly wait for the eDMA to stop */
        for (i = 1000; i > 0; i--) {
                reg = readl(port_mmio + EDMA_CMD_OFS);
                if (!(EDMA_EN & reg)) {
                        break;
                }
                udelay(100);
        }

        if (EDMA_EN & reg) {
                ata_port_printk(ap, KERN_ERR, "Unable to stop eDMA\n");
                /* FIXME: Consider doing a reset here to recover */
        }
}

#ifdef ATA_DEBUG
static void mv_dump_mem(void __iomem *start, unsigned bytes)
{
        int b, w;
        for (b = 0; b < bytes; ) {
                DPRINTK("%p: ", start + b);
                for (w = 0; b < bytes && w < 4; w++) {
                        printk("%08x ", readl(start + b));
                        b += sizeof(u32);
                }
                printk("\n");
        }
}
#endif

static void mv_dump_pci_cfg(struct pci_dev *pdev, unsigned bytes)
{
        int b, w;
        u32 dw;
        for (b = 0; b < bytes; ) {
                DPRINTK("%02x: ", b);
                for (w = 0; b < bytes && w < 4; w++) {
                        (void) pci_read_config_dword(pdev, b, &dw);
                        printk("%08x ", dw);
                        b += sizeof(u32);
                }
                printk("\n");
        }
}

static void mv_dump_all_regs(void __iomem *mmio_base, int port,
                             struct pci_dev *pdev)
{
#ifdef ATA_DEBUG
        void __iomem *hc_base = mv_hc_base(mmio_base,
                                           port >> MV_PORT_HC_SHIFT);
        void __iomem *port_base;
        int start_port, num_ports, p, start_hc, num_hcs, hc;

        if (port < 0) {
                start_hc = start_port = 0;
                num_ports = 8;          /* should be benign for 4-port devs */
                num_hcs = 2;
        } else {
                start_hc = port >> MV_PORT_HC_SHIFT;
                start_port = port;
                num_ports = num_hcs = 1;
        }
        DPRINTK("All registers for port(s) %u-%u:\n", start_port,
                num_ports > 1 ? num_ports - 1 : start_port);

        if (NULL != pdev) {
                DPRINTK("PCI config space regs:\n");
                mv_dump_pci_cfg(pdev, 0x68);
        }
        DPRINTK("PCI regs:\n");
        mv_dump_mem(mmio_base + 0xc00, 0x3c);
        mv_dump_mem(mmio_base + 0xd00, 0x34);
        mv_dump_mem(mmio_base + 0xf00, 0x4);
        mv_dump_mem(mmio_base + 0x1d00, 0x6c);
        for (hc = start_hc; hc < start_hc + num_hcs; hc++) {
                hc_base = mv_hc_base(mmio_base, hc);
                DPRINTK("HC regs (HC %i):\n", hc);
                mv_dump_mem(hc_base, 0x1c);
        }
        for (p = start_port; p < start_port + num_ports; p++) {
                port_base = mv_port_base(mmio_base, p);
                DPRINTK("EDMA regs (port %i):\n", p);
                mv_dump_mem(port_base, 0x54);
                DPRINTK("SATA regs (port %i):\n", p);
                mv_dump_mem(port_base + 0x300, 0x60);
        }
#endif
}

static unsigned int mv_scr_offset(unsigned int sc_reg_in)
{
        unsigned int ofs;

        switch (sc_reg_in) {
        case SCR_STATUS:
        case SCR_CONTROL:
        case SCR_ERROR:
                ofs = SATA_STATUS_OFS + (sc_reg_in * sizeof(u32));
                break;
        case SCR_ACTIVE:
                ofs = SATA_ACTIVE_OFS;   /* active is not with the others */
                break;
        default:
                ofs = 0xffffffffU;
                break;
        }
        return ofs;
}

static u32 mv_scr_read(struct ata_port *ap, unsigned int sc_reg_in)
{
        unsigned int ofs = mv_scr_offset(sc_reg_in);

        if (0xffffffffU != ofs)
                return readl(mv_ap_base(ap) + ofs);
        else
                return (u32) ofs;
}

static void mv_scr_write(struct ata_port *ap, unsigned int sc_reg_in, u32 val)
{
        unsigned int ofs = mv_scr_offset(sc_reg_in);

        if (0xffffffffU != ofs)
                writelfl(val, mv_ap_base(ap) + ofs);
}

static void mv_edma_cfg(struct mv_host_priv *hpriv, void __iomem *port_mmio)
{
        u32 cfg = readl(port_mmio + EDMA_CFG_OFS);

        /* set up non-NCQ EDMA configuration */
        cfg &= ~(1 << 9);       /* disable equeue */

        if (IS_GEN_I(hpriv)) {
                cfg &= ~0x1f;           /* clear queue depth */
                cfg |= (1 << 8);        /* enab config burst size mask */
        }

        else if (IS_GEN_II(hpriv)) {
                cfg &= ~0x1f;           /* clear queue depth */
                cfg |= EDMA_CFG_RD_BRST_EXT | EDMA_CFG_WR_BUFF_LEN;
                cfg &= ~(EDMA_CFG_NCQ | EDMA_CFG_NCQ_GO_ON_ERR); /* clear NCQ */
        }

        else if (IS_GEN_IIE(hpriv)) {
                cfg |= (1 << 23);       /* do not mask PM field in rx'd FIS */
                cfg |= (1 << 22);       /* enab 4-entry host queue cache */
                cfg &= ~(1 << 19);      /* dis 128-entry queue (for now?) */
                cfg |= (1 << 18);       /* enab early completion */
                cfg |= (1 << 17);       /* enab cut-through (dis stor&forwrd) */
                cfg &= ~(1 << 16);      /* dis FIS-based switching (for now) */
                cfg &= ~(EDMA_CFG_NCQ | EDMA_CFG_NCQ_GO_ON_ERR); /* clear NCQ */
        }

        writelfl(cfg, port_mmio + EDMA_CFG_OFS);
}

/**
 *      mv_port_start - Port specific init/start routine.
 *      @ap: ATA channel to manipulate
 *
 *      Allocate and point to DMA memory, init port private memory,
 *      zero indices.
 *
 *      LOCKING:
 *      Inherited from caller.
 */
static int mv_port_start(struct ata_port *ap)
{
        struct device *dev = ap->host->dev;
        struct mv_host_priv *hpriv = ap->host->private_data;
        struct mv_port_priv *pp;
        void __iomem *port_mmio = mv_ap_base(ap);
        void *mem;
        dma_addr_t mem_dma;
        int rc;

        pp = devm_kzalloc(dev, sizeof(*pp), GFP_KERNEL);
        if (!pp)
                return -ENOMEM;

        mem = dmam_alloc_coherent(dev, MV_PORT_PRIV_DMA_SZ, &mem_dma,
                                  GFP_KERNEL);
        if (!mem)
                return -ENOMEM;
        memset(mem, 0, MV_PORT_PRIV_DMA_SZ);

        rc = ata_pad_alloc(ap, dev);
        if (rc)
                return rc;

        /* First item in chunk of DMA memory:
         * 32-slot command request table (CRQB), 32 bytes each in size
         */
        pp->crqb = mem;
        pp->crqb_dma = mem_dma;
        mem += MV_CRQB_Q_SZ;
        mem_dma += MV_CRQB_Q_SZ;

        /* Second item:
         * 32-slot command response table (CRPB), 8 bytes each in size
         */
        pp->crpb = mem;
        pp->crpb_dma = mem_dma;
        mem += MV_CRPB_Q_SZ;
        mem_dma += MV_CRPB_Q_SZ;

        /* Third item:
         * Table of scatter-gather descriptors (ePRD), 16 bytes each
         */
        pp->sg_tbl = mem;
        pp->sg_tbl_dma = mem_dma;

        mv_edma_cfg(hpriv, port_mmio);

        writel((pp->crqb_dma >> 16) >> 16, port_mmio + EDMA_REQ_Q_BASE_HI_OFS);
        writelfl(pp->crqb_dma & EDMA_REQ_Q_BASE_LO_MASK,
                 port_mmio + EDMA_REQ_Q_IN_PTR_OFS);

        if (hpriv->hp_flags & MV_HP_ERRATA_XX42A0)
                writelfl(pp->crqb_dma & 0xffffffff,
                         port_mmio + EDMA_REQ_Q_OUT_PTR_OFS);
        else
                writelfl(0, port_mmio + EDMA_REQ_Q_OUT_PTR_OFS);

        writel((pp->crpb_dma >> 16) >> 16, port_mmio + EDMA_RSP_Q_BASE_HI_OFS);

        if (hpriv->hp_flags & MV_HP_ERRATA_XX42A0)
                writelfl(pp->crpb_dma & 0xffffffff,
                         port_mmio + EDMA_RSP_Q_IN_PTR_OFS);
        else
                writelfl(0, port_mmio + EDMA_RSP_Q_IN_PTR_OFS);

        writelfl(pp->crpb_dma & EDMA_RSP_Q_BASE_LO_MASK,
                 port_mmio + EDMA_RSP_Q_OUT_PTR_OFS);

        /* Don't turn on EDMA here...do it before DMA commands only.  Else
         * we'll be unable to send non-data, PIO, etc due to restricted access
         * to shadow regs.
         */
        ap->private_data = pp;
        return 0;
}
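
/* Portability note (illustrative): the "(x >> 16) >> 16" expressions above
 * extract the high 32 bits of a dma_addr_t without provoking a shift-count
 * warning on configurations where dma_addr_t is only 32 bits wide; a
 * direct "x >> 32" would be undefined there.
 */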

/**
 *      mv_port_stop - Port specific cleanup/stop routine.
 *      @ap: ATA channel to manipulate
 *
 *      Stop DMA, cleanup port memory.
 *
 *      LOCKING:
 *      This routine uses the host lock to protect the DMA stop.
 */
static void mv_port_stop(struct ata_port *ap)
{
        unsigned long flags;

        spin_lock_irqsave(&ap->host->lock, flags);
        mv_stop_dma(ap);
        spin_unlock_irqrestore(&ap->host->lock, flags);
}

/**
 *      mv_fill_sg - Fill out the Marvell ePRD (scatter gather) entries
 *      @qc: queued command whose SG list to source from
 *
 *      Populate the SG list and mark the last entry.
 *
 *      LOCKING:
 *      Inherited from caller.
 */
static unsigned int mv_fill_sg(struct ata_queued_cmd *qc)
{
        struct mv_port_priv *pp = qc->ap->private_data;
        unsigned int n_sg = 0;
        struct scatterlist *sg;
        struct mv_sg *mv_sg;

        mv_sg = pp->sg_tbl;
        ata_for_each_sg(sg, qc) {
                dma_addr_t addr = sg_dma_address(sg);
                u32 sg_len = sg_dma_len(sg);

                mv_sg->addr = cpu_to_le32(addr & 0xffffffff);
                mv_sg->addr_hi = cpu_to_le32((addr >> 16) >> 16);
                mv_sg->flags_size = cpu_to_le32(sg_len & 0xffff);

                if (ata_sg_is_last(sg, qc))
                        mv_sg->flags_size |= cpu_to_le32(EPRD_FLAG_END_OF_TBL);

                mv_sg++;
                n_sg++;
        }

        return n_sg;
}

static inline unsigned mv_inc_q_index(unsigned index)
{
        return (index + 1) & MV_MAX_Q_DEPTH_MASK;
}
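
/* Ring wrap example (illustrative): with MV_MAX_Q_DEPTH == 32,
 * mv_inc_q_index(31) == 0, so the producer/consumer indices wrap
 * around the 32-slot rings automatically.
 */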

static inline void mv_crqb_pack_cmd(__le16 *cmdw, u8 data, u8 addr, unsigned last)
{
        u16 tmp = data | (addr << CRQB_CMD_ADDR_SHIFT) | CRQB_CMD_CS |
                  (last ? CRQB_CMD_LAST : 0);
        *cmdw = cpu_to_le16(tmp);
}
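
/* Packed command word layout (illustrative), per the constants above:
 *      bits 7:0        register data
 *      bits 10:8       register address (CRQB_CMD_ADDR_SHIFT)
 *      bits 12:11      control select (CRQB_CMD_CS == 0x2 << 11)
 *      bit  15         CRQB_CMD_LAST, set only on the final word
 */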

/**
 *      mv_qc_prep - Host specific command preparation.
 *      @qc: queued command to prepare
 *
 *      This routine simply redirects to the general purpose routine
 *      if command is not DMA.  Else, it handles prep of the CRQB
 *      (command request block), does some sanity checking, and calls
 *      the SG load routine.
 *
 *      LOCKING:
 *      Inherited from caller.
 */
static void mv_qc_prep(struct ata_queued_cmd *qc)
{
        struct ata_port *ap = qc->ap;
        struct mv_port_priv *pp = ap->private_data;
        __le16 *cw;
        struct ata_taskfile *tf;
        u16 flags = 0;
        unsigned in_index;

        if (ATA_PROT_DMA != qc->tf.protocol)
                return;

        /* Fill in command request block
         */
        if (!(qc->tf.flags & ATA_TFLAG_WRITE))
                flags |= CRQB_FLAG_READ;
        WARN_ON(MV_MAX_Q_DEPTH <= qc->tag);
        flags |= qc->tag << CRQB_TAG_SHIFT;

        /* get current queue index from hardware */
        in_index = (readl(mv_ap_base(ap) + EDMA_REQ_Q_IN_PTR_OFS)
                    >> EDMA_REQ_Q_PTR_SHIFT) & MV_MAX_Q_DEPTH_MASK;

        pp->crqb[in_index].sg_addr =
                cpu_to_le32(pp->sg_tbl_dma & 0xffffffff);
        pp->crqb[in_index].sg_addr_hi =
                cpu_to_le32((pp->sg_tbl_dma >> 16) >> 16);
        pp->crqb[in_index].ctrl_flags = cpu_to_le16(flags);

        cw = &pp->crqb[in_index].ata_cmd[0];
        tf = &qc->tf;

        /* Sadly, the CRQB cannot accommodate all registers--there are
         * only 11 bytes...so we must pick and choose required
         * registers based on the command.  So, we drop feature and
         * hob_feature for [RW] DMA commands, but they are needed for
         * NCQ.  NCQ will drop hob_nsect.
         */
        switch (tf->command) {
        case ATA_CMD_READ:
        case ATA_CMD_READ_EXT:
        case ATA_CMD_WRITE:
        case ATA_CMD_WRITE_EXT:
        case ATA_CMD_WRITE_FUA_EXT:
                mv_crqb_pack_cmd(cw++, tf->hob_nsect, ATA_REG_NSECT, 0);
                break;
#ifdef LIBATA_NCQ               /* FIXME: remove this line when NCQ added */
        case ATA_CMD_FPDMA_READ:
        case ATA_CMD_FPDMA_WRITE:
                mv_crqb_pack_cmd(cw++, tf->hob_feature, ATA_REG_FEATURE, 0);
                mv_crqb_pack_cmd(cw++, tf->feature, ATA_REG_FEATURE, 0);
                break;
#endif                          /* FIXME: remove this line when NCQ added */
        default:
                /* The only other commands EDMA supports in non-queued and
                 * non-NCQ mode are: [RW] STREAM DMA and W DMA FUA EXT, none
                 * of which are defined/used by Linux.  If we get here, this
                 * driver needs work.
                 *
                 * FIXME: modify libata to give qc_prep a return value and
                 * return error here.
                 */
                BUG_ON(tf->command);
                break;
        }
        mv_crqb_pack_cmd(cw++, tf->nsect, ATA_REG_NSECT, 0);
        mv_crqb_pack_cmd(cw++, tf->hob_lbal, ATA_REG_LBAL, 0);
        mv_crqb_pack_cmd(cw++, tf->lbal, ATA_REG_LBAL, 0);
        mv_crqb_pack_cmd(cw++, tf->hob_lbam, ATA_REG_LBAM, 0);
        mv_crqb_pack_cmd(cw++, tf->lbam, ATA_REG_LBAM, 0);
        mv_crqb_pack_cmd(cw++, tf->hob_lbah, ATA_REG_LBAH, 0);
        mv_crqb_pack_cmd(cw++, tf->lbah, ATA_REG_LBAH, 0);
        mv_crqb_pack_cmd(cw++, tf->device, ATA_REG_DEVICE, 0);
        mv_crqb_pack_cmd(cw++, tf->command, ATA_REG_CMD, 1);   /* last */

        if (!(qc->flags & ATA_QCFLAG_DMAMAP))
                return;
        mv_fill_sg(qc);
}

/**
 *      mv_qc_prep_iie - Host specific command preparation.
 *      @qc: queued command to prepare
 *
 *      This routine simply redirects to the general purpose routine
 *      if command is not DMA.  Else, it handles prep of the CRQB
 *      (command request block), does some sanity checking, and calls
 *      the SG load routine.
 *
 *      LOCKING:
 *      Inherited from caller.
 */
static void mv_qc_prep_iie(struct ata_queued_cmd *qc)
{
        struct ata_port *ap = qc->ap;
        struct mv_port_priv *pp = ap->private_data;
        struct mv_crqb_iie *crqb;
        struct ata_taskfile *tf;
        u32 flags = 0;
        unsigned in_index;

        if (ATA_PROT_DMA != qc->tf.protocol)
                return;

        /* Fill in Gen IIE command request block
         */
        if (!(qc->tf.flags & ATA_TFLAG_WRITE))
                flags |= CRQB_FLAG_READ;

        WARN_ON(MV_MAX_Q_DEPTH <= qc->tag);
        flags |= qc->tag << CRQB_TAG_SHIFT;

        /* get current queue index from hardware */
        in_index = (readl(mv_ap_base(ap) + EDMA_REQ_Q_IN_PTR_OFS)
                    >> EDMA_REQ_Q_PTR_SHIFT) & MV_MAX_Q_DEPTH_MASK;

        crqb = (struct mv_crqb_iie *) &pp->crqb[in_index];
        crqb->addr = cpu_to_le32(pp->sg_tbl_dma & 0xffffffff);
        crqb->addr_hi = cpu_to_le32((pp->sg_tbl_dma >> 16) >> 16);
        crqb->flags = cpu_to_le32(flags);

        tf = &qc->tf;
        crqb->ata_cmd[0] = cpu_to_le32(
                        (tf->command << 16) |
                        (tf->feature << 24)
                );
        crqb->ata_cmd[1] = cpu_to_le32(
                        (tf->lbal << 0) |
                        (tf->lbam << 8) |
                        (tf->lbah << 16) |
                        (tf->device << 24)
                );
        crqb->ata_cmd[2] = cpu_to_le32(
                        (tf->hob_lbal << 0) |
                        (tf->hob_lbam << 8) |
                        (tf->hob_lbah << 16) |
                        (tf->hob_feature << 24)
                );
        crqb->ata_cmd[3] = cpu_to_le32(
                        (tf->nsect << 0) |
                        (tf->hob_nsect << 8)
                );

        if (!(qc->flags & ATA_QCFLAG_DMAMAP))
                return;
        mv_fill_sg(qc);
}

/**
 *      mv_qc_issue - Initiate a command to the host
 *      @qc: queued command to start
 *
 *      This routine simply redirects to the general purpose routine
 *      if command is not DMA.  Else, it sanity checks our local
 *      caches of the request producer/consumer indices then enables
 *      DMA and bumps the request producer index.
 *
 *      LOCKING:
 *      Inherited from caller.
 */
static unsigned int mv_qc_issue(struct ata_queued_cmd *qc)
{
        void __iomem *port_mmio = mv_ap_base(qc->ap);
        struct mv_port_priv *pp = qc->ap->private_data;
        unsigned in_index;
        u32 in_ptr;

        if (ATA_PROT_DMA != qc->tf.protocol) {
                /* We're about to send a non-EDMA capable command to the
                 * port.  Turn off EDMA so there won't be problems accessing
                 * shadow block, etc registers.
                 */
                mv_stop_dma(qc->ap);
                return ata_qc_issue_prot(qc);
        }

        in_ptr = readl(port_mmio + EDMA_REQ_Q_IN_PTR_OFS);
        in_index = (in_ptr >> EDMA_REQ_Q_PTR_SHIFT) & MV_MAX_Q_DEPTH_MASK;

        /* until we do queuing, the queue should be empty at this point */
        WARN_ON(in_index != ((readl(port_mmio + EDMA_REQ_Q_OUT_PTR_OFS)
                >> EDMA_REQ_Q_PTR_SHIFT) & MV_MAX_Q_DEPTH_MASK));

        in_index = mv_inc_q_index(in_index);    /* now incr producer index */

        mv_start_dma(port_mmio, pp);

        /* and write the request in pointer to kick the EDMA to life */
        in_ptr &= EDMA_REQ_Q_BASE_LO_MASK;
        in_ptr |= in_index << EDMA_REQ_Q_PTR_SHIFT;
        writelfl(in_ptr, port_mmio + EDMA_REQ_Q_IN_PTR_OFS);

        return 0;
}
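
/* Register layout note (illustrative): EDMA_REQ_Q_IN_PTR_OFS holds both
 * the ring base (bits 31:10, see EDMA_REQ_Q_BASE_LO_MASK) and the
 * producer index (bits 9:5, see EDMA_REQ_Q_PTR_SHIFT), which is why the
 * code above masks and ORs before writing the register back.
 */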

/**
 *      mv_get_crpb_status - get status from most recently completed cmd
 *      @ap: ATA channel to manipulate
 *
 *      This routine is for use when the port is in DMA mode, when it
 *      will be using the CRPB (command response block) method of
 *      returning command completion information.  We check indices
 *      are good, grab status, and bump the response consumer index to
 *      prove that we're up to date.
 *
 *      LOCKING:
 *      Inherited from caller.
 */
static u8 mv_get_crpb_status(struct ata_port *ap)
{
        void __iomem *port_mmio = mv_ap_base(ap);
        struct mv_port_priv *pp = ap->private_data;
        unsigned out_index;
        u32 out_ptr;
        u8 ata_status;

        out_ptr = readl(port_mmio + EDMA_RSP_Q_OUT_PTR_OFS);
        out_index = (out_ptr >> EDMA_RSP_Q_PTR_SHIFT) & MV_MAX_Q_DEPTH_MASK;

        ata_status = le16_to_cpu(pp->crpb[out_index].flags)
                                        >> CRPB_FLAG_STATUS_SHIFT;

        /* increment our consumer index... */
        out_index = mv_inc_q_index(out_index);

        /* and, until we do NCQ, there should only be 1 CRPB waiting */
        WARN_ON(out_index != ((readl(port_mmio + EDMA_RSP_Q_IN_PTR_OFS)
                >> EDMA_RSP_Q_PTR_SHIFT) & MV_MAX_Q_DEPTH_MASK));

        /* write out our inc'd consumer index so EDMA knows we're caught up */
        out_ptr &= EDMA_RSP_Q_BASE_LO_MASK;
        out_ptr |= out_index << EDMA_RSP_Q_PTR_SHIFT;
        writelfl(out_ptr, port_mmio + EDMA_RSP_Q_OUT_PTR_OFS);

        /* Return ATA status register for completed CRPB */
        return ata_status;
}

/**
 *      mv_err_intr - Handle error interrupts on the port
 *      @ap: ATA channel to manipulate
 *      @reset_allowed: bool: 0 == don't trigger from reset here
 *
 *      In most cases, just clear the interrupt and move on.  However,
 *      some cases require an eDMA reset, which is done right before
 *      the COMRESET in mv_phy_reset().  The SERR case requires a
 *      clear of pending errors in the SATA SERROR register.  Finally,
 *      if the port disabled DMA, update our cached copy to match.
 *
 *      LOCKING:
 *      Inherited from caller.
 */
static void mv_err_intr(struct ata_port *ap, int reset_allowed)
{
        void __iomem *port_mmio = mv_ap_base(ap);
        u32 edma_err_cause, serr = 0;

        edma_err_cause = readl(port_mmio + EDMA_ERR_IRQ_CAUSE_OFS);

        if (EDMA_ERR_SERR & edma_err_cause) {
                sata_scr_read(ap, SCR_ERROR, &serr);
                sata_scr_write_flush(ap, SCR_ERROR, serr);
        }
        if (EDMA_ERR_SELF_DIS & edma_err_cause) {
                struct mv_port_priv *pp = ap->private_data;
                pp->pp_flags &= ~MV_PP_FLAG_EDMA_EN;
        }
        DPRINTK(KERN_ERR "ata%u: port error; EDMA err cause: 0x%08x "
                "SERR: 0x%08x\n", ap->print_id, edma_err_cause, serr);

        /* Clear EDMA now that SERR cleanup done */
        writelfl(0, port_mmio + EDMA_ERR_IRQ_CAUSE_OFS);

        /* check for fatal here and recover if needed */
        if (reset_allowed && (EDMA_ERR_FATAL & edma_err_cause))
                mv_stop_and_reset(ap);
}

/**
 *      mv_host_intr - Handle all interrupts on the given host controller
 *      @host: host specific structure
 *      @relevant: port error bits relevant to this host controller
 *      @hc: which host controller we're to look at
 *
 *      Read then write clear the HC interrupt status then walk each
 *      port connected to the HC and see if it needs servicing.  Port
 *      success ints are reported in the HC interrupt status reg, the
 *      port error ints are reported in the higher level main
 *      interrupt status register and thus are passed in via the
 *      'relevant' argument.
 *
 *      LOCKING:
 *      Inherited from caller.
 */
static void mv_host_intr(struct ata_host *host, u32 relevant, unsigned int hc)
{
        void __iomem *mmio = host->iomap[MV_PRIMARY_BAR];
        void __iomem *hc_mmio = mv_hc_base(mmio, hc);
        struct ata_queued_cmd *qc;
        u32 hc_irq_cause;
        int shift, port, port0, hard_port, handled;
        unsigned int err_mask;

        if (hc == 0) {
                port0 = 0;
        } else {
                port0 = MV_PORTS_PER_HC;
        }

        /* we'll need the HC success int register in most cases */
        hc_irq_cause = readl(hc_mmio + HC_IRQ_CAUSE_OFS);
        if (hc_irq_cause)
                writelfl(~hc_irq_cause, hc_mmio + HC_IRQ_CAUSE_OFS);

        VPRINTK("ENTER, hc%u relevant=0x%08x HC IRQ cause=0x%08x\n",
                hc, relevant, hc_irq_cause);

        for (port = port0; port < port0 + MV_PORTS_PER_HC; port++) {
                u8 ata_status = 0;
                struct ata_port *ap = host->ports[port];
                struct mv_port_priv *pp = ap->private_data;

                hard_port = mv_hardport_from_port(port); /* range 0..3 */
                handled = 0;    /* ensure ata_status is set if handled++ */

                /* Note that DEV_IRQ might happen spuriously during EDMA,
                 * and should be ignored in such cases.
                 * The cause of this is still under investigation.
                 */
                if (pp->pp_flags & MV_PP_FLAG_EDMA_EN) {
                        /* EDMA: check for response queue interrupt */
                        if ((CRPB_DMA_DONE << hard_port) & hc_irq_cause) {
                                ata_status = mv_get_crpb_status(ap);
                                handled = 1;
                        }
                } else {
                        /* PIO: check for device (drive) interrupt */
                        if ((DEV_IRQ << hard_port) & hc_irq_cause) {
                                ata_status = readb(ap->ioaddr.status_addr);
                                handled = 1;
                                /* ignore spurious intr if drive still BUSY */
                                if (ata_status & ATA_BUSY) {
                                        ata_status = 0;
                                        handled = 0;
                                }
                        }
                }

                if (ap && (ap->flags & ATA_FLAG_DISABLED))
                        continue;

                err_mask = ac_err_mask(ata_status);

                shift = port << 1;              /* (port * 2) */
                if (port >= MV_PORTS_PER_HC) {
                        shift++;        /* skip bit 8 in the HC Main IRQ reg */
                }
                if ((PORT0_ERR << shift) & relevant) {
                        mv_err_intr(ap, 1);
                        err_mask |= AC_ERR_OTHER;
                        handled = 1;
                }

                if (handled) {
                        qc = ata_qc_from_tag(ap, ap->active_tag);
                        if (qc && (qc->flags & ATA_QCFLAG_ACTIVE)) {
                                VPRINTK("port %u IRQ found for qc, "
                                        "ata_status 0x%x\n", port, ata_status);
                                /* mark qc status appropriately */
                                if (!(qc->tf.flags & ATA_TFLAG_POLLING)) {
                                        qc->err_mask |= err_mask;
                                        ata_qc_complete(qc);
                                }
                        }
                }
        }
        VPRINTK("EXIT\n");
}
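
/* Shift example (illustrative): each port owns an err/done bit pair in
 * the main cause register, so port 2 uses shift 4 (bits 4/5).  For HC1
 * the extra shift++ skips bit 8, which is HC0's coalescing-done bit, so
 * port 5 uses shift 11 (bits 11/12).
 */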

/**
 *      mv_interrupt - Main interrupt event handler
 *      @irq: unused
 *      @dev_instance: private data; in this case the host structure
 *
 *      Read the read only register to determine if any host
 *      controllers have pending interrupts.  If so, call lower level
 *      routine to handle.  Also check for PCI errors which are only
 *      reported here.
 *
 *      LOCKING:
 *      This routine holds the host lock while processing pending
 *      interrupts.
 */
static irqreturn_t mv_interrupt(int irq, void *dev_instance)
{
        struct ata_host *host = dev_instance;
        unsigned int hc, handled = 0, n_hcs;
        void __iomem *mmio = host->iomap[MV_PRIMARY_BAR];
        struct mv_host_priv *hpriv;
        u32 irq_stat;

        irq_stat = readl(mmio + HC_MAIN_IRQ_CAUSE_OFS);

        /* check the cases where we either have nothing pending or have read
         * a bogus register value which can indicate HW removal or PCI fault
         */
        if (!irq_stat || (0xffffffffU == irq_stat))
                return IRQ_NONE;

        n_hcs = mv_get_hc_count(host->ports[0]->flags);
        spin_lock(&host->lock);

        for (hc = 0; hc < n_hcs; hc++) {
                u32 relevant = irq_stat & (HC0_IRQ_PEND << (hc * HC_SHIFT));
                if (relevant) {
                        mv_host_intr(host, relevant, hc);
                        handled++;
                }
        }

        hpriv = host->private_data;
        if (IS_60XX(hpriv)) {
                /* deal with the interrupt coalescing bits */
                if (irq_stat & (TRAN_LO_DONE | TRAN_HI_DONE | PORTS_0_7_COAL_DONE)) {
                        writelfl(0, mmio + MV_IRQ_COAL_CAUSE_LO);
                        writelfl(0, mmio + MV_IRQ_COAL_CAUSE_HI);
                        writelfl(0, mmio + MV_IRQ_COAL_CAUSE);
                }
        }

        if (PCI_ERR & irq_stat) {
                printk(KERN_ERR DRV_NAME ": PCI ERROR; PCI IRQ cause=0x%08x\n",
                       readl(mmio + PCI_IRQ_CAUSE_OFS));

                DPRINTK("All regs @ PCI error\n");
                mv_dump_all_regs(mmio, -1, to_pci_dev(host->dev));

                writelfl(0, mmio + PCI_IRQ_CAUSE_OFS);
                handled++;
        }
        spin_unlock(&host->lock);

        return IRQ_RETVAL(handled);
}

static void __iomem *mv5_phy_base(void __iomem *mmio, unsigned int port)
{
        void __iomem *hc_mmio = mv_hc_base_from_port(mmio, port);
        unsigned long ofs = (mv_hardport_from_port(port) + 1) * 0x100UL;

        return hc_mmio + ofs;
}

static unsigned int mv5_scr_offset(unsigned int sc_reg_in)
{
        unsigned int ofs;

        switch (sc_reg_in) {
        case SCR_STATUS:
        case SCR_ERROR:
        case SCR_CONTROL:
                ofs = sc_reg_in * sizeof(u32);
                break;
        default:
                ofs = 0xffffffffU;
                break;
        }
        return ofs;
}

static u32 mv5_scr_read(struct ata_port *ap, unsigned int sc_reg_in)
{
        void __iomem *mmio = ap->host->iomap[MV_PRIMARY_BAR];
        void __iomem *addr = mv5_phy_base(mmio, ap->port_no);
        unsigned int ofs = mv5_scr_offset(sc_reg_in);

        if (ofs != 0xffffffffU)
                return readl(addr + ofs);
        else
                return (u32) ofs;
}

static void mv5_scr_write(struct ata_port *ap, unsigned int sc_reg_in, u32 val)
{
        void __iomem *mmio = ap->host->iomap[MV_PRIMARY_BAR];
        void __iomem *addr = mv5_phy_base(mmio, ap->port_no);
        unsigned int ofs = mv5_scr_offset(sc_reg_in);

        if (ofs != 0xffffffffU)
                writelfl(val, addr + ofs);
}

static void mv5_reset_bus(struct pci_dev *pdev, void __iomem *mmio)
{
        int early_5080;

        early_5080 = (pdev->device == 0x5080) && (pdev->revision == 0);

        if (!early_5080) {
                u32 tmp = readl(mmio + MV_PCI_EXP_ROM_BAR_CTL);
                tmp |= (1 << 0);
                writel(tmp, mmio + MV_PCI_EXP_ROM_BAR_CTL);
        }

        mv_reset_pci_bus(pdev, mmio);
}

static void mv5_reset_flash(struct mv_host_priv *hpriv, void __iomem *mmio)
{
        writel(0x0fcfffff, mmio + MV_FLASH_CTL);
}

static void mv5_read_preamp(struct mv_host_priv *hpriv, int idx,
                           void __iomem *mmio)
{
        void __iomem *phy_mmio = mv5_phy_base(mmio, idx);
        u32 tmp;

        tmp = readl(phy_mmio + MV5_PHY_MODE);

        hpriv->signal[idx].pre = tmp & 0x1800;  /* bits 12:11 */
        hpriv->signal[idx].amps = tmp & 0xe0;   /* bits 7:5 */
}

static void mv5_enable_leds(struct mv_host_priv *hpriv, void __iomem *mmio)
{
        u32 tmp;

        writel(0, mmio + MV_GPIO_PORT_CTL);

        /* FIXME: handle MV_HP_ERRATA_50XXB2 errata */

        tmp = readl(mmio + MV_PCI_EXP_ROM_BAR_CTL);
        tmp |= ~(1 << 0);
        writel(tmp, mmio + MV_PCI_EXP_ROM_BAR_CTL);
}

static void mv5_phy_errata(struct mv_host_priv *hpriv, void __iomem *mmio,
                           unsigned int port)
{
        void __iomem *phy_mmio = mv5_phy_base(mmio, port);
        const u32 mask = (1<<12) | (1<<11) | (1<<7) | (1<<6) | (1<<5);
        u32 tmp;
        int fix_apm_sq = (hpriv->hp_flags & MV_HP_ERRATA_50XXB0);

        if (fix_apm_sq) {
                tmp = readl(phy_mmio + MV5_LT_MODE);
                tmp |= (1 << 19);
                writel(tmp, phy_mmio + MV5_LT_MODE);

                tmp = readl(phy_mmio + MV5_PHY_CTL);
                tmp &= ~0x3;
                tmp |= 0x1;
                writel(tmp, phy_mmio + MV5_PHY_CTL);
        }

        tmp = readl(phy_mmio + MV5_PHY_MODE);
        tmp &= ~mask;
        tmp |= hpriv->signal[port].pre;
        tmp |= hpriv->signal[port].amps;
        writel(tmp, phy_mmio + MV5_PHY_MODE);
}

#define ZERO(reg) writel(0, port_mmio + (reg))
static void mv5_reset_hc_port(struct mv_host_priv *hpriv, void __iomem *mmio,
                             unsigned int port)
{
        void __iomem *port_mmio = mv_port_base(mmio, port);

        writelfl(EDMA_DS, port_mmio + EDMA_CMD_OFS);

        mv_channel_reset(hpriv, mmio, port);

        ZERO(0x028);    /* command */
        writel(0x11f, port_mmio + EDMA_CFG_OFS);
        ZERO(0x004);    /* timer */
        ZERO(0x008);    /* irq err cause */
        ZERO(0x00c);    /* irq err mask */
        ZERO(0x010);    /* rq bah */
        ZERO(0x014);    /* rq inp */
        ZERO(0x018);    /* rq outp */
        ZERO(0x01c);    /* respq bah */
        ZERO(0x024);    /* respq outp */
        ZERO(0x020);    /* respq inp */
        ZERO(0x02c);    /* test control */
        writel(0xbc, port_mmio + EDMA_IORDY_TMOUT);
}
#undef ZERO

#define ZERO(reg) writel(0, hc_mmio + (reg))
static void mv5_reset_one_hc(struct mv_host_priv *hpriv, void __iomem *mmio,
                        unsigned int hc)
{
        void __iomem *hc_mmio = mv_hc_base(mmio, hc);
        u32 tmp;

        ZERO(0x00c);
        ZERO(0x010);
        ZERO(0x014);
        ZERO(0x018);

        tmp = readl(hc_mmio + 0x20);
        tmp &= 0x1c1c1c1c;
        tmp |= 0x03030303;
        writel(tmp, hc_mmio + 0x20);
}
#undef ZERO

static int mv5_reset_hc(struct mv_host_priv *hpriv, void __iomem *mmio,
                        unsigned int n_hc)
{
        unsigned int hc, port;

        for (hc = 0; hc < n_hc; hc++) {
                for (port = 0; port < MV_PORTS_PER_HC; port++)
                        mv5_reset_hc_port(hpriv, mmio,
                                          (hc * MV_PORTS_PER_HC) + port);

                mv5_reset_one_hc(hpriv, mmio, hc);
        }

        return 0;
}

#define ZERO(reg) writel(0, mmio + (reg))
static void mv_reset_pci_bus(struct pci_dev *pdev, void __iomem *mmio)
{
        u32 tmp;

        tmp = readl(mmio + MV_PCI_MODE);
        tmp &= 0xff00ffff;
        writel(tmp, mmio + MV_PCI_MODE);

        ZERO(MV_PCI_DISC_TIMER);
        ZERO(MV_PCI_MSI_TRIGGER);
        writel(0x000100ff, mmio + MV_PCI_XBAR_TMOUT);
        ZERO(HC_MAIN_IRQ_MASK_OFS);
        ZERO(MV_PCI_SERR_MASK);
        ZERO(PCI_IRQ_CAUSE_OFS);
        ZERO(PCI_IRQ_MASK_OFS);
        ZERO(MV_PCI_ERR_LOW_ADDRESS);
        ZERO(MV_PCI_ERR_HIGH_ADDRESS);
        ZERO(MV_PCI_ERR_ATTRIBUTE);
        ZERO(MV_PCI_ERR_COMMAND);
}
#undef ZERO

static void mv6_reset_flash(struct mv_host_priv *hpriv, void __iomem *mmio)
{
        u32 tmp;

        mv5_reset_flash(hpriv, mmio);

        tmp = readl(mmio + MV_GPIO_PORT_CTL);
        tmp &= 0x3;
        tmp |= (1 << 5) | (1 << 6);
        writel(tmp, mmio + MV_GPIO_PORT_CTL);
}

/**
 *      mv6_reset_hc - Perform the 6xxx global soft reset
 *      @mmio: base address of the HBA
 *
 *      This routine only applies to 6xxx parts.
 *
 *      LOCKING:
 *      Inherited from caller.
 */
static int mv6_reset_hc(struct mv_host_priv *hpriv, void __iomem *mmio,
                        unsigned int n_hc)
{
        void __iomem *reg = mmio + PCI_MAIN_CMD_STS_OFS;
        int i, rc = 0;
        u32 t;

        /* Following procedure defined in PCI "main command and status
         * register" table.
         */
        t = readl(reg);
        writel(t | STOP_PCI_MASTER, reg);

        for (i = 0; i < 1000; i++) {
                udelay(1);
                t = readl(reg);
                if (PCI_MASTER_EMPTY & t) {
                        break;
                }
        }
        if (!(PCI_MASTER_EMPTY & t)) {
                printk(KERN_ERR DRV_NAME ": PCI master won't flush\n");
                rc = 1;
                goto done;
        }

        /* set reset */
        i = 5;
        do {
                writel(t | GLOB_SFT_RST, reg);
                t = readl(reg);
                udelay(1);
        } while (!(GLOB_SFT_RST & t) && (i-- > 0));

        if (!(GLOB_SFT_RST & t)) {
                printk(KERN_ERR DRV_NAME ": can't set global reset\n");
                rc = 1;
                goto done;
        }

        /* clear reset and *reenable the PCI master* (not mentioned in spec) */
        i = 5;
        do {
                writel(t & ~(GLOB_SFT_RST | STOP_PCI_MASTER), reg);
                t = readl(reg);
                udelay(1);
        } while ((GLOB_SFT_RST & t) && (i-- > 0));

        if (GLOB_SFT_RST & t) {
                printk(KERN_ERR DRV_NAME ": can't clear global reset\n");
                rc = 1;
        }
done:
        return rc;
}

static void mv6_read_preamp(struct mv_host_priv *hpriv, int idx,
                           void __iomem *mmio)
{
        void __iomem *port_mmio;
        u32 tmp;

        tmp = readl(mmio + MV_RESET_CFG);
        if ((tmp & (1 << 0)) == 0) {
                hpriv->signal[idx].amps = 0x7 << 8;
                hpriv->signal[idx].pre = 0x1 << 5;
                return;
        }

        port_mmio = mv_port_base(mmio, idx);
        tmp = readl(port_mmio + PHY_MODE2);

        hpriv->signal[idx].amps = tmp & 0x700;  /* bits 10:8 */
        hpriv->signal[idx].pre = tmp & 0xe0;    /* bits 7:5 */
}

static void mv6_enable_leds(struct mv_host_priv *hpriv, void __iomem *mmio)
{
        writel(0x00000060, mmio + MV_GPIO_PORT_CTL);
}

static void mv6_phy_errata(struct mv_host_priv *hpriv, void __iomem *mmio,
                           unsigned int port)
{
        void __iomem *port_mmio = mv_port_base(mmio, port);

        u32 hp_flags = hpriv->hp_flags;
        int fix_phy_mode2 =
                hp_flags & (MV_HP_ERRATA_60X1B2 | MV_HP_ERRATA_60X1C0);
        int fix_phy_mode4 =
                hp_flags & (MV_HP_ERRATA_60X1B2 | MV_HP_ERRATA_60X1C0);
        u32 m2, tmp;

        if (fix_phy_mode2) {
                m2 = readl(port_mmio + PHY_MODE2);
                m2 &= ~(1 << 16);
                m2 |= (1 << 31);
                writel(m2, port_mmio + PHY_MODE2);

                udelay(200);

                m2 = readl(port_mmio + PHY_MODE2);
                m2 &= ~((1 << 16) | (1 << 31));
                writel(m2, port_mmio + PHY_MODE2);

                udelay(200);
        }

        /* who knows what this magic does */
        tmp = readl(port_mmio + PHY_MODE3);
        tmp &= ~0x7F800000;
        tmp |= 0x2A800000;
        writel(tmp, port_mmio + PHY_MODE3);

        if (fix_phy_mode4) {
                u32 m4;

                m4 = readl(port_mmio + PHY_MODE4);

                if (hp_flags & MV_HP_ERRATA_60X1B2)
                        tmp = readl(port_mmio + 0x310);

                m4 = (m4 & ~(1 << 1)) | (1 << 0);

                writel(m4, port_mmio + PHY_MODE4);

                if (hp_flags & MV_HP_ERRATA_60X1B2)
                        writel(tmp, port_mmio + 0x310);
        }

        /* Revert values of pre-emphasis and signal amps to the saved ones */
        m2 = readl(port_mmio + PHY_MODE2);

        m2 &= ~MV_M2_PREAMP_MASK;
        m2 |= hpriv->signal[port].amps;
        m2 |= hpriv->signal[port].pre;
        m2 &= ~(1 << 16);

        /* according to mvSata 3.6.1, some IIE values are fixed */
        if (IS_GEN_IIE(hpriv)) {
                m2 &= ~0xC30FF01F;
                m2 |= 0x0000900F;
        }

        writel(m2, port_mmio + PHY_MODE2);
}

static void mv_channel_reset(struct mv_host_priv *hpriv, void __iomem *mmio,
                             unsigned int port_no)
{
        void __iomem *port_mmio = mv_port_base(mmio, port_no);

        writelfl(ATA_RST, port_mmio + EDMA_CMD_OFS);

        if (IS_60XX(hpriv)) {
                u32 ifctl = readl(port_mmio + SATA_INTERFACE_CTL);
                ifctl |= (1 << 7);              /* enable gen2i speed */
                ifctl = (ifctl & 0xfff) | 0x9b1000; /* from chip spec */
                writelfl(ifctl, port_mmio + SATA_INTERFACE_CTL);
        }

        udelay(25);             /* allow reset propagation */

        /* Spec never mentions clearing the bit.  Marvell's driver does
         * clear the bit, however.
         */
        writelfl(0, port_mmio + EDMA_CMD_OFS);

        hpriv->ops->phy_errata(hpriv, mmio, port_no);

        if (IS_50XX(hpriv))
                mdelay(1);
}

static void mv_stop_and_reset(struct ata_port *ap)
{
        struct mv_host_priv *hpriv = ap->host->private_data;
        void __iomem *mmio = ap->host->iomap[MV_PRIMARY_BAR];

        mv_stop_dma(ap);

        mv_channel_reset(hpriv, mmio, ap->port_no);

        __mv_phy_reset(ap, 0);
}

static inline void __msleep(unsigned int msec, int can_sleep)
{
        if (can_sleep)
                msleep(msec);
        else
                mdelay(msec);
}

/**
 *      __mv_phy_reset - Perform eDMA reset followed by COMRESET
 *      @ap: ATA channel to manipulate
 *
 *      Part of this is taken from __sata_phy_reset and modified to
 *      not sleep since this routine gets called from interrupt level.
 *
 *      LOCKING:
 *      Inherited from caller.  This is coded to be safe to call at
 *      interrupt level, i.e. it does not sleep.
 */
static void __mv_phy_reset(struct ata_port *ap, int can_sleep)
{
        struct mv_port_priv *pp = ap->private_data;
        struct mv_host_priv *hpriv = ap->host->private_data;
        void __iomem *port_mmio = mv_ap_base(ap);
        struct ata_taskfile tf;
        struct ata_device *dev = &ap->device[0];
        unsigned long timeout;
        int retry = 5;
        u32 sstatus;

        VPRINTK("ENTER, port %u, mmio 0x%p\n", ap->port_no, port_mmio);

        DPRINTK("S-regs after ATA_RST: SStat 0x%08x SErr 0x%08x "
                "SCtrl 0x%08x\n", mv_scr_read(ap, SCR_STATUS),
                mv_scr_read(ap, SCR_ERROR), mv_scr_read(ap, SCR_CONTROL));

        /* Issue COMRESET via SControl */
comreset_retry:
        sata_scr_write_flush(ap, SCR_CONTROL, 0x301);
        __msleep(1, can_sleep);

        sata_scr_write_flush(ap, SCR_CONTROL, 0x300);
        __msleep(20, can_sleep);

        timeout = jiffies + msecs_to_jiffies(200);
        do {
                sata_scr_read(ap, SCR_STATUS, &sstatus);
                if (((sstatus & 0x3) == 3) || ((sstatus & 0x3) == 0))
                        break;

                __msleep(1, can_sleep);
        } while (time_before(jiffies, timeout));

        /* work around errata */
        if (IS_60XX(hpriv) &&
            (sstatus != 0x0) && (sstatus != 0x113) && (sstatus != 0x123) &&
            (retry-- > 0))
                goto comreset_retry;

        DPRINTK("S-regs after PHY wake: SStat 0x%08x SErr 0x%08x "
                "SCtrl 0x%08x\n", mv_scr_read(ap, SCR_STATUS),
                mv_scr_read(ap, SCR_ERROR), mv_scr_read(ap, SCR_CONTROL));

        if (ata_port_online(ap)) {
                ata_port_probe(ap);
        } else {
                sata_scr_read(ap, SCR_STATUS, &sstatus);
                ata_port_printk(ap, KERN_INFO,
                                "no device found (phy stat %08x)\n", sstatus);
                ata_port_disable(ap);
                return;
        }

        /* even after SStatus reflects that device is ready,
         * it seems to take a while for link to be fully
         * established (and thus Status no longer 0x80/0x7F),
         * so we poll a bit for that, here.
         */
        retry = 20;
        while (1) {
                u8 drv_stat = ata_check_status(ap);
                if ((drv_stat != 0x80) && (drv_stat != 0x7f))
                        break;
                __msleep(500, can_sleep);
                if (retry-- <= 0)
                        break;
        }

        tf.lbah = readb(ap->ioaddr.lbah_addr);
        tf.lbam = readb(ap->ioaddr.lbam_addr);
        tf.lbal = readb(ap->ioaddr.lbal_addr);
        tf.nsect = readb(ap->ioaddr.nsect_addr);

        dev->class = ata_dev_classify(&tf);
        if (!ata_dev_enabled(dev)) {
                VPRINTK("Port disabled post-sig: No device present.\n");
                ata_port_disable(ap);
        }

        writelfl(0, port_mmio + EDMA_ERR_IRQ_CAUSE_OFS);

        pp->pp_flags &= ~MV_PP_FLAG_EDMA_EN;

        VPRINTK("EXIT\n");
}

static void mv_phy_reset(struct ata_port *ap)
{
        __mv_phy_reset(ap, 1);
}
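
/* SControl note (illustrative): writing 0x301 above sets DET = 1 (assert
 * COMRESET) with IPM = 3 (partial/slumber power states disabled); writing
 * 0x300 releases DET so the link can renegotiate, after which SStatus is
 * polled for DET == 3 (device present, PHY online) or 0 (nothing attached).
 */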

/**
 *      mv_eng_timeout - Routine called by libata when SCSI times out I/O
 *      @ap: ATA channel to manipulate
 *
 *      Intent is to clear all pending error conditions, reset the
 *      chip/bus, fail the command, and move on.
 *
 *      LOCKING:
 *      This routine holds the host lock while failing the command.
 */
static void mv_eng_timeout(struct ata_port *ap)
{
        void __iomem *mmio = ap->host->iomap[MV_PRIMARY_BAR];
        struct ata_queued_cmd *qc;
        unsigned long flags;

        ata_port_printk(ap, KERN_ERR, "Entering mv_eng_timeout\n");
        DPRINTK("All regs @ start of eng_timeout\n");
        mv_dump_all_regs(mmio, ap->port_no, to_pci_dev(ap->host->dev));

        qc = ata_qc_from_tag(ap, ap->active_tag);
        printk(KERN_ERR "mmio_base %p ap %p qc %p scsi_cmnd %p &cmnd %p\n",
               mmio, ap, qc, qc->scsicmd, &qc->scsicmd->cmnd);

        spin_lock_irqsave(&ap->host->lock, flags);
        mv_err_intr(ap, 0);
        mv_stop_and_reset(ap);
        spin_unlock_irqrestore(&ap->host->lock, flags);

        WARN_ON(!(qc->flags & ATA_QCFLAG_ACTIVE));
        if (qc->flags & ATA_QCFLAG_ACTIVE) {
                qc->err_mask |= AC_ERR_TIMEOUT;
                ata_eh_qc_complete(qc);
        }
}

/**
 *      mv_port_init - Perform some early initialization on a single port.
 *      @port: libata data structure storing shadow register addresses
 *      @port_mmio: base address of the port
 *
 *      Initialize shadow register mmio addresses, clear outstanding
 *      interrupts on the port, and unmask interrupts for the future
 *      start of the port.
 *
 *      LOCKING:
 *      Inherited from caller.
 */
static void mv_port_init(struct ata_ioports *port, void __iomem *port_mmio)
{
        void __iomem *shd_base = port_mmio + SHD_BLK_OFS;
        unsigned serr_ofs;

        /* PIO related setup
         */
        port->data_addr = shd_base + (sizeof(u32) * ATA_REG_DATA);
        port->error_addr =
                port->feature_addr = shd_base + (sizeof(u32) * ATA_REG_ERR);
        port->nsect_addr = shd_base + (sizeof(u32) * ATA_REG_NSECT);
        port->lbal_addr = shd_base + (sizeof(u32) * ATA_REG_LBAL);
        port->lbam_addr = shd_base + (sizeof(u32) * ATA_REG_LBAM);
        port->lbah_addr = shd_base + (sizeof(u32) * ATA_REG_LBAH);
        port->device_addr = shd_base + (sizeof(u32) * ATA_REG_DEVICE);
        port->status_addr =
                port->command_addr = shd_base + (sizeof(u32) * ATA_REG_STATUS);
        /* special case: control/altstatus doesn't have ATA_REG_ address */
        port->altstatus_addr = port->ctl_addr = shd_base + SHD_CTL_AST_OFS;

        /* unused: */
        port->cmd_addr = port->bmdma_addr = port->scr_addr = NULL;

        /* Clear any currently outstanding port interrupt conditions */
        serr_ofs = mv_scr_offset(SCR_ERROR);
        writelfl(readl(port_mmio + serr_ofs), port_mmio + serr_ofs);
        writelfl(0, port_mmio + EDMA_ERR_IRQ_CAUSE_OFS);

        /* unmask all EDMA error interrupts */
        writelfl(~0, port_mmio + EDMA_ERR_IRQ_MASK_OFS);

        VPRINTK("EDMA cfg=0x%08x EDMA IRQ err cause/mask=0x%08x/0x%08x\n",
                readl(port_mmio + EDMA_CFG_OFS),
                readl(port_mmio + EDMA_ERR_IRQ_CAUSE_OFS),
                readl(port_mmio + EDMA_ERR_IRQ_MASK_OFS));
}

static int mv_chip_id(struct ata_host *host, unsigned int board_idx)
{
        struct pci_dev *pdev = to_pci_dev(host->dev);
        struct mv_host_priv *hpriv = host->private_data;
        u32 hp_flags = hpriv->hp_flags;

        switch (board_idx) {
        case chip_5080:
                hpriv->ops = &mv5xxx_ops;
                hp_flags |= MV_HP_50XX;

                switch (pdev->revision) {
                case 0x1:
                        hp_flags |= MV_HP_ERRATA_50XXB0;
                        break;
                case 0x3:
                        hp_flags |= MV_HP_ERRATA_50XXB2;
                        break;
                default:
                        dev_printk(KERN_WARNING, &pdev->dev,
                           "Applying 50XXB2 workarounds to unknown rev\n");
                        hp_flags |= MV_HP_ERRATA_50XXB2;
                        break;
                }
                break;

        case chip_504x:
        case chip_508x:
                hpriv->ops = &mv5xxx_ops;
                hp_flags |= MV_HP_50XX;

                switch (pdev->revision) {
                case 0x0:
                        hp_flags |= MV_HP_ERRATA_50XXB0;
                        break;
                case 0x3:
                        hp_flags |= MV_HP_ERRATA_50XXB2;
                        break;
                default:
                        dev_printk(KERN_WARNING, &pdev->dev,
                           "Applying B2 workarounds to unknown rev\n");
                        hp_flags |= MV_HP_ERRATA_50XXB2;
                        break;
                }
                break;

        case chip_604x:
        case chip_608x:
                hpriv->ops = &mv6xxx_ops;

                switch (pdev->revision) {
                case 0x7:
                        hp_flags |= MV_HP_ERRATA_60X1B2;
                        break;
                case 0x9:
                        hp_flags |= MV_HP_ERRATA_60X1C0;
                        break;
                default:
                        dev_printk(KERN_WARNING, &pdev->dev,
                                   "Applying B2 workarounds to unknown rev\n");
                        hp_flags |= MV_HP_ERRATA_60X1B2;
                        break;
                }
                break;

        case chip_7042:
        case chip_6042:
                hpriv->ops = &mv6xxx_ops;

                hp_flags |= MV_HP_GEN_IIE;

                switch (pdev->revision) {
                case 0x0:
                        hp_flags |= MV_HP_ERRATA_XX42A0;
                        break;
                case 0x1:
                        hp_flags |= MV_HP_ERRATA_60X1C0;
                        break;
                default:
                        dev_printk(KERN_WARNING, &pdev->dev,
                           "Applying 60X1C0 workarounds to unknown rev\n");
                        hp_flags |= MV_HP_ERRATA_60X1C0;
                        break;
                }
                break;

        default:
                printk(KERN_ERR DRV_NAME ": BUG: invalid board index %u\n", board_idx);
                return 1;
        }

        hpriv->hp_flags = hp_flags;

        return 0;
}

/**
 *      mv_init_host - Perform some early initialization of the host.
 *      @host: ATA host to initialize
 *      @board_idx: controller index
 *
 *      If possible, do an early global reset of the host.  Then do
 *      our port init and clear/unmask all/relevant host interrupts.
 *
 *      LOCKING:
 *      Inherited from caller.
 */
static int mv_init_host(struct ata_host *host, unsigned int board_idx)
{
        int rc = 0, n_hc, port, hc;
        struct pci_dev *pdev = to_pci_dev(host->dev);
        void __iomem *mmio = host->iomap[MV_PRIMARY_BAR];
        struct mv_host_priv *hpriv = host->private_data;

        /* global interrupt mask */
        writel(0, mmio + HC_MAIN_IRQ_MASK_OFS);

        rc = mv_chip_id(host, board_idx);
        if (rc)
                goto done;

        n_hc = mv_get_hc_count(host->ports[0]->flags);

        for (port = 0; port < host->n_ports; port++)
                hpriv->ops->read_preamp(hpriv, port, mmio);

        rc = hpriv->ops->reset_hc(hpriv, mmio, n_hc);
        if (rc)
                goto done;

        hpriv->ops->reset_flash(hpriv, mmio);
        hpriv->ops->reset_bus(pdev, mmio);
        hpriv->ops->enable_leds(hpriv, mmio);

        for (port = 0; port < host->n_ports; port++) {
                if (IS_60XX(hpriv)) {
                        void __iomem *port_mmio = mv_port_base(mmio, port);

                        u32 ifctl = readl(port_mmio + SATA_INTERFACE_CTL);
                        ifctl |= (1 << 7);              /* enable gen2i speed */
                        ifctl = (ifctl & 0xfff) | 0x9b1000; /* from chip spec */
                        writelfl(ifctl, port_mmio + SATA_INTERFACE_CTL);
                }

                hpriv->ops->phy_errata(hpriv, mmio, port);
        }

        for (port = 0; port < host->n_ports; port++) {
                void __iomem *port_mmio = mv_port_base(mmio, port);
                mv_port_init(&host->ports[port]->ioaddr, port_mmio);
        }

        for (hc = 0; hc < n_hc; hc++) {
                void __iomem *hc_mmio = mv_hc_base(mmio, hc);

                VPRINTK("HC%i: HC config=0x%08x HC IRQ cause "
                        "(before clear)=0x%08x\n", hc,
                        readl(hc_mmio + HC_CFG_OFS),
                        readl(hc_mmio + HC_IRQ_CAUSE_OFS));

                /* Clear any currently outstanding hc interrupt conditions */
                writelfl(0, hc_mmio + HC_IRQ_CAUSE_OFS);
        }

        /* Clear any currently outstanding host interrupt conditions */
        writelfl(0, mmio + PCI_IRQ_CAUSE_OFS);

        /* and unmask interrupt generation for host regs */
        writelfl(PCI_UNMASK_ALL_IRQS, mmio + PCI_IRQ_MASK_OFS);

        if (IS_50XX(hpriv))
                writelfl(~HC_MAIN_MASKED_IRQS_5, mmio + HC_MAIN_IRQ_MASK_OFS);
        else
                writelfl(~HC_MAIN_MASKED_IRQS, mmio + HC_MAIN_IRQ_MASK_OFS);

        VPRINTK("HC MAIN IRQ cause/mask=0x%08x/0x%08x "
                "PCI int cause/mask=0x%08x/0x%08x\n",
                readl(mmio + HC_MAIN_IRQ_CAUSE_OFS),
                readl(mmio + HC_MAIN_IRQ_MASK_OFS),
                readl(mmio + PCI_IRQ_CAUSE_OFS),
                readl(mmio + PCI_IRQ_MASK_OFS));

done:
        return rc;
}

/**
 *      mv_print_info - Dump key info to kernel log for perusal.
 *      @host: ATA host to print info about
 *
 *      FIXME: complete this.
 *
 *      LOCKING:
 *      Inherited from caller.
 */
static void mv_print_info(struct ata_host *host)
{
        struct pci_dev *pdev = to_pci_dev(host->dev);
        struct mv_host_priv *hpriv = host->private_data;
        u8 scc;
        const char *scc_s, *gen;

        /* Use this to determine the HW stepping of the chip so we know
         * what errata to workaround
         */
        pci_read_config_byte(pdev, PCI_CLASS_DEVICE, &scc);
        if (scc == 0)
                scc_s = "SCSI";
        else if (scc == 0x01)
                scc_s = "RAID";
        else
                scc_s = "?";

        if (IS_GEN_I(hpriv))
                gen = "I";
        else if (IS_GEN_II(hpriv))
                gen = "II";
        else if (IS_GEN_IIE(hpriv))
                gen = "IIE";
        else
                gen = "?";

        dev_printk(KERN_INFO, &pdev->dev,
               "Gen-%s %u slots %u ports %s mode IRQ via %s\n",
               gen, (unsigned)MV_MAX_Q_DEPTH, host->n_ports,
               scc_s, (MV_HP_FLAG_MSI & hpriv->hp_flags) ? "MSI" : "INTx");
}

/**
 *      mv_init_one - handle a positive probe of a Marvell host
 *      @pdev: PCI device found
 *      @ent: PCI device ID entry for the matched host
 *
 *      LOCKING:
 *      Inherited from caller.
 */
static int mv_init_one(struct pci_dev *pdev, const struct pci_device_id *ent)
{
        static int printed_version = 0;
        unsigned int board_idx = (unsigned int)ent->driver_data;
        const struct ata_port_info *ppi[] = { &mv_port_info[board_idx], NULL };
        struct ata_host *host;
        struct mv_host_priv *hpriv;
        int n_ports, rc;

        if (!printed_version++)
                dev_printk(KERN_INFO, &pdev->dev, "version " DRV_VERSION "\n");

        /* allocate host */
        n_ports = mv_get_hc_count(ppi[0]->flags) * MV_PORTS_PER_HC;

        host = ata_host_alloc_pinfo(&pdev->dev, ppi, n_ports);
        hpriv = devm_kzalloc(&pdev->dev, sizeof(*hpriv), GFP_KERNEL);
        if (!host || !hpriv)
                return -ENOMEM;
        host->private_data = hpriv;

        /* acquire resources */
        rc = pcim_enable_device(pdev);
        if (rc)
                return rc;

        rc = pcim_iomap_regions(pdev, 1 << MV_PRIMARY_BAR, DRV_NAME);
        if (rc == -EBUSY)
                pcim_pin_device(pdev);
        if (rc)
                return rc;
        host->iomap = pcim_iomap_table(pdev);

        rc = pci_go_64(pdev);
        if (rc)
                return rc;

        /* initialize adapter */
        rc = mv_init_host(host, board_idx);
        if (rc)
                return rc;

        /* Enable interrupts */
        if (msi && pci_enable_msi(pdev))
                pci_intx(pdev, 1);

        mv_dump_pci_cfg(pdev, 0x68);
        mv_print_info(host);

        pci_set_master(pdev);
        return ata_host_activate(host, pdev->irq, mv_interrupt, IRQF_SHARED,
                                 &mv_sht);
}

static int __init mv_init(void)
{
        return pci_register_driver(&mv_pci_driver);
}

static void __exit mv_exit(void)
{
        pci_unregister_driver(&mv_pci_driver);
}

MODULE_AUTHOR("Brett Russ");
MODULE_DESCRIPTION("SCSI low-level driver for Marvell SATA controllers");
MODULE_LICENSE("GPL");
MODULE_DEVICE_TABLE(pci, mv_pci_tbl);
MODULE_VERSION(DRV_VERSION);

module_param(msi, int, 0444);
MODULE_PARM_DESC(msi, "Enable use of PCI MSI (0=off, 1=on)");

module_init(mv_init);
module_exit(mv_exit);