 * sata_mv.c - Marvell SATA support
 *
 * Copyright 2005: EMC Corporation, all rights reserved.
 * Copyright 2005 Red Hat, Inc. All rights reserved.
 *
 * Please ALWAYS copy linux-ide@vger.kernel.org on emails.
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License as published by
 * the Free Software Foundation; version 2 of the License.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program; if not, write to the Free Software
 * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
 */
#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/pci.h>
#include <linux/init.h>
#include <linux/blkdev.h>
#include <linux/delay.h>
#include <linux/interrupt.h>
#include <linux/dma-mapping.h>
#include <linux/device.h>
#include <scsi/scsi_host.h>
#include <scsi/scsi_cmnd.h>
#include <linux/libata.h>

#define DRV_NAME	"sata_mv"
#define DRV_VERSION	"0.7"
	/* BARs are enumerated in pci_resource_start() terms */
	MV_PRIMARY_BAR		= 0,	/* offset 0x10: memory space */
	MV_IO_BAR		= 2,	/* offset 0x18: IO space */
	MV_MISC_BAR		= 3,	/* offset 0x1c: FLASH, NVRAM, SRAM */

	MV_MAJOR_REG_AREA_SZ	= 0x10000,	/* 64KB */
	MV_MINOR_REG_AREA_SZ	= 0x2000,	/* 8KB */

	MV_IRQ_COAL_REG_BASE	= 0x18000,	/* 6xxx part only */
	MV_IRQ_COAL_CAUSE	= (MV_IRQ_COAL_REG_BASE + 0x08),
	MV_IRQ_COAL_CAUSE_LO	= (MV_IRQ_COAL_REG_BASE + 0x88),
	MV_IRQ_COAL_CAUSE_HI	= (MV_IRQ_COAL_REG_BASE + 0x8c),
	MV_IRQ_COAL_THRESHOLD	= (MV_IRQ_COAL_REG_BASE + 0xcc),
	MV_IRQ_COAL_TIME_THRESHOLD = (MV_IRQ_COAL_REG_BASE + 0xd0),

	MV_SATAHC0_REG_BASE	= 0x20000,
	MV_FLASH_CTL		= 0x1046c,
	MV_GPIO_PORT_CTL	= 0x104f0,
	MV_RESET_CFG		= 0x180d8,

	MV_PCI_REG_SZ		= MV_MAJOR_REG_AREA_SZ,
	MV_SATAHC_REG_SZ	= MV_MAJOR_REG_AREA_SZ,
	MV_SATAHC_ARBTR_REG_SZ	= MV_MINOR_REG_AREA_SZ,	/* arbiter */
	MV_PORT_REG_SZ		= MV_MINOR_REG_AREA_SZ,

	MV_USE_Q_DEPTH		= ATA_DEF_QUEUE,

	MV_MAX_Q_DEPTH_MASK	= MV_MAX_Q_DEPTH - 1,

	/* CRQB needs alignment on a 1KB boundary. Size == 1KB
	 * CRPB needs alignment on a 256B boundary. Size == 256B
	 * SG count of 176 leads to MV_PORT_PRIV_DMA_SZ == 4KB
	 * ePRD (SG) entries need alignment on a 16B boundary. Size == 16B
	 */
	MV_CRQB_Q_SZ		= (32 * MV_MAX_Q_DEPTH),
	MV_CRPB_Q_SZ		= (8 * MV_MAX_Q_DEPTH),
	MV_SG_TBL_SZ		= (16 * MV_MAX_SG_CT),
	MV_PORT_PRIV_DMA_SZ	= (MV_CRQB_Q_SZ + MV_CRPB_Q_SZ + MV_SG_TBL_SZ),
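	/* Worked sizes, assuming MV_MAX_Q_DEPTH == 32 and MV_MAX_SG_CT == 176
	 * (the SG count quoted in the comment above): CRQB queue = 32 * 32B
	 * = 1KB, CRPB queue = 32 * 8B = 256B, ePRD table = 176 * 16B = 2816B,
	 * and 1024 + 256 + 2816 == 4096, i.e. MV_PORT_PRIV_DMA_SZ is the 4KB
	 * the comment above promises.  (The MASK trick above also requires
	 * MV_MAX_Q_DEPTH to be a power of two.)
	 */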
	/* == (port / MV_PORTS_PER_HC) to determine HC from 0-7 port */
	/* == (port % MV_PORTS_PER_HC) to determine hard port from 0-7 port */

	MV_FLAG_DUAL_HC		= (1 << 30),	/* two SATA Host Controllers */
	MV_FLAG_IRQ_COALESCE	= (1 << 29),	/* IRQ coalescing capability */
	MV_COMMON_FLAGS		= (ATA_FLAG_SATA | ATA_FLAG_NO_LEGACY |
				   ATA_FLAG_SATA_RESET | ATA_FLAG_MMIO |
				   ATA_FLAG_NO_ATAPI | ATA_FLAG_PIO_POLLING),
	MV_6XXX_FLAGS		= MV_FLAG_IRQ_COALESCE,

	CRQB_FLAG_READ		= (1 << 0),
	CRQB_CMD_ADDR_SHIFT	= 8,
	CRQB_CMD_CS		= (0x2 << 11),
	CRQB_CMD_LAST		= (1 << 15),

	CRPB_FLAG_STATUS_SHIFT	= 8,

	EPRD_FLAG_END_OF_TBL	= (1 << 31),

	/* PCI interface registers */

	PCI_COMMAND_OFS		= 0xc00,

	PCI_MAIN_CMD_STS_OFS	= 0xd30,
	STOP_PCI_MASTER		= (1 << 2),
	PCI_MASTER_EMPTY	= (1 << 3),
	GLOB_SFT_RST		= (1 << 4),

	MV_PCI_EXP_ROM_BAR_CTL	= 0xd2c,
	MV_PCI_DISC_TIMER	= 0xd04,
	MV_PCI_MSI_TRIGGER	= 0xc38,
	MV_PCI_SERR_MASK	= 0xc28,
	MV_PCI_XBAR_TMOUT	= 0x1d04,
	MV_PCI_ERR_LOW_ADDRESS	= 0x1d40,
	MV_PCI_ERR_HIGH_ADDRESS	= 0x1d44,
	MV_PCI_ERR_ATTRIBUTE	= 0x1d48,
	MV_PCI_ERR_COMMAND	= 0x1d50,

	PCI_IRQ_CAUSE_OFS	= 0x1d58,
	PCI_IRQ_MASK_OFS	= 0x1d5c,
	PCI_UNMASK_ALL_IRQS	= 0x7fffff,	/* bits 22-0 */

	HC_MAIN_IRQ_CAUSE_OFS	= 0x1d60,
	HC_MAIN_IRQ_MASK_OFS	= 0x1d64,
	PORT0_ERR		= (1 << 0),	/* shift by port # */
	PORT0_DONE		= (1 << 1),	/* shift by port # */
	HC0_IRQ_PEND		= 0x1ff,	/* bits 0-8 = HC0's ports */
	HC_SHIFT		= 9,		/* bits 9-17 = HC1's ports */

	TRAN_LO_DONE		= (1 << 19),	/* 6xxx: IRQ coalescing */
	TRAN_HI_DONE		= (1 << 20),	/* 6xxx: IRQ coalescing */
	PORTS_0_7_COAL_DONE	= (1 << 21),	/* 6xxx: IRQ coalescing */
	GPIO_INT		= (1 << 22),
	SELF_INT		= (1 << 23),
	TWSI_INT		= (1 << 24),
	HC_MAIN_RSVD		= (0x7f << 25),	/* bits 31-25 */
	HC_MAIN_MASKED_IRQS	= (TRAN_LO_DONE | TRAN_HI_DONE |
				   PORTS_0_7_COAL_DONE | GPIO_INT | TWSI_INT |
				   HC_MAIN_RSVD),
	/* SATAHC registers */
	HC_IRQ_CAUSE_OFS	= 0x14,
	CRPB_DMA_DONE		= (1 << 0),	/* shift by port # */
	HC_IRQ_COAL		= (1 << 4),	/* IRQ coalescing */
	DEV_IRQ			= (1 << 8),	/* shift by port # */

	/* Shadow block registers */
	SHD_CTL_AST_OFS		= 0x20,		/* ofs from SHD_BLK_OFS */

	SATA_STATUS_OFS		= 0x300,	/* ctrl, err regs follow status */
	SATA_ACTIVE_OFS		= 0x350,

	SATA_INTERFACE_CTL	= 0x050,

	MV_M2_PREAMP_MASK	= 0x7e0,

	EDMA_CFG_Q_DEPTH	= 0,		/* queueing disabled */
	EDMA_CFG_NCQ		= (1 << 5),
	EDMA_CFG_NCQ_GO_ON_ERR	= (1 << 14),	/* continue on error */
	EDMA_CFG_RD_BRST_EXT	= (1 << 11),	/* read burst 512B */
	EDMA_CFG_WR_BUFF_LEN	= (1 << 13),	/* write buffer 512B */

	EDMA_ERR_IRQ_CAUSE_OFS	= 0x8,
	EDMA_ERR_IRQ_MASK_OFS	= 0xc,
	EDMA_ERR_D_PAR		= (1 << 0),
	EDMA_ERR_PRD_PAR	= (1 << 1),
	EDMA_ERR_DEV		= (1 << 2),
	EDMA_ERR_DEV_DCON	= (1 << 3),
	EDMA_ERR_DEV_CON	= (1 << 4),
	EDMA_ERR_SERR		= (1 << 5),
	EDMA_ERR_SELF_DIS	= (1 << 7),
	EDMA_ERR_BIST_ASYNC	= (1 << 8),
	EDMA_ERR_CRBQ_PAR	= (1 << 9),
	EDMA_ERR_CRPB_PAR	= (1 << 10),
	EDMA_ERR_INTRL_PAR	= (1 << 11),
	EDMA_ERR_IORDY		= (1 << 12),
	EDMA_ERR_LNK_CTRL_RX	= (0xf << 13),
	EDMA_ERR_LNK_CTRL_RX_2	= (1 << 15),
	EDMA_ERR_LNK_DATA_RX	= (0xf << 17),
	EDMA_ERR_LNK_CTRL_TX	= (0x1f << 21),
	EDMA_ERR_LNK_DATA_TX	= (0x1f << 26),
	EDMA_ERR_TRANS_PROTO	= (1 << 31),
	EDMA_ERR_FATAL		= (EDMA_ERR_D_PAR | EDMA_ERR_PRD_PAR |
				   EDMA_ERR_DEV_DCON | EDMA_ERR_CRBQ_PAR |
				   EDMA_ERR_CRPB_PAR | EDMA_ERR_INTRL_PAR |
				   EDMA_ERR_IORDY | EDMA_ERR_LNK_CTRL_RX_2 |
				   EDMA_ERR_LNK_DATA_RX |
				   EDMA_ERR_LNK_DATA_TX |
				   EDMA_ERR_TRANS_PROTO),

	EDMA_REQ_Q_BASE_HI_OFS	= 0x10,
	EDMA_REQ_Q_IN_PTR_OFS	= 0x14,		/* also contains BASE_LO */

	EDMA_REQ_Q_OUT_PTR_OFS	= 0x18,
	EDMA_REQ_Q_PTR_SHIFT	= 5,

	EDMA_RSP_Q_BASE_HI_OFS	= 0x1c,
	EDMA_RSP_Q_IN_PTR_OFS	= 0x20,
	EDMA_RSP_Q_OUT_PTR_OFS	= 0x24,		/* also contains BASE_LO */
	EDMA_RSP_Q_PTR_SHIFT	= 3,

	EDMA_IORDY_TMOUT	= 0x34,

	/* Host private flags (hp_flags) */
	MV_HP_FLAG_MSI		= (1 << 0),
	MV_HP_ERRATA_50XXB0	= (1 << 1),
	MV_HP_ERRATA_50XXB2	= (1 << 2),
	MV_HP_ERRATA_60X1B2	= (1 << 3),
	MV_HP_ERRATA_60X1C0	= (1 << 4),
	MV_HP_ERRATA_XX42A0	= (1 << 5),
	MV_HP_50XX		= (1 << 6),
	MV_HP_GEN_IIE		= (1 << 7),

	/* Port private flags (pp_flags) */
	MV_PP_FLAG_EDMA_EN	= (1 << 0),
	MV_PP_FLAG_EDMA_DS_ACT	= (1 << 1),

#define IS_50XX(hpriv)	((hpriv)->hp_flags & MV_HP_50XX)
#define IS_60XX(hpriv)	(((hpriv)->hp_flags & MV_HP_50XX) == 0)
#define IS_GEN_I(hpriv)		IS_50XX(hpriv)
#define IS_GEN_II(hpriv)	IS_60XX(hpriv)
#define IS_GEN_IIE(hpriv)	((hpriv)->hp_flags & MV_HP_GEN_IIE)

	/* Our DMA boundary is determined by an ePRD being unable to handle
	 * anything larger than 64KB
	 */
	MV_DMA_BOUNDARY		= 0xffffU,

	EDMA_REQ_Q_BASE_LO_MASK	= 0xfffffc00U,

	EDMA_RSP_Q_BASE_LO_MASK	= 0xffffff00U,
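	/* These masks follow directly from the alignment rules above: the
	 * CRQB queue is 1KB-aligned, so the low 10 bits of its base are zero
	 * and the IN/OUT pointer registers can reuse them (the ring index
	 * lives at bit 5, per EDMA_REQ_Q_PTR_SHIFT); likewise the
	 * 256B-aligned CRPB queue leaves the low 8 bits free (index at
	 * bit 3, per EDMA_RSP_Q_PTR_SHIFT).
	 */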
/* Command ReQuest Block: 32B */

/* Command ResPonse Block: 8B */

/* EDMA Physical Region Descriptor (ePRD); A.K.A. SG */

struct mv_port_priv {
	struct mv_crqb		*crqb;
	dma_addr_t		crqb_dma;
	struct mv_crpb		*crpb;
	dma_addr_t		crpb_dma;
	struct mv_sg		*sg_tbl;
	dma_addr_t		sg_tbl_dma;
	u32			pp_flags;
};

struct mv_port_signal {
	u32			amps;
	u32			pre;
};

struct mv_hw_ops {
	void (*phy_errata)(struct mv_host_priv *hpriv, void __iomem *mmio,
			   unsigned int port);
	void (*enable_leds)(struct mv_host_priv *hpriv, void __iomem *mmio);
	void (*read_preamp)(struct mv_host_priv *hpriv, int idx,
			    void __iomem *mmio);
	int (*reset_hc)(struct mv_host_priv *hpriv, void __iomem *mmio,
			unsigned int n_hc);
	void (*reset_flash)(struct mv_host_priv *hpriv, void __iomem *mmio);
	void (*reset_bus)(struct pci_dev *pdev, void __iomem *mmio);
};

struct mv_host_priv {
	u32			hp_flags;
	struct mv_port_signal	signal[8];
	const struct mv_hw_ops	*ops;
};

static void mv_irq_clear(struct ata_port *ap);
static u32 mv_scr_read(struct ata_port *ap, unsigned int sc_reg_in);
static void mv_scr_write(struct ata_port *ap, unsigned int sc_reg_in, u32 val);
static u32 mv5_scr_read(struct ata_port *ap, unsigned int sc_reg_in);
static void mv5_scr_write(struct ata_port *ap, unsigned int sc_reg_in, u32 val);
static void mv_phy_reset(struct ata_port *ap);
static void __mv_phy_reset(struct ata_port *ap, int can_sleep);
static int mv_port_start(struct ata_port *ap);
static void mv_port_stop(struct ata_port *ap);
static void mv_qc_prep(struct ata_queued_cmd *qc);
static void mv_qc_prep_iie(struct ata_queued_cmd *qc);
static unsigned int mv_qc_issue(struct ata_queued_cmd *qc);
static irqreturn_t mv_interrupt(int irq, void *dev_instance);
static void mv_eng_timeout(struct ata_port *ap);
static int mv_init_one(struct pci_dev *pdev, const struct pci_device_id *ent);

static void mv5_phy_errata(struct mv_host_priv *hpriv, void __iomem *mmio,
			   unsigned int port);
static void mv5_enable_leds(struct mv_host_priv *hpriv, void __iomem *mmio);
static void mv5_read_preamp(struct mv_host_priv *hpriv, int idx,
			    void __iomem *mmio);
static int mv5_reset_hc(struct mv_host_priv *hpriv, void __iomem *mmio,
			unsigned int n_hc);
static void mv5_reset_flash(struct mv_host_priv *hpriv, void __iomem *mmio);
static void mv5_reset_bus(struct pci_dev *pdev, void __iomem *mmio);

static void mv6_phy_errata(struct mv_host_priv *hpriv, void __iomem *mmio,
			   unsigned int port);
static void mv6_enable_leds(struct mv_host_priv *hpriv, void __iomem *mmio);
static void mv6_read_preamp(struct mv_host_priv *hpriv, int idx,
			    void __iomem *mmio);
static int mv6_reset_hc(struct mv_host_priv *hpriv, void __iomem *mmio,
			unsigned int n_hc);
static void mv6_reset_flash(struct mv_host_priv *hpriv, void __iomem *mmio);
static void mv_reset_pci_bus(struct pci_dev *pdev, void __iomem *mmio);
static void mv_channel_reset(struct mv_host_priv *hpriv, void __iomem *mmio,
			     unsigned int port_no);
static void mv_stop_and_reset(struct ata_port *ap);

static struct scsi_host_template mv_sht = {
	.module			= THIS_MODULE,
	.ioctl			= ata_scsi_ioctl,
	.queuecommand		= ata_scsi_queuecmd,
	.can_queue		= MV_USE_Q_DEPTH,
	.this_id		= ATA_SHT_THIS_ID,
	.sg_tablesize		= MV_MAX_SG_CT / 2,
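	/* Halved because each S/G segment can need two ePRDs when it
	 * straddles a 64KB boundary (see mv_fill_sg below), so only half
	 * the table is advertised to the SCSI layer.
	 */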
	.cmd_per_lun		= ATA_SHT_CMD_PER_LUN,
	.emulated		= ATA_SHT_EMULATED,
	.use_clustering		= ATA_SHT_USE_CLUSTERING,
	.proc_name		= DRV_NAME,
	.dma_boundary		= MV_DMA_BOUNDARY,
	.slave_configure	= ata_scsi_slave_config,
	.slave_destroy		= ata_scsi_slave_destroy,
	.bios_param		= ata_std_bios_param,

static const struct ata_port_operations mv5_ops = {
	.port_disable		= ata_port_disable,

	.tf_load		= ata_tf_load,
	.tf_read		= ata_tf_read,
	.check_status		= ata_check_status,
	.exec_command		= ata_exec_command,
	.dev_select		= ata_std_dev_select,

	.phy_reset		= mv_phy_reset,

	.qc_prep		= mv_qc_prep,
	.qc_issue		= mv_qc_issue,
	.data_xfer		= ata_data_xfer,

	.eng_timeout		= mv_eng_timeout,

	.irq_handler		= mv_interrupt,
	.irq_clear		= mv_irq_clear,
	.irq_on			= ata_irq_on,
	.irq_ack		= ata_irq_ack,

	.scr_read		= mv5_scr_read,
	.scr_write		= mv5_scr_write,

	.port_start		= mv_port_start,
	.port_stop		= mv_port_stop,

static const struct ata_port_operations mv6_ops = {
	.port_disable		= ata_port_disable,

	.tf_load		= ata_tf_load,
	.tf_read		= ata_tf_read,
	.check_status		= ata_check_status,
	.exec_command		= ata_exec_command,
	.dev_select		= ata_std_dev_select,

	.phy_reset		= mv_phy_reset,

	.qc_prep		= mv_qc_prep,
	.qc_issue		= mv_qc_issue,
	.data_xfer		= ata_data_xfer,

	.eng_timeout		= mv_eng_timeout,

	.irq_handler		= mv_interrupt,
	.irq_clear		= mv_irq_clear,
	.irq_on			= ata_irq_on,
	.irq_ack		= ata_irq_ack,

	.scr_read		= mv_scr_read,
	.scr_write		= mv_scr_write,

	.port_start		= mv_port_start,
	.port_stop		= mv_port_stop,

static const struct ata_port_operations mv_iie_ops = {
	.port_disable		= ata_port_disable,

	.tf_load		= ata_tf_load,
	.tf_read		= ata_tf_read,
	.check_status		= ata_check_status,
	.exec_command		= ata_exec_command,
	.dev_select		= ata_std_dev_select,

	.phy_reset		= mv_phy_reset,

	.qc_prep		= mv_qc_prep_iie,
	.qc_issue		= mv_qc_issue,
	.data_xfer		= ata_data_xfer,

	.eng_timeout		= mv_eng_timeout,

	.irq_handler		= mv_interrupt,
	.irq_clear		= mv_irq_clear,
	.irq_on			= ata_irq_on,
	.irq_ack		= ata_irq_ack,

	.scr_read		= mv_scr_read,
	.scr_write		= mv_scr_write,

	.port_start		= mv_port_start,
	.port_stop		= mv_port_stop,

static const struct ata_port_info mv_port_info[] = {
		.flags		= MV_COMMON_FLAGS,
		.pio_mask	= 0x1f,	/* pio0-4 */
		.udma_mask	= 0x7f,	/* udma0-6 */
		.port_ops	= &mv5_ops,

		.flags		= (MV_COMMON_FLAGS | MV_FLAG_DUAL_HC),
		.pio_mask	= 0x1f,	/* pio0-4 */
		.udma_mask	= 0x7f,	/* udma0-6 */
		.port_ops	= &mv5_ops,

		.flags		= (MV_COMMON_FLAGS | MV_FLAG_DUAL_HC),
		.pio_mask	= 0x1f,	/* pio0-4 */
		.udma_mask	= 0x7f,	/* udma0-6 */
		.port_ops	= &mv5_ops,

		.flags		= (MV_COMMON_FLAGS | MV_6XXX_FLAGS),
		.pio_mask	= 0x1f,	/* pio0-4 */
		.udma_mask	= 0x7f,	/* udma0-6 */
		.port_ops	= &mv6_ops,

		.flags		= (MV_COMMON_FLAGS | MV_6XXX_FLAGS |
				   MV_FLAG_DUAL_HC),
		.pio_mask	= 0x1f,	/* pio0-4 */
		.udma_mask	= 0x7f,	/* udma0-6 */
		.port_ops	= &mv6_ops,

		.flags		= (MV_COMMON_FLAGS | MV_6XXX_FLAGS),
		.pio_mask	= 0x1f,	/* pio0-4 */
		.udma_mask	= 0x7f,	/* udma0-6 */
		.port_ops	= &mv_iie_ops,

		.flags		= (MV_COMMON_FLAGS | MV_6XXX_FLAGS),
		.pio_mask	= 0x1f,	/* pio0-4 */
		.udma_mask	= 0x7f,	/* udma0-6 */
		.port_ops	= &mv_iie_ops,

static const struct pci_device_id mv_pci_tbl[] = {
	{ PCI_VDEVICE(MARVELL, 0x5040), chip_504x },
	{ PCI_VDEVICE(MARVELL, 0x5041), chip_504x },
	{ PCI_VDEVICE(MARVELL, 0x5080), chip_5080 },
	{ PCI_VDEVICE(MARVELL, 0x5081), chip_508x },

	{ PCI_VDEVICE(MARVELL, 0x6040), chip_604x },
	{ PCI_VDEVICE(MARVELL, 0x6041), chip_604x },
	{ PCI_VDEVICE(MARVELL, 0x6042), chip_6042 },
	{ PCI_VDEVICE(MARVELL, 0x6080), chip_608x },
	{ PCI_VDEVICE(MARVELL, 0x6081), chip_608x },

	{ PCI_VDEVICE(ADAPTEC2, 0x0241), chip_604x },

	{ PCI_VDEVICE(TTI, 0x2310), chip_7042 },

	{ }			/* terminate list */

static struct pci_driver mv_pci_driver = {
	.id_table		= mv_pci_tbl,
	.probe			= mv_init_one,
	.remove			= ata_pci_remove_one,

static const struct mv_hw_ops mv5xxx_ops = {
	.phy_errata		= mv5_phy_errata,
	.enable_leds		= mv5_enable_leds,
	.read_preamp		= mv5_read_preamp,
	.reset_hc		= mv5_reset_hc,
	.reset_flash		= mv5_reset_flash,
	.reset_bus		= mv5_reset_bus,

static const struct mv_hw_ops mv6xxx_ops = {
	.phy_errata		= mv6_phy_errata,
	.enable_leds		= mv6_enable_leds,
	.read_preamp		= mv6_read_preamp,
	.reset_hc		= mv6_reset_hc,
	.reset_flash		= mv6_reset_flash,
	.reset_bus		= mv_reset_pci_bus,

static int msi;		/* Use PCI MSI; either zero (off, default) or non-zero */
static inline void writelfl(unsigned long data, void __iomem *addr)
{
	writel(data, addr);
	(void) readl(addr);	/* flush to avoid PCI posted write */
}

static inline void __iomem *mv_hc_base(void __iomem *base, unsigned int hc)
{
	return (base + MV_SATAHC0_REG_BASE + (hc * MV_SATAHC_REG_SZ));
}

static inline unsigned int mv_hc_from_port(unsigned int port)
{
	return port >> MV_PORT_HC_SHIFT;
}

static inline unsigned int mv_hardport_from_port(unsigned int port)
{
	return port & MV_PORT_MASK;
}

static inline void __iomem *mv_hc_base_from_port(void __iomem *base,
						 unsigned int port)
{
	return mv_hc_base(base, mv_hc_from_port(port));
}

static inline void __iomem *mv_port_base(void __iomem *base, unsigned int port)
{
	return mv_hc_base_from_port(base, port) +
		MV_SATAHC_ARBTR_REG_SZ +
		(mv_hardport_from_port(port) * MV_PORT_REG_SZ);
}
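/* Worked example, assuming 4 ports per HC (MV_PORT_HC_SHIFT == 2,
 * MV_PORT_MASK == 3): port 5 is hard port 1 on HC1, so its registers live
 * at base + 0x20000 (SATAHC0) + 1 * 0x10000 (HC1) + 0x2000 (arbiter)
 * + 1 * 0x2000 = base + 0x34000.
 */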
static inline void __iomem *mv_ap_base(struct ata_port *ap)
{
	return mv_port_base(ap->host->iomap[MV_PRIMARY_BAR], ap->port_no);
}

static inline int mv_get_hc_count(unsigned long port_flags)
{
	return ((port_flags & MV_FLAG_DUAL_HC) ? 2 : 1);
}
static void mv_irq_clear(struct ata_port *ap)

/**
 * mv_start_dma - Enable eDMA engine
 * @base: port base address
 * @pp: port private data
 *
 * Verify the local cache of the eDMA state is accurate with a
 *
 * Inherited from caller.
 */
static void mv_start_dma(void __iomem *base, struct mv_port_priv *pp)
	if (!(MV_PP_FLAG_EDMA_EN & pp->pp_flags)) {
		writelfl(EDMA_EN, base + EDMA_CMD_OFS);
		pp->pp_flags |= MV_PP_FLAG_EDMA_EN;
	}
	WARN_ON(!(EDMA_EN & readl(base + EDMA_CMD_OFS)));

/**
 * mv_stop_dma - Disable eDMA engine
 * @ap: ATA channel to manipulate
 *
 * Verify the local cache of the eDMA state is accurate with a
 *
 * Inherited from caller.
 */
static void mv_stop_dma(struct ata_port *ap)
	void __iomem *port_mmio = mv_ap_base(ap);
	struct mv_port_priv *pp = ap->private_data;

	if (MV_PP_FLAG_EDMA_EN & pp->pp_flags) {
		/* Disable EDMA if active.  The disable bit auto clears. */
		writelfl(EDMA_DS, port_mmio + EDMA_CMD_OFS);
		pp->pp_flags &= ~MV_PP_FLAG_EDMA_EN;
	} else {
		WARN_ON(EDMA_EN & readl(port_mmio + EDMA_CMD_OFS));
	}

	/* now properly wait for the eDMA to stop */
	for (i = 1000; i > 0; i--) {
		reg = readl(port_mmio + EDMA_CMD_OFS);
		if (!(EDMA_EN & reg)) {

	ata_port_printk(ap, KERN_ERR, "Unable to stop eDMA\n");
	/* FIXME: Consider doing a reset here to recover */

static void mv_dump_mem(void __iomem *start, unsigned bytes)
	for (b = 0; b < bytes; ) {
		DPRINTK("%p: ", start + b);
		for (w = 0; b < bytes && w < 4; w++) {
			printk("%08x ", readl(start + b));

static void mv_dump_pci_cfg(struct pci_dev *pdev, unsigned bytes)
	for (b = 0; b < bytes; ) {
		DPRINTK("%02x: ", b);
		for (w = 0; b < bytes && w < 4; w++) {
			(void) pci_read_config_dword(pdev, b, &dw);

static void mv_dump_all_regs(void __iomem *mmio_base, int port,
			     struct pci_dev *pdev)
	void __iomem *hc_base = mv_hc_base(mmio_base,
					   port >> MV_PORT_HC_SHIFT);
	void __iomem *port_base;
	int start_port, num_ports, p, start_hc, num_hcs, hc;

	start_hc = start_port = 0;
	num_ports = 8;		/* should be benign for 4-port devices */
	start_hc = port >> MV_PORT_HC_SHIFT;
	num_ports = num_hcs = 1;

	DPRINTK("All registers for port(s) %u-%u:\n", start_port,
		num_ports > 1 ? num_ports - 1 : start_port);

	DPRINTK("PCI config space regs:\n");
	mv_dump_pci_cfg(pdev, 0x68);

	DPRINTK("PCI regs:\n");
	mv_dump_mem(mmio_base+0xc00, 0x3c);
	mv_dump_mem(mmio_base+0xd00, 0x34);
	mv_dump_mem(mmio_base+0xf00, 0x4);
	mv_dump_mem(mmio_base+0x1d00, 0x6c);

	for (hc = start_hc; hc < start_hc + num_hcs; hc++) {
		hc_base = mv_hc_base(mmio_base, hc);
		DPRINTK("HC regs (HC %i):\n", hc);
		mv_dump_mem(hc_base, 0x1c);

	for (p = start_port; p < start_port + num_ports; p++) {
		port_base = mv_port_base(mmio_base, p);
		DPRINTK("EDMA regs (port %i):\n", p);
		mv_dump_mem(port_base, 0x54);
		DPRINTK("SATA regs (port %i):\n", p);
		mv_dump_mem(port_base+0x300, 0x60);

static unsigned int mv_scr_offset(unsigned int sc_reg_in)
		ofs = SATA_STATUS_OFS + (sc_reg_in * sizeof(u32));
		ofs = SATA_ACTIVE_OFS;	/* active is not with the others */

static u32 mv_scr_read(struct ata_port *ap, unsigned int sc_reg_in)
	unsigned int ofs = mv_scr_offset(sc_reg_in);

	if (0xffffffffU != ofs) {
		return readl(mv_ap_base(ap) + ofs);

static void mv_scr_write(struct ata_port *ap, unsigned int sc_reg_in, u32 val)
	unsigned int ofs = mv_scr_offset(sc_reg_in);

	if (0xffffffffU != ofs) {
		writelfl(val, mv_ap_base(ap) + ofs);

static void mv_edma_cfg(struct mv_host_priv *hpriv, void __iomem *port_mmio)
	u32 cfg = readl(port_mmio + EDMA_CFG_OFS);

	/* set up non-NCQ EDMA configuration */
	cfg &= ~(1 << 9);	/* disable EDMA queuing */
	if (IS_GEN_I(hpriv)) {
		cfg &= ~0x1f;		/* clear queue depth */
		cfg |= (1 << 8);	/* enab config burst size mask */

	else if (IS_GEN_II(hpriv)) {
		cfg &= ~0x1f;		/* clear queue depth */
		cfg |= EDMA_CFG_RD_BRST_EXT | EDMA_CFG_WR_BUFF_LEN;
		cfg &= ~(EDMA_CFG_NCQ | EDMA_CFG_NCQ_GO_ON_ERR); /* clear NCQ */

	else if (IS_GEN_IIE(hpriv)) {
		cfg |= (1 << 23);	/* do not mask PM field in rx'd FIS */
		cfg |= (1 << 22);	/* enab 4-entry host queue cache */
		cfg &= ~(1 << 19);	/* dis 128-entry queue (for now?) */
		cfg |= (1 << 18);	/* enab early completion */
		cfg |= (1 << 17);	/* enab cut-through (dis stor&forwrd) */
		cfg &= ~(1 << 16);	/* dis FIS-based switching (for now) */
		cfg &= ~(EDMA_CFG_NCQ | EDMA_CFG_NCQ_GO_ON_ERR); /* clear NCQ */

	writelfl(cfg, port_mmio + EDMA_CFG_OFS);

/**
 * mv_port_start - Port specific init/start routine.
 * @ap: ATA channel to manipulate
 *
 * Allocate and point to DMA memory, init port private memory,
 *
 * Inherited from caller.
 */
static int mv_port_start(struct ata_port *ap)
	struct device *dev = ap->host->dev;
	struct mv_host_priv *hpriv = ap->host->private_data;
	struct mv_port_priv *pp;
	void __iomem *port_mmio = mv_ap_base(ap);

	pp = devm_kzalloc(dev, sizeof(*pp), GFP_KERNEL);

	mem = dmam_alloc_coherent(dev, MV_PORT_PRIV_DMA_SZ, &mem_dma,
				  GFP_KERNEL);

	memset(mem, 0, MV_PORT_PRIV_DMA_SZ);

	rc = ata_pad_alloc(ap, dev);

	/* First item in chunk of DMA memory:
	 * 32-slot command request table (CRQB), 32 bytes each in size
	 */
	pp->crqb_dma = mem_dma;
	mem_dma += MV_CRQB_Q_SZ;

	/* 32-slot command response table (CRPB), 8 bytes each in size */
	pp->crpb_dma = mem_dma;
	mem_dma += MV_CRPB_Q_SZ;

	/* Table of scatter-gather descriptors (ePRD), 16 bytes each */
	pp->sg_tbl_dma = mem_dma;

	mv_edma_cfg(hpriv, port_mmio);

	writel((pp->crqb_dma >> 16) >> 16, port_mmio + EDMA_REQ_Q_BASE_HI_OFS);
	writelfl(pp->crqb_dma & EDMA_REQ_Q_BASE_LO_MASK,
		 port_mmio + EDMA_REQ_Q_IN_PTR_OFS);

	if (hpriv->hp_flags & MV_HP_ERRATA_XX42A0)
		writelfl(pp->crqb_dma & 0xffffffff,
			 port_mmio + EDMA_REQ_Q_OUT_PTR_OFS);
	else
		writelfl(0, port_mmio + EDMA_REQ_Q_OUT_PTR_OFS);

	writel((pp->crpb_dma >> 16) >> 16, port_mmio + EDMA_RSP_Q_BASE_HI_OFS);

	if (hpriv->hp_flags & MV_HP_ERRATA_XX42A0)
		writelfl(pp->crpb_dma & 0xffffffff,
			 port_mmio + EDMA_RSP_Q_IN_PTR_OFS);
	else
		writelfl(0, port_mmio + EDMA_RSP_Q_IN_PTR_OFS);

	writelfl(pp->crpb_dma & EDMA_RSP_Q_BASE_LO_MASK,
		 port_mmio + EDMA_RSP_Q_OUT_PTR_OFS);
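	/* Note the "(x >> 16) >> 16" idiom used above: it extracts the high
	 * 32 bits of a dma_addr_t without invoking undefined behaviour when
	 * dma_addr_t is itself only 32 bits wide (a plain ">> 32" on a
	 * 32-bit type is undefined in C).
	 */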
	/* Don't turn on EDMA here...do it before DMA commands only.  Else
	 * we'll be unable to send non-data, PIO, etc due to restricted access
	 */
	ap->private_data = pp;
	return 0;

/**
 * mv_port_stop - Port specific cleanup/stop routine.
 * @ap: ATA channel to manipulate
 *
 * Stop DMA, cleanup port memory.
 *
 * This routine uses the host lock to protect the DMA stop.
 */
static void mv_port_stop(struct ata_port *ap)
	unsigned long flags;

	spin_lock_irqsave(&ap->host->lock, flags);
	mv_stop_dma(ap);
	spin_unlock_irqrestore(&ap->host->lock, flags);
/**
 * mv_fill_sg - Fill out the Marvell ePRD (scatter gather) entries
 * @qc: queued command whose SG list to source from
 *
 * Populate the SG list and mark the last entry.
 *
 * Inherited from caller.
 */
static void mv_fill_sg(struct ata_queued_cmd *qc)
	struct mv_port_priv *pp = qc->ap->private_data;
	struct scatterlist *sg;

	ata_for_each_sg(sg, qc) {
		dma_addr_t addr;
		u32 sg_len, len, offset;

		addr = sg_dma_address(sg);
		sg_len = sg_dma_len(sg);

		offset = addr & MV_DMA_BOUNDARY;
		len = sg_len;
		if ((offset + sg_len) > 0x10000)
			len = 0x10000 - offset;

		pp->sg_tbl[i].addr = cpu_to_le32(addr & 0xffffffff);
		pp->sg_tbl[i].addr_hi = cpu_to_le32((addr >> 16) >> 16);
		pp->sg_tbl[i].flags_size = cpu_to_le32(len & 0xffff);

		if (!sg_len && ata_sg_is_last(sg, qc))
			pp->sg_tbl[i].flags_size |= cpu_to_le32(EPRD_FLAG_END_OF_TBL);
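		/* Boundary math, worked through: addr == 0x1fff0 and
		 * sg_len == 0x30 give offset == 0xfff0, so len is clamped to
		 * 0x10000 - 0xfff0 == 0x10 bytes; the remaining 0x20 bytes
		 * must land in a following ePRD entry, so that no single
		 * entry ever crosses a 64KB boundary (MV_DMA_BOUNDARY).
		 */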
static inline unsigned mv_inc_q_index(unsigned index)
{
	return (index + 1) & MV_MAX_Q_DEPTH_MASK;
}
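/* e.g. with a queue depth of 32, mv_inc_q_index(31) == (32 & 31) == 0:
 * the ring index wraps.
 */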
static inline void mv_crqb_pack_cmd(__le16 *cmdw, u8 data, u8 addr, unsigned last)
{
	u16 tmp = data | (addr << CRQB_CMD_ADDR_SHIFT) | CRQB_CMD_CS |
		(last ? CRQB_CMD_LAST : 0);
	*cmdw = cpu_to_le16(tmp);
}
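/* Example, assuming libata's ATA_REG_CMD == 7: packing an IDENTIFY DEVICE
 * opcode (0xec) as the final word yields
 * 0xec | (7 << 8) | CRQB_CMD_CS | CRQB_CMD_LAST == 0x97ec.
 */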
/**
 * mv_qc_prep - Host specific command preparation.
 * @qc: queued command to prepare
 *
 * This routine simply redirects to the general purpose routine
 * if command is not DMA.  Else, it handles prep of the CRQB
 * (command request block), does some sanity checking, and calls
 * the SG load routine.
 *
 * Inherited from caller.
 */
static void mv_qc_prep(struct ata_queued_cmd *qc)
	struct ata_port *ap = qc->ap;
	struct mv_port_priv *pp = ap->private_data;
	struct ata_taskfile *tf;

	if (ATA_PROT_DMA != qc->tf.protocol)
		return;

	/* Fill in command request block */
	if (!(qc->tf.flags & ATA_TFLAG_WRITE))
		flags |= CRQB_FLAG_READ;
	WARN_ON(MV_MAX_Q_DEPTH <= qc->tag);
	flags |= qc->tag << CRQB_TAG_SHIFT;

	/* get current queue index from hardware */
	in_index = (readl(mv_ap_base(ap) + EDMA_REQ_Q_IN_PTR_OFS)
		   >> EDMA_REQ_Q_PTR_SHIFT) & MV_MAX_Q_DEPTH_MASK;

	pp->crqb[in_index].sg_addr =
		cpu_to_le32(pp->sg_tbl_dma & 0xffffffff);
	pp->crqb[in_index].sg_addr_hi =
		cpu_to_le32((pp->sg_tbl_dma >> 16) >> 16);
	pp->crqb[in_index].ctrl_flags = cpu_to_le16(flags);

	cw = &pp->crqb[in_index].ata_cmd[0];
	/* Sadly, the CRQB cannot accommodate all registers--there are
	 * only 11 bytes...so we must pick and choose required
	 * registers based on the command.  So, we drop feature and
	 * hob_feature for [RW] DMA commands, but they are needed for
	 * NCQ.  NCQ will drop hob_nsect.
	 */
	switch (tf->command) {
	case ATA_CMD_READ_EXT:
	case ATA_CMD_WRITE_EXT:
	case ATA_CMD_WRITE_FUA_EXT:
		mv_crqb_pack_cmd(cw++, tf->hob_nsect, ATA_REG_NSECT, 0);
#ifdef LIBATA_NCQ		/* FIXME: remove this line when NCQ added */
	case ATA_CMD_FPDMA_READ:
	case ATA_CMD_FPDMA_WRITE:
		mv_crqb_pack_cmd(cw++, tf->hob_feature, ATA_REG_FEATURE, 0);
		mv_crqb_pack_cmd(cw++, tf->feature, ATA_REG_FEATURE, 0);
#endif				/* FIXME: remove this line when NCQ added */
	default:
		/* The only other commands EDMA supports in non-queued and
		 * non-NCQ mode are: [RW] STREAM DMA and W DMA FUA EXT, none
		 * of which are defined/used by Linux.  If we get here, this
		 * driver needs work.
		 *
		 * FIXME: modify libata to give qc_prep a return value and
		 * return error here.
		 */
		BUG_ON(tf->command);

	mv_crqb_pack_cmd(cw++, tf->nsect, ATA_REG_NSECT, 0);
	mv_crqb_pack_cmd(cw++, tf->hob_lbal, ATA_REG_LBAL, 0);
	mv_crqb_pack_cmd(cw++, tf->lbal, ATA_REG_LBAL, 0);
	mv_crqb_pack_cmd(cw++, tf->hob_lbam, ATA_REG_LBAM, 0);
	mv_crqb_pack_cmd(cw++, tf->lbam, ATA_REG_LBAM, 0);
	mv_crqb_pack_cmd(cw++, tf->hob_lbah, ATA_REG_LBAH, 0);
	mv_crqb_pack_cmd(cw++, tf->lbah, ATA_REG_LBAH, 0);
	mv_crqb_pack_cmd(cw++, tf->device, ATA_REG_DEVICE, 0);
	mv_crqb_pack_cmd(cw++, tf->command, ATA_REG_CMD, 1);	/* last */

	if (!(qc->flags & ATA_QCFLAG_DMAMAP))
		return;
	mv_fill_sg(qc);
/**
 * mv_qc_prep_iie - Host specific command preparation.
 * @qc: queued command to prepare
 *
 * This routine simply redirects to the general purpose routine
 * if command is not DMA.  Else, it handles prep of the CRQB
 * (command request block), does some sanity checking, and calls
 * the SG load routine.
 *
 * Inherited from caller.
 */
static void mv_qc_prep_iie(struct ata_queued_cmd *qc)
	struct ata_port *ap = qc->ap;
	struct mv_port_priv *pp = ap->private_data;
	struct mv_crqb_iie *crqb;
	struct ata_taskfile *tf;

	if (ATA_PROT_DMA != qc->tf.protocol)
		return;

	/* Fill in Gen IIE command request block */
	if (!(qc->tf.flags & ATA_TFLAG_WRITE))
		flags |= CRQB_FLAG_READ;

	WARN_ON(MV_MAX_Q_DEPTH <= qc->tag);
	flags |= qc->tag << CRQB_TAG_SHIFT;

	/* get current queue index from hardware */
	in_index = (readl(mv_ap_base(ap) + EDMA_REQ_Q_IN_PTR_OFS)
		   >> EDMA_REQ_Q_PTR_SHIFT) & MV_MAX_Q_DEPTH_MASK;

	crqb = (struct mv_crqb_iie *) &pp->crqb[in_index];
	crqb->addr = cpu_to_le32(pp->sg_tbl_dma & 0xffffffff);
	crqb->addr_hi = cpu_to_le32((pp->sg_tbl_dma >> 16) >> 16);
	crqb->flags = cpu_to_le32(flags);

	crqb->ata_cmd[0] = cpu_to_le32(
			(tf->command << 16) |
			(tf->feature << 24)
		);
	crqb->ata_cmd[1] = cpu_to_le32(
			(tf->lbal << 0) |
			(tf->lbam << 8) |
			(tf->lbah << 16) |
			(tf->device << 24)
		);
	crqb->ata_cmd[2] = cpu_to_le32(
			(tf->hob_lbal << 0) |
			(tf->hob_lbam << 8) |
			(tf->hob_lbah << 16) |
			(tf->hob_feature << 24)
		);
	crqb->ata_cmd[3] = cpu_to_le32(
			(tf->nsect << 0) |
			(tf->hob_nsect << 8)
		);

	if (!(qc->flags & ATA_QCFLAG_DMAMAP))
		return;
	mv_fill_sg(qc);
/**
 * mv_qc_issue - Initiate a command to the host
 * @qc: queued command to start
 *
 * This routine simply redirects to the general purpose routine
 * if command is not DMA.  Else, it sanity checks our local
 * caches of the request producer/consumer indices then enables
 * DMA and bumps the request producer index.
 *
 * Inherited from caller.
 */
static unsigned int mv_qc_issue(struct ata_queued_cmd *qc)
	void __iomem *port_mmio = mv_ap_base(qc->ap);
	struct mv_port_priv *pp = qc->ap->private_data;

	if (ATA_PROT_DMA != qc->tf.protocol) {
		/* We're about to send a non-EDMA capable command to the
		 * port.  Turn off EDMA so there won't be problems accessing
		 * shadow block, etc registers.
		 */
		mv_stop_dma(qc->ap);
		return ata_qc_issue_prot(qc);
	}

	in_ptr = readl(port_mmio + EDMA_REQ_Q_IN_PTR_OFS);
	in_index = (in_ptr >> EDMA_REQ_Q_PTR_SHIFT) & MV_MAX_Q_DEPTH_MASK;

	/* until we do queuing, the queue should be empty at this point */
	WARN_ON(in_index != ((readl(port_mmio + EDMA_REQ_Q_OUT_PTR_OFS)
			     >> EDMA_REQ_Q_PTR_SHIFT) & MV_MAX_Q_DEPTH_MASK));

	in_index = mv_inc_q_index(in_index);	/* now incr producer index */

	mv_start_dma(port_mmio, pp);
	/* and write the request in pointer to kick the EDMA to life */
	in_ptr &= EDMA_REQ_Q_BASE_LO_MASK;
	in_ptr |= in_index << EDMA_REQ_Q_PTR_SHIFT;
	writelfl(in_ptr, port_mmio + EDMA_REQ_Q_IN_PTR_OFS);

	return 0;
/**
 * mv_get_crpb_status - get status from most recently completed cmd
 * @ap: ATA channel to manipulate
 *
 * This routine is for use when the port is in DMA mode, when it
 * will be using the CRPB (command response block) method of
 * returning command completion information.  We check indices
 * are good, grab status, and bump the response consumer index to
 * prove that we're up to date.
 *
 * Inherited from caller.
 */
static u8 mv_get_crpb_status(struct ata_port *ap)
	void __iomem *port_mmio = mv_ap_base(ap);
	struct mv_port_priv *pp = ap->private_data;

	out_ptr = readl(port_mmio + EDMA_RSP_Q_OUT_PTR_OFS);
	out_index = (out_ptr >> EDMA_RSP_Q_PTR_SHIFT) & MV_MAX_Q_DEPTH_MASK;

	ata_status = le16_to_cpu(pp->crpb[out_index].flags)
		>> CRPB_FLAG_STATUS_SHIFT;

	/* increment our consumer index... */
	out_index = mv_inc_q_index(out_index);

	/* and, until we do NCQ, there should only be 1 CRPB waiting */
	WARN_ON(out_index != ((readl(port_mmio + EDMA_RSP_Q_IN_PTR_OFS)
			      >> EDMA_RSP_Q_PTR_SHIFT) & MV_MAX_Q_DEPTH_MASK));

	/* write out our inc'd consumer index so EDMA knows we're caught up */
	out_ptr &= EDMA_RSP_Q_BASE_LO_MASK;
	out_ptr |= out_index << EDMA_RSP_Q_PTR_SHIFT;
	writelfl(out_ptr, port_mmio + EDMA_RSP_Q_OUT_PTR_OFS);
	/* Return ATA status register for completed CRPB */
	return ata_status;
/**
 * mv_err_intr - Handle error interrupts on the port
 * @ap: ATA channel to manipulate
 * @reset_allowed: bool: 0 == don't trigger from reset here
 *
 * In most cases, just clear the interrupt and move on.  However,
 * some cases require an eDMA reset, which is done right before
 * the COMRESET in mv_phy_reset().  The SERR case requires a
 * clear of pending errors in the SATA SERROR register.  Finally,
 * if the port disabled DMA, update our cached copy to match.
 *
 * Inherited from caller.
 */
static void mv_err_intr(struct ata_port *ap, int reset_allowed)
	void __iomem *port_mmio = mv_ap_base(ap);
	u32 edma_err_cause, serr = 0;

	edma_err_cause = readl(port_mmio + EDMA_ERR_IRQ_CAUSE_OFS);

	if (EDMA_ERR_SERR & edma_err_cause) {
		sata_scr_read(ap, SCR_ERROR, &serr);
		sata_scr_write_flush(ap, SCR_ERROR, serr);

	if (EDMA_ERR_SELF_DIS & edma_err_cause) {
		struct mv_port_priv *pp = ap->private_data;
		pp->pp_flags &= ~MV_PP_FLAG_EDMA_EN;

	DPRINTK(KERN_ERR "ata%u: port error; EDMA err cause: 0x%08x "
		"SERR: 0x%08x\n", ap->print_id, edma_err_cause, serr);

	/* Clear EDMA now that SERR cleanup done */
	writelfl(0, port_mmio + EDMA_ERR_IRQ_CAUSE_OFS);

	/* check for fatal here and recover if needed */
	if (reset_allowed && (EDMA_ERR_FATAL & edma_err_cause))
		mv_stop_and_reset(ap);

/**
 * mv_host_intr - Handle all interrupts on the given host controller
 * @host: host specific structure
 * @relevant: port error bits relevant to this host controller
 * @hc: which host controller we're to look at
 *
 * Read then write clear the HC interrupt status then walk each
 * port connected to the HC and see if it needs servicing.  Port
 * success ints are reported in the HC interrupt status reg, the
 * port error ints are reported in the higher level main
 * interrupt status register and thus are passed in via the
 * 'relevant' argument.
 *
 * Inherited from caller.
 */
static void mv_host_intr(struct ata_host *host, u32 relevant, unsigned int hc)
	void __iomem *mmio = host->iomap[MV_PRIMARY_BAR];
	void __iomem *hc_mmio = mv_hc_base(mmio, hc);
	struct ata_queued_cmd *qc;
	int shift, port, port0, hard_port, handled;
	unsigned int err_mask;

		port0 = MV_PORTS_PER_HC;

	/* we'll need the HC success int register in most cases */
	hc_irq_cause = readl(hc_mmio + HC_IRQ_CAUSE_OFS);
	writelfl(~hc_irq_cause, hc_mmio + HC_IRQ_CAUSE_OFS);

	VPRINTK("ENTER, hc%u relevant=0x%08x HC IRQ cause=0x%08x\n",
		hc, relevant, hc_irq_cause);

	for (port = port0; port < port0 + MV_PORTS_PER_HC; port++) {
		struct ata_port *ap = host->ports[port];
		struct mv_port_priv *pp = ap->private_data;

		hard_port = mv_hardport_from_port(port); /* range 0..3 */
		handled = 0;	/* ensure ata_status is set if handled++ */

		/* Note that DEV_IRQ might happen spuriously during EDMA,
		 * and should be ignored in such cases.
		 * The cause of this is still under investigation.
		 */
		if (pp->pp_flags & MV_PP_FLAG_EDMA_EN) {
			/* EDMA: check for response queue interrupt */
			if ((CRPB_DMA_DONE << hard_port) & hc_irq_cause) {
				ata_status = mv_get_crpb_status(ap);

			/* PIO: check for device (drive) interrupt */
			if ((DEV_IRQ << hard_port) & hc_irq_cause) {
				ata_status = readb(ap->ioaddr.status_addr);

				/* ignore spurious intr if drive still BUSY */
				if (ata_status & ATA_BUSY) {
		if (ap && (ap->flags & ATA_FLAG_DISABLED))
			continue;

		err_mask = ac_err_mask(ata_status);
		shift = port << 1;	/* (port * 2) */
		if (port >= MV_PORTS_PER_HC) {
			shift++;	/* skip bit 8 in the HC Main IRQ reg */
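			/* e.g. port 4, the first port of HC1:
			 * shift = (4 << 1) + 1 == 9, which lines up with
			 * HC1's bits starting at HC_SHIFT == 9 in the main
			 * cause register.
			 */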
		if ((PORT0_ERR << shift) & relevant) {
			err_mask |= AC_ERR_OTHER;

		qc = ata_qc_from_tag(ap, ap->active_tag);
		if (qc && (qc->flags & ATA_QCFLAG_ACTIVE)) {
			VPRINTK("port %u IRQ found for qc, "
				"ata_status 0x%x\n", port, ata_status);
			/* mark qc status appropriately */
			if (!(qc->tf.flags & ATA_TFLAG_POLLING)) {
				qc->err_mask |= err_mask;
				ata_qc_complete(qc);

/**
 * @dev_instance: private data; in this case the host structure
 *
 * Read the read only register to determine if any host
 * controllers have pending interrupts.  If so, call lower level
 * routine to handle.  Also check for PCI errors which are only
 *
 * This routine holds the host lock while processing pending
 */
static irqreturn_t mv_interrupt(int irq, void *dev_instance)
	struct ata_host *host = dev_instance;
	unsigned int hc, handled = 0, n_hcs;
	void __iomem *mmio = host->iomap[MV_PRIMARY_BAR];
	struct mv_host_priv *hpriv;

	irq_stat = readl(mmio + HC_MAIN_IRQ_CAUSE_OFS);

	/* check the cases where we either have nothing pending or have read
	 * a bogus register value which can indicate HW removal or PCI fault
	 */
	if (!irq_stat || (0xffffffffU == irq_stat)) {

	n_hcs = mv_get_hc_count(host->ports[0]->flags);
	spin_lock(&host->lock);

	for (hc = 0; hc < n_hcs; hc++) {
		u32 relevant = irq_stat & (HC0_IRQ_PEND << (hc * HC_SHIFT));
			mv_host_intr(host, relevant, hc);

	hpriv = host->private_data;
	if (IS_60XX(hpriv)) {
		/* deal with the interrupt coalescing bits */
		if (irq_stat & (TRAN_LO_DONE | TRAN_HI_DONE | PORTS_0_7_COAL_DONE)) {
			writelfl(0, mmio + MV_IRQ_COAL_CAUSE_LO);
			writelfl(0, mmio + MV_IRQ_COAL_CAUSE_HI);
			writelfl(0, mmio + MV_IRQ_COAL_CAUSE);

	if (PCI_ERR & irq_stat) {
		printk(KERN_ERR DRV_NAME ": PCI ERROR; PCI IRQ cause=0x%08x\n",
		       readl(mmio + PCI_IRQ_CAUSE_OFS));

		DPRINTK("All regs @ PCI error\n");
		mv_dump_all_regs(mmio, -1, to_pci_dev(host->dev));

		writelfl(0, mmio + PCI_IRQ_CAUSE_OFS);

	spin_unlock(&host->lock);

	return IRQ_RETVAL(handled);

static void __iomem *mv5_phy_base(void __iomem *mmio, unsigned int port)
	void __iomem *hc_mmio = mv_hc_base_from_port(mmio, port);
	unsigned long ofs = (mv_hardport_from_port(port) + 1) * 0x100UL;

	return hc_mmio + ofs;

static unsigned int mv5_scr_offset(unsigned int sc_reg_in)
	switch (sc_reg_in) {
		ofs = sc_reg_in * sizeof(u32);

static u32 mv5_scr_read(struct ata_port *ap, unsigned int sc_reg_in)
	void __iomem *mmio = ap->host->iomap[MV_PRIMARY_BAR];
	void __iomem *addr = mv5_phy_base(mmio, ap->port_no);
	unsigned int ofs = mv5_scr_offset(sc_reg_in);

	if (ofs != 0xffffffffU)
		return readl(addr + ofs);

static void mv5_scr_write(struct ata_port *ap, unsigned int sc_reg_in, u32 val)
	void __iomem *mmio = ap->host->iomap[MV_PRIMARY_BAR];
	void __iomem *addr = mv5_phy_base(mmio, ap->port_no);
	unsigned int ofs = mv5_scr_offset(sc_reg_in);

	if (ofs != 0xffffffffU)
		writelfl(val, addr + ofs);

static void mv5_reset_bus(struct pci_dev *pdev, void __iomem *mmio)
	pci_read_config_byte(pdev, PCI_REVISION_ID, &rev_id);

	early_5080 = (pdev->device == 0x5080) && (rev_id == 0);

		u32 tmp = readl(mmio + MV_PCI_EXP_ROM_BAR_CTL);
		writel(tmp, mmio + MV_PCI_EXP_ROM_BAR_CTL);

	mv_reset_pci_bus(pdev, mmio);

static void mv5_reset_flash(struct mv_host_priv *hpriv, void __iomem *mmio)
	writel(0x0fcfffff, mmio + MV_FLASH_CTL);
static void mv5_read_preamp(struct mv_host_priv *hpriv, int idx,
			    void __iomem *mmio)
	void __iomem *phy_mmio = mv5_phy_base(mmio, idx);

	tmp = readl(phy_mmio + MV5_PHY_MODE);

	hpriv->signal[idx].pre = tmp & 0x1800;	/* bits 12:11 */
	hpriv->signal[idx].amps = tmp & 0xe0;	/* bits 7:5 */

static void mv5_enable_leds(struct mv_host_priv *hpriv, void __iomem *mmio)
	writel(0, mmio + MV_GPIO_PORT_CTL);

	/* FIXME: handle MV_HP_ERRATA_50XXB2 errata */

	tmp = readl(mmio + MV_PCI_EXP_ROM_BAR_CTL);
	writel(tmp, mmio + MV_PCI_EXP_ROM_BAR_CTL);

static void mv5_phy_errata(struct mv_host_priv *hpriv, void __iomem *mmio,
			   unsigned int port)
	void __iomem *phy_mmio = mv5_phy_base(mmio, port);
	const u32 mask = (1<<12) | (1<<11) | (1<<7) | (1<<6) | (1<<5);
	int fix_apm_sq = (hpriv->hp_flags & MV_HP_ERRATA_50XXB0);

	tmp = readl(phy_mmio + MV5_LT_MODE);
	writel(tmp, phy_mmio + MV5_LT_MODE);

	tmp = readl(phy_mmio + MV5_PHY_CTL);
	writel(tmp, phy_mmio + MV5_PHY_CTL);

	tmp = readl(phy_mmio + MV5_PHY_MODE);
	tmp |= hpriv->signal[port].pre;
	tmp |= hpriv->signal[port].amps;
	writel(tmp, phy_mmio + MV5_PHY_MODE);

#define ZERO(reg) writel(0, port_mmio + (reg))
static void mv5_reset_hc_port(struct mv_host_priv *hpriv, void __iomem *mmio,
			      unsigned int port)
	void __iomem *port_mmio = mv_port_base(mmio, port);

	writelfl(EDMA_DS, port_mmio + EDMA_CMD_OFS);

	mv_channel_reset(hpriv, mmio, port);

	ZERO(0x028);	/* command */
	writel(0x11f, port_mmio + EDMA_CFG_OFS);
	ZERO(0x004);	/* timer */
	ZERO(0x008);	/* irq err cause */
	ZERO(0x00c);	/* irq err mask */
	ZERO(0x010);	/* rq bah */
	ZERO(0x014);	/* rq inp */
	ZERO(0x018);	/* rq outp */
	ZERO(0x01c);	/* respq bah */
	ZERO(0x024);	/* respq outp */
	ZERO(0x020);	/* respq inp */
	ZERO(0x02c);	/* test control */
	writel(0xbc, port_mmio + EDMA_IORDY_TMOUT);
#undef ZERO

#define ZERO(reg) writel(0, hc_mmio + (reg))
static void mv5_reset_one_hc(struct mv_host_priv *hpriv, void __iomem *mmio,
			     unsigned int hc)
	void __iomem *hc_mmio = mv_hc_base(mmio, hc);

	tmp = readl(hc_mmio + 0x20);
	writel(tmp, hc_mmio + 0x20);
#undef ZERO

static int mv5_reset_hc(struct mv_host_priv *hpriv, void __iomem *mmio,
			unsigned int n_hc)
	unsigned int hc, port;

	for (hc = 0; hc < n_hc; hc++) {
		for (port = 0; port < MV_PORTS_PER_HC; port++)
			mv5_reset_hc_port(hpriv, mmio,
					  (hc * MV_PORTS_PER_HC) + port);

		mv5_reset_one_hc(hpriv, mmio, hc);

#define ZERO(reg) writel(0, mmio + (reg))
static void mv_reset_pci_bus(struct pci_dev *pdev, void __iomem *mmio)
	tmp = readl(mmio + MV_PCI_MODE);
	writel(tmp, mmio + MV_PCI_MODE);

	ZERO(MV_PCI_DISC_TIMER);
	ZERO(MV_PCI_MSI_TRIGGER);
	writel(0x000100ff, mmio + MV_PCI_XBAR_TMOUT);
	ZERO(HC_MAIN_IRQ_MASK_OFS);
	ZERO(MV_PCI_SERR_MASK);
	ZERO(PCI_IRQ_CAUSE_OFS);
	ZERO(PCI_IRQ_MASK_OFS);
	ZERO(MV_PCI_ERR_LOW_ADDRESS);
	ZERO(MV_PCI_ERR_HIGH_ADDRESS);
	ZERO(MV_PCI_ERR_ATTRIBUTE);
	ZERO(MV_PCI_ERR_COMMAND);
#undef ZERO

static void mv6_reset_flash(struct mv_host_priv *hpriv, void __iomem *mmio)
	mv5_reset_flash(hpriv, mmio);

	tmp = readl(mmio + MV_GPIO_PORT_CTL);
	tmp |= (1 << 5) | (1 << 6);
	writel(tmp, mmio + MV_GPIO_PORT_CTL);

/**
 * mv6_reset_hc - Perform the 6xxx global soft reset
 * @mmio: base address of the HBA
 *
 * This routine only applies to 6xxx parts.
 *
 * Inherited from caller.
 */
static int mv6_reset_hc(struct mv_host_priv *hpriv, void __iomem *mmio,
			unsigned int n_hc)
	void __iomem *reg = mmio + PCI_MAIN_CMD_STS_OFS;

	/* Following procedure defined in PCI "main command and status
	 * register" table.
	 */
	writel(t | STOP_PCI_MASTER, reg);

	for (i = 0; i < 1000; i++) {
		if (PCI_MASTER_EMPTY & t) {

	if (!(PCI_MASTER_EMPTY & t)) {
		printk(KERN_ERR DRV_NAME ": PCI master won't flush\n");

	writel(t | GLOB_SFT_RST, reg);
	} while (!(GLOB_SFT_RST & t) && (i-- > 0));

	if (!(GLOB_SFT_RST & t)) {
		printk(KERN_ERR DRV_NAME ": can't set global reset\n");

	/* clear reset and *reenable the PCI master* (not mentioned in spec) */
	writel(t & ~(GLOB_SFT_RST | STOP_PCI_MASTER), reg);
	} while ((GLOB_SFT_RST & t) && (i-- > 0));

	if (GLOB_SFT_RST & t) {
		printk(KERN_ERR DRV_NAME ": can't clear global reset\n");

static void mv6_read_preamp(struct mv_host_priv *hpriv, int idx,
			    void __iomem *mmio)
	void __iomem *port_mmio;

	tmp = readl(mmio + MV_RESET_CFG);
	if ((tmp & (1 << 0)) == 0) {
		hpriv->signal[idx].amps = 0x7 << 8;
		hpriv->signal[idx].pre = 0x1 << 5;

	port_mmio = mv_port_base(mmio, idx);
	tmp = readl(port_mmio + PHY_MODE2);

	hpriv->signal[idx].amps = tmp & 0x700;	/* bits 10:8 */
	hpriv->signal[idx].pre = tmp & 0xe0;	/* bits 7:5 */

static void mv6_enable_leds(struct mv_host_priv *hpriv, void __iomem *mmio)
	writel(0x00000060, mmio + MV_GPIO_PORT_CTL);

static void mv6_phy_errata(struct mv_host_priv *hpriv, void __iomem *mmio,
			   unsigned int port)
	void __iomem *port_mmio = mv_port_base(mmio, port);
	u32 hp_flags = hpriv->hp_flags;
	int fix_phy_mode2 =
		hp_flags & (MV_HP_ERRATA_60X1B2 | MV_HP_ERRATA_60X1C0);
	int fix_phy_mode4 =
		hp_flags & (MV_HP_ERRATA_60X1B2 | MV_HP_ERRATA_60X1C0);
	if (fix_phy_mode2) {
		m2 = readl(port_mmio + PHY_MODE2);
		writel(m2, port_mmio + PHY_MODE2);

		m2 = readl(port_mmio + PHY_MODE2);
		m2 &= ~((1 << 16) | (1 << 31));
		writel(m2, port_mmio + PHY_MODE2);

	/* who knows what this magic does */
	tmp = readl(port_mmio + PHY_MODE3);
	writel(tmp, port_mmio + PHY_MODE3);

	if (fix_phy_mode4) {
		m4 = readl(port_mmio + PHY_MODE4);

		if (hp_flags & MV_HP_ERRATA_60X1B2)
			tmp = readl(port_mmio + 0x310);

		m4 = (m4 & ~(1 << 1)) | (1 << 0);

		writel(m4, port_mmio + PHY_MODE4);

		if (hp_flags & MV_HP_ERRATA_60X1B2)
			writel(tmp, port_mmio + 0x310);

	/* Revert values of pre-emphasis and signal amps to the saved ones */
	m2 = readl(port_mmio + PHY_MODE2);

	m2 &= ~MV_M2_PREAMP_MASK;
	m2 |= hpriv->signal[port].amps;
	m2 |= hpriv->signal[port].pre;

	/* according to mvSata 3.6.1, some IIE values are fixed */
	if (IS_GEN_IIE(hpriv)) {

	writel(m2, port_mmio + PHY_MODE2);

static void mv_channel_reset(struct mv_host_priv *hpriv, void __iomem *mmio,
			     unsigned int port_no)
	void __iomem *port_mmio = mv_port_base(mmio, port_no);

	writelfl(ATA_RST, port_mmio + EDMA_CMD_OFS);

	if (IS_60XX(hpriv)) {
		u32 ifctl = readl(port_mmio + SATA_INTERFACE_CTL);
		ifctl |= (1 << 7);	/* enable gen2i speed */
		ifctl = (ifctl & 0xfff) | 0x9b1000; /* from chip spec */
		writelfl(ifctl, port_mmio + SATA_INTERFACE_CTL);

	udelay(25);		/* allow reset propagation */

	/* Spec never mentions clearing the bit.  Marvell's driver does
	 * clear the bit, however.
	 */
	writelfl(0, port_mmio + EDMA_CMD_OFS);

	hpriv->ops->phy_errata(hpriv, mmio, port_no);

static void mv_stop_and_reset(struct ata_port *ap)
	struct mv_host_priv *hpriv = ap->host->private_data;
	void __iomem *mmio = ap->host->iomap[MV_PRIMARY_BAR];

	mv_channel_reset(hpriv, mmio, ap->port_no);

	__mv_phy_reset(ap, 0);
static inline void __msleep(unsigned int msec, int can_sleep)
{
	if (can_sleep)
		msleep(msec);
	else
		mdelay(msec);
}
/**
 * __mv_phy_reset - Perform eDMA reset followed by COMRESET
 * @ap: ATA channel to manipulate
 *
 * Part of this is taken from __sata_phy_reset and modified to
 * not sleep since this routine gets called from interrupt level.
 *
 * Inherited from caller.  This is coded to be safe to call at
 * interrupt level, i.e. it does not sleep.
 */
static void __mv_phy_reset(struct ata_port *ap, int can_sleep)
	struct mv_port_priv *pp = ap->private_data;
	struct mv_host_priv *hpriv = ap->host->private_data;
	void __iomem *port_mmio = mv_ap_base(ap);
	struct ata_taskfile tf;
	struct ata_device *dev = &ap->device[0];
	unsigned long timeout;

	VPRINTK("ENTER, port %u, mmio 0x%p\n", ap->port_no, port_mmio);

	DPRINTK("S-regs after ATA_RST: SStat 0x%08x SErr 0x%08x "
		"SCtrl 0x%08x\n", mv_scr_read(ap, SCR_STATUS),
		mv_scr_read(ap, SCR_ERROR), mv_scr_read(ap, SCR_CONTROL));

	/* Issue COMRESET via SControl */
comreset_retry:
	sata_scr_write_flush(ap, SCR_CONTROL, 0x301);
	__msleep(1, can_sleep);

	sata_scr_write_flush(ap, SCR_CONTROL, 0x300);
	__msleep(20, can_sleep);

	timeout = jiffies + msecs_to_jiffies(200);
	do {
		sata_scr_read(ap, SCR_STATUS, &sstatus);
		if (((sstatus & 0x3) == 3) || ((sstatus & 0x3) == 0))
			break;

		__msleep(1, can_sleep);
	} while (time_before(jiffies, timeout));

	/* work around errata */
	if (IS_60XX(hpriv) &&
	    (sstatus != 0x0) && (sstatus != 0x113) && (sstatus != 0x123) &&
		goto comreset_retry;
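	/* SStatus 0x113/0x123 decode as DET=3 (device present, phy online),
	 * SPD=1/2 (Gen1/Gen2) and IPM=1 (active); anything else non-zero on
	 * 60XX parts is treated as a failed COMRESET and retried above.
	 */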
	DPRINTK("S-regs after PHY wake: SStat 0x%08x SErr 0x%08x "
		"SCtrl 0x%08x\n", mv_scr_read(ap, SCR_STATUS),
		mv_scr_read(ap, SCR_ERROR), mv_scr_read(ap, SCR_CONTROL));

	if (ata_port_online(ap)) {
		ata_port_probe(ap);
	} else {
		sata_scr_read(ap, SCR_STATUS, &sstatus);
		ata_port_printk(ap, KERN_INFO,
				"no device found (phy stat %08x)\n", sstatus);
		ata_port_disable(ap);
		return;
	}
	ap->cbl = ATA_CBL_SATA;

	/* even after SStatus reflects that device is ready,
	 * it seems to take a while for link to be fully
	 * established (and thus Status no longer 0x80/0x7F),
	 * so we poll a bit for that, here.
	 */
		u8 drv_stat = ata_check_status(ap);
		if ((drv_stat != 0x80) && (drv_stat != 0x7f))
			break;
		__msleep(500, can_sleep);
	tf.lbah = readb(ap->ioaddr.lbah_addr);
	tf.lbam = readb(ap->ioaddr.lbam_addr);
	tf.lbal = readb(ap->ioaddr.lbal_addr);
	tf.nsect = readb(ap->ioaddr.nsect_addr);

	dev->class = ata_dev_classify(&tf);
	if (!ata_dev_enabled(dev)) {
		VPRINTK("Port disabled post-sig: No device present.\n");
		ata_port_disable(ap);

	writelfl(0, port_mmio + EDMA_ERR_IRQ_CAUSE_OFS);

	pp->pp_flags &= ~MV_PP_FLAG_EDMA_EN;

static void mv_phy_reset(struct ata_port *ap)
	__mv_phy_reset(ap, 1);

/**
 * mv_eng_timeout - Routine called by libata when SCSI times out I/O
 * @ap: ATA channel to manipulate
 *
 * Intent is to clear all pending error conditions, reset the
 * chip/bus, fail the command, and move on.
 *
 * This routine holds the host lock while failing the command.
 */
static void mv_eng_timeout(struct ata_port *ap)
	void __iomem *mmio = ap->host->iomap[MV_PRIMARY_BAR];
	struct ata_queued_cmd *qc;
	unsigned long flags;

	ata_port_printk(ap, KERN_ERR, "Entering mv_eng_timeout\n");
	DPRINTK("All regs @ start of eng_timeout\n");
	mv_dump_all_regs(mmio, ap->port_no, to_pci_dev(ap->host->dev));

	qc = ata_qc_from_tag(ap, ap->active_tag);
	printk(KERN_ERR "mmio_base %p ap %p qc %p scsi_cmnd %p &cmnd %p\n",
	       mmio, ap, qc, qc->scsicmd, &qc->scsicmd->cmnd);

	spin_lock_irqsave(&ap->host->lock, flags);
	mv_stop_and_reset(ap);
	spin_unlock_irqrestore(&ap->host->lock, flags);

	WARN_ON(!(qc->flags & ATA_QCFLAG_ACTIVE));
	if (qc->flags & ATA_QCFLAG_ACTIVE) {
		qc->err_mask |= AC_ERR_TIMEOUT;
		ata_eh_qc_complete(qc);

/**
 * mv_port_init - Perform some early initialization on a single port.
 * @port: libata data structure storing shadow register addresses
 * @port_mmio: base address of the port
 *
 * Initialize shadow register mmio addresses, clear outstanding
 * interrupts on the port, and unmask interrupts for the future
 * start of the port.
 *
 * Inherited from caller.
 */
static void mv_port_init(struct ata_ioports *port, void __iomem *port_mmio)
	void __iomem *shd_base = port_mmio + SHD_BLK_OFS;

	/* PIO related setup */
	port->data_addr = shd_base + (sizeof(u32) * ATA_REG_DATA);
	port->feature_addr = shd_base + (sizeof(u32) * ATA_REG_ERR);
	port->nsect_addr = shd_base + (sizeof(u32) * ATA_REG_NSECT);
	port->lbal_addr = shd_base + (sizeof(u32) * ATA_REG_LBAL);
	port->lbam_addr = shd_base + (sizeof(u32) * ATA_REG_LBAM);
	port->lbah_addr = shd_base + (sizeof(u32) * ATA_REG_LBAH);
	port->device_addr = shd_base + (sizeof(u32) * ATA_REG_DEVICE);
	port->command_addr = shd_base + (sizeof(u32) * ATA_REG_STATUS);
	/* special case: control/altstatus doesn't have ATA_REG_ address */
	port->altstatus_addr = port->ctl_addr = shd_base + SHD_CTL_AST_OFS;

	port->cmd_addr = port->bmdma_addr = port->scr_addr = NULL;
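	/* The shadow block exposes one u32 per taskfile register, so with
	 * ATA_REG_DATA == 0 the layout works out to: data at shd_base + 0x00,
	 * error/feature at +0x04, ..., status/command at +0x1c, and
	 * control/altstatus off on its own at +0x20 (SHD_CTL_AST_OFS).
	 */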
	/* Clear any currently outstanding port interrupt conditions */
	serr_ofs = mv_scr_offset(SCR_ERROR);
	writelfl(readl(port_mmio + serr_ofs), port_mmio + serr_ofs);
	writelfl(0, port_mmio + EDMA_ERR_IRQ_CAUSE_OFS);

	/* unmask all EDMA error interrupts */
	writelfl(~0, port_mmio + EDMA_ERR_IRQ_MASK_OFS);

	VPRINTK("EDMA cfg=0x%08x EDMA IRQ err cause/mask=0x%08x/0x%08x\n",
		readl(port_mmio + EDMA_CFG_OFS),
		readl(port_mmio + EDMA_ERR_IRQ_CAUSE_OFS),
		readl(port_mmio + EDMA_ERR_IRQ_MASK_OFS));

static int mv_chip_id(struct pci_dev *pdev, struct mv_host_priv *hpriv,
		      unsigned int board_idx)
	u32 hp_flags = hpriv->hp_flags;

	pci_read_config_byte(pdev, PCI_REVISION_ID, &rev_id);

		hpriv->ops = &mv5xxx_ops;
		hp_flags |= MV_HP_50XX;

			hp_flags |= MV_HP_ERRATA_50XXB0;
			hp_flags |= MV_HP_ERRATA_50XXB2;
			dev_printk(KERN_WARNING, &pdev->dev,
				   "Applying 50XXB2 workarounds to unknown rev\n");
			hp_flags |= MV_HP_ERRATA_50XXB2;

		hpriv->ops = &mv5xxx_ops;
		hp_flags |= MV_HP_50XX;

			hp_flags |= MV_HP_ERRATA_50XXB0;
			hp_flags |= MV_HP_ERRATA_50XXB2;
			dev_printk(KERN_WARNING, &pdev->dev,
				   "Applying B2 workarounds to unknown rev\n");
			hp_flags |= MV_HP_ERRATA_50XXB2;

		hpriv->ops = &mv6xxx_ops;
			hp_flags |= MV_HP_ERRATA_60X1B2;
			hp_flags |= MV_HP_ERRATA_60X1C0;
			dev_printk(KERN_WARNING, &pdev->dev,
				   "Applying B2 workarounds to unknown rev\n");
			hp_flags |= MV_HP_ERRATA_60X1B2;

		hpriv->ops = &mv6xxx_ops;
		hp_flags |= MV_HP_GEN_IIE;
			hp_flags |= MV_HP_ERRATA_XX42A0;
			hp_flags |= MV_HP_ERRATA_60X1C0;
			dev_printk(KERN_WARNING, &pdev->dev,
				   "Applying 60X1C0 workarounds to unknown rev\n");
			hp_flags |= MV_HP_ERRATA_60X1C0;

	printk(KERN_ERR DRV_NAME ": BUG: invalid board index %u\n", board_idx);

	hpriv->hp_flags = hp_flags;

/**
 * mv_init_host - Perform some early initialization of the host.
 * @pdev: host PCI device
 * @probe_ent: early data struct representing the host
 *
 * If possible, do an early global reset of the host.  Then do
 * our port init and clear/unmask all/relevant host interrupts.
 *
 * Inherited from caller.
 */
static int mv_init_host(struct pci_dev *pdev, struct ata_probe_ent *probe_ent,
			unsigned int board_idx)
	int rc = 0, n_hc, port, hc;
	void __iomem *mmio = probe_ent->iomap[MV_PRIMARY_BAR];
	struct mv_host_priv *hpriv = probe_ent->private_data;

	/* global interrupt mask */
	writel(0, mmio + HC_MAIN_IRQ_MASK_OFS);

	rc = mv_chip_id(pdev, hpriv, board_idx);

	n_hc = mv_get_hc_count(probe_ent->port_flags);
	probe_ent->n_ports = MV_PORTS_PER_HC * n_hc;

	for (port = 0; port < probe_ent->n_ports; port++)
		hpriv->ops->read_preamp(hpriv, port, mmio);

	rc = hpriv->ops->reset_hc(hpriv, mmio, n_hc);

	hpriv->ops->reset_flash(hpriv, mmio);
	hpriv->ops->reset_bus(pdev, mmio);
	hpriv->ops->enable_leds(hpriv, mmio);

	for (port = 0; port < probe_ent->n_ports; port++) {
		if (IS_60XX(hpriv)) {
			void __iomem *port_mmio = mv_port_base(mmio, port);

			u32 ifctl = readl(port_mmio + SATA_INTERFACE_CTL);
			ifctl |= (1 << 7);	/* enable gen2i speed */
			ifctl = (ifctl & 0xfff) | 0x9b1000; /* from chip spec */
			writelfl(ifctl, port_mmio + SATA_INTERFACE_CTL);

		hpriv->ops->phy_errata(hpriv, mmio, port);

	for (port = 0; port < probe_ent->n_ports; port++) {
		void __iomem *port_mmio = mv_port_base(mmio, port);
		mv_port_init(&probe_ent->port[port], port_mmio);

	for (hc = 0; hc < n_hc; hc++) {
		void __iomem *hc_mmio = mv_hc_base(mmio, hc);

		VPRINTK("HC%i: HC config=0x%08x HC IRQ cause "
			"(before clear)=0x%08x\n", hc,
			readl(hc_mmio + HC_CFG_OFS),
			readl(hc_mmio + HC_IRQ_CAUSE_OFS));

		/* Clear any currently outstanding hc interrupt conditions */
		writelfl(0, hc_mmio + HC_IRQ_CAUSE_OFS);

	/* Clear any currently outstanding host interrupt conditions */
	writelfl(0, mmio + PCI_IRQ_CAUSE_OFS);

	/* and unmask interrupt generation for host regs */
	writelfl(PCI_UNMASK_ALL_IRQS, mmio + PCI_IRQ_MASK_OFS);
	writelfl(~HC_MAIN_MASKED_IRQS, mmio + HC_MAIN_IRQ_MASK_OFS);

	VPRINTK("HC MAIN IRQ cause/mask=0x%08x/0x%08x "
		"PCI int cause/mask=0x%08x/0x%08x\n",
		readl(mmio + HC_MAIN_IRQ_CAUSE_OFS),
		readl(mmio + HC_MAIN_IRQ_MASK_OFS),
		readl(mmio + PCI_IRQ_CAUSE_OFS),
		readl(mmio + PCI_IRQ_MASK_OFS));

/**
 * mv_print_info - Dump key info to kernel log for perusal.
 * @probe_ent: early data struct representing the host
 *
 * FIXME: complete this.
 *
 * Inherited from caller.
 */
static void mv_print_info(struct ata_probe_ent *probe_ent)
	struct pci_dev *pdev = to_pci_dev(probe_ent->dev);
	struct mv_host_priv *hpriv = probe_ent->private_data;

	/* Use this to determine the HW stepping of the chip so we know
	 * what errata to workaround
	 */
	pci_read_config_byte(pdev, PCI_REVISION_ID, &rev_id);

	pci_read_config_byte(pdev, PCI_CLASS_DEVICE, &scc);
	else if (scc == 0x01)		/* RAID */
		scc_s = "RAID";
	dev_printk(KERN_INFO, &pdev->dev,
		   "%u slots %u ports %s mode IRQ via %s\n",
		   (unsigned)MV_MAX_Q_DEPTH, probe_ent->n_ports,
		   scc_s, (MV_HP_FLAG_MSI & hpriv->hp_flags) ? "MSI" : "INTx");

/**
 * mv_init_one - handle a positive probe of a Marvell host
 * @pdev: PCI device found
 * @ent: PCI device ID entry for the matched host
 *
 * Inherited from caller.
 */
static int mv_init_one(struct pci_dev *pdev, const struct pci_device_id *ent)
	static int printed_version = 0;
	struct device *dev = &pdev->dev;
	struct ata_probe_ent *probe_ent;
	struct mv_host_priv *hpriv;
	unsigned int board_idx = (unsigned int)ent->driver_data;

	if (!printed_version++)
		dev_printk(KERN_INFO, &pdev->dev, "version " DRV_VERSION "\n");

	rc = pcim_enable_device(pdev);

	pci_set_master(pdev);

	rc = pcim_iomap_regions(pdev, 1 << MV_PRIMARY_BAR, DRV_NAME);
		pcim_pin_device(pdev);

	probe_ent = devm_kzalloc(dev, sizeof(*probe_ent), GFP_KERNEL);
	if (probe_ent == NULL)
		return -ENOMEM;

	probe_ent->dev = pci_dev_to_dev(pdev);
	INIT_LIST_HEAD(&probe_ent->node);

	hpriv = devm_kzalloc(dev, sizeof(*hpriv), GFP_KERNEL);

	probe_ent->sht = mv_port_info[board_idx].sht;
	probe_ent->port_flags = mv_port_info[board_idx].flags;
	probe_ent->pio_mask = mv_port_info[board_idx].pio_mask;
	probe_ent->udma_mask = mv_port_info[board_idx].udma_mask;
	probe_ent->port_ops = mv_port_info[board_idx].port_ops;

	probe_ent->irq = pdev->irq;
	probe_ent->irq_flags = IRQF_SHARED;
	probe_ent->iomap = pcim_iomap_table(pdev);
	probe_ent->private_data = hpriv;

	/* initialize adapter */
	rc = mv_init_host(pdev, probe_ent, board_idx);
	/* Enable interrupts */
	if (msi && pci_enable_msi(pdev))
		pci_intx(pdev, 1);
	mv_dump_pci_cfg(pdev, 0x68);
	mv_print_info(probe_ent);
	if (ata_device_add(probe_ent) == 0)
		return -ENODEV;

	devm_kfree(dev, probe_ent);
	return 0;
static int __init mv_init(void)
{
	return pci_register_driver(&mv_pci_driver);
}

static void __exit mv_exit(void)
{
	pci_unregister_driver(&mv_pci_driver);
}

MODULE_AUTHOR("Brett Russ");
MODULE_DESCRIPTION("SCSI low-level driver for Marvell SATA controllers");
MODULE_LICENSE("GPL");
MODULE_DEVICE_TABLE(pci, mv_pci_tbl);
MODULE_VERSION(DRV_VERSION);

module_param(msi, int, 0444);
MODULE_PARM_DESC(msi, "Enable use of PCI MSI (0=off, 1=on)");

module_init(mv_init);
module_exit(mv_exit);