2 * sata_mv.c - Marvell SATA support
4 * Copyright 2005: EMC Corporation, all rights reserved.
5 * Copyright 2005 Red Hat, Inc. All rights reserved.
7 * Please ALWAYS copy linux-ide@vger.kernel.org on emails.
9 * This program is free software; you can redistribute it and/or modify
10 * it under the terms of the GNU General Public License as published by
11 * the Free Software Foundation; version 2 of the License.
13 * This program is distributed in the hope that it will be useful,
14 * but WITHOUT ANY WARRANTY; without even the implied warranty of
15 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
16 * GNU General Public License for more details.
18 * You should have received a copy of the GNU General Public License
19 * along with this program; if not, write to the Free Software
20 * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
27 1) Needs a full errata audit for all chipsets. I implemented most
28 of the errata workarounds found in the Marvell vendor driver, but
29 I distinctly remember a couple workarounds (one related to PCI-X)
32 4) Add NCQ support (easy to intermediate, once new-EH support appears)
34 5) Investigate problems with PCI Message Signalled Interrupts (MSI).
36 6) Add port multiplier support (intermediate)
38 8) Develop a low-power-consumption strategy, and implement it.
40 9) [Experiment, low priority] See if ATAPI can be supported using
41 "unknown FIS" or "vendor-specific FIS" support, or something creative
44 10) [Experiment, low priority] Investigate interrupt coalescing.
45 Quite often, especially with PCI Message Signalled Interrupts (MSI),
46 the overhead reduced by interrupt mitigation is often not
47 worth the latency cost.
49 11) [Experiment, Marvell value added] Is it possible to use target
50 mode to cross-connect two Linux boxes with Marvell cards? If so,
51 creating LibATA target mode support would be very interesting.
53 Target mode, for those without docs, is the ability to directly
54 connect two SATA controllers.
56 13) Verify that 7042 is fully supported. I only have a 6042.
61 #include <linux/kernel.h>
62 #include <linux/module.h>
63 #include <linux/pci.h>
64 #include <linux/init.h>
65 #include <linux/blkdev.h>
66 #include <linux/delay.h>
67 #include <linux/interrupt.h>
68 #include <linux/dma-mapping.h>
69 #include <linux/device.h>
70 #include <scsi/scsi_host.h>
71 #include <scsi/scsi_cmnd.h>
72 #include <scsi/scsi_device.h>
73 #include <linux/libata.h>
75 #define DRV_NAME "sata_mv"
76 #define DRV_VERSION "1.01"
79 /* BAR's are enumerated in terms of pci_resource_start() terms */
80 MV_PRIMARY_BAR = 0, /* offset 0x10: memory space */
81 MV_IO_BAR = 2, /* offset 0x18: IO space */
82 MV_MISC_BAR = 3, /* offset 0x1c: FLASH, NVRAM, SRAM */
84 MV_MAJOR_REG_AREA_SZ = 0x10000, /* 64KB */
85 MV_MINOR_REG_AREA_SZ = 0x2000, /* 8KB */
88 MV_IRQ_COAL_REG_BASE = 0x18000, /* 6xxx part only */
89 MV_IRQ_COAL_CAUSE = (MV_IRQ_COAL_REG_BASE + 0x08),
90 MV_IRQ_COAL_CAUSE_LO = (MV_IRQ_COAL_REG_BASE + 0x88),
91 MV_IRQ_COAL_CAUSE_HI = (MV_IRQ_COAL_REG_BASE + 0x8c),
92 MV_IRQ_COAL_THRESHOLD = (MV_IRQ_COAL_REG_BASE + 0xcc),
93 MV_IRQ_COAL_TIME_THRESHOLD = (MV_IRQ_COAL_REG_BASE + 0xd0),
95 MV_SATAHC0_REG_BASE = 0x20000,
96 MV_FLASH_CTL = 0x1046c,
97 MV_GPIO_PORT_CTL = 0x104f0,
98 MV_RESET_CFG = 0x180d8,
100 MV_PCI_REG_SZ = MV_MAJOR_REG_AREA_SZ,
101 MV_SATAHC_REG_SZ = MV_MAJOR_REG_AREA_SZ,
102 MV_SATAHC_ARBTR_REG_SZ = MV_MINOR_REG_AREA_SZ, /* arbiter */
103 MV_PORT_REG_SZ = MV_MINOR_REG_AREA_SZ,
106 MV_MAX_Q_DEPTH_MASK = MV_MAX_Q_DEPTH - 1,
108 /* CRQB needs alignment on a 1KB boundary. Size == 1KB
109 * CRPB needs alignment on a 256B boundary. Size == 256B
110 * SG count of 176 leads to MV_PORT_PRIV_DMA_SZ == 4KB
111 * ePRD (SG) entries need alignment on a 16B boundary. Size == 16B
113 MV_CRQB_Q_SZ = (32 * MV_MAX_Q_DEPTH),
114 MV_CRPB_Q_SZ = (8 * MV_MAX_Q_DEPTH),
116 MV_SG_TBL_SZ = (16 * MV_MAX_SG_CT),
117 MV_PORT_PRIV_DMA_SZ = (MV_CRQB_Q_SZ + MV_CRPB_Q_SZ + MV_SG_TBL_SZ),
120 /* == (port / MV_PORTS_PER_HC) to determine HC from 0-7 port */
121 MV_PORT_HC_SHIFT = 2,
122 /* == (port % MV_PORTS_PER_HC) to determine hard port from 0-7 port */
126 MV_FLAG_DUAL_HC = (1 << 30), /* two SATA Host Controllers */
127 MV_FLAG_IRQ_COALESCE = (1 << 29), /* IRQ coalescing capability */
128 MV_COMMON_FLAGS = ATA_FLAG_SATA | ATA_FLAG_NO_LEGACY |
129 ATA_FLAG_MMIO | ATA_FLAG_NO_ATAPI |
130 ATA_FLAG_PIO_POLLING,
131 MV_6XXX_FLAGS = MV_FLAG_IRQ_COALESCE,
133 CRQB_FLAG_READ = (1 << 0),
135 CRQB_IOID_SHIFT = 6, /* CRQB Gen-II/IIE IO Id shift */
136 CRQB_HOSTQ_SHIFT = 17, /* CRQB Gen-II/IIE HostQueTag shift */
137 CRQB_CMD_ADDR_SHIFT = 8,
138 CRQB_CMD_CS = (0x2 << 11),
139 CRQB_CMD_LAST = (1 << 15),
141 CRPB_FLAG_STATUS_SHIFT = 8,
142 CRPB_IOID_SHIFT_6 = 5, /* CRPB Gen-II IO Id shift */
143 CRPB_IOID_SHIFT_7 = 7, /* CRPB Gen-IIE IO Id shift */
145 EPRD_FLAG_END_OF_TBL = (1 << 31),
147 /* PCI interface registers */
149 PCI_COMMAND_OFS = 0xc00,
151 PCI_MAIN_CMD_STS_OFS = 0xd30,
152 STOP_PCI_MASTER = (1 << 2),
153 PCI_MASTER_EMPTY = (1 << 3),
154 GLOB_SFT_RST = (1 << 4),
157 MV_PCI_EXP_ROM_BAR_CTL = 0xd2c,
158 MV_PCI_DISC_TIMER = 0xd04,
159 MV_PCI_MSI_TRIGGER = 0xc38,
160 MV_PCI_SERR_MASK = 0xc28,
161 MV_PCI_XBAR_TMOUT = 0x1d04,
162 MV_PCI_ERR_LOW_ADDRESS = 0x1d40,
163 MV_PCI_ERR_HIGH_ADDRESS = 0x1d44,
164 MV_PCI_ERR_ATTRIBUTE = 0x1d48,
165 MV_PCI_ERR_COMMAND = 0x1d50,
167 PCI_IRQ_CAUSE_OFS = 0x1d58,
168 PCI_IRQ_MASK_OFS = 0x1d5c,
169 PCI_UNMASK_ALL_IRQS = 0x7fffff, /* bits 22-0 */
171 PCIE_IRQ_CAUSE_OFS = 0x1900,
172 PCIE_IRQ_MASK_OFS = 0x1910,
173 PCIE_UNMASK_ALL_IRQS = 0x40a, /* assorted bits */
175 HC_MAIN_IRQ_CAUSE_OFS = 0x1d60,
176 HC_MAIN_IRQ_MASK_OFS = 0x1d64,
177 PORT0_ERR = (1 << 0), /* shift by port # */
178 PORT0_DONE = (1 << 1), /* shift by port # */
179 HC0_IRQ_PEND = 0x1ff, /* bits 0-8 = HC0's ports */
180 HC_SHIFT = 9, /* bits 9-17 = HC1's ports */
182 TRAN_LO_DONE = (1 << 19), /* 6xxx: IRQ coalescing */
183 TRAN_HI_DONE = (1 << 20), /* 6xxx: IRQ coalescing */
184 PORTS_0_3_COAL_DONE = (1 << 8),
185 PORTS_4_7_COAL_DONE = (1 << 17),
186 PORTS_0_7_COAL_DONE = (1 << 21), /* 6xxx: IRQ coalescing */
187 GPIO_INT = (1 << 22),
188 SELF_INT = (1 << 23),
189 TWSI_INT = (1 << 24),
190 HC_MAIN_RSVD = (0x7f << 25), /* bits 31-25 */
191 HC_MAIN_RSVD_5 = (0x1fff << 19), /* bits 31-19 */
192 HC_MAIN_MASKED_IRQS = (TRAN_LO_DONE | TRAN_HI_DONE |
193 PORTS_0_7_COAL_DONE | GPIO_INT | TWSI_INT |
195 HC_MAIN_MASKED_IRQS_5 = (PORTS_0_3_COAL_DONE | PORTS_4_7_COAL_DONE |
198 /* SATAHC registers */
201 HC_IRQ_CAUSE_OFS = 0x14,
202 CRPB_DMA_DONE = (1 << 0), /* shift by port # */
203 HC_IRQ_COAL = (1 << 4), /* IRQ coalescing */
204 DEV_IRQ = (1 << 8), /* shift by port # */
206 /* Shadow block registers */
208 SHD_CTL_AST_OFS = 0x20, /* ofs from SHD_BLK_OFS */
211 SATA_STATUS_OFS = 0x300, /* ctrl, err regs follow status */
212 SATA_ACTIVE_OFS = 0x350,
213 SATA_FIS_IRQ_CAUSE_OFS = 0x364,
220 SATA_INTERFACE_CTL = 0x050,
222 MV_M2_PREAMP_MASK = 0x7e0,
226 EDMA_CFG_Q_DEPTH = 0x1f, /* max device queue depth */
227 EDMA_CFG_NCQ = (1 << 5), /* for R/W FPDMA queued */
228 EDMA_CFG_NCQ_GO_ON_ERR = (1 << 14), /* continue on error */
229 EDMA_CFG_RD_BRST_EXT = (1 << 11), /* read burst 512B */
230 EDMA_CFG_WR_BUFF_LEN = (1 << 13), /* write buffer 512B */
232 EDMA_ERR_IRQ_CAUSE_OFS = 0x8,
233 EDMA_ERR_IRQ_MASK_OFS = 0xc,
234 EDMA_ERR_D_PAR = (1 << 0), /* UDMA data parity err */
235 EDMA_ERR_PRD_PAR = (1 << 1), /* UDMA PRD parity err */
236 EDMA_ERR_DEV = (1 << 2), /* device error */
237 EDMA_ERR_DEV_DCON = (1 << 3), /* device disconnect */
238 EDMA_ERR_DEV_CON = (1 << 4), /* device connected */
239 EDMA_ERR_SERR = (1 << 5), /* SError bits [WBDST] raised */
240 EDMA_ERR_SELF_DIS = (1 << 7), /* Gen II/IIE self-disable */
241 EDMA_ERR_SELF_DIS_5 = (1 << 8), /* Gen I self-disable */
242 EDMA_ERR_BIST_ASYNC = (1 << 8), /* BIST FIS or Async Notify */
243 EDMA_ERR_TRANS_IRQ_7 = (1 << 8), /* Gen IIE transprt layer irq */
244 EDMA_ERR_CRQB_PAR = (1 << 9), /* CRQB parity error */
245 EDMA_ERR_CRPB_PAR = (1 << 10), /* CRPB parity error */
246 EDMA_ERR_INTRL_PAR = (1 << 11), /* internal parity error */
247 EDMA_ERR_IORDY = (1 << 12), /* IORdy timeout */
249 EDMA_ERR_LNK_CTRL_RX = (0xf << 13), /* link ctrl rx error */
250 EDMA_ERR_LNK_CTRL_RX_0 = (1 << 13), /* transient: CRC err */
251 EDMA_ERR_LNK_CTRL_RX_1 = (1 << 14), /* transient: FIFO err */
252 EDMA_ERR_LNK_CTRL_RX_2 = (1 << 15), /* fatal: caught SYNC */
253 EDMA_ERR_LNK_CTRL_RX_3 = (1 << 16), /* transient: FIS rx err */
255 EDMA_ERR_LNK_DATA_RX = (0xf << 17), /* link data rx error */
257 EDMA_ERR_LNK_CTRL_TX = (0x1f << 21), /* link ctrl tx error */
258 EDMA_ERR_LNK_CTRL_TX_0 = (1 << 21), /* transient: CRC err */
259 EDMA_ERR_LNK_CTRL_TX_1 = (1 << 22), /* transient: FIFO err */
260 EDMA_ERR_LNK_CTRL_TX_2 = (1 << 23), /* transient: caught SYNC */
261 EDMA_ERR_LNK_CTRL_TX_3 = (1 << 24), /* transient: caught DMAT */
262 EDMA_ERR_LNK_CTRL_TX_4 = (1 << 25), /* transient: FIS collision */
264 EDMA_ERR_LNK_DATA_TX = (0x1f << 26), /* link data tx error */
266 EDMA_ERR_TRANS_PROTO = (1 << 31), /* transport protocol error */
267 EDMA_ERR_OVERRUN_5 = (1 << 5),
268 EDMA_ERR_UNDERRUN_5 = (1 << 6),
270 EDMA_ERR_IRQ_TRANSIENT = EDMA_ERR_LNK_CTRL_RX_0 |
271 EDMA_ERR_LNK_CTRL_RX_1 |
272 EDMA_ERR_LNK_CTRL_RX_3 |
273 EDMA_ERR_LNK_CTRL_TX,
275 EDMA_EH_FREEZE = EDMA_ERR_D_PAR |
285 EDMA_ERR_LNK_CTRL_RX_2 |
286 EDMA_ERR_LNK_DATA_RX |
287 EDMA_ERR_LNK_DATA_TX |
288 EDMA_ERR_TRANS_PROTO,
289 EDMA_EH_FREEZE_5 = EDMA_ERR_D_PAR |
294 EDMA_ERR_UNDERRUN_5 |
295 EDMA_ERR_SELF_DIS_5 |
301 EDMA_REQ_Q_BASE_HI_OFS = 0x10,
302 EDMA_REQ_Q_IN_PTR_OFS = 0x14, /* also contains BASE_LO */
304 EDMA_REQ_Q_OUT_PTR_OFS = 0x18,
305 EDMA_REQ_Q_PTR_SHIFT = 5,
307 EDMA_RSP_Q_BASE_HI_OFS = 0x1c,
308 EDMA_RSP_Q_IN_PTR_OFS = 0x20,
309 EDMA_RSP_Q_OUT_PTR_OFS = 0x24, /* also contains BASE_LO */
310 EDMA_RSP_Q_PTR_SHIFT = 3,
312 EDMA_CMD_OFS = 0x28, /* EDMA command register */
313 EDMA_EN = (1 << 0), /* enable EDMA */
314 EDMA_DS = (1 << 1), /* disable EDMA; self-negated */
315 ATA_RST = (1 << 2), /* reset trans/link/phy */
317 EDMA_IORDY_TMOUT = 0x34,
320 /* Host private flags (hp_flags) */
321 MV_HP_FLAG_MSI = (1 << 0),
322 MV_HP_ERRATA_50XXB0 = (1 << 1),
323 MV_HP_ERRATA_50XXB2 = (1 << 2),
324 MV_HP_ERRATA_60X1B2 = (1 << 3),
325 MV_HP_ERRATA_60X1C0 = (1 << 4),
326 MV_HP_ERRATA_XX42A0 = (1 << 5),
327 MV_HP_GEN_I = (1 << 6), /* Generation I: 50xx */
328 MV_HP_GEN_II = (1 << 7), /* Generation II: 60xx */
329 MV_HP_GEN_IIE = (1 << 8), /* Generation IIE: 6042/7042 */
330 MV_HP_PCIE = (1 << 9), /* PCIe bus/regs: 7042 */
332 /* Port private flags (pp_flags) */
333 MV_PP_FLAG_EDMA_EN = (1 << 0), /* is EDMA engine enabled? */
334 MV_PP_FLAG_NCQ_EN = (1 << 1), /* is EDMA set up for NCQ? */
335 MV_PP_FLAG_HAD_A_RESET = (1 << 2), /* 1st hard reset complete? */
338 #define IS_GEN_I(hpriv) ((hpriv)->hp_flags & MV_HP_GEN_I)
339 #define IS_GEN_II(hpriv) ((hpriv)->hp_flags & MV_HP_GEN_II)
340 #define IS_GEN_IIE(hpriv) ((hpriv)->hp_flags & MV_HP_GEN_IIE)
343 /* DMA boundary 0xffff is required by the s/g splitting
344 * we need on /length/ in mv_fill-sg().
346 MV_DMA_BOUNDARY = 0xffffU,
348 /* mask of register bits containing lower 32 bits
349 * of EDMA request queue DMA address
351 EDMA_REQ_Q_BASE_LO_MASK = 0xfffffc00U,
353 /* ditto, for response queue */
354 EDMA_RSP_Q_BASE_LO_MASK = 0xffffff00U,
367 /* Command ReQuest Block: 32B */
383 /* Command ResPonse Block: 8B */
390 /* EDMA Physical Region Descriptor (ePRD); A.K.A. SG */
398 struct mv_port_priv {
399 struct mv_crqb *crqb;
401 struct mv_crpb *crpb;
403 struct mv_sg *sg_tbl;
404 dma_addr_t sg_tbl_dma;
406 unsigned int req_idx;
407 unsigned int resp_idx;
412 struct mv_port_signal {
417 struct mv_host_priv {
419 struct mv_port_signal signal[8];
420 const struct mv_hw_ops *ops;
427 void (*phy_errata)(struct mv_host_priv *hpriv, void __iomem *mmio,
429 void (*enable_leds)(struct mv_host_priv *hpriv, void __iomem *mmio);
430 void (*read_preamp)(struct mv_host_priv *hpriv, int idx,
432 int (*reset_hc)(struct mv_host_priv *hpriv, void __iomem *mmio,
434 void (*reset_flash)(struct mv_host_priv *hpriv, void __iomem *mmio);
435 void (*reset_bus)(struct pci_dev *pdev, void __iomem *mmio);
438 static void mv_irq_clear(struct ata_port *ap);
439 static int mv_scr_read(struct ata_port *ap, unsigned int sc_reg_in, u32 *val);
440 static int mv_scr_write(struct ata_port *ap, unsigned int sc_reg_in, u32 val);
441 static int mv5_scr_read(struct ata_port *ap, unsigned int sc_reg_in, u32 *val);
442 static int mv5_scr_write(struct ata_port *ap, unsigned int sc_reg_in, u32 val);
443 static int mv_port_start(struct ata_port *ap);
444 static void mv_port_stop(struct ata_port *ap);
445 static void mv_qc_prep(struct ata_queued_cmd *qc);
446 static void mv_qc_prep_iie(struct ata_queued_cmd *qc);
447 static unsigned int mv_qc_issue(struct ata_queued_cmd *qc);
448 static void mv_error_handler(struct ata_port *ap);
449 static void mv_post_int_cmd(struct ata_queued_cmd *qc);
450 static void mv_eh_freeze(struct ata_port *ap);
451 static void mv_eh_thaw(struct ata_port *ap);
452 static void mv6_dev_config(struct ata_device *dev);
453 static int mv_init_one(struct pci_dev *pdev, const struct pci_device_id *ent);
455 static void mv5_phy_errata(struct mv_host_priv *hpriv, void __iomem *mmio,
457 static void mv5_enable_leds(struct mv_host_priv *hpriv, void __iomem *mmio);
458 static void mv5_read_preamp(struct mv_host_priv *hpriv, int idx,
460 static int mv5_reset_hc(struct mv_host_priv *hpriv, void __iomem *mmio,
462 static void mv5_reset_flash(struct mv_host_priv *hpriv, void __iomem *mmio);
463 static void mv5_reset_bus(struct pci_dev *pdev, void __iomem *mmio);
465 static void mv6_phy_errata(struct mv_host_priv *hpriv, void __iomem *mmio,
467 static void mv6_enable_leds(struct mv_host_priv *hpriv, void __iomem *mmio);
468 static void mv6_read_preamp(struct mv_host_priv *hpriv, int idx,
470 static int mv6_reset_hc(struct mv_host_priv *hpriv, void __iomem *mmio,
472 static void mv6_reset_flash(struct mv_host_priv *hpriv, void __iomem *mmio);
473 static void mv_reset_pci_bus(struct pci_dev *pdev, void __iomem *mmio);
474 static void mv_channel_reset(struct mv_host_priv *hpriv, void __iomem *mmio,
475 unsigned int port_no);
476 static void mv_edma_cfg(struct mv_port_priv *pp, struct mv_host_priv *hpriv,
477 void __iomem *port_mmio, int want_ncq);
478 static int __mv_stop_dma(struct ata_port *ap);
480 static struct scsi_host_template mv5_sht = {
481 .module = THIS_MODULE,
483 .ioctl = ata_scsi_ioctl,
484 .queuecommand = ata_scsi_queuecmd,
485 .can_queue = ATA_DEF_QUEUE,
486 .this_id = ATA_SHT_THIS_ID,
487 .sg_tablesize = MV_MAX_SG_CT / 2,
488 .cmd_per_lun = ATA_SHT_CMD_PER_LUN,
489 .emulated = ATA_SHT_EMULATED,
491 .proc_name = DRV_NAME,
492 .dma_boundary = MV_DMA_BOUNDARY,
493 .slave_configure = ata_scsi_slave_config,
494 .slave_destroy = ata_scsi_slave_destroy,
495 .bios_param = ata_std_bios_param,
498 static struct scsi_host_template mv6_sht = {
499 .module = THIS_MODULE,
501 .ioctl = ata_scsi_ioctl,
502 .queuecommand = ata_scsi_queuecmd,
503 .can_queue = ATA_DEF_QUEUE,
504 .this_id = ATA_SHT_THIS_ID,
505 .sg_tablesize = MV_MAX_SG_CT / 2,
506 .cmd_per_lun = ATA_SHT_CMD_PER_LUN,
507 .emulated = ATA_SHT_EMULATED,
509 .proc_name = DRV_NAME,
510 .dma_boundary = MV_DMA_BOUNDARY,
511 .slave_configure = ata_scsi_slave_config,
512 .slave_destroy = ata_scsi_slave_destroy,
513 .bios_param = ata_std_bios_param,
516 static const struct ata_port_operations mv5_ops = {
517 .tf_load = ata_tf_load,
518 .tf_read = ata_tf_read,
519 .check_status = ata_check_status,
520 .exec_command = ata_exec_command,
521 .dev_select = ata_std_dev_select,
523 .cable_detect = ata_cable_sata,
525 .qc_prep = mv_qc_prep,
526 .qc_issue = mv_qc_issue,
527 .data_xfer = ata_data_xfer,
529 .irq_clear = mv_irq_clear,
530 .irq_on = ata_irq_on,
532 .error_handler = mv_error_handler,
533 .post_internal_cmd = mv_post_int_cmd,
534 .freeze = mv_eh_freeze,
537 .scr_read = mv5_scr_read,
538 .scr_write = mv5_scr_write,
540 .port_start = mv_port_start,
541 .port_stop = mv_port_stop,
544 static const struct ata_port_operations mv6_ops = {
545 .dev_config = mv6_dev_config,
546 .tf_load = ata_tf_load,
547 .tf_read = ata_tf_read,
548 .check_status = ata_check_status,
549 .exec_command = ata_exec_command,
550 .dev_select = ata_std_dev_select,
552 .cable_detect = ata_cable_sata,
554 .qc_prep = mv_qc_prep,
555 .qc_issue = mv_qc_issue,
556 .data_xfer = ata_data_xfer,
558 .irq_clear = mv_irq_clear,
559 .irq_on = ata_irq_on,
561 .error_handler = mv_error_handler,
562 .post_internal_cmd = mv_post_int_cmd,
563 .freeze = mv_eh_freeze,
566 .scr_read = mv_scr_read,
567 .scr_write = mv_scr_write,
569 .port_start = mv_port_start,
570 .port_stop = mv_port_stop,
573 static const struct ata_port_operations mv_iie_ops = {
574 .tf_load = ata_tf_load,
575 .tf_read = ata_tf_read,
576 .check_status = ata_check_status,
577 .exec_command = ata_exec_command,
578 .dev_select = ata_std_dev_select,
580 .cable_detect = ata_cable_sata,
582 .qc_prep = mv_qc_prep_iie,
583 .qc_issue = mv_qc_issue,
584 .data_xfer = ata_data_xfer,
586 .irq_clear = mv_irq_clear,
587 .irq_on = ata_irq_on,
589 .error_handler = mv_error_handler,
590 .post_internal_cmd = mv_post_int_cmd,
591 .freeze = mv_eh_freeze,
594 .scr_read = mv_scr_read,
595 .scr_write = mv_scr_write,
597 .port_start = mv_port_start,
598 .port_stop = mv_port_stop,
601 static const struct ata_port_info mv_port_info[] = {
603 .flags = MV_COMMON_FLAGS,
604 .pio_mask = 0x1f, /* pio0-4 */
605 .udma_mask = ATA_UDMA6,
606 .port_ops = &mv5_ops,
609 .flags = MV_COMMON_FLAGS | MV_FLAG_DUAL_HC,
610 .pio_mask = 0x1f, /* pio0-4 */
611 .udma_mask = ATA_UDMA6,
612 .port_ops = &mv5_ops,
615 .flags = MV_COMMON_FLAGS | MV_FLAG_DUAL_HC,
616 .pio_mask = 0x1f, /* pio0-4 */
617 .udma_mask = ATA_UDMA6,
618 .port_ops = &mv5_ops,
621 .flags = MV_COMMON_FLAGS | MV_6XXX_FLAGS,
622 .pio_mask = 0x1f, /* pio0-4 */
623 .udma_mask = ATA_UDMA6,
624 .port_ops = &mv6_ops,
627 .flags = MV_COMMON_FLAGS | MV_6XXX_FLAGS |
629 .pio_mask = 0x1f, /* pio0-4 */
630 .udma_mask = ATA_UDMA6,
631 .port_ops = &mv6_ops,
634 .flags = MV_COMMON_FLAGS | MV_6XXX_FLAGS,
635 .pio_mask = 0x1f, /* pio0-4 */
636 .udma_mask = ATA_UDMA6,
637 .port_ops = &mv_iie_ops,
640 .flags = MV_COMMON_FLAGS | MV_6XXX_FLAGS,
641 .pio_mask = 0x1f, /* pio0-4 */
642 .udma_mask = ATA_UDMA6,
643 .port_ops = &mv_iie_ops,
647 static const struct pci_device_id mv_pci_tbl[] = {
648 { PCI_VDEVICE(MARVELL, 0x5040), chip_504x },
649 { PCI_VDEVICE(MARVELL, 0x5041), chip_504x },
650 { PCI_VDEVICE(MARVELL, 0x5080), chip_5080 },
651 { PCI_VDEVICE(MARVELL, 0x5081), chip_508x },
652 /* RocketRAID 1740/174x have different identifiers */
653 { PCI_VDEVICE(TTI, 0x1740), chip_508x },
654 { PCI_VDEVICE(TTI, 0x1742), chip_508x },
656 { PCI_VDEVICE(MARVELL, 0x6040), chip_604x },
657 { PCI_VDEVICE(MARVELL, 0x6041), chip_604x },
658 { PCI_VDEVICE(MARVELL, 0x6042), chip_6042 },
659 { PCI_VDEVICE(MARVELL, 0x6080), chip_608x },
660 { PCI_VDEVICE(MARVELL, 0x6081), chip_608x },
662 { PCI_VDEVICE(ADAPTEC2, 0x0241), chip_604x },
665 { PCI_VDEVICE(ADAPTEC2, 0x0243), chip_7042 },
667 /* Marvell 7042 support */
668 { PCI_VDEVICE(MARVELL, 0x7042), chip_7042 },
670 /* Highpoint RocketRAID PCIe series */
671 { PCI_VDEVICE(TTI, 0x2300), chip_7042 },
672 { PCI_VDEVICE(TTI, 0x2310), chip_7042 },
674 { } /* terminate list */
677 static struct pci_driver mv_pci_driver = {
679 .id_table = mv_pci_tbl,
680 .probe = mv_init_one,
681 .remove = ata_pci_remove_one,
684 static const struct mv_hw_ops mv5xxx_ops = {
685 .phy_errata = mv5_phy_errata,
686 .enable_leds = mv5_enable_leds,
687 .read_preamp = mv5_read_preamp,
688 .reset_hc = mv5_reset_hc,
689 .reset_flash = mv5_reset_flash,
690 .reset_bus = mv5_reset_bus,
693 static const struct mv_hw_ops mv6xxx_ops = {
694 .phy_errata = mv6_phy_errata,
695 .enable_leds = mv6_enable_leds,
696 .read_preamp = mv6_read_preamp,
697 .reset_hc = mv6_reset_hc,
698 .reset_flash = mv6_reset_flash,
699 .reset_bus = mv_reset_pci_bus,
705 static int msi; /* Use PCI msi; either zero (off, default) or non-zero */
708 /* move to PCI layer or libata core? */
709 static int pci_go_64(struct pci_dev *pdev)
/*
 * Configure the widest DMA mask the platform accepts: prefer 64-bit
 * streaming + coherent masks, fall back to 32-bit on failure, printing
 * an error for each combination that cannot be enabled.
 * NOTE(review): braces, the declaration of 'rc', and the return
 * statements are elided in this extraction -- confirm the exact control
 * flow against the upstream file.
 */
713 if (!pci_set_dma_mask(pdev, DMA_64BIT_MASK)) {
/* 64-bit streaming DMA works; try 64-bit coherent, else 32-bit coherent */
714 rc = pci_set_consistent_dma_mask(pdev, DMA_64BIT_MASK);
716 rc = pci_set_consistent_dma_mask(pdev, DMA_32BIT_MASK);
718 dev_printk(KERN_ERR, &pdev->dev,
719 "64-bit DMA enable failed\n");
/* no 64-bit support: fall back to 32-bit streaming + coherent masks */
724 rc = pci_set_dma_mask(pdev, DMA_32BIT_MASK);
726 dev_printk(KERN_ERR, &pdev->dev,
727 "32-bit DMA enable failed\n");
730 rc = pci_set_consistent_dma_mask(pdev, DMA_32BIT_MASK);
732 dev_printk(KERN_ERR, &pdev->dev,
733 "32-bit consistent DMA enable failed\n");
745 static inline void writelfl(unsigned long data, void __iomem *addr)
748 (void) readl(addr); /* flush to avoid PCI posted write */
751 static inline void __iomem *mv_hc_base(void __iomem *base, unsigned int hc)
753 return (base + MV_SATAHC0_REG_BASE + (hc * MV_SATAHC_REG_SZ));
756 static inline unsigned int mv_hc_from_port(unsigned int port)
758 return port >> MV_PORT_HC_SHIFT;
761 static inline unsigned int mv_hardport_from_port(unsigned int port)
763 return port & MV_PORT_MASK;
766 static inline void __iomem *mv_hc_base_from_port(void __iomem *base,
769 return mv_hc_base(base, mv_hc_from_port(port));
772 static inline void __iomem *mv_port_base(void __iomem *base, unsigned int port)
774 return mv_hc_base_from_port(base, port) +
775 MV_SATAHC_ARBTR_REG_SZ +
776 (mv_hardport_from_port(port) * MV_PORT_REG_SZ);
779 static inline void __iomem *mv_ap_base(struct ata_port *ap)
781 return mv_port_base(ap->host->iomap[MV_PRIMARY_BAR], ap->port_no);
784 static inline int mv_get_hc_count(unsigned long port_flags)
786 return ((port_flags & MV_FLAG_DUAL_HC) ? 2 : 1);
/* libata ->irq_clear hook.
 * NOTE(review): the body is not visible in this extraction; upstream
 * defines this as an intentionally empty stub -- confirm before merging.
 */
static void mv_irq_clear(struct ata_port *ap)
{
}
793 static void mv_set_edma_ptrs(void __iomem *port_mmio,
794 struct mv_host_priv *hpriv,
795 struct mv_port_priv *pp)
/*
 * Program the EDMA request/response queue base addresses and in/out
 * pointers from the software-side indices in @pp.  Called under the
 * host lock by mv_port_start().
 * NOTE(review): the "else" lines pairing with the two MV_HP_ERRATA_XX42A0
 * tests appear to have been dropped by extraction -- verify upstream.
 */
800 * initialize request queue
802 index = (pp->req_idx & MV_MAX_Q_DEPTH_MASK) << EDMA_REQ_Q_PTR_SHIFT;
/* request queue base must be 1KB-aligned (low 10 bits clear) */
804 WARN_ON(pp->crqb_dma & 0x3ff);
805 writel((pp->crqb_dma >> 16) >> 16, port_mmio + EDMA_REQ_Q_BASE_HI_OFS);
806 writelfl((pp->crqb_dma & EDMA_REQ_Q_BASE_LO_MASK) | index,
807 port_mmio + EDMA_REQ_Q_IN_PTR_OFS);
/* XX42A0 errata chips also want the base bits in the OUT pointer reg */
809 if (hpriv->hp_flags & MV_HP_ERRATA_XX42A0)
810 writelfl((pp->crqb_dma & 0xffffffff) | index,
811 port_mmio + EDMA_REQ_Q_OUT_PTR_OFS);
813 writelfl(index, port_mmio + EDMA_REQ_Q_OUT_PTR_OFS);
816 * initialize response queue
818 index = (pp->resp_idx & MV_MAX_Q_DEPTH_MASK) << EDMA_RSP_Q_PTR_SHIFT;
/* response queue base must be 256B-aligned (low 8 bits clear) */
820 WARN_ON(pp->crpb_dma & 0xff);
821 writel((pp->crpb_dma >> 16) >> 16, port_mmio + EDMA_RSP_Q_BASE_HI_OFS);
823 if (hpriv->hp_flags & MV_HP_ERRATA_XX42A0)
824 writelfl((pp->crpb_dma & 0xffffffff) | index,
825 port_mmio + EDMA_RSP_Q_IN_PTR_OFS);
827 writelfl(index, port_mmio + EDMA_RSP_Q_IN_PTR_OFS);
829 writelfl((pp->crpb_dma & EDMA_RSP_Q_BASE_LO_MASK) | index,
830 port_mmio + EDMA_RSP_Q_OUT_PTR_OFS);
834 * mv_start_dma - Enable eDMA engine
835 * @base: port base address
836 * @pp: port private data
838 * Verify the local cache of the eDMA state is accurate with a
842 * Inherited from caller.
844 static void mv_start_dma(struct ata_port *ap, void __iomem *port_mmio,
845 struct mv_port_priv *pp, u8 protocol)
/*
 * Enable the EDMA engine for this port if it is not already running:
 * clear stale EDMA/HC interrupt state, rewrite the EDMA configuration
 * and queue pointers, then set EDMA_EN and record it in pp_flags.
 */
847 int want_ncq = (protocol == ATA_PROT_NCQ);
849 if (pp->pp_flags & MV_PP_FLAG_EDMA_EN) {
850 int using_ncq = ((pp->pp_flags & MV_PP_FLAG_NCQ_EN) != 0);
/* NOTE(review): the statement taken when the NCQ mode must change
 * (presumably a stop of EDMA so it gets reconfigured below) is elided
 * in this extraction -- confirm upstream. */
851 if (want_ncq != using_ncq)
854 if (!(pp->pp_flags & MV_PP_FLAG_EDMA_EN)) {
855 struct mv_host_priv *hpriv = ap->host->private_data;
856 int hard_port = mv_hardport_from_port(ap->port_no);
857 void __iomem *hc_mmio = mv_hc_base_from_port(
858 ap->host->iomap[MV_PRIMARY_BAR], hard_port);
859 u32 hc_irq_cause, ipending;
861 /* clear EDMA event indicators, if any */
862 writelfl(0, port_mmio + EDMA_ERR_IRQ_CAUSE_OFS);
864 /* clear EDMA interrupt indicator, if any */
865 hc_irq_cause = readl(hc_mmio + HC_IRQ_CAUSE_OFS);
866 ipending = (DEV_IRQ << hard_port) |
867 (CRPB_DMA_DONE << hard_port);
868 if (hc_irq_cause & ipending) {
/* ack only this port's pending bits, leaving siblings untouched */
869 writelfl(hc_irq_cause & ~ipending,
870 hc_mmio + HC_IRQ_CAUSE_OFS);
873 mv_edma_cfg(pp, hpriv, port_mmio, want_ncq);
875 /* clear FIS IRQ Cause */
876 writelfl(0, port_mmio + SATA_FIS_IRQ_CAUSE_OFS);
878 mv_set_edma_ptrs(port_mmio, hpriv, pp);
/* kick the engine and mirror the state into the software flag */
880 writelfl(EDMA_EN, port_mmio + EDMA_CMD_OFS);
881 pp->pp_flags |= MV_PP_FLAG_EDMA_EN;
883 WARN_ON(!(EDMA_EN & readl(port_mmio + EDMA_CMD_OFS)));
887 * __mv_stop_dma - Disable eDMA engine
888 * @ap: ATA channel to manipulate
890 * Verify the local cache of the eDMA state is accurate with a
894 * Inherited from caller.
896 static int __mv_stop_dma(struct ata_port *ap)
/*
 * Disable the EDMA engine (if the software flag says it is running) and
 * poll until the hardware confirms it has stopped.  Callers take the
 * host lock first -- see the mv_stop_dma() wrapper.
 * NOTE(review): the declarations of reg/i and the return statements are
 * elided in this extraction -- confirm the error/return paths upstream.
 */
898 void __iomem *port_mmio = mv_ap_base(ap);
899 struct mv_port_priv *pp = ap->private_data;
903 if (pp->pp_flags & MV_PP_FLAG_EDMA_EN) {
904 /* Disable EDMA if active. The disable bit auto clears.
906 writelfl(EDMA_DS, port_mmio + EDMA_CMD_OFS);
907 pp->pp_flags &= ~MV_PP_FLAG_EDMA_EN;
/* flag said "not running": hardware had better agree */
909 WARN_ON(EDMA_EN & readl(port_mmio + EDMA_CMD_OFS));
912 /* now properly wait for the eDMA to stop */
/* bounded busy-wait: up to 1000 polls of the EDMA command register */
913 for (i = 1000; i > 0; i--) {
914 reg = readl(port_mmio + EDMA_CMD_OFS);
915 if (!(reg & EDMA_EN))
922 ata_port_printk(ap, KERN_ERR, "Unable to stop eDMA\n");
929 static int mv_stop_dma(struct ata_port *ap)
/* Locked wrapper around __mv_stop_dma(): stop the EDMA engine with the
 * host lock held and propagate its result ('flags'/'rc' declarations and
 * the return are elided in this extraction). */
934 spin_lock_irqsave(&ap->host->lock, flags);
935 rc = __mv_stop_dma(ap);
936 spin_unlock_irqrestore(&ap->host->lock, flags);
942 static void mv_dump_mem(void __iomem *start, unsigned bytes)
/* Debug helper: hex-dump @bytes of MMIO space at @start, four 32-bit
 * words per DPRINTK line.  NOTE(review): the declarations of b/w and
 * the loop's advance of 'b' are elided in this extraction. */
945 for (b = 0; b < bytes; ) {
946 DPRINTK("%p: ", start + b);
947 for (w = 0; b < bytes && w < 4; w++) {
948 printk("%08x ", readl(start + b));
956 static void mv_dump_pci_cfg(struct pci_dev *pdev, unsigned bytes)
/* Debug helper: hex-dump @bytes of PCI config space, four dwords per
 * line, read via pci_read_config_dword().  NOTE(review): the printk of
 * 'dw' and the loop-counter advance are elided in this extraction. */
961 for (b = 0; b < bytes; ) {
962 DPRINTK("%02x: ", b);
963 for (w = 0; b < bytes && w < 4; w++) {
964 (void) pci_read_config_dword(pdev, b, &dw);
972 static void mv_dump_all_regs(void __iomem *mmio_base, int port,
973 struct pci_dev *pdev)
/*
 * Debug helper: dump PCI config space, the PCI register blocks, and the
 * SATAHC / EDMA / SATA register windows either for one port (port >= 0)
 * or for all ports.  NOTE(review): the branch framing that selects the
 * single-port vs all-ports path, and any enable guard, are elided in
 * this extraction.
 */
976 void __iomem *hc_base = mv_hc_base(mmio_base,
977 port >> MV_PORT_HC_SHIFT);
978 void __iomem *port_base;
979 int start_port, num_ports, p, start_hc, num_hcs, hc;
982 start_hc = start_port = 0;
983 num_ports = 8; /* shld be benign for 4 port devs */
986 start_hc = port >> MV_PORT_HC_SHIFT;
988 num_ports = num_hcs = 1;
990 DPRINTK("All registers for port(s) %u-%u:\n", start_port,
991 num_ports > 1 ? num_ports - 1 : start_port);
994 DPRINTK("PCI config space regs:\n");
995 mv_dump_pci_cfg(pdev, 0x68);
997 DPRINTK("PCI regs:\n");
998 mv_dump_mem(mmio_base+0xc00, 0x3c);
999 mv_dump_mem(mmio_base+0xd00, 0x34);
1000 mv_dump_mem(mmio_base+0xf00, 0x4);
1001 mv_dump_mem(mmio_base+0x1d00, 0x6c);
1002 for (hc = start_hc; hc < start_hc + num_hcs; hc++) {
1003 hc_base = mv_hc_base(mmio_base, hc);
1004 DPRINTK("HC regs (HC %i):\n", hc);
1005 mv_dump_mem(hc_base, 0x1c);
1007 for (p = start_port; p < start_port + num_ports; p++) {
1008 port_base = mv_port_base(mmio_base, p);
1009 DPRINTK("EDMA regs (port %i):\n", p);
1010 mv_dump_mem(port_base, 0x54);
1011 DPRINTK("SATA regs (port %i):\n", p);
1012 mv_dump_mem(port_base+0x300, 0x60);
1017 static unsigned int mv_scr_offset(unsigned int sc_reg_in)
/* Map an SCR index to its per-port MMIO offset; unknown indices yield
 * the 0xffffffffU sentinel that mv_scr_read()/mv_scr_write() check for.
 * NOTE(review): the case labels, default arm, and return are elided in
 * this extraction. */
1021 switch (sc_reg_in) {
1025 ofs = SATA_STATUS_OFS + (sc_reg_in * sizeof(u32));
1028 ofs = SATA_ACTIVE_OFS; /* active is not with the others */
1037 static int mv_scr_read(struct ata_port *ap, unsigned int sc_reg_in, u32 *val)
/* SCR read hook: translate the index via mv_scr_offset() and read the
 * register into *val; the 0xffffffffU sentinel means "unsupported index"
 * (the success/error returns are elided in this extraction). */
1039 unsigned int ofs = mv_scr_offset(sc_reg_in);
1041 if (ofs != 0xffffffffU) {
1042 *val = readl(mv_ap_base(ap) + ofs);
1048 static int mv_scr_write(struct ata_port *ap, unsigned int sc_reg_in, u32 val)
/* SCR write hook: mirror of mv_scr_read(), using the posted-write-flushing
 * writelfl(); 0xffffffffU from mv_scr_offset() means "unsupported index"
 * (the success/error returns are elided in this extraction). */
1050 unsigned int ofs = mv_scr_offset(sc_reg_in);
1052 if (ofs != 0xffffffffU) {
1053 writelfl(val, mv_ap_base(ap) + ofs);
1059 static void mv6_dev_config(struct ata_device *adev)
/* Gen-II workaround: NCQ commands cannot carry hob_nsect (see the
 * comment below), so clamp max_sectors for NCQ-capable devices. */
1062 * We don't have hob_nsect when doing NCQ commands on Gen-II.
1063 * See mv_qc_prep() for more info.
1065 if (adev->flags & ATA_DFLAG_NCQ)
1066 if (adev->max_sectors > ATA_MAX_SECTORS)
1067 adev->max_sectors = ATA_MAX_SECTORS;
1070 static void mv_edma_cfg(struct mv_port_priv *pp, struct mv_host_priv *hpriv,
1071 void __iomem *port_mmio, int want_ncq)
/*
 * Build the per-port EDMA configuration word with generation-specific
 * feature bits, mirror the NCQ choice into pp_flags, and write it out.
 * NOTE(review): the "if (want_ncq) { ... } else" framing around the
 * last three statements is elided in this extraction -- confirm upstream.
 */
1075 /* set up non-NCQ EDMA configuration */
1076 cfg = EDMA_CFG_Q_DEPTH; /* always 0x1f for *all* chips */
1078 if (IS_GEN_I(hpriv))
1079 cfg |= (1 << 8); /* enab config burst size mask */
1081 else if (IS_GEN_II(hpriv))
1082 cfg |= EDMA_CFG_RD_BRST_EXT | EDMA_CFG_WR_BUFF_LEN;
1084 else if (IS_GEN_IIE(hpriv)) {
1085 cfg |= (1 << 23); /* do not mask PM field in rx'd FIS */
1086 cfg |= (1 << 22); /* enab 4-entry host queue cache */
1087 cfg |= (1 << 18); /* enab early completion */
1088 cfg |= (1 << 17); /* enab cut-through (dis stor&forwrd) */
1092 cfg |= EDMA_CFG_NCQ;
1093 pp->pp_flags |= MV_PP_FLAG_NCQ_EN;
1095 pp->pp_flags &= ~MV_PP_FLAG_NCQ_EN;
1097 writelfl(cfg, port_mmio + EDMA_CFG_OFS);
1101 * mv_port_start - Port specific init/start routine.
1102 * @ap: ATA channel to manipulate
1104 * Allocate and point to DMA memory, init port private memory,
1108 * Inherited from caller.
1110 static int mv_port_start(struct ata_port *ap)
/*
 * Allocate the per-port private struct and one coherent DMA chunk that
 * is carved into the CRQB queue, CRPB queue, and ePRD (SG) table, then
 * program the EDMA config and queue pointers under the host lock.
 * EDMA itself is deliberately NOT enabled here (see comment at the end).
 * NOTE(review): allocation-failure checks/returns and the declarations
 * of mem/mem_dma/rc are elided in this extraction.
 */
1112 struct device *dev = ap->host->dev;
1113 struct mv_host_priv *hpriv = ap->host->private_data;
1114 struct mv_port_priv *pp;
1115 void __iomem *port_mmio = mv_ap_base(ap);
1118 unsigned long flags;
1121 pp = devm_kzalloc(dev, sizeof(*pp), GFP_KERNEL);
1125 mem = dmam_alloc_coherent(dev, MV_PORT_PRIV_DMA_SZ, &mem_dma,
1129 memset(mem, 0, MV_PORT_PRIV_DMA_SZ);
1131 rc = ata_pad_alloc(ap, dev);
1135 /* First item in chunk of DMA memory:
1136 * 32-slot command request table (CRQB), 32 bytes each in size
1139 pp->crqb_dma = mem_dma;
1140 mem += MV_CRQB_Q_SZ;
1141 mem_dma += MV_CRQB_Q_SZ;
1144 * 32-slot command response table (CRPB), 8 bytes each in size
1147 pp->crpb_dma = mem_dma;
1148 mem += MV_CRPB_Q_SZ;
1149 mem_dma += MV_CRPB_Q_SZ;
1152 * Table of scatter-gather descriptors (ePRD), 16 bytes each
1155 pp->sg_tbl_dma = mem_dma;
/* config + queue-pointer writes must not race the interrupt path */
1157 spin_lock_irqsave(&ap->host->lock, flags);
1159 mv_edma_cfg(pp, hpriv, port_mmio, 0);
1161 mv_set_edma_ptrs(port_mmio, hpriv, pp);
1163 spin_unlock_irqrestore(&ap->host->lock, flags);
1165 /* Don't turn on EDMA here...do it before DMA commands only. Else
1166 * we'll be unable to send non-data, PIO, etc due to restricted access
1169 ap->private_data = pp;
1174 * mv_port_stop - Port specific cleanup/stop routine.
1175 * @ap: ATA channel to manipulate
1177 * Stop DMA, cleanup port memory.
1180 * This routine uses the host lock to protect the DMA stop.
1182 static void mv_port_stop(struct ata_port *ap)
1188 * mv_fill_sg - Fill out the Marvell ePRD (scatter gather) entries
1189 * @qc: queued command whose SG list to source from
1191 * Populate the SG list and mark the last entry.
1194 * Inherited from caller.
1196 static void mv_fill_sg(struct ata_queued_cmd *qc)
/*
 * Translate the qc's scatterlist into the port's ePRD table.  Entries
 * are split so no single ePRD's length runs past a 64KB boundary
 * (offset + sg_len is clamped to 0x10000, matching the MV_DMA_BOUNDARY
 * comment earlier in the file), and the last entry written is tagged
 * with EPRD_FLAG_END_OF_TBL.
 * NOTE(review): the inner splitting loop's advance of addr/sg_len and
 * the mv_sg/last_sg update lines are elided in this extraction.
 */
1198 struct mv_port_priv *pp = qc->ap->private_data;
1199 struct scatterlist *sg;
1200 struct mv_sg *mv_sg, *last_sg = NULL;
1204 for_each_sg(qc->sg, sg, qc->n_elem, si) {
1205 dma_addr_t addr = sg_dma_address(sg);
1206 u32 sg_len = sg_dma_len(sg);
1209 u32 offset = addr & 0xffff;
1212 if ((offset + sg_len > 0x10000))
1213 len = 0x10000 - offset;
/* 64-bit DMA address split across the lo/hi ePRD words */
1215 mv_sg->addr = cpu_to_le32(addr & 0xffffffff);
1216 mv_sg->addr_hi = cpu_to_le32((addr >> 16) >> 16);
1217 mv_sg->flags_size = cpu_to_le32(len & 0xffff);
/* mark the final entry so the EDMA engine knows where the table ends */
1227 if (likely(last_sg))
1228 last_sg->flags_size |= cpu_to_le32(EPRD_FLAG_END_OF_TBL);
1231 static void mv_crqb_pack_cmd(__le16 *cmdw, u8 data, u8 addr, unsigned last)
1233 u16 tmp = data | (addr << CRQB_CMD_ADDR_SHIFT) | CRQB_CMD_CS |
1234 (last ? CRQB_CMD_LAST : 0);
1235 *cmdw = cpu_to_le16(tmp);
1239 * mv_qc_prep - Host specific command preparation.
1240 * @qc: queued command to prepare
1242 * This routine simply redirects to the general purpose routine
1243 * if command is not DMA. Else, it handles prep of the CRQB
1244 * (command request block), does some sanity checking, and calls
1245 * the SG load routine.
1248 * Inherited from caller.
1250 static void mv_qc_prep(struct ata_queued_cmd *qc)
1252 struct ata_port *ap = qc->ap;
1253 struct mv_port_priv *pp = ap->private_data;
1255 struct ata_taskfile *tf;
/* Non-DMA protocols are handed to the generic PIO/non-data path. */
1259 if (qc->tf.protocol != ATA_PROT_DMA)
1262 /* Fill in command request block
1264 if (!(qc->tf.flags & ATA_TFLAG_WRITE))
1265 flags |= CRQB_FLAG_READ;
1266 WARN_ON(MV_MAX_Q_DEPTH <= qc->tag)
1267 flags |= qc->tag << CRQB_TAG_SHIFT;
1269 /* get current queue index from software */
1270 in_index = pp->req_idx & MV_MAX_Q_DEPTH_MASK;
/* Point the CRQB at the per-port SG table; ">> 16 >> 16" splits the
 * 64-bit DMA address without invoking UB on 32-bit dma_addr_t. */
1272 pp->crqb[in_index].sg_addr =
1273 cpu_to_le32(pp->sg_tbl_dma & 0xffffffff);
1274 pp->crqb[in_index].sg_addr_hi =
1275 cpu_to_le32((pp->sg_tbl_dma >> 16) >> 16);
1276 pp->crqb[in_index].ctrl_flags = cpu_to_le16(flags);
1278 cw = &pp->crqb[in_index].ata_cmd[0];
1281 /* Sadly, the CRQB cannot accomodate all registers--there are
1282 * only 11 bytes...so we must pick and choose required
1283 * registers based on the command. So, we drop feature and
1284 * hob_feature for [RW] DMA commands, but they are needed for
1285 * NCQ. NCQ will drop hob_nsect.
1287 switch (tf->command) {
1289 case ATA_CMD_READ_EXT:
1291 case ATA_CMD_WRITE_EXT:
1292 case ATA_CMD_WRITE_FUA_EXT:
1293 mv_crqb_pack_cmd(cw++, tf->hob_nsect, ATA_REG_NSECT, 0);
1295 #ifdef LIBATA_NCQ /* FIXME: remove this line when NCQ added */
1296 case ATA_CMD_FPDMA_READ:
1297 case ATA_CMD_FPDMA_WRITE:
1298 mv_crqb_pack_cmd(cw++, tf->hob_feature, ATA_REG_FEATURE, 0);
1299 mv_crqb_pack_cmd(cw++, tf->feature, ATA_REG_FEATURE, 0);
1301 #endif /* FIXME: remove this line when NCQ added */
1303 /* The only other commands EDMA supports in non-queued and
1304 * non-NCQ mode are: [RW] STREAM DMA and W DMA FUA EXT, none
1305 * of which are defined/used by Linux. If we get here, this
1306 * driver needs work.
1308 * FIXME: modify libata to give qc_prep a return value and
1309 * return error here.
1311 BUG_ON(tf->command);
/* Common tail: LBA48 high-order bytes first, then low-order, device,
 * and finally the command byte marked as the last CRQB word. */
1314 mv_crqb_pack_cmd(cw++, tf->nsect, ATA_REG_NSECT, 0);
1315 mv_crqb_pack_cmd(cw++, tf->hob_lbal, ATA_REG_LBAL, 0);
1316 mv_crqb_pack_cmd(cw++, tf->lbal, ATA_REG_LBAL, 0);
1317 mv_crqb_pack_cmd(cw++, tf->hob_lbam, ATA_REG_LBAM, 0);
1318 mv_crqb_pack_cmd(cw++, tf->lbam, ATA_REG_LBAM, 0);
1319 mv_crqb_pack_cmd(cw++, tf->hob_lbah, ATA_REG_LBAH, 0);
1320 mv_crqb_pack_cmd(cw++, tf->lbah, ATA_REG_LBAH, 0);
1321 mv_crqb_pack_cmd(cw++, tf->device, ATA_REG_DEVICE, 0);
1322 mv_crqb_pack_cmd(cw++, tf->command, ATA_REG_CMD, 1); /* last */
/* Skip SG-table fill when no DMA mapping was set up for this qc. */
1324 if (!(qc->flags & ATA_QCFLAG_DMAMAP))
1330 * mv_qc_prep_iie - Host specific command preparation.
1331 * @qc: queued command to prepare
1333 * This routine simply redirects to the general purpose routine
1334 * if command is not DMA. Else, it handles prep of the CRQB
1335 * (command request block), does some sanity checking, and calls
1336 * the SG load routine.
1339 * Inherited from caller.
1341 static void mv_qc_prep_iie(struct ata_queued_cmd *qc)
1343 struct ata_port *ap = qc->ap;
1344 struct mv_port_priv *pp = ap->private_data;
1345 struct mv_crqb_iie *crqb;
1346 struct ata_taskfile *tf;
/* Non-DMA protocols fall back to the generic path, as in mv_qc_prep(). */
1350 if (qc->tf.protocol != ATA_PROT_DMA)
1353 /* Fill in Gen IIE command request block
1355 if (!(qc->tf.flags & ATA_TFLAG_WRITE))
1356 flags |= CRQB_FLAG_READ;
1358 WARN_ON(MV_MAX_Q_DEPTH <= qc->tag);
/* Gen IIE carries the tag in two places: the CRQB tag field and the
 * host-queue tag field. */
1359 flags |= qc->tag << CRQB_TAG_SHIFT;
1360 flags |= qc->tag << CRQB_HOSTQ_SHIFT;
1362 /* get current queue index from software */
1363 in_index = pp->req_idx & MV_MAX_Q_DEPTH_MASK;
/* The IIE CRQB overlays the generic one; same UB-safe 64-bit address
 * split as elsewhere in this file. */
1365 crqb = (struct mv_crqb_iie *) &pp->crqb[in_index];
1366 crqb->addr = cpu_to_le32(pp->sg_tbl_dma & 0xffffffff);
1367 crqb->addr_hi = cpu_to_le32((pp->sg_tbl_dma >> 16) >> 16);
1368 crqb->flags = cpu_to_le32(flags);
/* Unlike Gen I/II, IIE takes the taskfile as packed 32-bit words
 * rather than a sequence of register-write command words. */
1371 crqb->ata_cmd[0] = cpu_to_le32(
1372 (tf->command << 16) |
1375 crqb->ata_cmd[1] = cpu_to_le32(
1381 crqb->ata_cmd[2] = cpu_to_le32(
1382 (tf->hob_lbal << 0) |
1383 (tf->hob_lbam << 8) |
1384 (tf->hob_lbah << 16) |
1385 (tf->hob_feature << 24)
1387 crqb->ata_cmd[3] = cpu_to_le32(
1389 (tf->hob_nsect << 8)
/* Skip SG-table fill when no DMA mapping was set up for this qc. */
1392 if (!(qc->flags & ATA_QCFLAG_DMAMAP))
1398 * mv_qc_issue - Initiate a command to the host
1399 * @qc: queued command to start
1401 * This routine simply redirects to the general purpose routine
1402 * if command is not DMA. Else, it sanity checks our local
1403 * caches of the request producer/consumer indices then enables
1404 * DMA and bumps the request producer index.
1407 * Inherited from caller.
1409 static unsigned int mv_qc_issue(struct ata_queued_cmd *qc)
1411 struct ata_port *ap = qc->ap;
1412 void __iomem *port_mmio = mv_ap_base(ap);
1413 struct mv_port_priv *pp = ap->private_data;
1416 if (qc->tf.protocol != ATA_PROT_DMA) {
1417 /* We're about to send a non-EDMA capable command to the
1418 * port. Turn off EDMA so there won't be problems accessing
1419 * shadow block, etc registers.
1422 return ata_qc_issue_prot(qc);
1425 mv_start_dma(ap, port_mmio, pp, qc->tf.protocol);
1427 in_index = pp->req_idx & MV_MAX_Q_DEPTH_MASK;
1429 /* until we do queuing, the queue should be empty at this point */
1430 WARN_ON(in_index != ((readl(port_mmio + EDMA_REQ_Q_OUT_PTR_OFS)
1431 >> EDMA_REQ_Q_PTR_SHIFT) & MV_MAX_Q_DEPTH_MASK));
/* Software producer index advanced (elided here); recompute the
 * hardware-format in-pointer from it. */
1435 in_index = (pp->req_idx & MV_MAX_Q_DEPTH_MASK) << EDMA_REQ_Q_PTR_SHIFT;
1437 /* and write the request in pointer to kick the EDMA to life */
1438 writelfl((pp->crqb_dma & EDMA_REQ_Q_BASE_LO_MASK) | in_index,
1439 port_mmio + EDMA_REQ_Q_IN_PTR_OFS);
1445 * mv_err_intr - Handle error interrupts on the port
1446 * @ap: ATA channel to manipulate
1447 * @qc: affected command (may be NULL), receives err_mask on error
1449 * In most cases, just clear the interrupt and move on. However,
1450 * some cases require an eDMA reset, which is done right before
1451 * the COMRESET in mv_phy_reset(). The SERR case requires a
1452 * clear of pending errors in the SATA SERROR register. Finally,
1453 * if the port disabled DMA, update our cached copy to match.
1456 * Inherited from caller.
1458 static void mv_err_intr(struct ata_port *ap, struct ata_queued_cmd *qc)
1460 void __iomem *port_mmio = mv_ap_base(ap);
1461 u32 edma_err_cause, eh_freeze_mask, serr = 0;
1462 struct mv_port_priv *pp = ap->private_data;
1463 struct mv_host_priv *hpriv = ap->host->private_data;
1464 unsigned int edma_enabled = (pp->pp_flags & MV_PP_FLAG_EDMA_EN);
1465 unsigned int action = 0, err_mask = 0;
1466 struct ata_eh_info *ehi = &ap->link.eh_info;
1468 ata_ehi_clear_desc(ehi);
1470 if (!edma_enabled) {
1471 /* just a guess: do we need to do this? should we
1472 * expand this, and do it in all cases?
1474 sata_scr_read(&ap->link, SCR_ERROR, &serr);
1475 sata_scr_write_flush(&ap->link, SCR_ERROR, serr);
1478 edma_err_cause = readl(port_mmio + EDMA_ERR_IRQ_CAUSE_OFS);
1480 ata_ehi_push_desc(ehi, "edma_err 0x%08x", edma_err_cause);
1483 * all generations share these EDMA error cause bits
1486 if (edma_err_cause & EDMA_ERR_DEV)
1487 err_mask |= AC_ERR_DEV;
1488 if (edma_err_cause & (EDMA_ERR_D_PAR | EDMA_ERR_PRD_PAR |
1489 EDMA_ERR_CRQB_PAR | EDMA_ERR_CRPB_PAR |
1490 EDMA_ERR_INTRL_PAR)) {
1491 err_mask |= AC_ERR_ATA_BUS;
1492 action |= ATA_EH_HARDRESET;
1493 ata_ehi_push_desc(ehi, "parity error");
/* Device connect/disconnect is reported as a hotplug event and also
 * forces a hard reset of the channel. */
1495 if (edma_err_cause & (EDMA_ERR_DEV_DCON | EDMA_ERR_DEV_CON)) {
1496 ata_ehi_hotplugged(ehi);
1497 ata_ehi_push_desc(ehi, edma_err_cause & EDMA_ERR_DEV_DCON ?
1498 "dev disconnect" : "dev connect");
1499 action |= ATA_EH_HARDRESET;
/* Gen I (50xx) has its own freeze mask and self-disable bit; later
 * generations use the common definitions. In both cases an EDMA
 * self-disable must be mirrored into our cached pp_flags. */
1502 if (IS_GEN_I(hpriv)) {
1503 eh_freeze_mask = EDMA_EH_FREEZE_5;
1505 if (edma_err_cause & EDMA_ERR_SELF_DIS_5) {
1506 struct mv_port_priv *pp = ap->private_data;
1507 pp->pp_flags &= ~MV_PP_FLAG_EDMA_EN;
1508 ata_ehi_push_desc(ehi, "EDMA self-disable");
1511 eh_freeze_mask = EDMA_EH_FREEZE;
1513 if (edma_err_cause & EDMA_ERR_SELF_DIS) {
1514 struct mv_port_priv *pp = ap->private_data;
1515 pp->pp_flags &= ~MV_PP_FLAG_EDMA_EN;
1516 ata_ehi_push_desc(ehi, "EDMA self-disable");
1519 if (edma_err_cause & EDMA_ERR_SERR) {
1520 sata_scr_read(&ap->link, SCR_ERROR, &serr);
1521 sata_scr_write_flush(&ap->link, SCR_ERROR, serr);
1522 err_mask = AC_ERR_ATA_BUS;
1523 action |= ATA_EH_HARDRESET;
1527 /* Clear EDMA now that SERR cleanup done */
1528 writelfl(~edma_err_cause, port_mmio + EDMA_ERR_IRQ_CAUSE_OFS);
/* Fallback (condition elided): no recognized cause bits set. */
1531 err_mask = AC_ERR_OTHER;
1532 action |= ATA_EH_HARDRESET;
1535 ehi->serror |= serr;
1536 ehi->action |= action;
/* Attach the error mask to the active qc when there is one (NULL
 * check elided); otherwise record it on the port's eh_info. */
1539 qc->err_mask |= err_mask;
1541 ehi->err_mask |= err_mask;
1543 if (edma_err_cause & eh_freeze_mask)
1544 ata_port_freeze(ap);
/* Complete a PIO-mode command on interrupt: read ATA status, bail on
 * spurious/busy or polled commands, else finish the active qc. */
1549 static void mv_intr_pio(struct ata_port *ap)
1551 struct ata_queued_cmd *qc;
1554 /* ignore spurious intr if drive still BUSY */
1555 ata_status = readb(ap->ioaddr.status_addr);
1556 if (unlikely(ata_status & ATA_BUSY))
1559 /* get active ATA command */
1560 qc = ata_qc_from_tag(ap, ap->link.active_tag);
1561 if (unlikely(!qc)) /* no active tag */
1563 if (qc->tf.flags & ATA_TFLAG_POLLING) /* polling; we don't own qc */
1566 /* and finally, complete the ATA command */
1567 qc->err_mask |= ac_err_mask(ata_status);
1568 ata_qc_complete(qc);
/* Drain the EDMA response queue: for each completed CRPB, find the qc
 * (by active_tag on Gen I, by CRPB id on Gen II/IIE), route errors to
 * mv_err_intr(), complete the command, and finally publish the new
 * software out-pointer back to the hardware. */
1571 static void mv_intr_edma(struct ata_port *ap)
1573 void __iomem *port_mmio = mv_ap_base(ap);
1574 struct mv_host_priv *hpriv = ap->host->private_data;
1575 struct mv_port_priv *pp = ap->private_data;
1576 struct ata_queued_cmd *qc;
1577 u32 out_index, in_index;
1578 bool work_done = false;
1580 /* get h/w response queue pointer */
1581 in_index = (readl(port_mmio + EDMA_RSP_Q_IN_PTR_OFS)
1582 >> EDMA_RSP_Q_PTR_SHIFT) & MV_MAX_Q_DEPTH_MASK;
1588 /* get s/w response queue last-read pointer, and compare */
1589 out_index = pp->resp_idx & MV_MAX_Q_DEPTH_MASK;
1590 if (in_index == out_index)
1593 /* 50xx: get active ATA command */
1594 if (IS_GEN_I(hpriv))
1595 tag = ap->link.active_tag;
1597 /* Gen II/IIE: get active ATA command via tag, to enable
1598 * support for queueing. this works transparently for
1599 * queued and non-queued modes.
/* Only the low 5 bits of the CRPB id are the tag. */
1602 tag = le16_to_cpu(pp->crpb[out_index].id) & 0x1f;
1604 qc = ata_qc_from_tag(ap, tag);
1606 /* For non-NCQ mode, the lower 8 bits of status
1607 * are from EDMA_ERR_IRQ_CAUSE_OFS,
1608 * which should be zero if all went well.
1610 status = le16_to_cpu(pp->crpb[out_index].flags);
1611 if ((status & 0xff) && !(pp->pp_flags & MV_PP_FLAG_NCQ_EN)) {
1612 mv_err_intr(ap, qc);
1616 /* and finally, complete the ATA command */
/* ATA status lives in the upper byte of the CRPB flags word. */
1619 ac_err_mask(status >> CRPB_FLAG_STATUS_SHIFT);
1620 ata_qc_complete(qc);
1623 /* advance software response queue pointer, to
1624 * indicate (after the loop completes) to hardware
1625 * that we have consumed a response queue entry.
/* Single out-pointer write after the loop (guard elided) — one MMIO
 * write covers all entries consumed in this pass. */
1632 writelfl((pp->crpb_dma & EDMA_RSP_Q_BASE_LO_MASK) |
1633 (out_index << EDMA_RSP_Q_PTR_SHIFT),
1634 port_mmio + EDMA_RSP_Q_OUT_PTR_OFS);
1638 * mv_host_intr - Handle all interrupts on the given host controller
1639 * @host: host specific structure
1640 * @relevant: port error bits relevant to this host controller
1641 * @hc: which host controller we're to look at
1643 * Read then write clear the HC interrupt status then walk each
1644 * port connected to the HC and see if it needs servicing. Port
1645 * success ints are reported in the HC interrupt status reg, the
1646 * port error ints are reported in the higher level main
1647 * interrupt status register and thus are passed in via the
1648 * 'relevant' argument.
1651 * Inherited from caller.
1653 static void mv_host_intr(struct ata_host *host, u32 relevant, unsigned int hc)
1655 void __iomem *mmio = host->iomap[MV_PRIMARY_BAR];
1656 void __iomem *hc_mmio = mv_hc_base(mmio, hc);
/* For hc != 0 the first port index is offset (elided); port0 below is
 * the first port belonging to this HC. */
1663 port0 = MV_PORTS_PER_HC;
1665 /* we'll need the HC success int register in most cases */
1666 hc_irq_cause = readl(hc_mmio + HC_IRQ_CAUSE_OFS);
/* Write-1-to-clear via the inverted cause mask. */
1670 writelfl(~hc_irq_cause, hc_mmio + HC_IRQ_CAUSE_OFS);
1672 VPRINTK("ENTER, hc%u relevant=0x%08x HC IRQ cause=0x%08x\n",
1673 hc, relevant, hc_irq_cause);
1675 for (port = port0; port < port0 + MV_PORTS_PER_HC; port++) {
1676 struct ata_port *ap = host->ports[port];
1677 struct mv_port_priv *pp = ap->private_data;
1678 int have_err_bits, hard_port, shift;
1680 if ((!ap) || (ap->flags & ATA_FLAG_DISABLED))
1683 shift = port << 1; /* (port * 2) */
/* Main-cause register reserves bit 8 between the two HCs' port bits. */
1684 if (port >= MV_PORTS_PER_HC) {
1685 shift++; /* skip bit 8 in the HC Main IRQ reg */
1687 have_err_bits = ((PORT0_ERR << shift) & relevant);
1689 if (unlikely(have_err_bits)) {
1690 struct ata_queued_cmd *qc;
1692 qc = ata_qc_from_tag(ap, ap->link.active_tag);
/* Polled commands are owned by the issuing path, not the IRQ. */
1693 if (qc && (qc->tf.flags & ATA_TFLAG_POLLING))
1696 mv_err_intr(ap, qc);
1700 hard_port = mv_hardport_from_port(port); /* range 0..3 */
/* EDMA-enabled ports complete via CRPB-done; otherwise a plain device
 * interrupt means a PIO completion (handlers elided). */
1702 if (pp->pp_flags & MV_PP_FLAG_EDMA_EN) {
1703 if ((CRPB_DMA_DONE << hard_port) & hc_irq_cause)
1706 if ((DEV_IRQ << hard_port) & hc_irq_cause)
/* Handle a chip-level PCI error: log and clear the PCI IRQ cause, then
 * mark every online port's active command (or eh_info) with
 * AC_ERR_HOST_BUS, request a hard reset, and freeze the port. */
1713 static void mv_pci_error(struct ata_host *host, void __iomem *mmio)
1715 struct mv_host_priv *hpriv = host->private_data;
1716 struct ata_port *ap;
1717 struct ata_queued_cmd *qc;
1718 struct ata_eh_info *ehi;
1719 unsigned int i, err_mask, printed = 0;
1722 err_cause = readl(mmio + hpriv->irq_cause_ofs);
1724 dev_printk(KERN_ERR, host->dev, "PCI ERROR; PCI IRQ cause=0x%08x\n",
1727 DPRINTK("All regs @ PCI error\n");
1728 mv_dump_all_regs(mmio, -1, to_pci_dev(host->dev));
/* Clear all pending PCI error causes in one shot. */
1730 writelfl(0, mmio + hpriv->irq_cause_ofs);
1732 for (i = 0; i < host->n_ports; i++) {
1733 ap = host->ports[i];
1734 if (!ata_link_offline(&ap->link)) {
1735 ehi = &ap->link.eh_info;
1736 ata_ehi_clear_desc(ehi);
1738 ata_ehi_push_desc(ehi,
1739 "PCI err cause 0x%08x", err_cause);
1740 err_mask = AC_ERR_HOST_BUS;
1741 ehi->action = ATA_EH_HARDRESET;
1742 qc = ata_qc_from_tag(ap, ap->link.active_tag);
/* Prefer tagging the in-flight command; fall back to eh_info
 * when no command is active (NULL check elided). */
1744 qc->err_mask |= err_mask;
1746 ehi->err_mask |= err_mask;
1748 ata_port_freeze(ap);
1754 * mv_interrupt - Main interrupt event handler
1756 * @dev_instance: private data; in this case the host structure
1758 * Read the read only register to determine if any host
1759 * controllers have pending interrupts. If so, call lower level
1760 * routine to handle. Also check for PCI errors which are only
1764 * This routine holds the host lock while processing pending
1767 static irqreturn_t mv_interrupt(int irq, void *dev_instance)
1769 struct ata_host *host = dev_instance;
1770 unsigned int hc, handled = 0, n_hcs;
1771 void __iomem *mmio = host->iomap[MV_PRIMARY_BAR];
1772 u32 irq_stat, irq_mask;
1774 spin_lock(&host->lock);
1775 irq_stat = readl(mmio + HC_MAIN_IRQ_CAUSE_OFS);
1776 irq_mask = readl(mmio + HC_MAIN_IRQ_MASK_OFS);
1778 /* check the cases where we either have nothing pending or have read
1779 * a bogus register value which can indicate HW removal or PCI fault
/* 0xffffffff is the classic "device gone" read on PCI. */
1781 if (!(irq_stat & irq_mask) || (0xffffffffU == irq_stat))
1784 n_hcs = mv_get_hc_count(host->ports[0]->flags);
/* PCI errors preempt all per-HC handling. */
1786 if (unlikely(irq_stat & PCI_ERR)) {
1787 mv_pci_error(host, mmio);
1789 goto out_unlock; /* skip all other HC irq handling */
1792 for (hc = 0; hc < n_hcs; hc++) {
1793 u32 relevant = irq_stat & (HC0_IRQ_PEND << (hc * HC_SHIFT));
1795 mv_host_intr(host, relevant, hc);
1801 spin_unlock(&host->lock);
1803 return IRQ_RETVAL(handled);
/* Return the MMIO base of a 50xx port's PHY block: the HC base plus one
 * 0x100 window per hard port (slot 0 is the HC's own registers). */
1806 static void __iomem *mv5_phy_base(void __iomem *mmio, unsigned int port)
1808 void __iomem *hc_mmio = mv_hc_base_from_port(mmio, port);
1809 unsigned long ofs = (mv_hardport_from_port(port) + 1) * 0x100UL;
1811 return hc_mmio + ofs;
/* Map an SCR register index to its 50xx PHY-block byte offset; the
 * elided default case presumably returns 0xffffffffU for unsupported
 * registers (callers below test for exactly that sentinel). */
1814 static unsigned int mv5_scr_offset(unsigned int sc_reg_in)
1818 switch (sc_reg_in) {
1822 ofs = sc_reg_in * sizeof(u32);
/* Read an SCR register on 50xx parts via the PHY block; returns through
 * *val only when the register maps to a valid offset. */
1831 static int mv5_scr_read(struct ata_port *ap, unsigned int sc_reg_in, u32 *val)
1833 void __iomem *mmio = ap->host->iomap[MV_PRIMARY_BAR];
1834 void __iomem *addr = mv5_phy_base(mmio, ap->port_no);
1835 unsigned int ofs = mv5_scr_offset(sc_reg_in);
/* 0xffffffffU is mv5_scr_offset()'s "unsupported register" sentinel. */
1837 if (ofs != 0xffffffffU) {
1838 *val = readl(addr + ofs);
/* Write an SCR register on 50xx parts; mirror of mv5_scr_read(). */
1844 static int mv5_scr_write(struct ata_port *ap, unsigned int sc_reg_in, u32 val)
1846 void __iomem *mmio = ap->host->iomap[MV_PRIMARY_BAR];
1847 void __iomem *addr = mv5_phy_base(mmio, ap->port_no);
1848 unsigned int ofs = mv5_scr_offset(sc_reg_in);
1850 if (ofs != 0xffffffffU) {
/* writelfl: write + read-back flush, used for posted-write ordering. */
1851 writelfl(val, addr + ofs);
/* 50xx bus reset: early 5080 rev-0 parts get an expansion-ROM BAR
 * tweak (body partly elided) before the common PCI bus reset. */
1857 static void mv5_reset_bus(struct pci_dev *pdev, void __iomem *mmio)
1861 early_5080 = (pdev->device == 0x5080) && (pdev->revision == 0);
1864 u32 tmp = readl(mmio + MV_PCI_EXP_ROM_BAR_CTL);
1866 writel(tmp, mmio + MV_PCI_EXP_ROM_BAR_CTL);
1869 mv_reset_pci_bus(pdev, mmio);
/* Program the 50xx flash controller with its reset/default value. */
1872 static void mv5_reset_flash(struct mv_host_priv *hpriv, void __iomem *mmio)
1874 writel(0x0fcfffff, mmio + MV_FLASH_CTL);
/* Capture the BIOS/firmware-programmed PHY pre-emphasis and amplitude
 * for one 50xx port so later errata fixups can restore them. */
1877 static void mv5_read_preamp(struct mv_host_priv *hpriv, int idx,
1880 void __iomem *phy_mmio = mv5_phy_base(mmio, idx);
1883 tmp = readl(phy_mmio + MV5_PHY_MODE);
1885 hpriv->signal[idx].pre = tmp & 0x1800; /* bits 12:11 */
1886 hpriv->signal[idx].amps = tmp & 0xe0; /* bits 7:5 */
/* Enable activity LEDs on 50xx: clear GPIO port control, then tweak
 * the expansion-ROM BAR control register (modification elided). */
1889 static void mv5_enable_leds(struct mv_host_priv *hpriv, void __iomem *mmio)
1893 writel(0, mmio + MV_GPIO_PORT_CTL);
1895 /* FIXME: handle MV_HP_ERRATA_50XXB2 errata */
1897 tmp = readl(mmio + MV_PCI_EXP_ROM_BAR_CTL);
1899 writel(tmp, mmio + MV_PCI_EXP_ROM_BAR_CTL);
/* Apply 50xx PHY errata workarounds to one port: link-training mode and
 * PHY control tweaks (gated on 50XXB0 per fix_apm_sq), then restore the
 * saved pre-emphasis/amplitude from hpriv->signal[]. */
1902 static void mv5_phy_errata(struct mv_host_priv *hpriv, void __iomem *mmio,
1905 void __iomem *phy_mmio = mv5_phy_base(mmio, port);
1906 const u32 mask = (1<<12) | (1<<11) | (1<<7) | (1<<6) | (1<<5);
1908 int fix_apm_sq = (hpriv->hp_flags & MV_HP_ERRATA_50XXB0);
1911 tmp = readl(phy_mmio + MV5_LT_MODE);
1913 writel(tmp, phy_mmio + MV5_LT_MODE);
1915 tmp = readl(phy_mmio + MV5_PHY_CTL);
1918 writel(tmp, phy_mmio + MV5_PHY_CTL);
/* Clear the masked bits (step elided) and re-apply the values saved
 * by mv5_read_preamp(). */
1921 tmp = readl(phy_mmio + MV5_PHY_MODE);
1923 tmp |= hpriv->signal[port].pre;
1924 tmp |= hpriv->signal[port].amps;
1925 writel(tmp, phy_mmio + MV5_PHY_MODE);
/* ZERO(): shorthand for clearing a port-relative EDMA register. */
1930 #define ZERO(reg) writel(0, port_mmio + (reg))
/* Quiesce and reinitialize one 50xx port: disable EDMA, pulse the
 * channel reset, then zero the request/response queue pointers and
 * error registers and restore default config/timeout values. */
1931 static void mv5_reset_hc_port(struct mv_host_priv *hpriv, void __iomem *mmio,
1934 void __iomem *port_mmio = mv_port_base(mmio, port);
1936 writelfl(EDMA_DS, port_mmio + EDMA_CMD_OFS);
1938 mv_channel_reset(hpriv, mmio, port);
1940 ZERO(0x028); /* command */
1941 writel(0x11f, port_mmio + EDMA_CFG_OFS);
1942 ZERO(0x004); /* timer */
1943 ZERO(0x008); /* irq err cause */
1944 ZERO(0x00c); /* irq err mask */
1945 ZERO(0x010); /* rq bah */
1946 ZERO(0x014); /* rq inp */
1947 ZERO(0x018); /* rq outp */
1948 ZERO(0x01c); /* respq bah */
1949 ZERO(0x024); /* respq outp */
1950 ZERO(0x020); /* respq inp */
1951 ZERO(0x02c); /* test control */
1952 writel(0xbc, port_mmio + EDMA_IORDY_TMOUT);
/* ZERO() redefined for HC-relative registers (previous definition
 * #undef'd in the elided lines, per the file's pattern). */
1956 #define ZERO(reg) writel(0, hc_mmio + (reg))
/* Reset one 50xx host controller's shared registers; the 0x20 register
 * is read-modify-written (modification elided in this excerpt). */
1957 static void mv5_reset_one_hc(struct mv_host_priv *hpriv, void __iomem *mmio,
1960 void __iomem *hc_mmio = mv_hc_base(mmio, hc);
1968 tmp = readl(hc_mmio + 0x20);
1971 writel(tmp, hc_mmio + 0x20);
/* Global 50xx reset: walk every port of every HC through
 * mv5_reset_hc_port(), then reset each HC's shared block. */
1975 static int mv5_reset_hc(struct mv_host_priv *hpriv, void __iomem *mmio,
1978 unsigned int hc, port;
1980 for (hc = 0; hc < n_hc; hc++) {
1981 for (port = 0; port < MV_PORTS_PER_HC; port++)
1982 mv5_reset_hc_port(hpriv, mmio,
1983 (hc * MV_PORTS_PER_HC) + port);
1985 mv5_reset_one_hc(hpriv, mmio, hc);
/* ZERO() redefined again, now for chip-global (mmio-relative) registers. */
1992 #define ZERO(reg) writel(0, mmio + (reg))
/* Reset the chip's PCI interface state: adjust PCI mode (modification
 * elided), then clear timers, triggers, masks and all error-latch
 * registers; irq cause/mask offsets come from hpriv so the same code
 * serves both PCI and PCIe flavors. */
1993 static void mv_reset_pci_bus(struct pci_dev *pdev, void __iomem *mmio)
1995 struct ata_host *host = dev_get_drvdata(&pdev->dev);
1996 struct mv_host_priv *hpriv = host->private_data;
1999 tmp = readl(mmio + MV_PCI_MODE);
2001 writel(tmp, mmio + MV_PCI_MODE);
2003 ZERO(MV_PCI_DISC_TIMER);
2004 ZERO(MV_PCI_MSI_TRIGGER);
2005 writel(0x000100ff, mmio + MV_PCI_XBAR_TMOUT);
2006 ZERO(HC_MAIN_IRQ_MASK_OFS);
2007 ZERO(MV_PCI_SERR_MASK);
2008 ZERO(hpriv->irq_cause_ofs);
2009 ZERO(hpriv->irq_mask_ofs);
2010 ZERO(MV_PCI_ERR_LOW_ADDRESS);
2011 ZERO(MV_PCI_ERR_HIGH_ADDRESS);
2012 ZERO(MV_PCI_ERR_ATTRIBUTE);
2013 ZERO(MV_PCI_ERR_COMMAND);
/* 60xx flash reset: reuse the 50xx sequence, then additionally set
 * GPIO port control bits 5 and 6. */
2017 static void mv6_reset_flash(struct mv_host_priv *hpriv, void __iomem *mmio)
2021 mv5_reset_flash(hpriv, mmio);
2023 tmp = readl(mmio + MV_GPIO_PORT_CTL);
2025 tmp |= (1 << 5) | (1 << 6);
2026 writel(tmp, mmio + MV_GPIO_PORT_CTL);
2030 * mv6_reset_hc - Perform the 6xxx global soft reset
2031 * @mmio: base address of the HBA
2033 * This routine only applies to 6xxx parts.
2036 * Inherited from caller.
2038 static int mv6_reset_hc(struct mv_host_priv *hpriv, void __iomem *mmio,
2041 void __iomem *reg = mmio + PCI_MAIN_CMD_STS_OFS;
2045 /* Following procedure defined in PCI "main command and status
/* Step 1: stop the PCI master and poll (bounded) until its queues
 * drain; proceeding with a busy master risks corrupting in-flight
 * transactions. */
2049 writel(t | STOP_PCI_MASTER, reg);
2051 for (i = 0; i < 1000; i++) {
2054 if (PCI_MASTER_EMPTY & t)
2057 if (!(PCI_MASTER_EMPTY & t)) {
2058 printk(KERN_ERR DRV_NAME ": PCI master won't flush\n");
/* Step 2: assert global soft reset and wait for the bit to latch. */
2066 writel(t | GLOB_SFT_RST, reg);
2069 } while (!(GLOB_SFT_RST & t) && (i-- > 0));
2071 if (!(GLOB_SFT_RST & t)) {
2072 printk(KERN_ERR DRV_NAME ": can't set global reset\n");
2077 /* clear reset and *reenable the PCI master* (not mentioned in spec) */
2080 writel(t & ~(GLOB_SFT_RST | STOP_PCI_MASTER), reg);
2083 } while ((GLOB_SFT_RST & t) && (i-- > 0));
2085 if (GLOB_SFT_RST & t) {
2086 printk(KERN_ERR DRV_NAME ": can't clear global reset\n");
/* Capture PHY pre-emphasis/amplitude for one 60xx port. If the reset
 * config says the settings were not preserved (bit 0 clear), install
 * fixed defaults instead of reading the port's PHY_MODE2. */
2093 static void mv6_read_preamp(struct mv_host_priv *hpriv, int idx,
2096 void __iomem *port_mmio;
2099 tmp = readl(mmio + MV_RESET_CFG);
2100 if ((tmp & (1 << 0)) == 0) {
2101 hpriv->signal[idx].amps = 0x7 << 8;
2102 hpriv->signal[idx].pre = 0x1 << 5;
2106 port_mmio = mv_port_base(mmio, idx);
2107 tmp = readl(port_mmio + PHY_MODE2);
2109 hpriv->signal[idx].amps = tmp & 0x700; /* bits 10:8 */
2110 hpriv->signal[idx].pre = tmp & 0xe0; /* bits 7:5 */
/* Enable activity LEDs on 60xx via GPIO port control (bits 5 and 6). */
2113 static void mv6_enable_leds(struct mv_host_priv *hpriv, void __iomem *mmio)
2115 writel(0x00000060, mmio + MV_GPIO_PORT_CTL);
/* Apply 60xx/IIE PHY errata to one port. Which fixups run depends on
 * the errata flags (60X1B2/60X1C0 select the PHY_MODE2 and PHY_MODE4
 * fixes); afterwards the saved pre-emphasis/amplitude are restored and
 * Gen IIE parts get additional fixed values per mvSata 3.6.1. */
2118 static void mv6_phy_errata(struct mv_host_priv *hpriv, void __iomem *mmio,
2121 void __iomem *port_mmio = mv_port_base(mmio, port);
2123 u32 hp_flags = hpriv->hp_flags;
2125 hp_flags & (MV_HP_ERRATA_60X1B2 | MV_HP_ERRATA_60X1C0);
2127 hp_flags & (MV_HP_ERRATA_60X1B2 | MV_HP_ERRATA_60X1C0);
2130 if (fix_phy_mode2) {
2131 m2 = readl(port_mmio + PHY_MODE2);
2134 writel(m2, port_mmio + PHY_MODE2);
2138 m2 = readl(port_mmio + PHY_MODE2);
2139 m2 &= ~((1 << 16) | (1 << 31));
2140 writel(m2, port_mmio + PHY_MODE2);
2145 /* who knows what this magic does */
2146 tmp = readl(port_mmio + PHY_MODE3);
2149 writel(tmp, port_mmio + PHY_MODE3);
2151 if (fix_phy_mode4) {
2154 m4 = readl(port_mmio + PHY_MODE4);
/* 60X1B2 parts need register 0x310 preserved across the PHY_MODE4
 * update, hence the save/restore pair around it. */
2156 if (hp_flags & MV_HP_ERRATA_60X1B2)
2157 tmp = readl(port_mmio + 0x310);
2159 m4 = (m4 & ~(1 << 1)) | (1 << 0);
2161 writel(m4, port_mmio + PHY_MODE4);
2163 if (hp_flags & MV_HP_ERRATA_60X1B2)
2164 writel(tmp, port_mmio + 0x310);
2167 /* Revert values of pre-emphasis and signal amps to the saved ones */
2168 m2 = readl(port_mmio + PHY_MODE2);
2170 m2 &= ~MV_M2_PREAMP_MASK;
2171 m2 |= hpriv->signal[port].amps;
2172 m2 |= hpriv->signal[port].pre;
2175 /* according to mvSata 3.6.1, some IIE values are fixed */
2176 if (IS_GEN_IIE(hpriv)) {
2181 writel(m2, port_mmio + PHY_MODE2);
/* Hard-reset one channel's EDMA engine: assert ATA_RST, apply the Gen II
 * interface-control workaround, wait for propagation, deassert, then run
 * the generation-specific PHY errata hook. */
2184 static void mv_channel_reset(struct mv_host_priv *hpriv, void __iomem *mmio,
2185 unsigned int port_no)
2187 void __iomem *port_mmio = mv_port_base(mmio, port_no);
2189 writelfl(ATA_RST, port_mmio + EDMA_CMD_OFS);
2191 if (IS_GEN_II(hpriv)) {
2192 u32 ifctl = readl(port_mmio + SATA_INTERFACE_CTL);
2193 ifctl |= (1 << 7); /* enable gen2i speed */
2194 ifctl = (ifctl & 0xfff) | 0x9b1000; /* from chip spec */
2195 writelfl(ifctl, port_mmio + SATA_INTERFACE_CTL);
2198 udelay(25); /* allow reset propagation */
2200 /* Spec never mentions clearing the bit. Marvell's driver does
2201 * clear the bit, however.
2203 writelfl(0, port_mmio + EDMA_CMD_OFS);
2205 hpriv->ops->phy_errata(hpriv, mmio, port_no);
/* Gen I tail handling elided in this excerpt. */
2207 if (IS_GEN_I(hpriv))
2212 * mv_phy_reset - Perform eDMA reset followed by COMRESET
2213 * @ap: ATA channel to manipulate
2214 * @class: out-param, receives the classified device type
2214 * @deadline: jiffies value bounding the polling loops
2215 * Part of this is taken from __sata_phy_reset and modified to
2216 * not sleep since this routine gets called from interrupt level.
2219 * Inherited from caller. This is coded to safe to call at
2220 * interrupt level, i.e. it does not sleep.
2222 static void mv_phy_reset(struct ata_port *ap, unsigned int *class,
2223 unsigned long deadline)
2225 struct mv_port_priv *pp = ap->private_data;
2226 struct mv_host_priv *hpriv = ap->host->private_data;
2227 void __iomem *port_mmio = mv_ap_base(ap);
2231 VPRINTK("ENTER, port %u, mmio 0x%p\n", ap->port_no, port_mmio);
2235 u32 sstatus, serror, scontrol;
2237 mv_scr_read(ap, SCR_STATUS, &sstatus);
2238 mv_scr_read(ap, SCR_ERROR, &serror);
2239 mv_scr_read(ap, SCR_CONTROL, &scontrol);
2240 DPRINTK("S-regs after ATA_RST: SStat 0x%08x SErr 0x%08x "
2241 "SCtrl 0x%08x\n", sstatus, serror, scontrol);
2245 /* Issue COMRESET via SControl */
/* SControl DET=1 asserts COMRESET; DET=0 releases it. */
2247 sata_scr_write_flush(&ap->link, SCR_CONTROL, 0x301);
2250 sata_scr_write_flush(&ap->link, SCR_CONTROL, 0x300);
/* Poll SStatus until the link reports device-present (DET==3) or
 * no-device (DET==0), bounded by @deadline. */
2254 sata_scr_read(&ap->link, SCR_STATUS, &sstatus);
2255 if (((sstatus & 0x3) == 3) || ((sstatus & 0x3) == 0))
2259 } while (time_before(jiffies, deadline));
2261 /* work around errata */
/* Gen II errata: certain intermediate SStatus values mean the
 * COMRESET must be reissued (retry label elided). */
2262 if (IS_GEN_II(hpriv) &&
2263 (sstatus != 0x0) && (sstatus != 0x113) && (sstatus != 0x123) &&
2265 goto comreset_retry;
2269 u32 sstatus, serror, scontrol;
2271 mv_scr_read(ap, SCR_STATUS, &sstatus);
2272 mv_scr_read(ap, SCR_ERROR, &serror);
2273 mv_scr_read(ap, SCR_CONTROL, &scontrol);
2274 DPRINTK("S-regs after PHY wake: SStat 0x%08x SErr 0x%08x "
2275 "SCtrl 0x%08x\n", sstatus, serror, scontrol);
2279 if (ata_link_offline(&ap->link)) {
2280 *class = ATA_DEV_NONE;
2284 /* even after SStatus reflects that device is ready,
2285 * it seems to take a while for link to be fully
2286 * established (and thus Status no longer 0x80/0x7F),
2287 * so we poll a bit for that, here.
2291 u8 drv_stat = ata_check_status(ap);
2292 if ((drv_stat != 0x80) && (drv_stat != 0x7f))
2297 if (time_after(jiffies, deadline))
2301 /* FIXME: if we passed the deadline, the following
2302 * code probably produces an invalid result
2305 /* finally, read device signature from TF registers */
2306 *class = ata_dev_try_classify(ap->link.device, 1, NULL);
2308 writelfl(0, port_mmio + EDMA_ERR_IRQ_CAUSE_OFS);
/* EDMA must be off across a PHY reset; warn if our cache disagrees. */
2310 WARN_ON(pp->pp_flags & MV_PP_FLAG_EDMA_EN);
/* EH prereset hook: stop EDMA (escalating to hardreset on failure),
 * force one hardreset per port lifetime via MV_PP_FLAG_HAD_A_RESET,
 * and otherwise wait for the device to go ready before a softreset. */
2315 static int mv_prereset(struct ata_link *link, unsigned long deadline)
2317 struct ata_port *ap = link->ap;
2318 struct mv_port_priv *pp = ap->private_data;
2319 struct ata_eh_context *ehc = &link->eh_context;
2322 rc = mv_stop_dma(ap);
2324 ehc->i.action |= ATA_EH_HARDRESET;
2326 if (!(pp->pp_flags & MV_PP_FLAG_HAD_A_RESET)) {
2327 pp->pp_flags |= MV_PP_FLAG_HAD_A_RESET;
2328 ehc->i.action |= ATA_EH_HARDRESET;
2331 /* if we're about to do hardreset, nothing more to do */
2332 if (ehc->i.action & ATA_EH_HARDRESET)
2335 if (ata_link_online(link))
2336 rc = ata_wait_ready(ap, deadline);
/* EH hardreset hook: reset the EDMA channel, then run the full PHY
 * reset/COMRESET/classification sequence. */
2343 static int mv_hardreset(struct ata_link *link, unsigned int *class,
2344 unsigned long deadline)
2346 struct ata_port *ap = link->ap;
2347 struct mv_host_priv *hpriv = ap->host->private_data;
2348 void __iomem *mmio = ap->host->iomap[MV_PRIMARY_BAR];
2352 mv_channel_reset(hpriv, mmio, ap->port_no);
2354 mv_phy_reset(ap, class, deadline);
/* EH postreset hook: report link status, clear SError, and — if a
 * device was found — restore the device-control register. */
2359 static void mv_postreset(struct ata_link *link, unsigned int *classes)
2361 struct ata_port *ap = link->ap;
2364 /* print link status */
2365 sata_print_link_status(link);
/* Read-then-write clears any SError bits latched by the reset. */
2368 sata_scr_read(link, SCR_ERROR, &serr);
2369 sata_scr_write_flush(link, SCR_ERROR, serr);
2371 /* bail out if no device is present */
2372 if (classes[0] == ATA_DEV_NONE && classes[1] == ATA_DEV_NONE) {
2373 DPRINTK("EXIT, no device\n");
2377 /* set up device control */
2378 iowrite8(ap->ctl, ap->ioaddr.ctl_addr);
/* Standard libata EH entry: our pre/hard/post reset hooks around the
 * generic softreset. */
2381 static void mv_error_handler(struct ata_port *ap)
2383 ata_do_eh(ap, mv_prereset, ata_std_softreset,
2384 mv_hardreset, mv_postreset);
/* Post-internal-command hook: make sure EDMA is stopped again. */
2387 static void mv_post_int_cmd(struct ata_queued_cmd *qc)
2389 mv_stop_dma(qc->ap);
/* Freeze the port: mask this port's err+done bits (two adjacent bits
 * per port) in the chip's main IRQ mask register. */
2392 static void mv_eh_freeze(struct ata_port *ap)
2394 void __iomem *mmio = ap->host->iomap[MV_PRIMARY_BAR];
2395 unsigned int hc = (ap->port_no > 3) ? 1 : 0;
2399 /* FIXME: handle coalescing completion events properly */
/* Ports on the second HC shift by one extra bit (adjustment elided);
 * the main-cause layout reserves bit 8 between HCs. */
2401 shift = ap->port_no * 2;
2405 mask = 0x3 << shift;
2407 /* disable assertion of portN err, done events */
2408 tmp = readl(mmio + HC_MAIN_IRQ_MASK_OFS);
2409 writelfl(tmp & ~mask, mmio + HC_MAIN_IRQ_MASK_OFS);
/* Thaw the port: clear stale EDMA error and HC interrupt causes for
 * this port, then re-enable its err+done bits in the main IRQ mask —
 * the inverse of mv_eh_freeze(). */
2412 static void mv_eh_thaw(struct ata_port *ap)
2414 void __iomem *mmio = ap->host->iomap[MV_PRIMARY_BAR];
2415 unsigned int hc = (ap->port_no > 3) ? 1 : 0;
2416 void __iomem *hc_mmio = mv_hc_base(mmio, hc);
2417 void __iomem *port_mmio = mv_ap_base(ap);
2418 u32 tmp, mask, hc_irq_cause;
2419 unsigned int shift, hc_port_no = ap->port_no;
2421 /* FIXME: handle coalescing completion events properly */
/* Second-HC ports need shift/hc_port_no adjustments (elided here). */
2423 shift = ap->port_no * 2;
2429 mask = 0x3 << shift;
2431 /* clear EDMA errors on this port */
2432 writel(0, port_mmio + EDMA_ERR_IRQ_CAUSE_OFS);
2434 /* clear pending irq events */
2435 hc_irq_cause = readl(hc_mmio + HC_IRQ_CAUSE_OFS);
2436 hc_irq_cause &= ~(1 << hc_port_no); /* clear CRPB-done */
2437 hc_irq_cause &= ~(1 << (hc_port_no + 8)); /* clear Device int */
2438 writel(hc_irq_cause, hc_mmio + HC_IRQ_CAUSE_OFS);
2440 /* enable assertion of portN err, done events */
2441 tmp = readl(mmio + HC_MAIN_IRQ_MASK_OFS);
2442 writelfl(tmp | mask, mmio + HC_MAIN_IRQ_MASK_OFS);
2446 * mv_port_init - Perform some early initialization on a single port.
2447 * @port: libata data structure storing shadow register addresses
2448 * @port_mmio: base address of the port
2450 * Initialize shadow register mmio addresses, clear outstanding
2451 * interrupts on the port, and unmask interrupts for the future
2452 * start of the port.
2455 * Inherited from caller.
2457 static void mv_port_init(struct ata_ioports *port, void __iomem *port_mmio)
2459 void __iomem *shd_base = port_mmio + SHD_BLK_OFS;
2462 /* PIO related setup
/* Each ATA shadow register occupies a full 32-bit slot in the shadow
 * block, hence the sizeof(u32) stride. */
2464 port->data_addr = shd_base + (sizeof(u32) * ATA_REG_DATA);
2466 port->feature_addr = shd_base + (sizeof(u32) * ATA_REG_ERR);
2467 port->nsect_addr = shd_base + (sizeof(u32) * ATA_REG_NSECT);
2468 port->lbal_addr = shd_base + (sizeof(u32) * ATA_REG_LBAL);
2469 port->lbam_addr = shd_base + (sizeof(u32) * ATA_REG_LBAM);
2470 port->lbah_addr = shd_base + (sizeof(u32) * ATA_REG_LBAH);
2471 port->device_addr = shd_base + (sizeof(u32) * ATA_REG_DEVICE);
2473 port->command_addr = shd_base + (sizeof(u32) * ATA_REG_STATUS);
2474 /* special case: control/altstatus doesn't have ATA_REG_ address */
2475 port->altstatus_addr = port->ctl_addr = shd_base + SHD_CTL_AST_OFS;
/* No legacy BMDMA/cmd/scr blocks on this hardware. */
2478 port->cmd_addr = port->bmdma_addr = port->scr_addr = NULL;
2480 /* Clear any currently outstanding port interrupt conditions */
/* SError is write-1-to-clear: read then write back the same bits. */
2481 serr_ofs = mv_scr_offset(SCR_ERROR);
2482 writelfl(readl(port_mmio + serr_ofs), port_mmio + serr_ofs);
2483 writelfl(0, port_mmio + EDMA_ERR_IRQ_CAUSE_OFS);
2485 /* unmask all non-transient EDMA error interrupts */
2486 writelfl(~EDMA_ERR_IRQ_TRANSIENT, port_mmio + EDMA_ERR_IRQ_MASK_OFS);
2488 VPRINTK("EDMA cfg=0x%08x EDMA IRQ err cause/mask=0x%08x/0x%08x\n",
2489 readl(port_mmio + EDMA_CFG_OFS),
2490 readl(port_mmio + EDMA_ERR_IRQ_CAUSE_OFS),
2491 readl(port_mmio + EDMA_ERR_IRQ_MASK_OFS));
/* Identify the controller generation from the board index and PCI
 * revision, select the matching ops vector, set errata flags, and pick
 * PCI vs PCIe interrupt register offsets. Unknown revisions fall back
 * to the most conservative errata set with a warning. */
2494 static int mv_chip_id(struct ata_host *host, unsigned int board_idx)
2496 struct pci_dev *pdev = to_pci_dev(host->dev);
2497 struct mv_host_priv *hpriv = host->private_data;
2498 u32 hp_flags = hpriv->hp_flags;
2500 switch (board_idx) {
2502 hpriv->ops = &mv5xxx_ops;
2503 hp_flags |= MV_HP_GEN_I;
2505 switch (pdev->revision) {
2507 hp_flags |= MV_HP_ERRATA_50XXB0;
2510 hp_flags |= MV_HP_ERRATA_50XXB2;
2513 dev_printk(KERN_WARNING, &pdev->dev,
2514 "Applying 50XXB2 workarounds to unknown rev\n");
2515 hp_flags |= MV_HP_ERRATA_50XXB2;
2522 hpriv->ops = &mv5xxx_ops;
2523 hp_flags |= MV_HP_GEN_I;
2525 switch (pdev->revision) {
2527 hp_flags |= MV_HP_ERRATA_50XXB0;
2530 hp_flags |= MV_HP_ERRATA_50XXB2;
2533 dev_printk(KERN_WARNING, &pdev->dev,
2534 "Applying B2 workarounds to unknown rev\n");
2535 hp_flags |= MV_HP_ERRATA_50XXB2;
2542 hpriv->ops = &mv6xxx_ops;
2543 hp_flags |= MV_HP_GEN_II;
2545 switch (pdev->revision) {
2547 hp_flags |= MV_HP_ERRATA_60X1B2;
2550 hp_flags |= MV_HP_ERRATA_60X1C0;
2553 dev_printk(KERN_WARNING, &pdev->dev,
2554 "Applying B2 workarounds to unknown rev\n");
2555 hp_flags |= MV_HP_ERRATA_60X1B2;
/* PCIe-based IIE boards; Highpoint 23xx cards get a data-safety
 * warning because their BIOS writes metadata onto attached drives. */
2561 hp_flags |= MV_HP_PCIE;
2562 if (pdev->vendor == PCI_VENDOR_ID_TTI &&
2563 (pdev->device == 0x2300 || pdev->device == 0x2310))
2566 * Highpoint RocketRAID PCIe 23xx series cards:
2568 * Unconfigured drives are treated as "Legacy"
2569 * by the BIOS, and it overwrites sector 8 with
2570 * a "Lgcy" metadata block prior to Linux boot.
2572 * Configured drives (RAID or JBOD) leave sector 8
2573 * alone, but instead overwrite a high numbered
2574 * sector for the RAID metadata. This sector can
2575 * be determined exactly, by truncating the physical
2576 * drive capacity to a nice even GB value.
2578 * RAID metadata is at: (dev->n_sectors & ~0xfffff)
2580 * Warn the user, lest they think we're just buggy.
2582 printk(KERN_WARNING DRV_NAME ": Highpoint RocketRAID"
2583 " BIOS CORRUPTS DATA on all attached drives,"
2584 " regardless of if/how they are configured."
2586 printk(KERN_WARNING DRV_NAME ": For data safety, do not"
2587 " use sectors 8-9 on \"Legacy\" drives,"
2588 " and avoid the final two gigabytes on"
2589 " all RocketRAID BIOS initialized drives.\n");
2592 hpriv->ops = &mv6xxx_ops;
2593 hp_flags |= MV_HP_GEN_IIE;
2595 switch (pdev->revision) {
2597 hp_flags |= MV_HP_ERRATA_XX42A0;
2600 hp_flags |= MV_HP_ERRATA_60X1C0;
2603 dev_printk(KERN_WARNING, &pdev->dev,
2604 "Applying 60X1C0 workarounds to unknown rev\n");
2605 hp_flags |= MV_HP_ERRATA_60X1C0;
2611 dev_printk(KERN_ERR, &pdev->dev,
2612 "BUG: invalid board index %u\n", board_idx);
/* Commit flags and select PCI vs PCIe interrupt register offsets —
 * the rest of the driver reads these through hpriv. */
2616 hpriv->hp_flags = hp_flags;
2617 if (hp_flags & MV_HP_PCIE) {
2618 hpriv->irq_cause_ofs = PCIE_IRQ_CAUSE_OFS;
2619 hpriv->irq_mask_ofs = PCIE_IRQ_MASK_OFS;
2620 hpriv->unmask_all_irqs = PCIE_UNMASK_ALL_IRQS;
2622 hpriv->irq_cause_ofs = PCI_IRQ_CAUSE_OFS;
2623 hpriv->irq_mask_ofs = PCI_IRQ_MASK_OFS;
2624 hpriv->unmask_all_irqs = PCI_UNMASK_ALL_IRQS;
2631 * mv_init_host - Perform some early initialization of the host.
2632 * @host: ATA host to initialize
2633 * @board_idx: controller index
2635 * If possible, do an early global reset of the host. Then do
2636 * our port init and clear/unmask all/relevant host interrupts.
2639 * Inherited from caller.
2641 static int mv_init_host(struct ata_host *host, unsigned int board_idx)
2643 int rc = 0, n_hc, port, hc;
2644 struct pci_dev *pdev = to_pci_dev(host->dev);
2645 void __iomem *mmio = host->iomap[MV_PRIMARY_BAR];
2646 struct mv_host_priv *hpriv = host->private_data;
2648 /* global interrupt mask */
2649 writel(0, mmio + HC_MAIN_IRQ_MASK_OFS);
/* NOTE(review): the error checks that presumably follow mv_chip_id()
 * and reset_hc() are on lines not visible in this extract -- confirm
 * rc is tested before proceeding in the full source. */
2651 rc = mv_chip_id(host, board_idx);
2655 n_hc = mv_get_hc_count(host->ports[0]->flags);
/* latch per-port PHY "preamp" values before resetting the controller */
2657 for (port = 0; port < host->n_ports; port++)
2658 hpriv->ops->read_preamp(hpriv, port, mmio);
2660 rc = hpriv->ops->reset_hc(hpriv, mmio, n_hc);
2664 hpriv->ops->reset_flash(hpriv, mmio);
2665 hpriv->ops->reset_bus(pdev, mmio);
2666 hpriv->ops->enable_leds(hpriv, mmio);
2668 for (port = 0; port < host->n_ports; port++) {
2669 if (IS_GEN_II(hpriv)) {
2670 void __iomem *port_mmio = mv_port_base(mmio, port);
2672 u32 ifctl = readl(port_mmio + SATA_INTERFACE_CTL);
2673 ifctl |= (1 << 7); /* enable gen2i speed */
2674 ifctl = (ifctl & 0xfff) | 0x9b1000; /* from chip spec */
2675 writelfl(ifctl, port_mmio + SATA_INTERFACE_CTL);
/* apply chip-generation-specific PHY errata fixes to each port */
2678 hpriv->ops->phy_errata(hpriv, mmio, port);
/* set up per-port register addresses and record them for dmesg/sysfs */
2681 for (port = 0; port < host->n_ports; port++) {
2682 struct ata_port *ap = host->ports[port];
2683 void __iomem *port_mmio = mv_port_base(mmio, port);
2684 unsigned int offset = port_mmio - mmio;
2686 mv_port_init(&ap->ioaddr, port_mmio);
2688 ata_port_pbar_desc(ap, MV_PRIMARY_BAR, -1, "mmio");
2689 ata_port_pbar_desc(ap, MV_PRIMARY_BAR, offset, "port");
2692 for (hc = 0; hc < n_hc; hc++) {
2693 void __iomem *hc_mmio = mv_hc_base(mmio, hc);
2695 VPRINTK("HC%i: HC config=0x%08x HC IRQ cause "
2696 "(before clear)=0x%08x\n", hc,
2697 readl(hc_mmio + HC_CFG_OFS),
2698 readl(hc_mmio + HC_IRQ_CAUSE_OFS));
2700 /* Clear any currently outstanding hc interrupt conditions */
2701 writelfl(0, hc_mmio + HC_IRQ_CAUSE_OFS);
2704 /* Clear any currently outstanding host interrupt conditions */
2705 writelfl(0, mmio + hpriv->irq_cause_ofs);
2707 /* and unmask interrupt generation for host regs */
2708 writelfl(hpriv->unmask_all_irqs, mmio + hpriv->irq_mask_ofs);
/* Gen-I chips use a different main-IRQ mask than later generations */
2710 if (IS_GEN_I(hpriv))
2711 writelfl(~HC_MAIN_MASKED_IRQS_5, mmio + HC_MAIN_IRQ_MASK_OFS);
2713 writelfl(~HC_MAIN_MASKED_IRQS, mmio + HC_MAIN_IRQ_MASK_OFS);
2715 VPRINTK("HC MAIN IRQ cause/mask=0x%08x/0x%08x "
2716 "PCI int cause/mask=0x%08x/0x%08x\n",
2717 readl(mmio + HC_MAIN_IRQ_CAUSE_OFS),
2718 readl(mmio + HC_MAIN_IRQ_MASK_OFS),
2719 readl(mmio + hpriv->irq_cause_ofs),
2720 readl(mmio + hpriv->irq_mask_ofs));
2727 * mv_print_info - Dump key info to kernel log for perusal.
2728 * @host: ATA host to print info about
2730 * FIXME: complete this.
2733 * Inherited from caller.
2735 static void mv_print_info(struct ata_host *host)
2737 struct pci_dev *pdev = to_pci_dev(host->dev);
2738 struct mv_host_priv *hpriv = host->private_data;
2740 const char *scc_s, *gen;
2742 /* Use this to determine the HW stepping of the chip so we know
2743 * what errata to workaround
/* NOTE(review): the declaration of scc and the assignments that map
 * scc / generation flags to the scc_s and gen strings are on lines not
 * visible in this extract -- confirm against the full source. */
2745 pci_read_config_byte(pdev, PCI_CLASS_DEVICE, &scc);
2748 else if (scc == 0x01)
2753 if (IS_GEN_I(hpriv))
2755 else if (IS_GEN_II(hpriv))
2757 else if (IS_GEN_IIE(hpriv))
/* one-line summary: generation, queue depth, port count, class, IRQ mode */
2762 dev_printk(KERN_INFO, &pdev->dev,
2763 "Gen-%s %u slots %u ports %s mode IRQ via %s\n",
2764 gen, (unsigned)MV_MAX_Q_DEPTH, host->n_ports,
2765 scc_s, (MV_HP_FLAG_MSI & hpriv->hp_flags) ? "MSI" : "INTx");
2769 * mv_init_one - handle a positive probe of a Marvell host
2770 * @pdev: PCI device found
2771 * @ent: PCI device ID entry for the matched host
2774 * Inherited from caller.
2776 static int mv_init_one(struct pci_dev *pdev, const struct pci_device_id *ent)
2778 static int printed_version;
2779 unsigned int board_idx = (unsigned int)ent->driver_data;
2780 const struct ata_port_info *ppi[] = { &mv_port_info[board_idx], NULL };
2781 struct ata_host *host;
2782 struct mv_host_priv *hpriv;
/* print the driver version once, on the first probe only */
2785 if (!printed_version++)
2786 dev_printk(KERN_INFO, &pdev->dev, "version " DRV_VERSION "\n");
/* allocate host and private data; hpriv is devres-managed (devm_kzalloc),
 * so no explicit free is needed on error paths */
2789 n_ports = mv_get_hc_count(ppi[0]->flags) * MV_PORTS_PER_HC;
2791 host = ata_host_alloc_pinfo(&pdev->dev, ppi, n_ports);
2792 hpriv = devm_kzalloc(&pdev->dev, sizeof(*hpriv), GFP_KERNEL);
2793 if (!host || !hpriv)
2795 host->private_data = hpriv;
2797 /* acquire resources */
2798 rc = pcim_enable_device(pdev);
/* NOTE(review): the rc checks / early returns after these calls, and
 * the declarations of rc and n_ports, are on lines not visible in this
 * extract -- confirm against the full source. */
2802 rc = pcim_iomap_regions(pdev, 1 << MV_PRIMARY_BAR, DRV_NAME);
2804 pcim_pin_device(pdev);
2807 host->iomap = pcim_iomap_table(pdev);
/* attempt 64-bit DMA configuration (presumably falls back to 32-bit
 * inside pci_go_64 -- helper not visible here) */
2809 rc = pci_go_64(pdev);
2813 /* initialize adapter */
2814 rc = mv_init_host(host, board_idx);
2818 /* Enable interrupts */
2819 if (msi && pci_enable_msi(pdev))
2822 mv_dump_pci_cfg(pdev, 0x68);
2823 mv_print_info(host);
/* enable bus mastering (and MWI as a best-effort) before going live */
2825 pci_set_master(pdev);
2826 pci_try_set_mwi(pdev);
2827 return ata_host_activate(host, pdev->irq, mv_interrupt, IRQF_SHARED,
2828 IS_GEN_I(hpriv) ? &mv5_sht : &mv6_sht);
/* module entry point: register the PCI driver */
2831 static int __init mv_init(void)
2833 return pci_register_driver(&mv_pci_driver);
/* module exit point: unregister the PCI driver */
2836 static void __exit mv_exit(void)
2838 pci_unregister_driver(&mv_pci_driver);
/* Module metadata, PCI device table, and module parameters */
2841 MODULE_AUTHOR("Brett Russ");
2842 MODULE_DESCRIPTION("SCSI low-level driver for Marvell SATA controllers");
2843 MODULE_LICENSE("GPL");
2844 MODULE_DEVICE_TABLE(pci, mv_pci_tbl);
2845 MODULE_VERSION(DRV_VERSION);
/* msi=1 requests PCI MSI; 0444 makes it read-only at runtime */
2847 module_param(msi, int, 0444);
2848 MODULE_PARM_DESC(msi, "Enable use of PCI MSI (0=off, 1=on)");
2850 module_init(mv_init);
2851 module_exit(mv_exit);