/*
 * sata_mv.c - Marvell SATA support
 *
 * Copyright 2005: EMC Corporation, all rights reserved.
 * Copyright 2005 Red Hat, Inc.  All rights reserved.
 *
 * Please ALWAYS copy linux-ide@vger.kernel.org on emails.
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License as published by
 * the Free Software Foundation; version 2 of the License.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program; if not, write to the Free Software
 * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA  02111-1307  USA
 */
/*
  sata_mv TODO list:

  1) Needs a full errata audit for all chipsets.  I implemented most
  of the errata workarounds found in the Marvell vendor driver, but
  I distinctly remember a couple workarounds (one related to PCI-X)
  are still needed.

  4) Add NCQ support (easy to intermediate, once new-EH support appears)

  5) Investigate problems with PCI Message Signalled Interrupts (MSI).

  6) Add port multiplier support (intermediate)

  8) Develop a low-power-consumption strategy, and implement it.

  9) [Experiment, low priority] See if ATAPI can be supported using
  "unknown FIS" or "vendor-specific FIS" support, or something creative
  like that.

  10) [Experiment, low priority] Investigate interrupt coalescing.
  Quite often, especially with PCI Message Signalled Interrupts (MSI),
  the overhead reduced by interrupt mitigation is not worth the
  latency cost.

  11) [Experiment, Marvell value added] Is it possible to use target
  mode to cross-connect two Linux boxes with Marvell cards?  If so,
  creating LibATA target mode support would be very interesting.

  Target mode, for those without docs, is the ability to directly
  connect two SATA controllers.

  13) Verify that 7042 is fully supported.  I only have a 6042.
*/
#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/pci.h>
#include <linux/init.h>
#include <linux/blkdev.h>
#include <linux/delay.h>
#include <linux/interrupt.h>
#include <linux/dma-mapping.h>
#include <linux/device.h>
#include <scsi/scsi_host.h>
#include <scsi/scsi_cmnd.h>
#include <scsi/scsi_device.h>
#include <linux/libata.h>

#define DRV_NAME	"sata_mv"
#define DRV_VERSION	"1.01"
enum {
	/* BARs are enumerated in pci_resource_start() terms */
	MV_PRIMARY_BAR		= 0,	/* offset 0x10: memory space */
	MV_IO_BAR		= 2,	/* offset 0x18: IO space */
	MV_MISC_BAR		= 3,	/* offset 0x1c: FLASH, NVRAM, SRAM */

	MV_MAJOR_REG_AREA_SZ	= 0x10000,	/* 64KB */
	MV_MINOR_REG_AREA_SZ	= 0x2000,	/* 8KB */

	MV_IRQ_COAL_REG_BASE	= 0x18000,	/* 6xxx part only */
	MV_IRQ_COAL_CAUSE		= (MV_IRQ_COAL_REG_BASE + 0x08),
	MV_IRQ_COAL_CAUSE_LO		= (MV_IRQ_COAL_REG_BASE + 0x88),
	MV_IRQ_COAL_CAUSE_HI		= (MV_IRQ_COAL_REG_BASE + 0x8c),
	MV_IRQ_COAL_THRESHOLD		= (MV_IRQ_COAL_REG_BASE + 0xcc),
	MV_IRQ_COAL_TIME_THRESHOLD	= (MV_IRQ_COAL_REG_BASE + 0xd0),

	MV_SATAHC0_REG_BASE	= 0x20000,
	MV_FLASH_CTL		= 0x1046c,
	MV_GPIO_PORT_CTL	= 0x104f0,
	MV_RESET_CFG		= 0x180d8,

	MV_PCI_REG_SZ		= MV_MAJOR_REG_AREA_SZ,
	MV_SATAHC_REG_SZ	= MV_MAJOR_REG_AREA_SZ,
	MV_SATAHC_ARBTR_REG_SZ	= MV_MINOR_REG_AREA_SZ,	/* arbiter */
	MV_PORT_REG_SZ		= MV_MINOR_REG_AREA_SZ,

	MV_MAX_Q_DEPTH		= 32,
	MV_MAX_Q_DEPTH_MASK	= MV_MAX_Q_DEPTH - 1,

	/* CRQB needs alignment on a 1KB boundary. Size == 1KB
	 * CRPB needs alignment on a 256B boundary. Size == 256B
	 * SG count of 176 leads to MV_PORT_PRIV_DMA_SZ == 4KB
	 * ePRD (SG) entries need alignment on a 16B boundary. Size == 16B
	 */
	MV_CRQB_Q_SZ		= (32 * MV_MAX_Q_DEPTH),
	MV_CRPB_Q_SZ		= (8 * MV_MAX_Q_DEPTH),
	MV_MAX_SG_CT		= 176,
	MV_SG_TBL_SZ		= (16 * MV_MAX_SG_CT),
	MV_PORT_PRIV_DMA_SZ	= (MV_CRQB_Q_SZ + MV_CRPB_Q_SZ + MV_SG_TBL_SZ),
	MV_PORTS_PER_HC		= 4,
	/* == (port / MV_PORTS_PER_HC) to determine HC from 0-7 port */
	MV_PORT_HC_SHIFT	= 2,
	/* == (port % MV_PORTS_PER_HC) to determine hard port from 0-7 port */
	MV_PORT_MASK		= 3,

	MV_FLAG_DUAL_HC		= (1 << 30),	/* two SATA Host Controllers */
	MV_FLAG_IRQ_COALESCE	= (1 << 29),	/* IRQ coalescing capability */
	MV_COMMON_FLAGS		= ATA_FLAG_SATA | ATA_FLAG_NO_LEGACY |
				  ATA_FLAG_MMIO | ATA_FLAG_NO_ATAPI |
				  ATA_FLAG_PIO_POLLING,
	MV_6XXX_FLAGS		= MV_FLAG_IRQ_COALESCE,

	CRQB_FLAG_READ		= (1 << 0),
	CRQB_TAG_SHIFT		= 1,
	CRQB_IOID_SHIFT		= 6,	/* CRQB Gen-II/IIE IO Id shift */
	CRQB_HOSTQ_SHIFT	= 17,	/* CRQB Gen-II/IIE HostQueTag shift */
	CRQB_CMD_ADDR_SHIFT	= 8,
	CRQB_CMD_CS		= (0x2 << 11),
	CRQB_CMD_LAST		= (1 << 15),

	CRPB_FLAG_STATUS_SHIFT	= 8,
	CRPB_IOID_SHIFT_6	= 5,	/* CRPB Gen-II IO Id shift */
	CRPB_IOID_SHIFT_7	= 7,	/* CRPB Gen-IIE IO Id shift */

	EPRD_FLAG_END_OF_TBL	= (1 << 31),
	/* PCI interface registers */

	PCI_COMMAND_OFS		= 0xc00,

	PCI_MAIN_CMD_STS_OFS	= 0xd30,
	STOP_PCI_MASTER		= (1 << 2),
	PCI_MASTER_EMPTY	= (1 << 3),
	GLOB_SFT_RST		= (1 << 4),

	MV_PCI_MODE		= 0xd00,
	MV_PCI_EXP_ROM_BAR_CTL	= 0xd2c,
	MV_PCI_DISC_TIMER	= 0xd04,
	MV_PCI_MSI_TRIGGER	= 0xc38,
	MV_PCI_SERR_MASK	= 0xc28,
	MV_PCI_XBAR_TMOUT	= 0x1d04,
	MV_PCI_ERR_LOW_ADDRESS	= 0x1d40,
	MV_PCI_ERR_HIGH_ADDRESS	= 0x1d44,
	MV_PCI_ERR_ATTRIBUTE	= 0x1d48,
	MV_PCI_ERR_COMMAND	= 0x1d50,

	PCI_IRQ_CAUSE_OFS	= 0x1d58,
	PCI_IRQ_MASK_OFS	= 0x1d5c,
	PCI_UNMASK_ALL_IRQS	= 0x7fffff,	/* bits 22-0 */

	PCIE_IRQ_CAUSE_OFS	= 0x1900,
	PCIE_IRQ_MASK_OFS	= 0x1910,
	PCIE_UNMASK_ALL_IRQS	= 0x40a,	/* assorted bits */

	HC_MAIN_IRQ_CAUSE_OFS	= 0x1d60,
	HC_MAIN_IRQ_MASK_OFS	= 0x1d64,
	PORT0_ERR		= (1 << 0),	/* shift by port # */
	PORT0_DONE		= (1 << 1),	/* shift by port # */
	HC0_IRQ_PEND		= 0x1ff,	/* bits 0-8 = HC0's ports */
	HC_SHIFT		= 9,		/* bits 9-17 = HC1's ports */
	PCI_ERR			= (1 << 18),
	TRAN_LO_DONE		= (1 << 19),	/* 6xxx: IRQ coalescing */
	TRAN_HI_DONE		= (1 << 20),	/* 6xxx: IRQ coalescing */
	PORTS_0_3_COAL_DONE	= (1 << 8),
	PORTS_4_7_COAL_DONE	= (1 << 17),
	PORTS_0_7_COAL_DONE	= (1 << 21),	/* 6xxx: IRQ coalescing */
	GPIO_INT		= (1 << 22),
	SELF_INT		= (1 << 23),
	TWSI_INT		= (1 << 24),
	HC_MAIN_RSVD		= (0x7f << 25),	/* bits 31-25 */
	HC_MAIN_RSVD_5		= (0x1fff << 19), /* bits 31-19 */
	HC_MAIN_MASKED_IRQS	= (TRAN_LO_DONE | TRAN_HI_DONE |
				   PORTS_0_7_COAL_DONE | GPIO_INT | TWSI_INT |
				   HC_MAIN_RSVD),
	HC_MAIN_MASKED_IRQS_5	= (PORTS_0_3_COAL_DONE | PORTS_4_7_COAL_DONE |
				   HC_MAIN_RSVD_5),
	/* SATAHC registers */
	HC_CFG_OFS		= 0,

	HC_IRQ_CAUSE_OFS	= 0x14,
	CRPB_DMA_DONE		= (1 << 0),	/* shift by port # */
	HC_IRQ_COAL		= (1 << 4),	/* IRQ coalescing */
	DEV_IRQ			= (1 << 8),	/* shift by port # */

	/* Shadow block registers */
	SHD_BLK_OFS		= 0x100,
	SHD_CTL_AST_OFS		= 0x20,		/* ofs from SHD_BLK_OFS */

	/* SATA registers */
	SATA_STATUS_OFS		= 0x300,	/* ctrl, err regs follow status */
	SATA_ACTIVE_OFS		= 0x350,
	SATA_FIS_IRQ_CAUSE_OFS	= 0x364,
	PHY_MODE3		= 0x310,
	PHY_MODE4		= 0x314,
	PHY_MODE2		= 0x330,
	MV5_PHY_MODE		= 0x74,
	MV5_LT_MODE		= 0x30,
	MV5_PHY_CTL		= 0x0C,
	SATA_INTERFACE_CTL	= 0x050,

	MV_M2_PREAMP_MASK	= 0x7e0,

	/* Port registers */
	EDMA_CFG_OFS		= 0,
	EDMA_CFG_Q_DEPTH	= 0x1f,		/* max device queue depth */
	EDMA_CFG_NCQ		= (1 << 5),	/* for R/W FPDMA queued */
	EDMA_CFG_NCQ_GO_ON_ERR	= (1 << 14),	/* continue on error */
	EDMA_CFG_RD_BRST_EXT	= (1 << 11),	/* read burst 512B */
	EDMA_CFG_WR_BUFF_LEN	= (1 << 13),	/* write buffer 512B */

	EDMA_ERR_IRQ_CAUSE_OFS	= 0x8,
	EDMA_ERR_IRQ_MASK_OFS	= 0xc,
	EDMA_ERR_D_PAR		= (1 << 0),	/* UDMA data parity err */
	EDMA_ERR_PRD_PAR	= (1 << 1),	/* UDMA PRD parity err */
	EDMA_ERR_DEV		= (1 << 2),	/* device error */
	EDMA_ERR_DEV_DCON	= (1 << 3),	/* device disconnect */
	EDMA_ERR_DEV_CON	= (1 << 4),	/* device connected */
	EDMA_ERR_SERR		= (1 << 5),	/* SError bits [WBDST] raised */
	EDMA_ERR_SELF_DIS	= (1 << 7),	/* Gen II/IIE self-disable */
	EDMA_ERR_SELF_DIS_5	= (1 << 8),	/* Gen I self-disable */
	EDMA_ERR_BIST_ASYNC	= (1 << 8),	/* BIST FIS or Async Notify */
	EDMA_ERR_TRANS_IRQ_7	= (1 << 8),	/* Gen IIE transprt layer irq */
	EDMA_ERR_CRQB_PAR	= (1 << 9),	/* CRQB parity error */
	EDMA_ERR_CRPB_PAR	= (1 << 10),	/* CRPB parity error */
	EDMA_ERR_INTRL_PAR	= (1 << 11),	/* internal parity error */
	EDMA_ERR_IORDY		= (1 << 12),	/* IORdy timeout */

	EDMA_ERR_LNK_CTRL_RX	= (0xf << 13),	/* link ctrl rx error */
	EDMA_ERR_LNK_CTRL_RX_0	= (1 << 13),	/* transient: CRC err */
	EDMA_ERR_LNK_CTRL_RX_1	= (1 << 14),	/* transient: FIFO err */
	EDMA_ERR_LNK_CTRL_RX_2	= (1 << 15),	/* fatal: caught SYNC */
	EDMA_ERR_LNK_CTRL_RX_3	= (1 << 16),	/* transient: FIS rx err */

	EDMA_ERR_LNK_DATA_RX	= (0xf << 17),	/* link data rx error */

	EDMA_ERR_LNK_CTRL_TX	= (0x1f << 21),	/* link ctrl tx error */
	EDMA_ERR_LNK_CTRL_TX_0	= (1 << 21),	/* transient: CRC err */
	EDMA_ERR_LNK_CTRL_TX_1	= (1 << 22),	/* transient: FIFO err */
	EDMA_ERR_LNK_CTRL_TX_2	= (1 << 23),	/* transient: caught SYNC */
	EDMA_ERR_LNK_CTRL_TX_3	= (1 << 24),	/* transient: caught DMAT */
	EDMA_ERR_LNK_CTRL_TX_4	= (1 << 25),	/* transient: FIS collision */

	EDMA_ERR_LNK_DATA_TX	= (0x1f << 26),	/* link data tx error */

	EDMA_ERR_TRANS_PROTO	= (1 << 31),	/* transport protocol error */
	EDMA_ERR_OVERRUN_5	= (1 << 5),
	EDMA_ERR_UNDERRUN_5	= (1 << 6),

	EDMA_ERR_IRQ_TRANSIENT	= EDMA_ERR_LNK_CTRL_RX_0 |
				  EDMA_ERR_LNK_CTRL_RX_1 |
				  EDMA_ERR_LNK_CTRL_RX_3 |
				  EDMA_ERR_LNK_CTRL_TX,

	EDMA_EH_FREEZE		= EDMA_ERR_D_PAR |
				  EDMA_ERR_PRD_PAR |
				  EDMA_ERR_DEV_DCON |
				  EDMA_ERR_DEV_CON |
				  EDMA_ERR_SERR |
				  EDMA_ERR_SELF_DIS |
				  EDMA_ERR_CRQB_PAR |
				  EDMA_ERR_CRPB_PAR |
				  EDMA_ERR_INTRL_PAR |
				  EDMA_ERR_IORDY |
				  EDMA_ERR_LNK_CTRL_RX_2 |
				  EDMA_ERR_LNK_DATA_RX |
				  EDMA_ERR_LNK_DATA_TX |
				  EDMA_ERR_TRANS_PROTO,

	EDMA_EH_FREEZE_5	= EDMA_ERR_D_PAR |
				  EDMA_ERR_PRD_PAR |
				  EDMA_ERR_DEV_DCON |
				  EDMA_ERR_DEV_CON |
				  EDMA_ERR_OVERRUN_5 |
				  EDMA_ERR_UNDERRUN_5 |
				  EDMA_ERR_SELF_DIS_5 |
				  EDMA_ERR_CRQB_PAR |
				  EDMA_ERR_CRPB_PAR |
				  EDMA_ERR_INTRL_PAR |
				  EDMA_ERR_IORDY,
	EDMA_REQ_Q_BASE_HI_OFS	= 0x10,
	EDMA_REQ_Q_IN_PTR_OFS	= 0x14,		/* also contains BASE_LO */

	EDMA_REQ_Q_OUT_PTR_OFS	= 0x18,
	EDMA_REQ_Q_PTR_SHIFT	= 5,

	EDMA_RSP_Q_BASE_HI_OFS	= 0x1c,
	EDMA_RSP_Q_IN_PTR_OFS	= 0x20,
	EDMA_RSP_Q_OUT_PTR_OFS	= 0x24,		/* also contains BASE_LO */
	EDMA_RSP_Q_PTR_SHIFT	= 3,

	EDMA_CMD_OFS		= 0x28,		/* EDMA command register */
	EDMA_EN			= (1 << 0),	/* enable EDMA */
	EDMA_DS			= (1 << 1),	/* disable EDMA; self-negated */
	ATA_RST			= (1 << 2),	/* reset trans/link/phy */

	EDMA_IORDY_TMOUT	= 0x34,

	/* Host private flags (hp_flags) */
	MV_HP_FLAG_MSI		= (1 << 0),
	MV_HP_ERRATA_50XXB0	= (1 << 1),
	MV_HP_ERRATA_50XXB2	= (1 << 2),
	MV_HP_ERRATA_60X1B2	= (1 << 3),
	MV_HP_ERRATA_60X1C0	= (1 << 4),
	MV_HP_ERRATA_XX42A0	= (1 << 5),
	MV_HP_GEN_I		= (1 << 6),	/* Generation I: 50xx */
	MV_HP_GEN_II		= (1 << 7),	/* Generation II: 60xx */
	MV_HP_GEN_IIE		= (1 << 8),	/* Generation IIE: 6042/7042 */
	MV_HP_PCIE		= (1 << 9),	/* PCIe bus/regs: 7042 */

	/* Port private flags (pp_flags) */
	MV_PP_FLAG_EDMA_EN	= (1 << 0),	/* is EDMA engine enabled? */
	MV_PP_FLAG_HAD_A_RESET	= (1 << 2),	/* 1st hard reset complete? */
};

#define IS_GEN_I(hpriv) ((hpriv)->hp_flags & MV_HP_GEN_I)
#define IS_GEN_II(hpriv) ((hpriv)->hp_flags & MV_HP_GEN_II)
#define IS_GEN_IIE(hpriv) ((hpriv)->hp_flags & MV_HP_GEN_IIE)
enum {
	/* DMA boundary 0xffff is required by the s/g splitting
	 * we need on /length/ in mv_fill_sg().
	 */
	MV_DMA_BOUNDARY		= 0xffffU,

	/* mask of register bits containing lower 32 bits
	 * of EDMA request queue DMA address
	 */
	EDMA_REQ_Q_BASE_LO_MASK	= 0xfffffc00U,

	/* ditto, for response queue */
	EDMA_RSP_Q_BASE_LO_MASK	= 0xffffff00U,
};
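/*
 * Worked example of the register map above (illustrative only, derived
 * from the constants as defined): the EDMA block for global port 5 on a
 * dual-HC chip lives at
 *
 *	base + MV_SATAHC0_REG_BASE	(+ 0x20000)
 *	     + 1 * MV_SATAHC_REG_SZ	(+ 0x10000, HC1)
 *	     + MV_SATAHC_ARBTR_REG_SZ	(+ 0x2000, skip the HC arbiter)
 *	     + 1 * MV_PORT_REG_SZ	(+ 0x2000, hard port 1)
 *
 * = base + 0x34000, which is exactly what mv_port_base() computes below.
 */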
enum chip_type {
	chip_504x,
	chip_5080,
	chip_508x,
	chip_604x,
	chip_608x,
	chip_6042,
	chip_7042,
};

/* Command ReQuest Block: 32B */
struct mv_crqb {
	__le32			sg_addr;
	__le32			sg_addr_hi;
	__le16			ctrl_flags;
	__le16			ata_cmd[11];
};

struct mv_crqb_iie {
	__le32			addr;
	__le32			addr_hi;
	__le32			flags;
	__le32			len;
	__le32			ata_cmd[4];
};

/* Command ResPonse Block: 8B */
struct mv_crpb {
	__le16			id;
	__le16			flags;
	__le32			tmstmp;
};

/* EDMA Physical Region Descriptor (ePRD); A.K.A. SG */
struct mv_sg {
	__le32			addr;
	__le32			flags_size;
	__le32			addr_hi;
	__le32			reserved;
};

struct mv_port_priv {
	struct mv_crqb		*crqb;
	dma_addr_t		crqb_dma;
	struct mv_crpb		*crpb;
	dma_addr_t		crpb_dma;
	struct mv_sg		*sg_tbl;
	dma_addr_t		sg_tbl_dma;

	unsigned int		req_idx;
	unsigned int		resp_idx;

	u32			pp_flags;
};
struct mv_port_signal {
	u32			amps;
	u32			pre;
};

struct mv_host_priv {
	u32			hp_flags;
	struct mv_port_signal	signal[8];
	const struct mv_hw_ops	*ops;
	u32			irq_cause_ofs;
	u32			irq_mask_ofs;
	u32			unmask_all_irqs;
};

struct mv_hw_ops {
	void (*phy_errata)(struct mv_host_priv *hpriv, void __iomem *mmio,
			   unsigned int port);
	void (*enable_leds)(struct mv_host_priv *hpriv, void __iomem *mmio);
	void (*read_preamp)(struct mv_host_priv *hpriv, int idx,
			    void __iomem *mmio);
	int (*reset_hc)(struct mv_host_priv *hpriv, void __iomem *mmio,
			unsigned int n_hc);
	void (*reset_flash)(struct mv_host_priv *hpriv, void __iomem *mmio);
	void (*reset_bus)(struct pci_dev *pdev, void __iomem *mmio);
};
static void mv_irq_clear(struct ata_port *ap);
static int mv_scr_read(struct ata_port *ap, unsigned int sc_reg_in, u32 *val);
static int mv_scr_write(struct ata_port *ap, unsigned int sc_reg_in, u32 val);
static int mv5_scr_read(struct ata_port *ap, unsigned int sc_reg_in, u32 *val);
static int mv5_scr_write(struct ata_port *ap, unsigned int sc_reg_in, u32 val);
static int mv_port_start(struct ata_port *ap);
static void mv_port_stop(struct ata_port *ap);
static void mv_qc_prep(struct ata_queued_cmd *qc);
static void mv_qc_prep_iie(struct ata_queued_cmd *qc);
static unsigned int mv_qc_issue(struct ata_queued_cmd *qc);
static void mv_error_handler(struct ata_port *ap);
static void mv_post_int_cmd(struct ata_queued_cmd *qc);
static void mv_eh_freeze(struct ata_port *ap);
static void mv_eh_thaw(struct ata_port *ap);
static int mv_init_one(struct pci_dev *pdev, const struct pci_device_id *ent);

static void mv5_phy_errata(struct mv_host_priv *hpriv, void __iomem *mmio,
			   unsigned int port);
static void mv5_enable_leds(struct mv_host_priv *hpriv, void __iomem *mmio);
static void mv5_read_preamp(struct mv_host_priv *hpriv, int idx,
			    void __iomem *mmio);
static int mv5_reset_hc(struct mv_host_priv *hpriv, void __iomem *mmio,
			unsigned int n_hc);
static void mv5_reset_flash(struct mv_host_priv *hpriv, void __iomem *mmio);
static void mv5_reset_bus(struct pci_dev *pdev, void __iomem *mmio);

static void mv6_phy_errata(struct mv_host_priv *hpriv, void __iomem *mmio,
			   unsigned int port);
static void mv6_enable_leds(struct mv_host_priv *hpriv, void __iomem *mmio);
static void mv6_read_preamp(struct mv_host_priv *hpriv, int idx,
			    void __iomem *mmio);
static int mv6_reset_hc(struct mv_host_priv *hpriv, void __iomem *mmio,
			unsigned int n_hc);
static void mv6_reset_flash(struct mv_host_priv *hpriv, void __iomem *mmio);
static void mv_reset_pci_bus(struct pci_dev *pdev, void __iomem *mmio);
static void mv_channel_reset(struct mv_host_priv *hpriv, void __iomem *mmio,
			     unsigned int port_no);
static void mv_edma_cfg(struct ata_port *ap, struct mv_host_priv *hpriv,
			void __iomem *port_mmio);
static struct scsi_host_template mv5_sht = {
	.module			= THIS_MODULE,
	.name			= DRV_NAME,
	.ioctl			= ata_scsi_ioctl,
	.queuecommand		= ata_scsi_queuecmd,
	.can_queue		= ATA_DEF_QUEUE,
	.this_id		= ATA_SHT_THIS_ID,
	.sg_tablesize		= MV_MAX_SG_CT / 2,
	.cmd_per_lun		= ATA_SHT_CMD_PER_LUN,
	.emulated		= ATA_SHT_EMULATED,
	.use_clustering		= 1,
	.proc_name		= DRV_NAME,
	.dma_boundary		= MV_DMA_BOUNDARY,
	.slave_configure	= ata_scsi_slave_config,
	.slave_destroy		= ata_scsi_slave_destroy,
	.bios_param		= ata_std_bios_param,
};
static struct scsi_host_template mv6_sht = {
	.module			= THIS_MODULE,
	.name			= DRV_NAME,
	.ioctl			= ata_scsi_ioctl,
	.queuecommand		= ata_scsi_queuecmd,
	.can_queue		= ATA_DEF_QUEUE,
	.this_id		= ATA_SHT_THIS_ID,
	.sg_tablesize		= MV_MAX_SG_CT / 2,
	.cmd_per_lun		= ATA_SHT_CMD_PER_LUN,
	.emulated		= ATA_SHT_EMULATED,
	.use_clustering		= 1,
	.proc_name		= DRV_NAME,
	.dma_boundary		= MV_DMA_BOUNDARY,
	.slave_configure	= ata_scsi_slave_config,
	.slave_destroy		= ata_scsi_slave_destroy,
	.bios_param		= ata_std_bios_param,
};
static const struct ata_port_operations mv5_ops = {
	.tf_load		= ata_tf_load,
	.tf_read		= ata_tf_read,
	.check_status		= ata_check_status,
	.exec_command		= ata_exec_command,
	.dev_select		= ata_std_dev_select,

	.cable_detect		= ata_cable_sata,

	.qc_prep		= mv_qc_prep,
	.qc_issue		= mv_qc_issue,
	.data_xfer		= ata_data_xfer,

	.irq_clear		= mv_irq_clear,
	.irq_on			= ata_irq_on,

	.error_handler		= mv_error_handler,
	.post_internal_cmd	= mv_post_int_cmd,
	.freeze			= mv_eh_freeze,
	.thaw			= mv_eh_thaw,

	.scr_read		= mv5_scr_read,
	.scr_write		= mv5_scr_write,

	.port_start		= mv_port_start,
	.port_stop		= mv_port_stop,
};
static const struct ata_port_operations mv6_ops = {
	.tf_load		= ata_tf_load,
	.tf_read		= ata_tf_read,
	.check_status		= ata_check_status,
	.exec_command		= ata_exec_command,
	.dev_select		= ata_std_dev_select,

	.cable_detect		= ata_cable_sata,

	.qc_prep		= mv_qc_prep,
	.qc_issue		= mv_qc_issue,
	.data_xfer		= ata_data_xfer,

	.irq_clear		= mv_irq_clear,
	.irq_on			= ata_irq_on,

	.error_handler		= mv_error_handler,
	.post_internal_cmd	= mv_post_int_cmd,
	.freeze			= mv_eh_freeze,
	.thaw			= mv_eh_thaw,

	.scr_read		= mv_scr_read,
	.scr_write		= mv_scr_write,

	.port_start		= mv_port_start,
	.port_stop		= mv_port_stop,
};
static const struct ata_port_operations mv_iie_ops = {
	.tf_load		= ata_tf_load,
	.tf_read		= ata_tf_read,
	.check_status		= ata_check_status,
	.exec_command		= ata_exec_command,
	.dev_select		= ata_std_dev_select,

	.cable_detect		= ata_cable_sata,

	.qc_prep		= mv_qc_prep_iie,
	.qc_issue		= mv_qc_issue,
	.data_xfer		= ata_data_xfer,

	.irq_clear		= mv_irq_clear,
	.irq_on			= ata_irq_on,

	.error_handler		= mv_error_handler,
	.post_internal_cmd	= mv_post_int_cmd,
	.freeze			= mv_eh_freeze,
	.thaw			= mv_eh_thaw,

	.scr_read		= mv_scr_read,
	.scr_write		= mv_scr_write,

	.port_start		= mv_port_start,
	.port_stop		= mv_port_stop,
};
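/* The three ops tables above differ only where the hardware does:
 * Gen I (50xx) reaches its SCRs through the per-PHY block
 * (mv5_scr_read/write), Gen II/IIE use the port registers
 * (mv_scr_read/write), and Gen IIE additionally packs requests in the
 * IIE CRQB format, hence mv_qc_prep_iie.
 */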
static const struct ata_port_info mv_port_info[] = {
	{  /* chip_504x */
		.flags		= MV_COMMON_FLAGS,
		.pio_mask	= 0x1f,	/* pio0-4 */
		.udma_mask	= ATA_UDMA6,
		.port_ops	= &mv5_ops,
	},
	{  /* chip_5080 */
		.flags		= MV_COMMON_FLAGS | MV_FLAG_DUAL_HC,
		.pio_mask	= 0x1f,	/* pio0-4 */
		.udma_mask	= ATA_UDMA6,
		.port_ops	= &mv5_ops,
	},
	{  /* chip_508x */
		.flags		= MV_COMMON_FLAGS | MV_FLAG_DUAL_HC,
		.pio_mask	= 0x1f,	/* pio0-4 */
		.udma_mask	= ATA_UDMA6,
		.port_ops	= &mv5_ops,
	},
	{  /* chip_604x */
		.flags		= MV_COMMON_FLAGS | MV_6XXX_FLAGS,
		.pio_mask	= 0x1f,	/* pio0-4 */
		.udma_mask	= ATA_UDMA6,
		.port_ops	= &mv6_ops,
	},
	{  /* chip_608x */
		.flags		= MV_COMMON_FLAGS | MV_6XXX_FLAGS |
				  MV_FLAG_DUAL_HC,
		.pio_mask	= 0x1f,	/* pio0-4 */
		.udma_mask	= ATA_UDMA6,
		.port_ops	= &mv6_ops,
	},
	{  /* chip_6042 */
		.flags		= MV_COMMON_FLAGS | MV_6XXX_FLAGS,
		.pio_mask	= 0x1f,	/* pio0-4 */
		.udma_mask	= ATA_UDMA6,
		.port_ops	= &mv_iie_ops,
	},
	{  /* chip_7042 */
		.flags		= MV_COMMON_FLAGS | MV_6XXX_FLAGS,
		.pio_mask	= 0x1f,	/* pio0-4 */
		.udma_mask	= ATA_UDMA6,
		.port_ops	= &mv_iie_ops,
	},
};
static const struct pci_device_id mv_pci_tbl[] = {
	{ PCI_VDEVICE(MARVELL, 0x5040), chip_504x },
	{ PCI_VDEVICE(MARVELL, 0x5041), chip_504x },
	{ PCI_VDEVICE(MARVELL, 0x5080), chip_5080 },
	{ PCI_VDEVICE(MARVELL, 0x5081), chip_508x },
	/* RocketRAID 1740/174x have different identifiers */
	{ PCI_VDEVICE(TTI, 0x1740), chip_508x },
	{ PCI_VDEVICE(TTI, 0x1742), chip_508x },

	{ PCI_VDEVICE(MARVELL, 0x6040), chip_604x },
	{ PCI_VDEVICE(MARVELL, 0x6041), chip_604x },
	{ PCI_VDEVICE(MARVELL, 0x6042), chip_6042 },
	{ PCI_VDEVICE(MARVELL, 0x6080), chip_608x },
	{ PCI_VDEVICE(MARVELL, 0x6081), chip_608x },

	{ PCI_VDEVICE(ADAPTEC2, 0x0241), chip_604x },

	{ PCI_VDEVICE(ADAPTEC2, 0x0243), chip_7042 },

	/* Marvell 7042 support */
	{ PCI_VDEVICE(MARVELL, 0x7042), chip_7042 },

	/* Highpoint RocketRAID PCIe series */
	{ PCI_VDEVICE(TTI, 0x2300), chip_7042 },
	{ PCI_VDEVICE(TTI, 0x2310), chip_7042 },

	{ }			/* terminate list */
};
static struct pci_driver mv_pci_driver = {
	.name			= DRV_NAME,
	.id_table		= mv_pci_tbl,
	.probe			= mv_init_one,
	.remove			= ata_pci_remove_one,
};
static const struct mv_hw_ops mv5xxx_ops = {
	.phy_errata		= mv5_phy_errata,
	.enable_leds		= mv5_enable_leds,
	.read_preamp		= mv5_read_preamp,
	.reset_hc		= mv5_reset_hc,
	.reset_flash		= mv5_reset_flash,
	.reset_bus		= mv5_reset_bus,
};
static const struct mv_hw_ops mv6xxx_ops = {
	.phy_errata		= mv6_phy_errata,
	.enable_leds		= mv6_enable_leds,
	.read_preamp		= mv6_read_preamp,
	.reset_hc		= mv6_reset_hc,
	.reset_flash		= mv6_reset_flash,
	.reset_bus		= mv_reset_pci_bus,
};
/*
 * module options
 */
static int msi;	      /* Use PCI MSI; either zero (off, default) or non-zero */
/* move to PCI layer or libata core? */
static int pci_go_64(struct pci_dev *pdev)
{
	int rc;

	if (!pci_set_dma_mask(pdev, DMA_64BIT_MASK)) {
		rc = pci_set_consistent_dma_mask(pdev, DMA_64BIT_MASK);
		if (rc) {
			rc = pci_set_consistent_dma_mask(pdev, DMA_32BIT_MASK);
			if (rc) {
				dev_printk(KERN_ERR, &pdev->dev,
					   "64-bit DMA enable failed\n");
				return rc;
			}
		}
	} else {
		rc = pci_set_dma_mask(pdev, DMA_32BIT_MASK);
		if (rc) {
			dev_printk(KERN_ERR, &pdev->dev,
				   "32-bit DMA enable failed\n");
			return rc;
		}
		rc = pci_set_consistent_dma_mask(pdev, DMA_32BIT_MASK);
		if (rc) {
			dev_printk(KERN_ERR, &pdev->dev,
				   "32-bit consistent DMA enable failed\n");
			return rc;
		}
	}

	return rc;
}
static inline void writelfl(unsigned long data, void __iomem *addr)
{
	writel(data, addr);
	(void) readl(addr);	/* flush to avoid PCI posted write */
}
static inline void __iomem *mv_hc_base(void __iomem *base, unsigned int hc)
{
	return (base + MV_SATAHC0_REG_BASE + (hc * MV_SATAHC_REG_SZ));
}

static inline unsigned int mv_hc_from_port(unsigned int port)
{
	return port >> MV_PORT_HC_SHIFT;
}

static inline unsigned int mv_hardport_from_port(unsigned int port)
{
	return port & MV_PORT_MASK;
}

static inline void __iomem *mv_hc_base_from_port(void __iomem *base,
						 unsigned int port)
{
	return mv_hc_base(base, mv_hc_from_port(port));
}

static inline void __iomem *mv_port_base(void __iomem *base, unsigned int port)
{
	return mv_hc_base_from_port(base, port) +
		MV_SATAHC_ARBTR_REG_SZ +
		(mv_hardport_from_port(port) * MV_PORT_REG_SZ);
}

static inline void __iomem *mv_ap_base(struct ata_port *ap)
{
	return mv_port_base(ap->host->iomap[MV_PRIMARY_BAR], ap->port_no);
}

static inline int mv_get_hc_count(unsigned long port_flags)
{
	return ((port_flags & MV_FLAG_DUAL_HC) ? 2 : 1);
}
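/* Example of the port numbering helpers above (illustrative): on an
 * 8-port, dual-HC chip, global port 6 yields
 *
 *	mv_hc_from_port(6)       == 6 >> MV_PORT_HC_SHIFT == 1	(HC1)
 *	mv_hardport_from_port(6) == 6 & MV_PORT_MASK      == 2	(3rd port)
 */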
static void mv_irq_clear(struct ata_port *ap)
{
}
static void mv_set_edma_ptrs(void __iomem *port_mmio,
			     struct mv_host_priv *hpriv,
			     struct mv_port_priv *pp)
{
	u32 index;

	/*
	 * initialize request queue
	 */
	index = (pp->req_idx & MV_MAX_Q_DEPTH_MASK) << EDMA_REQ_Q_PTR_SHIFT;

	WARN_ON(pp->crqb_dma & 0x3ff);
	writel((pp->crqb_dma >> 16) >> 16, port_mmio + EDMA_REQ_Q_BASE_HI_OFS);
	writelfl((pp->crqb_dma & EDMA_REQ_Q_BASE_LO_MASK) | index,
		 port_mmio + EDMA_REQ_Q_IN_PTR_OFS);

	if (hpriv->hp_flags & MV_HP_ERRATA_XX42A0)
		writelfl((pp->crqb_dma & 0xffffffff) | index,
			 port_mmio + EDMA_REQ_Q_OUT_PTR_OFS);
	else
		writelfl(index, port_mmio + EDMA_REQ_Q_OUT_PTR_OFS);

	/*
	 * initialize response queue
	 */
	index = (pp->resp_idx & MV_MAX_Q_DEPTH_MASK) << EDMA_RSP_Q_PTR_SHIFT;

	WARN_ON(pp->crpb_dma & 0xff);
	writel((pp->crpb_dma >> 16) >> 16, port_mmio + EDMA_RSP_Q_BASE_HI_OFS);

	if (hpriv->hp_flags & MV_HP_ERRATA_XX42A0)
		writelfl((pp->crpb_dma & 0xffffffff) | index,
			 port_mmio + EDMA_RSP_Q_IN_PTR_OFS);
	else
		writelfl(index, port_mmio + EDMA_RSP_Q_IN_PTR_OFS);

	writelfl((pp->crpb_dma & EDMA_RSP_Q_BASE_LO_MASK) | index,
		 port_mmio + EDMA_RSP_Q_OUT_PTR_OFS);
}
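/* The IN/OUT pointer registers double as base-address registers: the
 * ring base occupies the upper bits (EDMA_*_Q_BASE_LO_MASK) and the
 * ring index sits just below it, shifted by EDMA_*_Q_PTR_SHIFT.  With
 * 32-byte CRQB entries the request index occupies bits 9:5, hence the
 * 1KB alignment check above; the 8-byte CRPB entries use bits 7:3 and
 * 256B alignment.
 */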
/**
 * mv_start_dma - Enable eDMA engine
 * @ap: ATA channel to manipulate
 * @port_mmio: port base address
 * @pp: port private data
 *
 * Verify the local cache of the eDMA state is accurate with a
 * WARN_ON.
 *
 * LOCKING:
 *      Inherited from caller.
 */
static void mv_start_dma(struct ata_port *ap, void __iomem *port_mmio,
			 struct mv_port_priv *pp)
{
	if (!(pp->pp_flags & MV_PP_FLAG_EDMA_EN)) {
		struct mv_host_priv *hpriv = ap->host->private_data;
		int hard_port = mv_hardport_from_port(ap->port_no);
		void __iomem *hc_mmio = mv_hc_base_from_port(
				ap->host->iomap[MV_PRIMARY_BAR], hard_port);
		u32 hc_irq_cause, ipending;

		/* clear EDMA event indicators, if any */
		writelfl(0, port_mmio + EDMA_ERR_IRQ_CAUSE_OFS);

		/* clear EDMA interrupt indicator, if any */
		hc_irq_cause = readl(hc_mmio + HC_IRQ_CAUSE_OFS);
		ipending = (DEV_IRQ << hard_port) |
			   (CRPB_DMA_DONE << hard_port);
		if (hc_irq_cause & ipending) {
			writelfl(hc_irq_cause & ~ipending,
				 hc_mmio + HC_IRQ_CAUSE_OFS);
		}

		mv_edma_cfg(ap, hpriv, port_mmio);

		/* clear FIS IRQ Cause */
		writelfl(0, port_mmio + SATA_FIS_IRQ_CAUSE_OFS);

		mv_set_edma_ptrs(port_mmio, hpriv, pp);

		writelfl(EDMA_EN, port_mmio + EDMA_CMD_OFS);
		pp->pp_flags |= MV_PP_FLAG_EDMA_EN;
	}
	WARN_ON(!(EDMA_EN & readl(port_mmio + EDMA_CMD_OFS)));
}
/**
 * __mv_stop_dma - Disable eDMA engine
 * @ap: ATA channel to manipulate
 *
 * Verify the local cache of the eDMA state is accurate with a
 * WARN_ON.
 *
 * LOCKING:
 *      Inherited from caller.
 */
static int __mv_stop_dma(struct ata_port *ap)
{
	void __iomem *port_mmio = mv_ap_base(ap);
	struct mv_port_priv *pp	= ap->private_data;
	u32 reg;
	int i, err = 0;

	if (pp->pp_flags & MV_PP_FLAG_EDMA_EN) {
		/* Disable EDMA if active.  The disable bit auto clears.
		 */
		writelfl(EDMA_DS, port_mmio + EDMA_CMD_OFS);
		pp->pp_flags &= ~MV_PP_FLAG_EDMA_EN;
	} else {
		WARN_ON(EDMA_EN & readl(port_mmio + EDMA_CMD_OFS));
	}

	/* now properly wait for the eDMA to stop */
	for (i = 1000; i > 0; i--) {
		reg = readl(port_mmio + EDMA_CMD_OFS);
		if (!(reg & EDMA_EN))
			break;
		udelay(100);
	}

	if (reg & EDMA_EN) {
		ata_port_printk(ap, KERN_ERR, "Unable to stop eDMA\n");
		err = -EIO;
	}

	return err;
}

static int mv_stop_dma(struct ata_port *ap)
{
	unsigned long flags;
	int rc;

	spin_lock_irqsave(&ap->host->lock, flags);
	rc = __mv_stop_dma(ap);
	spin_unlock_irqrestore(&ap->host->lock, flags);

	return rc;
}
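/* __mv_stop_dma() must be called with the host lock already held;
 * mv_stop_dma() is the self-locking wrapper for callers that enter
 * without it.
 */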
#ifdef ATA_DEBUG
static void mv_dump_mem(void __iomem *start, unsigned bytes)
{
	int b, w;
	for (b = 0; b < bytes; ) {
		DPRINTK("%p: ", start + b);
		for (w = 0; b < bytes && w < 4; w++) {
			printk("%08x ", readl(start + b));
			b += sizeof(u32);
		}
		printk("\n");
	}
}
#endif

static void mv_dump_pci_cfg(struct pci_dev *pdev, unsigned bytes)
{
#ifdef ATA_DEBUG
	int b, w;
	u32 dw;
	for (b = 0; b < bytes; ) {
		DPRINTK("%02x: ", b);
		for (w = 0; b < bytes && w < 4; w++) {
			(void) pci_read_config_dword(pdev, b, &dw);
			printk("%08x ", dw);
			b += sizeof(u32);
		}
		printk("\n");
	}
#endif
}
static void mv_dump_all_regs(void __iomem *mmio_base, int port,
			     struct pci_dev *pdev)
{
#ifdef ATA_DEBUG
	void __iomem *hc_base = mv_hc_base(mmio_base,
					   port >> MV_PORT_HC_SHIFT);
	void __iomem *port_base;
	int start_port, num_ports, p, start_hc, num_hcs, hc;

	if (port < 0) {
		start_hc = start_port = 0;
		num_ports = 8;		/* should be benign for 4 port devs */
		num_hcs = 2;
	} else {
		start_hc = port >> MV_PORT_HC_SHIFT;
		start_port = port;
		num_ports = num_hcs = 1;
	}
	DPRINTK("All registers for port(s) %u-%u:\n", start_port,
		num_ports > 1 ? num_ports - 1 : start_port);

	if (NULL != pdev) {
		DPRINTK("PCI config space regs:\n");
		mv_dump_pci_cfg(pdev, 0x68);
	}
	DPRINTK("PCI regs:\n");
	mv_dump_mem(mmio_base+0xc00, 0x3c);
	mv_dump_mem(mmio_base+0xd00, 0x34);
	mv_dump_mem(mmio_base+0xf00, 0x4);
	mv_dump_mem(mmio_base+0x1d00, 0x6c);
	for (hc = start_hc; hc < start_hc + num_hcs; hc++) {
		hc_base = mv_hc_base(mmio_base, hc);
		DPRINTK("HC regs (HC %i):\n", hc);
		mv_dump_mem(hc_base, 0x1c);
	}
	for (p = start_port; p < start_port + num_ports; p++) {
		port_base = mv_port_base(mmio_base, p);
		DPRINTK("EDMA regs (port %i):\n", p);
		mv_dump_mem(port_base, 0x54);
		DPRINTK("SATA regs (port %i):\n", p);
		mv_dump_mem(port_base+0x300, 0x60);
	}
#endif
}
static unsigned int mv_scr_offset(unsigned int sc_reg_in)
{
	unsigned int ofs;

	switch (sc_reg_in) {
	case SCR_STATUS:
	case SCR_CONTROL:
	case SCR_ERROR:
		ofs = SATA_STATUS_OFS + (sc_reg_in * sizeof(u32));
		break;
	case SCR_ACTIVE:
		ofs = SATA_ACTIVE_OFS;   /* active is not with the others */
		break;
	default:
		ofs = 0xffffffffU;
		break;
	}
	return ofs;
}

static int mv_scr_read(struct ata_port *ap, unsigned int sc_reg_in, u32 *val)
{
	unsigned int ofs = mv_scr_offset(sc_reg_in);

	if (ofs != 0xffffffffU) {
		*val = readl(mv_ap_base(ap) + ofs);
		return 0;
	} else
		return -EINVAL;
}

static int mv_scr_write(struct ata_port *ap, unsigned int sc_reg_in, u32 val)
{
	unsigned int ofs = mv_scr_offset(sc_reg_in);

	if (ofs != 0xffffffffU) {
		writelfl(val, mv_ap_base(ap) + ofs);
		return 0;
	} else
		return -EINVAL;
}
static void mv_edma_cfg(struct ata_port *ap, struct mv_host_priv *hpriv,
			void __iomem *port_mmio)
{
	u32 cfg;

	/* set up non-NCQ EDMA configuration */
	cfg = EDMA_CFG_Q_DEPTH;		/* always 0x1f for *all* chips */

	if (IS_GEN_I(hpriv))
		cfg |= (1 << 8);	/* enab config burst size mask */

	else if (IS_GEN_II(hpriv))
		cfg |= EDMA_CFG_RD_BRST_EXT | EDMA_CFG_WR_BUFF_LEN;

	else if (IS_GEN_IIE(hpriv)) {
		cfg |= (1 << 23);	/* do not mask PM field in rx'd FIS */
		cfg |= (1 << 22);	/* enab 4-entry host queue cache */
		cfg |= (1 << 18);	/* enab early completion */
		cfg |= (1 << 17);	/* enab cut-through (dis stor&forwrd) */
	}

	writelfl(cfg, port_mmio + EDMA_CFG_OFS);
}
/**
 * mv_port_start - Port specific init/start routine.
 * @ap: ATA channel to manipulate
 *
 * Allocate and point to DMA memory, and initialize port private
 * memory.
 *
 * LOCKING:
 *      Inherited from caller.
 */
static int mv_port_start(struct ata_port *ap)
{
	struct device *dev = ap->host->dev;
	struct mv_host_priv *hpriv = ap->host->private_data;
	struct mv_port_priv *pp;
	void __iomem *port_mmio = mv_ap_base(ap);
	void *mem;
	dma_addr_t mem_dma;
	unsigned long flags;
	int rc;

	pp = devm_kzalloc(dev, sizeof(*pp), GFP_KERNEL);
	if (!pp)
		return -ENOMEM;

	mem = dmam_alloc_coherent(dev, MV_PORT_PRIV_DMA_SZ, &mem_dma,
				  GFP_KERNEL);
	if (!mem)
		return -ENOMEM;
	memset(mem, 0, MV_PORT_PRIV_DMA_SZ);

	rc = ata_pad_alloc(ap, dev);
	if (rc)
		return rc;

	/* First item in chunk of DMA memory:
	 * 32-slot command request table (CRQB), 32 bytes each in size
	 */
	pp->crqb = mem;
	pp->crqb_dma = mem_dma;
	mem += MV_CRQB_Q_SZ;
	mem_dma += MV_CRQB_Q_SZ;

	/* Second item:
	 * 32-slot command response table (CRPB), 8 bytes each in size
	 */
	pp->crpb = mem;
	pp->crpb_dma = mem_dma;
	mem += MV_CRPB_Q_SZ;
	mem_dma += MV_CRPB_Q_SZ;

	/* Third item:
	 * Table of scatter-gather descriptors (ePRD), 16 bytes each
	 */
	pp->sg_tbl = mem;
	pp->sg_tbl_dma = mem_dma;

	spin_lock_irqsave(&ap->host->lock, flags);

	mv_edma_cfg(ap, hpriv, port_mmio);

	mv_set_edma_ptrs(port_mmio, hpriv, pp);

	spin_unlock_irqrestore(&ap->host->lock, flags);

	/* Don't turn on EDMA here...do it before DMA commands only.  Else
	 * we'll be unable to send non-data, PIO, etc due to restricted access
	 * to shadow regs.
	 */
	ap->private_data = pp;
	return 0;
}
/**
 * mv_port_stop - Port specific cleanup/stop routine.
 * @ap: ATA channel to manipulate
 *
 * Stop DMA, cleanup port memory.
 *
 * LOCKING:
 *      This routine uses the host lock to protect the DMA stop.
 */
static void mv_port_stop(struct ata_port *ap)
{
	mv_stop_dma(ap);
}
/**
 * mv_fill_sg - Fill out the Marvell ePRD (scatter gather) entries
 * @qc: queued command whose SG list to source from
 *
 * Populate the SG list and mark the last entry.
 *
 * LOCKING:
 *      Inherited from caller.
 */
static void mv_fill_sg(struct ata_queued_cmd *qc)
{
	struct mv_port_priv *pp = qc->ap->private_data;
	struct scatterlist *sg;
	struct mv_sg *mv_sg, *last_sg = NULL;
	unsigned int si;

	mv_sg = pp->sg_tbl;
	for_each_sg(qc->sg, sg, qc->n_elem, si) {
		dma_addr_t addr = sg_dma_address(sg);
		u32 sg_len = sg_dma_len(sg);

		while (sg_len) {
			u32 offset = addr & 0xffff;
			u32 len = sg_len;

			if ((offset + sg_len > 0x10000))
				len = 0x10000 - offset;

			mv_sg->addr = cpu_to_le32(addr & 0xffffffff);
			mv_sg->addr_hi = cpu_to_le32((addr >> 16) >> 16);
			mv_sg->flags_size = cpu_to_le32(len & 0xffff);

			sg_len -= len;
			addr += len;

			last_sg = mv_sg;
			mv_sg++;
		}
	}

	if (likely(last_sg))
		last_sg->flags_size |= cpu_to_le32(EPRD_FLAG_END_OF_TBL);
}
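/* Example of the 64KB splitting above (illustrative): a segment with
 * addr 0x1000ff00 and length 0x300 straddles a 64KB boundary, and
 * flags_size can only encode a length modulo 64KB, so two ePRDs get
 * emitted:
 *
 *	{ addr 0x1000ff00, len 0x100 }	(up to the boundary)
 *	{ addr 0x10010000, len 0x200 }	(the remainder)
 */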
static void mv_crqb_pack_cmd(__le16 *cmdw, u8 data, u8 addr, unsigned last)
{
	u16 tmp = data | (addr << CRQB_CMD_ADDR_SHIFT) | CRQB_CMD_CS |
		(last ? CRQB_CMD_LAST : 0);
	*cmdw = cpu_to_le16(tmp);
}
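/* Example (illustrative): packing the command-register write for
 * ATA_CMD_READ (0x20) as the final CRQB word gives
 *
 *	0x20 | (ATA_REG_CMD << CRQB_CMD_ADDR_SHIFT) | CRQB_CMD_CS | CRQB_CMD_LAST
 *
 * i.e. data in bits 7:0, shadow register address in bits 10:8, and the
 * control-select/last flags above those.
 */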
/**
 * mv_qc_prep - Host specific command preparation.
 * @qc: queued command to prepare
 *
 * This routine simply redirects to the general purpose routine
 * if command is not DMA.  Else, it handles prep of the CRQB
 * (command request block), does some sanity checking, and calls
 * the SG load routine.
 *
 * LOCKING:
 *      Inherited from caller.
 */
static void mv_qc_prep(struct ata_queued_cmd *qc)
{
	struct ata_port *ap = qc->ap;
	struct mv_port_priv *pp = ap->private_data;
	__le16 *cw;
	struct ata_taskfile *tf;
	u16 flags = 0;
	unsigned in_index;

	if (qc->tf.protocol != ATA_PROT_DMA)
		return;

	/* Fill in command request block
	 */
	if (!(qc->tf.flags & ATA_TFLAG_WRITE))
		flags |= CRQB_FLAG_READ;
	WARN_ON(MV_MAX_Q_DEPTH <= qc->tag);
	flags |= qc->tag << CRQB_TAG_SHIFT;
	flags |= qc->tag << CRQB_IOID_SHIFT;	/* 50xx appears to ignore this */

	/* get current queue index from software */
	in_index = pp->req_idx & MV_MAX_Q_DEPTH_MASK;

	pp->crqb[in_index].sg_addr =
		cpu_to_le32(pp->sg_tbl_dma & 0xffffffff);
	pp->crqb[in_index].sg_addr_hi =
		cpu_to_le32((pp->sg_tbl_dma >> 16) >> 16);
	pp->crqb[in_index].ctrl_flags = cpu_to_le16(flags);

	cw = &pp->crqb[in_index].ata_cmd[0];
	tf = &qc->tf;

	/* Sadly, the CRQB cannot accommodate all registers--there are
	 * only 11 bytes...so we must pick and choose required
	 * registers based on the command.  So, we drop feature and
	 * hob_feature for [RW] DMA commands, but they are needed for
	 * NCQ.  NCQ will drop hob_nsect.
	 */
	switch (tf->command) {
	case ATA_CMD_READ:
	case ATA_CMD_READ_EXT:
	case ATA_CMD_WRITE:
	case ATA_CMD_WRITE_EXT:
	case ATA_CMD_WRITE_FUA_EXT:
		mv_crqb_pack_cmd(cw++, tf->hob_nsect, ATA_REG_NSECT, 0);
		break;
#ifdef LIBATA_NCQ		/* FIXME: remove this line when NCQ added */
	case ATA_CMD_FPDMA_READ:
	case ATA_CMD_FPDMA_WRITE:
		mv_crqb_pack_cmd(cw++, tf->hob_feature, ATA_REG_FEATURE, 0);
		mv_crqb_pack_cmd(cw++, tf->feature, ATA_REG_FEATURE, 0);
		break;
#endif				/* FIXME: remove this line when NCQ added */
	default:
		/* The only other commands EDMA supports in non-queued and
		 * non-NCQ mode are: [RW] STREAM DMA and W DMA FUA EXT, none
		 * of which are defined/used by Linux.  If we get here, this
		 * driver needs work.
		 *
		 * FIXME: modify libata to give qc_prep a return value and
		 * return error here.
		 */
		BUG_ON(tf->command);
		break;
	}
	mv_crqb_pack_cmd(cw++, tf->nsect, ATA_REG_NSECT, 0);
	mv_crqb_pack_cmd(cw++, tf->hob_lbal, ATA_REG_LBAL, 0);
	mv_crqb_pack_cmd(cw++, tf->lbal, ATA_REG_LBAL, 0);
	mv_crqb_pack_cmd(cw++, tf->hob_lbam, ATA_REG_LBAM, 0);
	mv_crqb_pack_cmd(cw++, tf->lbam, ATA_REG_LBAM, 0);
	mv_crqb_pack_cmd(cw++, tf->hob_lbah, ATA_REG_LBAH, 0);
	mv_crqb_pack_cmd(cw++, tf->lbah, ATA_REG_LBAH, 0);
	mv_crqb_pack_cmd(cw++, tf->device, ATA_REG_DEVICE, 0);
	mv_crqb_pack_cmd(cw++, tf->command, ATA_REG_CMD, 1);	/* last */

	if (!(qc->flags & ATA_QCFLAG_DMAMAP))
		return;
	mv_fill_sg(qc);
}
/**
 * mv_qc_prep_iie - Host specific command preparation.
 * @qc: queued command to prepare
 *
 * This routine simply redirects to the general purpose routine
 * if command is not DMA.  Else, it handles prep of the CRQB
 * (command request block), does some sanity checking, and calls
 * the SG load routine.
 *
 * LOCKING:
 *      Inherited from caller.
 */
static void mv_qc_prep_iie(struct ata_queued_cmd *qc)
{
	struct ata_port *ap = qc->ap;
	struct mv_port_priv *pp = ap->private_data;
	struct mv_crqb_iie *crqb;
	struct ata_taskfile *tf;
	unsigned in_index;
	u32 flags = 0;

	if (qc->tf.protocol != ATA_PROT_DMA)
		return;

	/* Fill in Gen IIE command request block
	 */
	if (!(qc->tf.flags & ATA_TFLAG_WRITE))
		flags |= CRQB_FLAG_READ;

	WARN_ON(MV_MAX_Q_DEPTH <= qc->tag);
	flags |= qc->tag << CRQB_TAG_SHIFT;
	flags |= qc->tag << CRQB_IOID_SHIFT;	/* "I/O Id" is -really-
						   what we use as our tag */

	/* get current queue index from software */
	in_index = pp->req_idx & MV_MAX_Q_DEPTH_MASK;

	crqb = (struct mv_crqb_iie *) &pp->crqb[in_index];
	crqb->addr = cpu_to_le32(pp->sg_tbl_dma & 0xffffffff);
	crqb->addr_hi = cpu_to_le32((pp->sg_tbl_dma >> 16) >> 16);
	crqb->flags = cpu_to_le32(flags);

	tf = &qc->tf;
	crqb->ata_cmd[0] = cpu_to_le32(
			(tf->command << 16) |
			(tf->feature << 24)
		);
	crqb->ata_cmd[1] = cpu_to_le32(
			(tf->lbal << 0) |
			(tf->lbam << 8) |
			(tf->lbah << 16) |
			(tf->device << 24)
		);
	crqb->ata_cmd[2] = cpu_to_le32(
			(tf->hob_lbal << 0) |
			(tf->hob_lbam << 8) |
			(tf->hob_lbah << 16) |
			(tf->hob_feature << 24)
		);
	crqb->ata_cmd[3] = cpu_to_le32(
			(tf->nsect << 0) |
			(tf->hob_nsect << 8)
		);

	if (!(qc->flags & ATA_QCFLAG_DMAMAP))
		return;
	mv_fill_sg(qc);
}
/**
 * mv_qc_issue - Initiate a command to the host
 * @qc: queued command to start
 *
 * This routine simply redirects to the general purpose routine
 * if command is not DMA.  Else, it sanity checks our local
 * caches of the request producer/consumer indices then enables
 * DMA and bumps the request producer index.
 *
 * LOCKING:
 *      Inherited from caller.
 */
static unsigned int mv_qc_issue(struct ata_queued_cmd *qc)
{
	struct ata_port *ap = qc->ap;
	void __iomem *port_mmio = mv_ap_base(ap);
	struct mv_port_priv *pp = ap->private_data;
	u32 in_index;

	if (qc->tf.protocol != ATA_PROT_DMA) {
		/* We're about to send a non-EDMA capable command to the
		 * port.  Turn off EDMA so there won't be problems accessing
		 * shadow block, etc registers.
		 */
		__mv_stop_dma(ap);
		return ata_qc_issue_prot(qc);
	}

	mv_start_dma(ap, port_mmio, pp);

	in_index = pp->req_idx & MV_MAX_Q_DEPTH_MASK;

	/* until we do queuing, the queue should be empty at this point */
	WARN_ON(in_index != ((readl(port_mmio + EDMA_REQ_Q_OUT_PTR_OFS)
		>> EDMA_REQ_Q_PTR_SHIFT) & MV_MAX_Q_DEPTH_MASK));

	pp->req_idx++;

	in_index = (pp->req_idx & MV_MAX_Q_DEPTH_MASK) << EDMA_REQ_Q_PTR_SHIFT;

	/* and write the request in pointer to kick the EDMA to life */
	writelfl((pp->crqb_dma & EDMA_REQ_Q_BASE_LO_MASK) | in_index,
		 port_mmio + EDMA_REQ_Q_IN_PTR_OFS);

	return 0;
}
/**
 * mv_err_intr - Handle error interrupts on the port
 * @ap: ATA channel to manipulate
 * @qc: affected command, if any
 *
 * In most cases, just clear the interrupt and move on.  However,
 * some cases require an eDMA reset, which is done right before
 * the COMRESET in mv_phy_reset().  The SERR case requires a
 * clear of pending errors in the SATA SERROR register.  Finally,
 * if the port disabled DMA, update our cached copy to match.
 *
 * LOCKING:
 *      Inherited from caller.
 */
static void mv_err_intr(struct ata_port *ap, struct ata_queued_cmd *qc)
{
	void __iomem *port_mmio = mv_ap_base(ap);
	u32 edma_err_cause, eh_freeze_mask, serr = 0;
	struct mv_port_priv *pp = ap->private_data;
	struct mv_host_priv *hpriv = ap->host->private_data;
	unsigned int edma_enabled = (pp->pp_flags & MV_PP_FLAG_EDMA_EN);
	unsigned int action = 0, err_mask = 0;
	struct ata_eh_info *ehi = &ap->link.eh_info;

	ata_ehi_clear_desc(ehi);

	if (!edma_enabled) {
		/* just a guess: do we need to do this? should we
		 * expand this, and do it in all cases?
		 */
		sata_scr_read(&ap->link, SCR_ERROR, &serr);
		sata_scr_write_flush(&ap->link, SCR_ERROR, serr);
	}

	edma_err_cause = readl(port_mmio + EDMA_ERR_IRQ_CAUSE_OFS);

	ata_ehi_push_desc(ehi, "edma_err 0x%08x", edma_err_cause);

	/*
	 * all generations share these EDMA error cause bits
	 */
	if (edma_err_cause & EDMA_ERR_DEV)
		err_mask |= AC_ERR_DEV;
	if (edma_err_cause & (EDMA_ERR_D_PAR | EDMA_ERR_PRD_PAR |
			EDMA_ERR_CRQB_PAR | EDMA_ERR_CRPB_PAR |
			EDMA_ERR_INTRL_PAR)) {
		err_mask |= AC_ERR_ATA_BUS;
		action |= ATA_EH_HARDRESET;
		ata_ehi_push_desc(ehi, "parity error");
	}
	if (edma_err_cause & (EDMA_ERR_DEV_DCON | EDMA_ERR_DEV_CON)) {
		ata_ehi_hotplugged(ehi);
		ata_ehi_push_desc(ehi, edma_err_cause & EDMA_ERR_DEV_DCON ?
			"dev disconnect" : "dev connect");
		action |= ATA_EH_HARDRESET;
	}

	if (IS_GEN_I(hpriv)) {
		eh_freeze_mask = EDMA_EH_FREEZE_5;

		if (edma_err_cause & EDMA_ERR_SELF_DIS_5) {
			struct mv_port_priv *pp	= ap->private_data;
			pp->pp_flags &= ~MV_PP_FLAG_EDMA_EN;
			ata_ehi_push_desc(ehi, "EDMA self-disable");
		}
	} else {
		eh_freeze_mask = EDMA_EH_FREEZE;

		if (edma_err_cause & EDMA_ERR_SELF_DIS) {
			struct mv_port_priv *pp	= ap->private_data;
			pp->pp_flags &= ~MV_PP_FLAG_EDMA_EN;
			ata_ehi_push_desc(ehi, "EDMA self-disable");
		}

		if (edma_err_cause & EDMA_ERR_SERR) {
			sata_scr_read(&ap->link, SCR_ERROR, &serr);
			sata_scr_write_flush(&ap->link, SCR_ERROR, serr);
			err_mask = AC_ERR_ATA_BUS;
			action |= ATA_EH_HARDRESET;
		}
	}

	/* Clear EDMA now that SERR cleanup done */
	writelfl(~edma_err_cause, port_mmio + EDMA_ERR_IRQ_CAUSE_OFS);

	if (!err_mask) {
		err_mask = AC_ERR_OTHER;
		action |= ATA_EH_HARDRESET;
	}

	ehi->serror |= serr;
	ehi->action |= action;

	if (qc)
		qc->err_mask |= err_mask;
	else
		ehi->err_mask |= err_mask;

	if (edma_err_cause & eh_freeze_mask)
		ata_port_freeze(ap);
	else
		ata_port_abort(ap);
}
static void mv_intr_pio(struct ata_port *ap)
{
	struct ata_queued_cmd *qc;
	u8 ata_status;

	/* ignore spurious intr if drive still BUSY */
	ata_status = readb(ap->ioaddr.status_addr);
	if (unlikely(ata_status & ATA_BUSY))
		return;

	/* get active ATA command */
	qc = ata_qc_from_tag(ap, ap->link.active_tag);
	if (unlikely(!qc))			/* no active tag */
		return;
	if (qc->tf.flags & ATA_TFLAG_POLLING)	/* polling; we don't own qc */
		return;

	/* and finally, complete the ATA command */
	qc->err_mask |= ac_err_mask(ata_status);
	ata_qc_complete(qc);
}
static void mv_intr_edma(struct ata_port *ap)
{
	void __iomem *port_mmio = mv_ap_base(ap);
	struct mv_host_priv *hpriv = ap->host->private_data;
	struct mv_port_priv *pp = ap->private_data;
	struct ata_queued_cmd *qc;
	u32 out_index, in_index;
	bool work_done = false;

	/* get h/w response queue pointer */
	in_index = (readl(port_mmio + EDMA_RSP_Q_IN_PTR_OFS)
			>> EDMA_RSP_Q_PTR_SHIFT) & MV_MAX_Q_DEPTH_MASK;

	while (1) {
		u16 status;
		unsigned int tag;

		/* get s/w response queue last-read pointer, and compare */
		out_index = pp->resp_idx & MV_MAX_Q_DEPTH_MASK;
		if (in_index == out_index)
			break;

		/* 50xx: get active ATA command */
		if (IS_GEN_I(hpriv))
			tag = ap->link.active_tag;

		/* Gen II/IIE: get active ATA command via tag, to enable
		 * support for queueing.  this works transparently for
		 * queued and non-queued modes.
		 */
		else if (IS_GEN_II(hpriv))
			tag = (le16_to_cpu(pp->crpb[out_index].id)
				>> CRPB_IOID_SHIFT_6) & 0x3f;

		else /* IS_GEN_IIE */
			tag = (le16_to_cpu(pp->crpb[out_index].id)
				>> CRPB_IOID_SHIFT_7) & 0x3f;

		qc = ata_qc_from_tag(ap, tag);

		/* lower 8 bits of status are EDMA_ERR_IRQ_CAUSE_OFS
		 * bits (WARNING: might not necessarily be associated
		 * with this command), which -should- be clear
		 * if all is well
		 */
		status = le16_to_cpu(pp->crpb[out_index].flags);
		if (unlikely(status & 0xff)) {
			mv_err_intr(ap, qc);
			return;
		}

		/* and finally, complete the ATA command */
		if (qc) {
			qc->err_mask |=
				ac_err_mask(status >> CRPB_FLAG_STATUS_SHIFT);
			ata_qc_complete(qc);
		}

		/* advance software response queue pointer, to
		 * indicate (after the loop completes) to hardware
		 * that we have consumed a response queue entry.
		 */
		work_done = true;
		pp->resp_idx++;
	}

	if (work_done)
		writelfl((pp->crpb_dma & EDMA_RSP_Q_BASE_LO_MASK) |
			 (out_index << EDMA_RSP_Q_PTR_SHIFT),
			 port_mmio + EDMA_RSP_Q_OUT_PTR_OFS);
}
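/* Note the producer/consumer protocol above: the hardware advances the
 * response queue IN pointer as it posts CRPBs, the driver walks
 * resp_idx until it catches up, and only then writes the OUT pointer
 * back, acknowledging all consumed responses with one register update.
 */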
/**
 * mv_host_intr - Handle all interrupts on the given host controller
 * @host: host specific structure
 * @relevant: port error bits relevant to this host controller
 * @hc: which host controller we're to look at
 *
 * Read then write clear the HC interrupt status then walk each
 * port connected to the HC and see if it needs servicing.  Port
 * success ints are reported in the HC interrupt status reg, the
 * port error ints are reported in the higher level main
 * interrupt status register and thus are passed in via the
 * 'relevant' argument.
 *
 * LOCKING:
 *      Inherited from caller.
 */
static void mv_host_intr(struct ata_host *host, u32 relevant, unsigned int hc)
{
	void __iomem *mmio = host->iomap[MV_PRIMARY_BAR];
	void __iomem *hc_mmio = mv_hc_base(mmio, hc);
	u32 hc_irq_cause;
	int port, port0;

	if (hc == 0)
		port0 = 0;
	else
		port0 = MV_PORTS_PER_HC;

	/* we'll need the HC success int register in most cases */
	hc_irq_cause = readl(hc_mmio + HC_IRQ_CAUSE_OFS);
	if (!hc_irq_cause)
		return;

	writelfl(~hc_irq_cause, hc_mmio + HC_IRQ_CAUSE_OFS);

	VPRINTK("ENTER, hc%u relevant=0x%08x HC IRQ cause=0x%08x\n",
		hc, relevant, hc_irq_cause);

	for (port = port0; port < port0 + MV_PORTS_PER_HC; port++) {
		struct ata_port *ap = host->ports[port];
		struct mv_port_priv *pp = ap->private_data;
		int have_err_bits, hard_port, shift;

		if ((!ap) || (ap->flags & ATA_FLAG_DISABLED))
			continue;

		shift = port << 1;		/* (port * 2) */
		if (port >= MV_PORTS_PER_HC) {
			shift++;	/* skip bit 8 in the HC Main IRQ reg */
		}
		have_err_bits = ((PORT0_ERR << shift) & relevant);

		if (unlikely(have_err_bits)) {
			struct ata_queued_cmd *qc;

			qc = ata_qc_from_tag(ap, ap->link.active_tag);
			if (qc && (qc->tf.flags & ATA_TFLAG_POLLING))
				continue;

			mv_err_intr(ap, qc);
			continue;
		}

		hard_port = mv_hardport_from_port(port); /* range 0..3 */

		if (pp->pp_flags & MV_PP_FLAG_EDMA_EN) {
			if ((CRPB_DMA_DONE << hard_port) & hc_irq_cause)
				mv_intr_edma(ap);
		} else {
			if ((DEV_IRQ << hard_port) & hc_irq_cause)
				mv_intr_pio(ap);
		}
	}
	VPRINTK("EXIT\n");
}
static void mv_pci_error(struct ata_host *host, void __iomem *mmio)
{
	struct mv_host_priv *hpriv = host->private_data;
	struct ata_port *ap;
	struct ata_queued_cmd *qc;
	struct ata_eh_info *ehi;
	unsigned int i, err_mask, printed = 0;
	u32 err_cause;

	err_cause = readl(mmio + hpriv->irq_cause_ofs);

	dev_printk(KERN_ERR, host->dev, "PCI ERROR; PCI IRQ cause=0x%08x\n",
		   err_cause);

	DPRINTK("All regs @ PCI error\n");
	mv_dump_all_regs(mmio, -1, to_pci_dev(host->dev));

	writelfl(0, mmio + hpriv->irq_cause_ofs);

	for (i = 0; i < host->n_ports; i++) {
		ap = host->ports[i];
		if (!ata_link_offline(&ap->link)) {
			ehi = &ap->link.eh_info;
			ata_ehi_clear_desc(ehi);
			if (!printed++)
				ata_ehi_push_desc(ehi,
					"PCI err cause 0x%08x", err_cause);
			err_mask = AC_ERR_HOST_BUS;
			ehi->action = ATA_EH_HARDRESET;
			qc = ata_qc_from_tag(ap, ap->link.active_tag);
			if (qc)
				qc->err_mask |= err_mask;
			else
				ehi->err_mask |= err_mask;

			ata_port_freeze(ap);
		}
	}
}
/**
 * mv_interrupt - Main interrupt event handler
 * @irq: unused
 * @dev_instance: private data; in this case the host structure
 *
 * Read the read only register to determine if any host
 * controllers have pending interrupts.  If so, call lower level
 * routine to handle.  Also check for PCI errors which are only
 * reported here.
 *
 * LOCKING:
 *      This routine holds the host lock while processing pending
 *      interrupts.
 */
static irqreturn_t mv_interrupt(int irq, void *dev_instance)
{
	struct ata_host *host = dev_instance;
	unsigned int hc, handled = 0, n_hcs;
	void __iomem *mmio = host->iomap[MV_PRIMARY_BAR];
	u32 irq_stat, irq_mask;

	spin_lock(&host->lock);
	irq_stat = readl(mmio + HC_MAIN_IRQ_CAUSE_OFS);
	irq_mask = readl(mmio + HC_MAIN_IRQ_MASK_OFS);

	/* check the cases where we either have nothing pending or have read
	 * a bogus register value which can indicate HW removal or PCI fault
	 */
	if (!(irq_stat & irq_mask) || (0xffffffffU == irq_stat))
		goto out_unlock;

	n_hcs = mv_get_hc_count(host->ports[0]->flags);

	if (unlikely(irq_stat & PCI_ERR)) {
		mv_pci_error(host, mmio);
		handled = 1;
		goto out_unlock;	/* skip all other HC irq handling */
	}

	for (hc = 0; hc < n_hcs; hc++) {
		u32 relevant = irq_stat & (HC0_IRQ_PEND << (hc * HC_SHIFT));
		if (relevant) {
			mv_host_intr(host, relevant, hc);
			handled = 1;
		}
	}

out_unlock:
	spin_unlock(&host->lock);

	return IRQ_RETVAL(handled);
}
static void __iomem *mv5_phy_base(void __iomem *mmio, unsigned int port)
{
	void __iomem *hc_mmio = mv_hc_base_from_port(mmio, port);
	unsigned long ofs = (mv_hardport_from_port(port) + 1) * 0x100UL;

	return hc_mmio + ofs;
}
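/* On Gen I parts the per-port PHY/SCR block lives inside the HC
 * register space at 0x100 per hard port, offset by one: e.g. hard
 * port 2 resolves to hc_mmio + 0x300 (illustrative).
 */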
static unsigned int mv5_scr_offset(unsigned int sc_reg_in)
{
	unsigned int ofs;

	switch (sc_reg_in) {
	case SCR_STATUS:
	case SCR_ERROR:
	case SCR_CONTROL:
		ofs = sc_reg_in * sizeof(u32);
		break;
	default:
		ofs = 0xffffffffU;
		break;
	}
	return ofs;
}

static int mv5_scr_read(struct ata_port *ap, unsigned int sc_reg_in, u32 *val)
{
	void __iomem *mmio = ap->host->iomap[MV_PRIMARY_BAR];
	void __iomem *addr = mv5_phy_base(mmio, ap->port_no);
	unsigned int ofs = mv5_scr_offset(sc_reg_in);

	if (ofs != 0xffffffffU) {
		*val = readl(addr + ofs);
		return 0;
	} else
		return -EINVAL;
}

static int mv5_scr_write(struct ata_port *ap, unsigned int sc_reg_in, u32 val)
{
	void __iomem *mmio = ap->host->iomap[MV_PRIMARY_BAR];
	void __iomem *addr = mv5_phy_base(mmio, ap->port_no);
	unsigned int ofs = mv5_scr_offset(sc_reg_in);

	if (ofs != 0xffffffffU) {
		writelfl(val, addr + ofs);
		return 0;
	} else
		return -EINVAL;
}
static void mv5_reset_bus(struct pci_dev *pdev, void __iomem *mmio)
{
	int early_5080;

	early_5080 = (pdev->device == 0x5080) && (pdev->revision == 0);

	if (!early_5080) {
		u32 tmp = readl(mmio + MV_PCI_EXP_ROM_BAR_CTL);
		tmp |= (1 << 0);
		writel(tmp, mmio + MV_PCI_EXP_ROM_BAR_CTL);
	}

	mv_reset_pci_bus(pdev, mmio);
}
static void mv5_reset_flash(struct mv_host_priv *hpriv, void __iomem *mmio)
{
	writel(0x0fcfffff, mmio + MV_FLASH_CTL);
}

static void mv5_read_preamp(struct mv_host_priv *hpriv, int idx,
			    void __iomem *mmio)
{
	void __iomem *phy_mmio = mv5_phy_base(mmio, idx);
	u32 tmp;

	tmp = readl(phy_mmio + MV5_PHY_MODE);

	hpriv->signal[idx].pre = tmp & 0x1800;	/* bits 12:11 */
	hpriv->signal[idx].amps = tmp & 0xe0;	/* bits 7:5 */
}
static void mv5_enable_leds(struct mv_host_priv *hpriv, void __iomem *mmio)
{
	u32 tmp;

	writel(0, mmio + MV_GPIO_PORT_CTL);

	/* FIXME: handle MV_HP_ERRATA_50XXB2 errata */

	tmp = readl(mmio + MV_PCI_EXP_ROM_BAR_CTL);
	tmp |= ~(1 << 0);
	writel(tmp, mmio + MV_PCI_EXP_ROM_BAR_CTL);
}
static void mv5_phy_errata(struct mv_host_priv *hpriv, void __iomem *mmio,
			   unsigned int port)
{
	void __iomem *phy_mmio = mv5_phy_base(mmio, port);
	const u32 mask = (1<<12) | (1<<11) | (1<<7) | (1<<6) | (1<<5);
	u32 tmp;
	int fix_apm_sq = (hpriv->hp_flags & MV_HP_ERRATA_50XXB0);

	if (fix_apm_sq) {
		tmp = readl(phy_mmio + MV5_LT_MODE);
		tmp |= (1 << 19);
		writel(tmp, phy_mmio + MV5_LT_MODE);

		tmp = readl(phy_mmio + MV5_PHY_CTL);
		tmp &= ~0x3;
		tmp |= 0x1;
		writel(tmp, phy_mmio + MV5_PHY_CTL);
	}

	tmp = readl(phy_mmio + MV5_PHY_MODE);
	tmp &= ~mask;
	tmp |= hpriv->signal[port].pre;
	tmp |= hpriv->signal[port].amps;
	writel(tmp, phy_mmio + MV5_PHY_MODE);
}
#define ZERO(reg) writel(0, port_mmio + (reg))
static void mv5_reset_hc_port(struct mv_host_priv *hpriv, void __iomem *mmio,
			      unsigned int port)
{
	void __iomem *port_mmio = mv_port_base(mmio, port);

	writelfl(EDMA_DS, port_mmio + EDMA_CMD_OFS);

	mv_channel_reset(hpriv, mmio, port);

	ZERO(0x028);	/* command */
	writel(0x11f, port_mmio + EDMA_CFG_OFS);
	ZERO(0x004);	/* timer */
	ZERO(0x008);	/* irq err cause */
	ZERO(0x00c);	/* irq err mask */
	ZERO(0x010);	/* rq bah */
	ZERO(0x014);	/* rq inp */
	ZERO(0x018);	/* rq outp */
	ZERO(0x01c);	/* respq bah */
	ZERO(0x024);	/* respq outp */
	ZERO(0x020);	/* respq inp */
	ZERO(0x02c);	/* test control */
	writel(0xbc, port_mmio + EDMA_IORDY_TMOUT);
}
#undef ZERO
#define ZERO(reg) writel(0, hc_mmio + (reg))
static void mv5_reset_one_hc(struct mv_host_priv *hpriv, void __iomem *mmio,
			     unsigned int hc)
{
	void __iomem *hc_mmio = mv_hc_base(mmio, hc);
	u32 tmp;

	ZERO(0x00c);
	ZERO(0x010);
	ZERO(0x014);
	ZERO(0x018);

	tmp = readl(hc_mmio + 0x20);
	tmp &= 0x1c1c1c1c;
	tmp |= 0x03030303;
	writel(tmp, hc_mmio + 0x20);
}
static int mv5_reset_hc(struct mv_host_priv *hpriv, void __iomem *mmio,
			unsigned int n_hc)
{
	unsigned int hc, port;

	for (hc = 0; hc < n_hc; hc++) {
		for (port = 0; port < MV_PORTS_PER_HC; port++)
			mv5_reset_hc_port(hpriv, mmio,
					  (hc * MV_PORTS_PER_HC) + port);

		mv5_reset_one_hc(hpriv, mmio, hc);
	}

	return 0;
}
#undef ZERO
#define ZERO(reg) writel(0, mmio + (reg))
static void mv_reset_pci_bus(struct pci_dev *pdev, void __iomem *mmio)
{
	struct ata_host *host = dev_get_drvdata(&pdev->dev);
	struct mv_host_priv *hpriv = host->private_data;
	u32 tmp;

	tmp = readl(mmio + MV_PCI_MODE);
	tmp &= 0xff00ffff;
	writel(tmp, mmio + MV_PCI_MODE);

	ZERO(MV_PCI_DISC_TIMER);
	ZERO(MV_PCI_MSI_TRIGGER);
	writel(0x000100ff, mmio + MV_PCI_XBAR_TMOUT);
	ZERO(HC_MAIN_IRQ_MASK_OFS);
	ZERO(MV_PCI_SERR_MASK);
	ZERO(hpriv->irq_cause_ofs);
	ZERO(hpriv->irq_mask_ofs);
	ZERO(MV_PCI_ERR_LOW_ADDRESS);
	ZERO(MV_PCI_ERR_HIGH_ADDRESS);
	ZERO(MV_PCI_ERR_ATTRIBUTE);
	ZERO(MV_PCI_ERR_COMMAND);
}
#undef ZERO
static void mv6_reset_flash(struct mv_host_priv *hpriv, void __iomem *mmio)
{
	u32 tmp;

	mv5_reset_flash(hpriv, mmio);

	tmp = readl(mmio + MV_GPIO_PORT_CTL);
	tmp &= 0x3;
	tmp |= (1 << 5) | (1 << 6);
	writel(tmp, mmio + MV_GPIO_PORT_CTL);
}
/**
 * mv6_reset_hc - Perform the 6xxx global soft reset
 * @mmio: base address of the HBA
 *
 * This routine only applies to 6xxx parts.
 *
 * LOCKING:
 *      Inherited from caller.
 */
static int mv6_reset_hc(struct mv_host_priv *hpriv, void __iomem *mmio,
			unsigned int n_hc)
{
	void __iomem *reg = mmio + PCI_MAIN_CMD_STS_OFS;
	int i, rc = 0;
	u32 t;

	/* Following procedure defined in PCI "main command and status
	 * register" table.
	 */
	t = readl(reg);
	writel(t | STOP_PCI_MASTER, reg);

	for (i = 0; i < 1000; i++) {
		udelay(1);
		t = readl(reg);
		if (PCI_MASTER_EMPTY & t)
			break;
	}
	if (!(PCI_MASTER_EMPTY & t)) {
		printk(KERN_ERR DRV_NAME ": PCI master won't flush\n");
		rc = 1;
		goto done;
	}

	/* set reset */
	i = 5;
	do {
		writel(t | GLOB_SFT_RST, reg);
		t = readl(reg);
		udelay(1000);
	} while (!(GLOB_SFT_RST & t) && (i-- > 0));

	if (!(GLOB_SFT_RST & t)) {
		printk(KERN_ERR DRV_NAME ": can't set global reset\n");
		rc = 1;
		goto done;
	}

	/* clear reset and *reenable the PCI master* (not mentioned in spec) */
	i = 5;
	do {
		writel(t & ~(GLOB_SFT_RST | STOP_PCI_MASTER), reg);
		t = readl(reg);
		udelay(1000);
	} while ((GLOB_SFT_RST & t) && (i-- > 0));

	if (GLOB_SFT_RST & t) {
		printk(KERN_ERR DRV_NAME ": can't clear global reset\n");
		rc = 1;
	}
done:
	return rc;
}
static void mv6_read_preamp(struct mv_host_priv *hpriv, int idx,
			    void __iomem *mmio)
{
	void __iomem *port_mmio;
	u32 tmp;

	tmp = readl(mmio + MV_RESET_CFG);
	if ((tmp & (1 << 0)) == 0) {
		hpriv->signal[idx].amps = 0x7 << 8;
		hpriv->signal[idx].pre = 0x1 << 5;
		return;
	}

	port_mmio = mv_port_base(mmio, idx);
	tmp = readl(port_mmio + PHY_MODE2);

	hpriv->signal[idx].amps = tmp & 0x700;	/* bits 10:8 */
	hpriv->signal[idx].pre = tmp & 0xe0;	/* bits 7:5 */
}
static void mv6_enable_leds(struct mv_host_priv *hpriv, void __iomem *mmio)
{
	writel(0x00000060, mmio + MV_GPIO_PORT_CTL);
}
static void mv6_phy_errata(struct mv_host_priv *hpriv, void __iomem *mmio,
			   unsigned int port)
{
	void __iomem *port_mmio = mv_port_base(mmio, port);

	u32 hp_flags = hpriv->hp_flags;
	int fix_phy_mode2 =
		hp_flags & (MV_HP_ERRATA_60X1B2 | MV_HP_ERRATA_60X1C0);
	int fix_phy_mode4 =
		hp_flags & (MV_HP_ERRATA_60X1B2 | MV_HP_ERRATA_60X1C0);
	u32 m2, tmp;

	if (fix_phy_mode2) {
		m2 = readl(port_mmio + PHY_MODE2);
		m2 &= ~(1 << 16);
		m2 |= (1 << 31);
		writel(m2, port_mmio + PHY_MODE2);

		udelay(200);

		m2 = readl(port_mmio + PHY_MODE2);
		m2 &= ~((1 << 16) | (1 << 31));
		writel(m2, port_mmio + PHY_MODE2);

		udelay(200);
	}

	/* who knows what this magic does */
	tmp = readl(port_mmio + PHY_MODE3);
	tmp &= ~0x7F800000;
	tmp |= 0x2A800000;
	writel(tmp, port_mmio + PHY_MODE3);

	if (fix_phy_mode4) {
		u32 m4;

		m4 = readl(port_mmio + PHY_MODE4);

		if (hp_flags & MV_HP_ERRATA_60X1B2)
			tmp = readl(port_mmio + 0x310);

		m4 = (m4 & ~(1 << 1)) | (1 << 0);

		writel(m4, port_mmio + PHY_MODE4);

		if (hp_flags & MV_HP_ERRATA_60X1B2)
			writel(tmp, port_mmio + 0x310);
	}

	/* Revert values of pre-emphasis and signal amps to the saved ones */
	m2 = readl(port_mmio + PHY_MODE2);

	m2 &= ~MV_M2_PREAMP_MASK;
	m2 |= hpriv->signal[port].amps;
	m2 |= hpriv->signal[port].pre;
	m2 &= ~(1 << 16);

	/* according to mvSata 3.6.1, some IIE values are fixed */
	if (IS_GEN_IIE(hpriv)) {
		m2 &= ~0xC30FF01F;
		m2 |= 0x0000900F;
	}

	writel(m2, port_mmio + PHY_MODE2);
}
static void mv_channel_reset(struct mv_host_priv *hpriv, void __iomem *mmio,
			     unsigned int port_no)
{
	void __iomem *port_mmio = mv_port_base(mmio, port_no);

	writelfl(ATA_RST, port_mmio + EDMA_CMD_OFS);

	if (IS_GEN_II(hpriv)) {
		u32 ifctl = readl(port_mmio + SATA_INTERFACE_CTL);
		ifctl |= (1 << 7);		/* enable gen2i speed */
		ifctl = (ifctl & 0xfff) | 0x9b1000; /* from chip spec */
		writelfl(ifctl, port_mmio + SATA_INTERFACE_CTL);
	}

	udelay(25);		/* allow reset propagation */

	/* Spec never mentions clearing the bit.  Marvell's driver does
	 * clear the bit, however.
	 */
	writelfl(0, port_mmio + EDMA_CMD_OFS);

	hpriv->ops->phy_errata(hpriv, mmio, port_no);

	if (IS_GEN_I(hpriv))
		mdelay(1);
}
/**
 * mv_phy_reset - Perform eDMA reset followed by COMRESET
 * @ap: ATA channel to manipulate
 *
 * Part of this is taken from __sata_phy_reset and modified to
 * not sleep since this routine gets called from interrupt level.
 *
 * LOCKING:
 *      Inherited from caller.  This is coded to be safe to call at
 *      interrupt level, i.e. it does not sleep.
 */
static void mv_phy_reset(struct ata_port *ap, unsigned int *class,
			 unsigned long deadline)
{
	struct mv_port_priv *pp	= ap->private_data;
	struct mv_host_priv *hpriv = ap->host->private_data;
	void __iomem *port_mmio = mv_ap_base(ap);
	int retry = 5;
	u32 sstatus;

	VPRINTK("ENTER, port %u, mmio 0x%p\n", ap->port_no, port_mmio);

#ifdef DEBUG
	{
		u32 sstatus, serror, scontrol;

		mv_scr_read(ap, SCR_STATUS, &sstatus);
		mv_scr_read(ap, SCR_ERROR, &serror);
		mv_scr_read(ap, SCR_CONTROL, &scontrol);
		DPRINTK("S-regs after ATA_RST: SStat 0x%08x SErr 0x%08x "
			"SCtrl 0x%08x\n", sstatus, serror, scontrol);
	}
#endif

	/* Issue COMRESET via SControl */
comreset_retry:
	sata_scr_write_flush(&ap->link, SCR_CONTROL, 0x301);
	msleep(1);

	sata_scr_write_flush(&ap->link, SCR_CONTROL, 0x300);
	msleep(20);

	do {
		sata_scr_read(&ap->link, SCR_STATUS, &sstatus);
		if (((sstatus & 0x3) == 3) || ((sstatus & 0x3) == 0))
			break;
		msleep(1);
	} while (time_before(jiffies, deadline));

	/* work around errata */
	if (IS_GEN_II(hpriv) &&
	    (sstatus != 0x0) && (sstatus != 0x113) && (sstatus != 0x123) &&
	    (retry-- > 0))
		goto comreset_retry;

#ifdef DEBUG
	{
		u32 sstatus, serror, scontrol;

		mv_scr_read(ap, SCR_STATUS, &sstatus);
		mv_scr_read(ap, SCR_ERROR, &serror);
		mv_scr_read(ap, SCR_CONTROL, &scontrol);
		DPRINTK("S-regs after PHY wake: SStat 0x%08x SErr 0x%08x "
			"SCtrl 0x%08x\n", sstatus, serror, scontrol);
	}
#endif

	if (ata_link_offline(&ap->link)) {
		*class = ATA_DEV_NONE;
		return;
	}

	/* even after SStatus reflects that device is ready,
	 * it seems to take a while for link to be fully
	 * established (and thus Status no longer 0x80/0x7F),
	 * so we poll a bit for that, here.
	 */
	do {
		u8 drv_stat = ata_check_status(ap);
		if ((drv_stat != 0x80) && (drv_stat != 0x7f))
			break;
		msleep(500);
		if (time_after(jiffies, deadline))
			break;
	} while (1);

	/* FIXME: if we passed the deadline, the following
	 * code probably produces an invalid result
	 */

	/* finally, read device signature from TF registers */
	*class = ata_dev_try_classify(ap->link.device, 1, NULL);

	writelfl(0, port_mmio + EDMA_ERR_IRQ_CAUSE_OFS);

	WARN_ON(pp->pp_flags & MV_PP_FLAG_EDMA_EN);

	VPRINTK("EXIT\n");
}
static int mv_prereset(struct ata_link *link, unsigned long deadline)
{
	struct ata_port *ap = link->ap;
	struct mv_port_priv *pp = ap->private_data;
	struct ata_eh_context *ehc = &link->eh_context;
	int rc;

	rc = mv_stop_dma(ap);
	if (rc)
		ehc->i.action |= ATA_EH_HARDRESET;

	if (!(pp->pp_flags & MV_PP_FLAG_HAD_A_RESET)) {
		pp->pp_flags |= MV_PP_FLAG_HAD_A_RESET;
		ehc->i.action |= ATA_EH_HARDRESET;
	}

	/* if we're about to do hardreset, nothing more to do */
	if (ehc->i.action & ATA_EH_HARDRESET)
		return 0;

	if (ata_link_online(link))
		rc = ata_wait_ready(ap, deadline);
	else
		rc = -ENODEV;

	return rc;
}
static int mv_hardreset(struct ata_link *link, unsigned int *class,
			unsigned long deadline)
{
	struct ata_port *ap = link->ap;
	struct mv_host_priv *hpriv = ap->host->private_data;
	void __iomem *mmio = ap->host->iomap[MV_PRIMARY_BAR];

	mv_stop_dma(ap);

	mv_channel_reset(hpriv, mmio, ap->port_no);

	mv_phy_reset(ap, class, deadline);

	return 0;
}
static void mv_postreset(struct ata_link *link, unsigned int *classes)
{
	struct ata_port *ap = link->ap;
	u32 serr;

	/* print link status */
	sata_print_link_status(link);

	/* clear SError */
	sata_scr_read(link, SCR_ERROR, &serr);
	sata_scr_write_flush(link, SCR_ERROR, serr);

	/* bail out if no device is present */
	if (classes[0] == ATA_DEV_NONE && classes[1] == ATA_DEV_NONE) {
		DPRINTK("EXIT, no device\n");
		return;
	}

	/* set up device control */
	iowrite8(ap->ctl, ap->ioaddr.ctl_addr);
}
static void mv_error_handler(struct ata_port *ap)
{
	ata_do_eh(ap, mv_prereset, ata_std_softreset,
		  mv_hardreset, mv_postreset);
}

static void mv_post_int_cmd(struct ata_queued_cmd *qc)
{
	mv_stop_dma(qc->ap);
}
static void mv_eh_freeze(struct ata_port *ap)
{
	void __iomem *mmio = ap->host->iomap[MV_PRIMARY_BAR];
	unsigned int hc = (ap->port_no > 3) ? 1 : 0;
	unsigned int shift;
	u32 tmp, mask;

	/* FIXME: handle coalescing completion events properly */

	shift = ap->port_no * 2;
	if (hc > 0)
		shift++;

	mask = 0x3 << shift;

	/* disable assertion of portN err, done events */
	tmp = readl(mmio + HC_MAIN_IRQ_MASK_OFS);
	writelfl(tmp & ~mask, mmio + HC_MAIN_IRQ_MASK_OFS);
}
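
/*
 * Editorial note on the shift arithmetic in mv_eh_freeze()/mv_eh_thaw():
 * each port owns two adjacent bits (done, err) in the main IRQ cause/mask
 * register, so port N maps to bits [2N+1:2N].  The ports behind the second
 * SATA host controller (ports 4-7) sit one additional bit higher because
 * the HC0 coalescing-done bit is wedged between the two per-HC bit groups;
 * that is what the "if (hc > 0) shift++" accounts for.
 */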
static void mv_eh_thaw(struct ata_port *ap)
{
	void __iomem *mmio = ap->host->iomap[MV_PRIMARY_BAR];
	unsigned int hc = (ap->port_no > 3) ? 1 : 0;
	void __iomem *hc_mmio = mv_hc_base(mmio, hc);
	void __iomem *port_mmio = mv_ap_base(ap);
	u32 tmp, mask, hc_irq_cause;
	unsigned int shift, hc_port_no = ap->port_no;

	/* FIXME: handle coalescing completion events properly */

	shift = ap->port_no * 2;
	if (hc > 0) {
		shift++;
		hc_port_no -= 4;
	}

	mask = 0x3 << shift;

	/* clear EDMA errors on this port */
	writel(0, port_mmio + EDMA_ERR_IRQ_CAUSE_OFS);

	/* clear pending irq events */
	hc_irq_cause = readl(hc_mmio + HC_IRQ_CAUSE_OFS);
	hc_irq_cause &= ~(1 << hc_port_no);	/* clear CRPB-done */
	hc_irq_cause &= ~(1 << (hc_port_no + 8)); /* clear Device int */
	writel(hc_irq_cause, hc_mmio + HC_IRQ_CAUSE_OFS);

	/* enable assertion of portN err, done events */
	tmp = readl(mmio + HC_MAIN_IRQ_MASK_OFS);
	writelfl(tmp | mask, mmio + HC_MAIN_IRQ_MASK_OFS);
}
/**
 *	mv_port_init - Perform some early initialization on a single port.
 *	@port: libata data structure storing shadow register addresses
 *	@port_mmio: base address of the port
 *
 *	Initialize shadow register mmio addresses, clear outstanding
 *	interrupts on the port, and unmask interrupts for the future
 *	start of the port.
 *
 *	LOCKING:
 *	Inherited from caller.
 */
static void mv_port_init(struct ata_ioports *port, void __iomem *port_mmio)
{
	void __iomem *shd_base = port_mmio + SHD_BLK_OFS;
	unsigned serr_ofs;

	/* PIO related setup
	 */
	port->data_addr = shd_base + (sizeof(u32) * ATA_REG_DATA);
	port->error_addr =
		port->feature_addr = shd_base + (sizeof(u32) * ATA_REG_ERR);
	port->nsect_addr = shd_base + (sizeof(u32) * ATA_REG_NSECT);
	port->lbal_addr = shd_base + (sizeof(u32) * ATA_REG_LBAL);
	port->lbam_addr = shd_base + (sizeof(u32) * ATA_REG_LBAM);
	port->lbah_addr = shd_base + (sizeof(u32) * ATA_REG_LBAH);
	port->device_addr = shd_base + (sizeof(u32) * ATA_REG_DEVICE);
	port->status_addr =
		port->command_addr = shd_base + (sizeof(u32) * ATA_REG_STATUS);
	/* special case: control/altstatus doesn't have ATA_REG_ address */
	port->altstatus_addr = port->ctl_addr = shd_base + SHD_CTL_AST_OFS;

	/* unused: */
	port->cmd_addr = port->bmdma_addr = port->scr_addr = NULL;

	/* Clear any currently outstanding port interrupt conditions */
	serr_ofs = mv_scr_offset(SCR_ERROR);
	writelfl(readl(port_mmio + serr_ofs), port_mmio + serr_ofs);
	writelfl(0, port_mmio + EDMA_ERR_IRQ_CAUSE_OFS);

	/* unmask all non-transient EDMA error interrupts */
	writelfl(~EDMA_ERR_IRQ_TRANSIENT, port_mmio + EDMA_ERR_IRQ_MASK_OFS);

	VPRINTK("EDMA cfg=0x%08x EDMA IRQ err cause/mask=0x%08x/0x%08x\n",
		readl(port_mmio + EDMA_CFG_OFS),
		readl(port_mmio + EDMA_ERR_IRQ_CAUSE_OFS),
		readl(port_mmio + EDMA_ERR_IRQ_MASK_OFS));
}
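
/*
 * Worked example of the address math above, assuming this file's
 * SHD_BLK_OFS of 0x100: the taskfile shadow registers are exposed as
 * 32-bit words, so register ATA_REG_STATUS (7) lands at
 * port_mmio + 0x100 + 4 * 7 = port_mmio + 0x11c, and ATA_REG_DATA (0)
 * at port_mmio + 0x100 exactly.
 */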
static int mv_chip_id(struct ata_host *host, unsigned int board_idx)
{
	struct pci_dev *pdev = to_pci_dev(host->dev);
	struct mv_host_priv *hpriv = host->private_data;
	u32 hp_flags = hpriv->hp_flags;

	switch (board_idx) {
	case chip_5080:
		hpriv->ops = &mv5xxx_ops;
		hp_flags |= MV_HP_GEN_I;

		switch (pdev->revision) {
		case 0x1:
			hp_flags |= MV_HP_ERRATA_50XXB0;
			break;
		case 0x3:
			hp_flags |= MV_HP_ERRATA_50XXB2;
			break;
		default:
			dev_printk(KERN_WARNING, &pdev->dev,
				   "Applying 50XXB2 workarounds to unknown rev\n");
			hp_flags |= MV_HP_ERRATA_50XXB2;
			break;
		}
		break;
	case chip_504x:
	case chip_508x:
		hpriv->ops = &mv5xxx_ops;
		hp_flags |= MV_HP_GEN_I;

		switch (pdev->revision) {
		case 0x0:
			hp_flags |= MV_HP_ERRATA_50XXB0;
			break;
		case 0x3:
			hp_flags |= MV_HP_ERRATA_50XXB2;
			break;
		default:
			dev_printk(KERN_WARNING, &pdev->dev,
				   "Applying B2 workarounds to unknown rev\n");
			hp_flags |= MV_HP_ERRATA_50XXB2;
			break;
		}
		break;
	case chip_604x:
	case chip_608x:
		hpriv->ops = &mv6xxx_ops;
		hp_flags |= MV_HP_GEN_II;

		switch (pdev->revision) {
		case 0x7:
			hp_flags |= MV_HP_ERRATA_60X1B2;
			break;
		case 0x9:
			hp_flags |= MV_HP_ERRATA_60X1C0;
			break;
		default:
			dev_printk(KERN_WARNING, &pdev->dev,
				   "Applying B2 workarounds to unknown rev\n");
			hp_flags |= MV_HP_ERRATA_60X1B2;
			break;
		}
		break;
	case chip_7042:
		hp_flags |= MV_HP_PCIE;
		if (pdev->vendor == PCI_VENDOR_ID_TTI &&
		    (pdev->device == 0x2300 || pdev->device == 0x2310))
		{
			/*
			 * Highpoint RocketRAID PCIe 23xx series cards:
			 *
			 * Unconfigured drives are treated as "Legacy"
			 * by the BIOS, and it overwrites sector 8 with
			 * a "Lgcy" metadata block prior to Linux boot.
			 *
			 * Configured drives (RAID or JBOD) leave sector 8
			 * alone, but instead overwrite a high numbered
			 * sector for the RAID metadata.  This sector can
			 * be determined exactly, by truncating the physical
			 * drive capacity down to a 512 MiB boundary (the
			 * low 20 bits of the sector count are cleared).
			 *
			 * RAID metadata is at: (dev->n_sectors & ~0xfffff)
			 *
			 * Warn the user, lest they think we're just buggy.
			 */
			printk(KERN_WARNING DRV_NAME ": Highpoint RocketRAID"
				" BIOS CORRUPTS DATA on all attached drives,"
				" regardless of if/how they are configured."
				" BEWARE!\n");
			printk(KERN_WARNING DRV_NAME ": For data safety, do not"
				" use sectors 8-9 on \"Legacy\" drives,"
				" and avoid the final two gigabytes on"
				" all RocketRAID BIOS initialized drives.\n");
		}
		/* fall through to the chip_6042 handling below */
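	/*
	 * Worked example of the metadata-sector rule above (illustrative
	 * numbers, not from any particular drive): a disk reporting
	 * n_sectors = 390721968 (~200 GB) gives
	 * 390721968 & ~0xfffff = 390070272, i.e. the capacity rounded
	 * down to a 0x100000-sector (512 MiB) boundary; that sector and
	 * those after it are where the RocketRAID BIOS keeps its RAID
	 * metadata.
	 */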
	case chip_6042:
		hpriv->ops = &mv6xxx_ops;
		hp_flags |= MV_HP_GEN_IIE;

		switch (pdev->revision) {
		case 0x0:
			hp_flags |= MV_HP_ERRATA_XX42A0;
			break;
		case 0x1:
			hp_flags |= MV_HP_ERRATA_60X1C0;
			break;
		default:
			dev_printk(KERN_WARNING, &pdev->dev,
				   "Applying 60X1C0 workarounds to unknown rev\n");
			hp_flags |= MV_HP_ERRATA_60X1C0;
			break;
		}
		break;
	default:
		dev_printk(KERN_ERR, &pdev->dev,
			   "BUG: invalid board index %u\n", board_idx);
		return 1;
	}

	hpriv->hp_flags = hp_flags;
	if (hp_flags & MV_HP_PCIE) {
		hpriv->irq_cause_ofs = PCIE_IRQ_CAUSE_OFS;
		hpriv->irq_mask_ofs  = PCIE_IRQ_MASK_OFS;
		hpriv->unmask_all_irqs = PCIE_UNMASK_ALL_IRQS;
	} else {
		hpriv->irq_cause_ofs = PCI_IRQ_CAUSE_OFS;
		hpriv->irq_mask_ofs  = PCI_IRQ_MASK_OFS;
		hpriv->unmask_all_irqs = PCI_UNMASK_ALL_IRQS;
	}

	return 0;
}
/**
 *	mv_init_host - Perform some early initialization of the host.
 *	@host: ATA host to initialize
 *	@board_idx: controller index
 *
 *	If possible, do an early global reset of the host.  Then do
 *	our port init and clear/unmask all/relevant host interrupts.
 *
 *	LOCKING:
 *	Inherited from caller.
 */
static int mv_init_host(struct ata_host *host, unsigned int board_idx)
{
	int rc = 0, n_hc, port, hc;
	struct pci_dev *pdev = to_pci_dev(host->dev);
	void __iomem *mmio = host->iomap[MV_PRIMARY_BAR];
	struct mv_host_priv *hpriv = host->private_data;

	/* global interrupt mask */
	writel(0, mmio + HC_MAIN_IRQ_MASK_OFS);

	rc = mv_chip_id(host, board_idx);
	if (rc)
		goto done;

	n_hc = mv_get_hc_count(host->ports[0]->flags);

	for (port = 0; port < host->n_ports; port++)
		hpriv->ops->read_preamp(hpriv, port, mmio);

	rc = hpriv->ops->reset_hc(hpriv, mmio, n_hc);
	if (rc)
		goto done;

	hpriv->ops->reset_flash(hpriv, mmio);
	hpriv->ops->reset_bus(pdev, mmio);
	hpriv->ops->enable_leds(hpriv, mmio);

	for (port = 0; port < host->n_ports; port++) {
		if (IS_GEN_II(hpriv)) {
			void __iomem *port_mmio = mv_port_base(mmio, port);

			u32 ifctl = readl(port_mmio + SATA_INTERFACE_CTL);
			ifctl |= (1 << 7);		/* enable gen2i speed */
			ifctl = (ifctl & 0xfff) | 0x9b1000; /* from chip spec */
			writelfl(ifctl, port_mmio + SATA_INTERFACE_CTL);
		}

		hpriv->ops->phy_errata(hpriv, mmio, port);
	}

	for (port = 0; port < host->n_ports; port++) {
		struct ata_port *ap = host->ports[port];
		void __iomem *port_mmio = mv_port_base(mmio, port);
		unsigned int offset = port_mmio - mmio;

		mv_port_init(&ap->ioaddr, port_mmio);

		ata_port_pbar_desc(ap, MV_PRIMARY_BAR, -1, "mmio");
		ata_port_pbar_desc(ap, MV_PRIMARY_BAR, offset, "port");
	}

	for (hc = 0; hc < n_hc; hc++) {
		void __iomem *hc_mmio = mv_hc_base(mmio, hc);

		VPRINTK("HC%i: HC config=0x%08x HC IRQ cause "
			"(before clear)=0x%08x\n", hc,
			readl(hc_mmio + HC_CFG_OFS),
			readl(hc_mmio + HC_IRQ_CAUSE_OFS));

		/* Clear any currently outstanding hc interrupt conditions */
		writelfl(0, hc_mmio + HC_IRQ_CAUSE_OFS);
	}

	/* Clear any currently outstanding host interrupt conditions */
	writelfl(0, mmio + hpriv->irq_cause_ofs);

	/* and unmask interrupt generation for host regs */
	writelfl(hpriv->unmask_all_irqs, mmio + hpriv->irq_mask_ofs);

	if (IS_GEN_I(hpriv))
		writelfl(~HC_MAIN_MASKED_IRQS_5, mmio + HC_MAIN_IRQ_MASK_OFS);
	else
		writelfl(~HC_MAIN_MASKED_IRQS, mmio + HC_MAIN_IRQ_MASK_OFS);

	VPRINTK("HC MAIN IRQ cause/mask=0x%08x/0x%08x "
		"PCI int cause/mask=0x%08x/0x%08x\n",
		readl(mmio + HC_MAIN_IRQ_CAUSE_OFS),
		readl(mmio + HC_MAIN_IRQ_MASK_OFS),
		readl(mmio + hpriv->irq_cause_ofs),
		readl(mmio + hpriv->irq_mask_ofs));

done:
	return rc;
}
/**
 *	mv_print_info - Dump key info to kernel log for perusal.
 *	@host: ATA host to print info about
 *
 *	FIXME: complete this.
 *
 *	LOCKING:
 *	Inherited from caller.
 */
static void mv_print_info(struct ata_host *host)
{
	struct pci_dev *pdev = to_pci_dev(host->dev);
	struct mv_host_priv *hpriv = host->private_data;
	u8 scc;
	const char *scc_s, *gen;

	/* Use the PCI class code to tell whether the chip presents
	 * itself as a SCSI or a RAID class device
	 */
	pci_read_config_byte(pdev, PCI_CLASS_DEVICE, &scc);
	if (scc == 0)
		scc_s = "SCSI";
	else if (scc == 0x01)
		scc_s = "RAID";
	else
		scc_s = "?";

	if (IS_GEN_I(hpriv))
		gen = "I";
	else if (IS_GEN_II(hpriv))
		gen = "II";
	else if (IS_GEN_IIE(hpriv))
		gen = "IIE";
	else
		gen = "?";

	dev_printk(KERN_INFO, &pdev->dev,
		   "Gen-%s %u slots %u ports %s mode IRQ via %s\n",
		   gen, (unsigned)MV_MAX_Q_DEPTH, host->n_ports,
		   scc_s, (MV_HP_FLAG_MSI & hpriv->hp_flags) ? "MSI" : "INTx");
}
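
/*
 * For reference, a typical line this produces at boot (values vary by
 * board; this sample assumes a 4-port Gen-II part using legacy INTx):
 *
 *	sata_mv 0000:02:00.0: Gen-II 32 slots 4 ports SCSI mode IRQ via INTx
 */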
/**
 *	mv_init_one - handle a positive probe of a Marvell host
 *	@pdev: PCI device found
 *	@ent: PCI device ID entry for the matched host
 *
 *	LOCKING:
 *	Inherited from caller.
 */
static int mv_init_one(struct pci_dev *pdev, const struct pci_device_id *ent)
{
	static int printed_version;
	unsigned int board_idx = (unsigned int)ent->driver_data;
	const struct ata_port_info *ppi[] = { &mv_port_info[board_idx], NULL };
	struct ata_host *host;
	struct mv_host_priv *hpriv;
	int n_ports, rc;

	if (!printed_version++)
		dev_printk(KERN_INFO, &pdev->dev, "version " DRV_VERSION "\n");

	/* allocate host */
	n_ports = mv_get_hc_count(ppi[0]->flags) * MV_PORTS_PER_HC;

	host = ata_host_alloc_pinfo(&pdev->dev, ppi, n_ports);
	hpriv = devm_kzalloc(&pdev->dev, sizeof(*hpriv), GFP_KERNEL);
	if (!host || !hpriv)
		return -ENOMEM;
	host->private_data = hpriv;

	/* acquire resources */
	rc = pcim_enable_device(pdev);
	if (rc)
		return rc;

	rc = pcim_iomap_regions(pdev, 1 << MV_PRIMARY_BAR, DRV_NAME);
	if (rc == -EBUSY)
		pcim_pin_device(pdev);
	if (rc)
		return rc;
	host->iomap = pcim_iomap_table(pdev);

	rc = pci_go_64(pdev);
	if (rc)
		return rc;

	/* initialize adapter */
	rc = mv_init_host(host, board_idx);
	if (rc)
		return rc;

	/* Enable interrupts */
	if (msi && pci_enable_msi(pdev))
		pci_intx(pdev, 1);

	mv_dump_pci_cfg(pdev, 0x68);
	mv_print_info(host);

	pci_set_master(pdev);
	pci_try_set_mwi(pdev);
	return ata_host_activate(host, pdev->irq, mv_interrupt, IRQF_SHARED,
				 IS_GEN_I(hpriv) ? &mv5_sht : &mv6_sht);
}
static int __init mv_init(void)
{
	return pci_register_driver(&mv_pci_driver);
}

static void __exit mv_exit(void)
{
	pci_unregister_driver(&mv_pci_driver);
}
MODULE_AUTHOR("Brett Russ");
MODULE_DESCRIPTION("SCSI low-level driver for Marvell SATA controllers");
MODULE_LICENSE("GPL");
MODULE_DEVICE_TABLE(pci, mv_pci_tbl);
MODULE_VERSION(DRV_VERSION);

module_param(msi, int, 0444);
MODULE_PARM_DESC(msi, "Enable use of PCI MSI (0=off, 1=on)");
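
/*
 * Usage note: the msi parameter defaults to off; loading the driver with
 * "modprobe sata_mv msi=1" asks it to try pci_enable_msi() and fall back
 * to legacy INTx if MSI setup fails (see mv_init_one() above).
 */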
module_init(mv_init);
module_exit(mv_exit);