/*
 * sata_mv.c - Marvell SATA support
 *
 * Copyright 2005: EMC Corporation, all rights reserved.
 * Copyright 2005 Red Hat, Inc.  All rights reserved.
 *
 * Please ALWAYS copy linux-ide@vger.kernel.org on emails.
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License as published by
 * the Free Software Foundation; version 2 of the License.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program; if not, write to the Free Software
 * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA  02111-1307  USA
 */
/*
  sata_mv TODO list:

  1) Needs a full errata audit for all chipsets.  I implemented most
  of the errata workarounds found in the Marvell vendor driver, but
  I distinctly remember a couple workarounds (one related to PCI-X)

  4) Add NCQ support (easy to intermediate, once new-EH support appears)

  5) Investigate problems with PCI Message Signalled Interrupts (MSI).

  6) Add port multiplier support (intermediate)

  8) Develop a low-power-consumption strategy, and implement it.

  9) [Experiment, low priority] See if ATAPI can be supported using
  "unknown FIS" or "vendor-specific FIS" support, or something creative
  like that.

  10) [Experiment, low priority] Investigate interrupt coalescing.
  Quite often, especially with PCI Message Signalled Interrupts (MSI),
  the overhead reduced by interrupt mitigation is not worth the
  latency cost.

  11) [Experiment, Marvell value added] Is it possible to use target
  mode to cross-connect two Linux boxes with Marvell cards?  If so,
  creating LibATA target mode support would be very interesting.

  Target mode, for those without docs, is the ability to directly
  connect two SATA controllers.

  13) Verify that 7042 is fully supported.  I only have a 6042.
*/
#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/pci.h>
#include <linux/init.h>
#include <linux/blkdev.h>
#include <linux/delay.h>
#include <linux/interrupt.h>
#include <linux/dma-mapping.h>
#include <linux/device.h>
#include <scsi/scsi_host.h>
#include <scsi/scsi_cmnd.h>
#include <scsi/scsi_device.h>
#include <linux/libata.h>

#define DRV_NAME	"sata_mv"
#define DRV_VERSION	"1.01"
enum {
	/* BAR's are enumerated in terms of pci_resource_start() terms */
	MV_PRIMARY_BAR		= 0,	/* offset 0x10: memory space */
	MV_IO_BAR		= 2,	/* offset 0x18: IO space */
	MV_MISC_BAR		= 3,	/* offset 0x1c: FLASH, NVRAM, SRAM */

	MV_MAJOR_REG_AREA_SZ	= 0x10000,	/* 64KB */
	MV_MINOR_REG_AREA_SZ	= 0x2000,	/* 8KB */

	MV_IRQ_COAL_REG_BASE	= 0x18000,	/* 6xxx part only */
	MV_IRQ_COAL_CAUSE		= (MV_IRQ_COAL_REG_BASE + 0x08),
	MV_IRQ_COAL_CAUSE_LO		= (MV_IRQ_COAL_REG_BASE + 0x88),
	MV_IRQ_COAL_CAUSE_HI		= (MV_IRQ_COAL_REG_BASE + 0x8c),
	MV_IRQ_COAL_THRESHOLD		= (MV_IRQ_COAL_REG_BASE + 0xcc),
	MV_IRQ_COAL_TIME_THRESHOLD	= (MV_IRQ_COAL_REG_BASE + 0xd0),

	MV_SATAHC0_REG_BASE	= 0x20000,
	MV_FLASH_CTL		= 0x1046c,
	MV_GPIO_PORT_CTL	= 0x104f0,
	MV_RESET_CFG		= 0x180d8,

	MV_PCI_REG_SZ		= MV_MAJOR_REG_AREA_SZ,
	MV_SATAHC_REG_SZ	= MV_MAJOR_REG_AREA_SZ,
	MV_SATAHC_ARBTR_REG_SZ	= MV_MINOR_REG_AREA_SZ,		/* arbiter */
	MV_PORT_REG_SZ		= MV_MINOR_REG_AREA_SZ,

	MV_MAX_Q_DEPTH_MASK	= MV_MAX_Q_DEPTH - 1,

	/* CRQB needs alignment on a 1KB boundary. Size == 1KB
	 * CRPB needs alignment on a 256B boundary. Size == 256B
	 * SG count of 176 leads to MV_PORT_PRIV_DMA_SZ == 4KB
	 * ePRD (SG) entries need alignment on a 16B boundary. Size == 16B
	 */
	MV_CRQB_Q_SZ		= (32 * MV_MAX_Q_DEPTH),
	MV_CRPB_Q_SZ		= (8 * MV_MAX_Q_DEPTH),
	MV_SG_TBL_SZ		= (16 * MV_MAX_SG_CT),
	MV_PORT_PRIV_DMA_SZ	= (MV_CRQB_Q_SZ + MV_CRPB_Q_SZ + MV_SG_TBL_SZ),
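	/* Editor's arithmetic sketch (assuming the elided values
	 * MV_MAX_Q_DEPTH == 32 and MV_MAX_SG_CT == 176, per the comment
	 * above): the per-port DMA chunk then works out to
	 *	MV_CRQB_Q_SZ = 32 * 32  = 1024 bytes (1KB CRQB ring)
	 *	MV_CRPB_Q_SZ =  8 * 32  =  256 bytes (256B CRPB ring)
	 *	MV_SG_TBL_SZ = 16 * 176 = 2816 bytes (ePRD table)
	 * for a total MV_PORT_PRIV_DMA_SZ of 4096 bytes == 4KB, matching
	 * the "SG count of 176 leads to ... 4KB" note.
	 */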
	/* == (port / MV_PORTS_PER_HC) to determine HC from 0-7 port */
	MV_PORT_HC_SHIFT	= 2,
	/* == (port % MV_PORTS_PER_HC) to determine hard port from 0-7 port */

	MV_FLAG_DUAL_HC		= (1 << 30),  /* two SATA Host Controllers */
	MV_FLAG_IRQ_COALESCE	= (1 << 29),  /* IRQ coalescing capability */
	MV_COMMON_FLAGS		= ATA_FLAG_SATA | ATA_FLAG_NO_LEGACY |
				  ATA_FLAG_MMIO | ATA_FLAG_NO_ATAPI |
				  ATA_FLAG_PIO_POLLING,
	MV_6XXX_FLAGS		= MV_FLAG_IRQ_COALESCE,

	CRQB_FLAG_READ		= (1 << 0),
	CRQB_IOID_SHIFT		= 6,	/* CRQB Gen-II/IIE IO Id shift */
	CRQB_HOSTQ_SHIFT	= 17,	/* CRQB Gen-II/IIE HostQueTag shift */
	CRQB_CMD_ADDR_SHIFT	= 8,
	CRQB_CMD_CS		= (0x2 << 11),
	CRQB_CMD_LAST		= (1 << 15),

	CRPB_FLAG_STATUS_SHIFT	= 8,
	CRPB_IOID_SHIFT_6	= 5,	/* CRPB Gen-II IO Id shift */
	CRPB_IOID_SHIFT_7	= 7,	/* CRPB Gen-IIE IO Id shift */

	EPRD_FLAG_END_OF_TBL	= (1 << 31),
	/* PCI interface registers */

	PCI_COMMAND_OFS		= 0xc00,

	PCI_MAIN_CMD_STS_OFS	= 0xd30,
	STOP_PCI_MASTER		= (1 << 2),
	PCI_MASTER_EMPTY	= (1 << 3),
	GLOB_SFT_RST		= (1 << 4),

	MV_PCI_EXP_ROM_BAR_CTL	= 0xd2c,
	MV_PCI_DISC_TIMER	= 0xd04,
	MV_PCI_MSI_TRIGGER	= 0xc38,
	MV_PCI_SERR_MASK	= 0xc28,
	MV_PCI_XBAR_TMOUT	= 0x1d04,
	MV_PCI_ERR_LOW_ADDRESS	= 0x1d40,
	MV_PCI_ERR_HIGH_ADDRESS	= 0x1d44,
	MV_PCI_ERR_ATTRIBUTE	= 0x1d48,
	MV_PCI_ERR_COMMAND	= 0x1d50,

	PCI_IRQ_CAUSE_OFS	= 0x1d58,
	PCI_IRQ_MASK_OFS	= 0x1d5c,
	PCI_UNMASK_ALL_IRQS	= 0x7fffff,	/* bits 22-0 */

	PCIE_IRQ_CAUSE_OFS	= 0x1900,
	PCIE_IRQ_MASK_OFS	= 0x1910,
	PCIE_UNMASK_ALL_IRQS	= 0x40a,	/* assorted bits */

	HC_MAIN_IRQ_CAUSE_OFS	= 0x1d60,
	HC_MAIN_IRQ_MASK_OFS	= 0x1d64,
	PORT0_ERR		= (1 << 0),	/* shift by port # */
	PORT0_DONE		= (1 << 1),	/* shift by port # */
	HC0_IRQ_PEND		= 0x1ff,	/* bits 0-8 = HC0's ports */
	HC_SHIFT		= 9,		/* bits 9-17 = HC1's ports */

	TRAN_LO_DONE		= (1 << 19),	/* 6xxx: IRQ coalescing */
	TRAN_HI_DONE		= (1 << 20),	/* 6xxx: IRQ coalescing */
	PORTS_0_3_COAL_DONE	= (1 << 8),
	PORTS_4_7_COAL_DONE	= (1 << 17),
	PORTS_0_7_COAL_DONE	= (1 << 21),	/* 6xxx: IRQ coalescing */
	GPIO_INT		= (1 << 22),
	SELF_INT		= (1 << 23),
	TWSI_INT		= (1 << 24),
	HC_MAIN_RSVD		= (0x7f << 25),	/* bits 31-25 */
	HC_MAIN_RSVD_5		= (0x1fff << 19), /* bits 31-19 */
	HC_MAIN_MASKED_IRQS	= (TRAN_LO_DONE | TRAN_HI_DONE |
				   PORTS_0_7_COAL_DONE | GPIO_INT | TWSI_INT |
	HC_MAIN_MASKED_IRQS_5	= (PORTS_0_3_COAL_DONE | PORTS_4_7_COAL_DONE |
	/* SATAHC registers */

	HC_IRQ_CAUSE_OFS	= 0x14,
	CRPB_DMA_DONE		= (1 << 0),	/* shift by port # */
	HC_IRQ_COAL		= (1 << 4),	/* IRQ coalescing */
	DEV_IRQ			= (1 << 8),	/* shift by port # */

	/* Shadow block registers */
	SHD_CTL_AST_OFS		= 0x20,		/* ofs from SHD_BLK_OFS */

	SATA_STATUS_OFS		= 0x300,  /* ctrl, err regs follow status */
	SATA_ACTIVE_OFS		= 0x350,

	SATA_INTERFACE_CTL	= 0x050,

	MV_M2_PREAMP_MASK	= 0x7e0,

	EDMA_CFG_Q_DEPTH	= 0,			/* queueing disabled */
	EDMA_CFG_NCQ		= (1 << 5),
	EDMA_CFG_NCQ_GO_ON_ERR	= (1 << 14),		/* continue on error */
	EDMA_CFG_RD_BRST_EXT	= (1 << 11),		/* read burst 512B */
	EDMA_CFG_WR_BUFF_LEN	= (1 << 13),		/* write buffer 512B */

	EDMA_ERR_IRQ_CAUSE_OFS	= 0x8,
	EDMA_ERR_IRQ_MASK_OFS	= 0xc,
	EDMA_ERR_D_PAR		= (1 << 0),	/* UDMA data parity err */
	EDMA_ERR_PRD_PAR	= (1 << 1),	/* UDMA PRD parity err */
	EDMA_ERR_DEV		= (1 << 2),	/* device error */
	EDMA_ERR_DEV_DCON	= (1 << 3),	/* device disconnect */
	EDMA_ERR_DEV_CON	= (1 << 4),	/* device connected */
	EDMA_ERR_SERR		= (1 << 5),	/* SError bits [WBDST] raised */
	EDMA_ERR_SELF_DIS	= (1 << 7),	/* Gen II/IIE self-disable */
	EDMA_ERR_SELF_DIS_5	= (1 << 8),	/* Gen I self-disable */
	EDMA_ERR_BIST_ASYNC	= (1 << 8),	/* BIST FIS or Async Notify */
	EDMA_ERR_TRANS_IRQ_7	= (1 << 8),	/* Gen IIE transprt layer irq */
	EDMA_ERR_CRQB_PAR	= (1 << 9),	/* CRQB parity error */
	EDMA_ERR_CRPB_PAR	= (1 << 10),	/* CRPB parity error */
	EDMA_ERR_INTRL_PAR	= (1 << 11),	/* internal parity error */
	EDMA_ERR_IORDY		= (1 << 12),	/* IORdy timeout */

	EDMA_ERR_LNK_CTRL_RX	= (0xf << 13),	/* link ctrl rx error */
	EDMA_ERR_LNK_CTRL_RX_0	= (1 << 13),	/* transient: CRC err */
	EDMA_ERR_LNK_CTRL_RX_1	= (1 << 14),	/* transient: FIFO err */
	EDMA_ERR_LNK_CTRL_RX_2	= (1 << 15),	/* fatal: caught SYNC */
	EDMA_ERR_LNK_CTRL_RX_3	= (1 << 16),	/* transient: FIS rx err */

	EDMA_ERR_LNK_DATA_RX	= (0xf << 17),	/* link data rx error */

	EDMA_ERR_LNK_CTRL_TX	= (0x1f << 21),	/* link ctrl tx error */
	EDMA_ERR_LNK_CTRL_TX_0	= (1 << 21),	/* transient: CRC err */
	EDMA_ERR_LNK_CTRL_TX_1	= (1 << 22),	/* transient: FIFO err */
	EDMA_ERR_LNK_CTRL_TX_2	= (1 << 23),	/* transient: caught SYNC */
	EDMA_ERR_LNK_CTRL_TX_3	= (1 << 24),	/* transient: caught DMAT */
	EDMA_ERR_LNK_CTRL_TX_4	= (1 << 25),	/* transient: FIS collision */

	EDMA_ERR_LNK_DATA_TX	= (0x1f << 26),	/* link data tx error */

	EDMA_ERR_TRANS_PROTO	= (1 << 31),	/* transport protocol error */
	EDMA_ERR_OVERRUN_5	= (1 << 5),
	EDMA_ERR_UNDERRUN_5	= (1 << 6),

	EDMA_ERR_IRQ_TRANSIENT	= EDMA_ERR_LNK_CTRL_RX_0 |
				  EDMA_ERR_LNK_CTRL_RX_1 |
				  EDMA_ERR_LNK_CTRL_RX_3 |
				  EDMA_ERR_LNK_CTRL_TX,

	EDMA_EH_FREEZE		= EDMA_ERR_D_PAR |
				  EDMA_ERR_LNK_CTRL_RX_2 |
				  EDMA_ERR_LNK_DATA_RX |
				  EDMA_ERR_LNK_DATA_TX |
				  EDMA_ERR_TRANS_PROTO,

	EDMA_EH_FREEZE_5	= EDMA_ERR_D_PAR |
				  EDMA_ERR_UNDERRUN_5 |
				  EDMA_ERR_SELF_DIS_5 |

	EDMA_REQ_Q_BASE_HI_OFS	= 0x10,
	EDMA_REQ_Q_IN_PTR_OFS	= 0x14,		/* also contains BASE_LO */

	EDMA_REQ_Q_OUT_PTR_OFS	= 0x18,
	EDMA_REQ_Q_PTR_SHIFT	= 5,

	EDMA_RSP_Q_BASE_HI_OFS	= 0x1c,
	EDMA_RSP_Q_IN_PTR_OFS	= 0x20,
	EDMA_RSP_Q_OUT_PTR_OFS	= 0x24,		/* also contains BASE_LO */
	EDMA_RSP_Q_PTR_SHIFT	= 3,

	EDMA_CMD_OFS		= 0x28,		/* EDMA command register */
	EDMA_EN			= (1 << 0),	/* enable EDMA */
	EDMA_DS			= (1 << 1),	/* disable EDMA; self-negated */
	ATA_RST			= (1 << 2),	/* reset trans/link/phy */

	EDMA_IORDY_TMOUT	= 0x34,

	/* Host private flags (hp_flags) */
	MV_HP_FLAG_MSI		= (1 << 0),
	MV_HP_ERRATA_50XXB0	= (1 << 1),
	MV_HP_ERRATA_50XXB2	= (1 << 2),
	MV_HP_ERRATA_60X1B2	= (1 << 3),
	MV_HP_ERRATA_60X1C0	= (1 << 4),
	MV_HP_ERRATA_XX42A0	= (1 << 5),
	MV_HP_GEN_I		= (1 << 6),	/* Generation I: 50xx */
	MV_HP_GEN_II		= (1 << 7),	/* Generation II: 60xx */
	MV_HP_GEN_IIE		= (1 << 8),	/* Generation IIE: 6042/7042 */
	MV_HP_PCIE		= (1 << 9),	/* PCIe bus/regs: 7042 */

	/* Port private flags (pp_flags) */
	MV_PP_FLAG_EDMA_EN	= (1 << 0),	/* is EDMA engine enabled? */
	MV_PP_FLAG_HAD_A_RESET	= (1 << 2),	/* 1st hard reset complete? */
};

#define IS_GEN_I(hpriv) ((hpriv)->hp_flags & MV_HP_GEN_I)
#define IS_GEN_II(hpriv) ((hpriv)->hp_flags & MV_HP_GEN_II)
#define IS_GEN_IIE(hpriv) ((hpriv)->hp_flags & MV_HP_GEN_IIE)
enum {
	/* DMA boundary 0xffff is required by the s/g splitting
	 * we need on /length/ in mv_fill_sg().
	 */
	MV_DMA_BOUNDARY		= 0xffffU,

	/* mask of register bits containing lower 32 bits
	 * of EDMA request queue DMA address
	 */
	EDMA_REQ_Q_BASE_LO_MASK	= 0xfffffc00U,

	/* ditto, for response queue */
	EDMA_RSP_Q_BASE_LO_MASK	= 0xffffff00U,
};
/* Command ReQuest Block: 32B */

/* Command ResPonse Block: 8B */

/* EDMA Physical Region Descriptor (ePRD); A.K.A. SG */

struct mv_port_priv {
	struct mv_crqb		*crqb;
	struct mv_crpb		*crpb;
	struct mv_sg		*sg_tbl;
	dma_addr_t		sg_tbl_dma;

	unsigned int		req_idx;
	unsigned int		resp_idx;

struct mv_port_signal {

struct mv_host_priv {
	struct mv_port_signal	signal[8];
	const struct mv_hw_ops	*ops;

struct mv_hw_ops {
	void (*phy_errata)(struct mv_host_priv *hpriv, void __iomem *mmio,
	void (*enable_leds)(struct mv_host_priv *hpriv, void __iomem *mmio);
	void (*read_preamp)(struct mv_host_priv *hpriv, int idx,
	int (*reset_hc)(struct mv_host_priv *hpriv, void __iomem *mmio,
	void (*reset_flash)(struct mv_host_priv *hpriv, void __iomem *mmio);
	void (*reset_bus)(struct pci_dev *pdev, void __iomem *mmio);
};
static void mv_irq_clear(struct ata_port *ap);
static int mv_scr_read(struct ata_port *ap, unsigned int sc_reg_in, u32 *val);
static int mv_scr_write(struct ata_port *ap, unsigned int sc_reg_in, u32 val);
static int mv5_scr_read(struct ata_port *ap, unsigned int sc_reg_in, u32 *val);
static int mv5_scr_write(struct ata_port *ap, unsigned int sc_reg_in, u32 val);
static int mv_port_start(struct ata_port *ap);
static void mv_port_stop(struct ata_port *ap);
static void mv_qc_prep(struct ata_queued_cmd *qc);
static void mv_qc_prep_iie(struct ata_queued_cmd *qc);
static unsigned int mv_qc_issue(struct ata_queued_cmd *qc);
static void mv_error_handler(struct ata_port *ap);
static void mv_post_int_cmd(struct ata_queued_cmd *qc);
static void mv_eh_freeze(struct ata_port *ap);
static void mv_eh_thaw(struct ata_port *ap);
static int mv_init_one(struct pci_dev *pdev, const struct pci_device_id *ent);

static void mv5_phy_errata(struct mv_host_priv *hpriv, void __iomem *mmio,
static void mv5_enable_leds(struct mv_host_priv *hpriv, void __iomem *mmio);
static void mv5_read_preamp(struct mv_host_priv *hpriv, int idx,
static int mv5_reset_hc(struct mv_host_priv *hpriv, void __iomem *mmio,
static void mv5_reset_flash(struct mv_host_priv *hpriv, void __iomem *mmio);
static void mv5_reset_bus(struct pci_dev *pdev, void __iomem *mmio);

static void mv6_phy_errata(struct mv_host_priv *hpriv, void __iomem *mmio,
static void mv6_enable_leds(struct mv_host_priv *hpriv, void __iomem *mmio);
static void mv6_read_preamp(struct mv_host_priv *hpriv, int idx,
static int mv6_reset_hc(struct mv_host_priv *hpriv, void __iomem *mmio,
static void mv6_reset_flash(struct mv_host_priv *hpriv, void __iomem *mmio);
static void mv_reset_pci_bus(struct pci_dev *pdev, void __iomem *mmio);
static void mv_channel_reset(struct mv_host_priv *hpriv, void __iomem *mmio,
			     unsigned int port_no);
static struct scsi_host_template mv5_sht = {
	.module			= THIS_MODULE,
	.ioctl			= ata_scsi_ioctl,
	.queuecommand		= ata_scsi_queuecmd,
	.can_queue		= ATA_DEF_QUEUE,
	.this_id		= ATA_SHT_THIS_ID,
	.sg_tablesize		= MV_MAX_SG_CT / 2,
	.cmd_per_lun		= ATA_SHT_CMD_PER_LUN,
	.emulated		= ATA_SHT_EMULATED,
	.proc_name		= DRV_NAME,
	.dma_boundary		= MV_DMA_BOUNDARY,
	.slave_configure	= ata_scsi_slave_config,
	.slave_destroy		= ata_scsi_slave_destroy,
	.bios_param		= ata_std_bios_param,
};

static struct scsi_host_template mv6_sht = {
	.module			= THIS_MODULE,
	.ioctl			= ata_scsi_ioctl,
	.queuecommand		= ata_scsi_queuecmd,
	.can_queue		= ATA_DEF_QUEUE,
	.this_id		= ATA_SHT_THIS_ID,
	.sg_tablesize		= MV_MAX_SG_CT / 2,
	.cmd_per_lun		= ATA_SHT_CMD_PER_LUN,
	.emulated		= ATA_SHT_EMULATED,
	.proc_name		= DRV_NAME,
	.dma_boundary		= MV_DMA_BOUNDARY,
	.slave_configure	= ata_scsi_slave_config,
	.slave_destroy		= ata_scsi_slave_destroy,
	.bios_param		= ata_std_bios_param,
};
static const struct ata_port_operations mv5_ops = {
	.tf_load		= ata_tf_load,
	.tf_read		= ata_tf_read,
	.check_status		= ata_check_status,
	.exec_command		= ata_exec_command,
	.dev_select		= ata_std_dev_select,

	.cable_detect		= ata_cable_sata,

	.qc_prep		= mv_qc_prep,
	.qc_issue		= mv_qc_issue,
	.data_xfer		= ata_data_xfer,

	.irq_clear		= mv_irq_clear,
	.irq_on			= ata_irq_on,

	.error_handler		= mv_error_handler,
	.post_internal_cmd	= mv_post_int_cmd,
	.freeze			= mv_eh_freeze,

	.scr_read		= mv5_scr_read,
	.scr_write		= mv5_scr_write,

	.port_start		= mv_port_start,
	.port_stop		= mv_port_stop,
};

static const struct ata_port_operations mv6_ops = {
	.tf_load		= ata_tf_load,
	.tf_read		= ata_tf_read,
	.check_status		= ata_check_status,
	.exec_command		= ata_exec_command,
	.dev_select		= ata_std_dev_select,

	.cable_detect		= ata_cable_sata,

	.qc_prep		= mv_qc_prep,
	.qc_issue		= mv_qc_issue,
	.data_xfer		= ata_data_xfer,

	.irq_clear		= mv_irq_clear,
	.irq_on			= ata_irq_on,

	.error_handler		= mv_error_handler,
	.post_internal_cmd	= mv_post_int_cmd,
	.freeze			= mv_eh_freeze,

	.scr_read		= mv_scr_read,
	.scr_write		= mv_scr_write,

	.port_start		= mv_port_start,
	.port_stop		= mv_port_stop,
};

static const struct ata_port_operations mv_iie_ops = {
	.tf_load		= ata_tf_load,
	.tf_read		= ata_tf_read,
	.check_status		= ata_check_status,
	.exec_command		= ata_exec_command,
	.dev_select		= ata_std_dev_select,

	.cable_detect		= ata_cable_sata,

	.qc_prep		= mv_qc_prep_iie,
	.qc_issue		= mv_qc_issue,
	.data_xfer		= ata_data_xfer,

	.irq_clear		= mv_irq_clear,
	.irq_on			= ata_irq_on,

	.error_handler		= mv_error_handler,
	.post_internal_cmd	= mv_post_int_cmd,
	.freeze			= mv_eh_freeze,

	.scr_read		= mv_scr_read,
	.scr_write		= mv_scr_write,

	.port_start		= mv_port_start,
	.port_stop		= mv_port_stop,
};
static const struct ata_port_info mv_port_info[] = {
		.flags		= MV_COMMON_FLAGS,
		.pio_mask	= 0x1f,	/* pio0-4 */
		.udma_mask	= ATA_UDMA6,
		.port_ops	= &mv5_ops,

		.flags		= MV_COMMON_FLAGS | MV_FLAG_DUAL_HC,
		.pio_mask	= 0x1f,	/* pio0-4 */
		.udma_mask	= ATA_UDMA6,
		.port_ops	= &mv5_ops,

		.flags		= MV_COMMON_FLAGS | MV_FLAG_DUAL_HC,
		.pio_mask	= 0x1f,	/* pio0-4 */
		.udma_mask	= ATA_UDMA6,
		.port_ops	= &mv5_ops,

		.flags		= MV_COMMON_FLAGS | MV_6XXX_FLAGS,
		.pio_mask	= 0x1f,	/* pio0-4 */
		.udma_mask	= ATA_UDMA6,
		.port_ops	= &mv6_ops,

		.flags		= MV_COMMON_FLAGS | MV_6XXX_FLAGS |
				  MV_FLAG_DUAL_HC,
		.pio_mask	= 0x1f,	/* pio0-4 */
		.udma_mask	= ATA_UDMA6,
		.port_ops	= &mv6_ops,

		.flags		= MV_COMMON_FLAGS | MV_6XXX_FLAGS,
		.pio_mask	= 0x1f,	/* pio0-4 */
		.udma_mask	= ATA_UDMA6,
		.port_ops	= &mv_iie_ops,

		.flags		= MV_COMMON_FLAGS | MV_6XXX_FLAGS,
		.pio_mask	= 0x1f,	/* pio0-4 */
		.udma_mask	= ATA_UDMA6,
		.port_ops	= &mv_iie_ops,
};
static const struct pci_device_id mv_pci_tbl[] = {
	{ PCI_VDEVICE(MARVELL, 0x5040), chip_504x },
	{ PCI_VDEVICE(MARVELL, 0x5041), chip_504x },
	{ PCI_VDEVICE(MARVELL, 0x5080), chip_5080 },
	{ PCI_VDEVICE(MARVELL, 0x5081), chip_508x },
	/* RocketRAID 1740/174x have different identifiers */
	{ PCI_VDEVICE(TTI, 0x1740), chip_508x },
	{ PCI_VDEVICE(TTI, 0x1742), chip_508x },

	{ PCI_VDEVICE(MARVELL, 0x6040), chip_604x },
	{ PCI_VDEVICE(MARVELL, 0x6041), chip_604x },
	{ PCI_VDEVICE(MARVELL, 0x6042), chip_6042 },
	{ PCI_VDEVICE(MARVELL, 0x6080), chip_608x },
	{ PCI_VDEVICE(MARVELL, 0x6081), chip_608x },

	{ PCI_VDEVICE(ADAPTEC2, 0x0241), chip_604x },

	{ PCI_VDEVICE(ADAPTEC2, 0x0243), chip_7042 },

	/* Marvell 7042 support */
	{ PCI_VDEVICE(MARVELL, 0x7042), chip_7042 },

	/* Highpoint RocketRAID PCIe series */
	{ PCI_VDEVICE(TTI, 0x2300), chip_7042 },
	{ PCI_VDEVICE(TTI, 0x2310), chip_7042 },

	{ }			/* terminate list */
};
static struct pci_driver mv_pci_driver = {
	.id_table		= mv_pci_tbl,
	.probe			= mv_init_one,
	.remove			= ata_pci_remove_one,
};

static const struct mv_hw_ops mv5xxx_ops = {
	.phy_errata		= mv5_phy_errata,
	.enable_leds		= mv5_enable_leds,
	.read_preamp		= mv5_read_preamp,
	.reset_hc		= mv5_reset_hc,
	.reset_flash		= mv5_reset_flash,
	.reset_bus		= mv5_reset_bus,
};

static const struct mv_hw_ops mv6xxx_ops = {
	.phy_errata		= mv6_phy_errata,
	.enable_leds		= mv6_enable_leds,
	.read_preamp		= mv6_read_preamp,
	.reset_hc		= mv6_reset_hc,
	.reset_flash		= mv6_reset_flash,
	.reset_bus		= mv_reset_pci_bus,
};
static int msi;	/* Use PCI msi; either zero (off, default) or non-zero */

/* move to PCI layer or libata core? */
static int pci_go_64(struct pci_dev *pdev)

	if (!pci_set_dma_mask(pdev, DMA_64BIT_MASK)) {
		rc = pci_set_consistent_dma_mask(pdev, DMA_64BIT_MASK);

			rc = pci_set_consistent_dma_mask(pdev, DMA_32BIT_MASK);

				dev_printk(KERN_ERR, &pdev->dev,
					   "64-bit DMA enable failed\n");

	rc = pci_set_dma_mask(pdev, DMA_32BIT_MASK);

		dev_printk(KERN_ERR, &pdev->dev,
			   "32-bit DMA enable failed\n");

	rc = pci_set_consistent_dma_mask(pdev, DMA_32BIT_MASK);

		dev_printk(KERN_ERR, &pdev->dev,
			   "32-bit consistent DMA enable failed\n");
static inline void writelfl(unsigned long data, void __iomem *addr)
{
	writel(data, addr);
	(void) readl(addr);	/* flush to avoid PCI posted write */
}
static inline void __iomem *mv_hc_base(void __iomem *base, unsigned int hc)
{
	return (base + MV_SATAHC0_REG_BASE + (hc * MV_SATAHC_REG_SZ));
}

static inline unsigned int mv_hc_from_port(unsigned int port)
{
	return port >> MV_PORT_HC_SHIFT;
}

static inline unsigned int mv_hardport_from_port(unsigned int port)
{
	return port & MV_PORT_MASK;
}

static inline void __iomem *mv_hc_base_from_port(void __iomem *base,
						 unsigned int port)
{
	return mv_hc_base(base, mv_hc_from_port(port));
}

static inline void __iomem *mv_port_base(void __iomem *base, unsigned int port)
{
	return mv_hc_base_from_port(base, port) +
		MV_SATAHC_ARBTR_REG_SZ +
		(mv_hardport_from_port(port) * MV_PORT_REG_SZ);
}

static inline void __iomem *mv_ap_base(struct ata_port *ap)
{
	return mv_port_base(ap->host->iomap[MV_PRIMARY_BAR], ap->port_no);
}
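/* Worked example (editor's illustration, assuming the elided
 * MV_PORT_MASK == 3 and a hypothetical port number 5): hc = 5 >> 2 = 1
 * and hardport = 5 & 3 = 1, so mv_port_base() resolves to
 *	base + 0x20000		(MV_SATAHC0_REG_BASE)
 *	     + 1 * 0x10000	(HC1 block)
 *	     + 0x2000		(skip the HC arbiter registers)
 *	     + 1 * 0x2000	(hard port 1)
 * == base + 0x34000.
 */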
static inline int mv_get_hc_count(unsigned long port_flags)
{
	return ((port_flags & MV_FLAG_DUAL_HC) ? 2 : 1);
}

static void mv_irq_clear(struct ata_port *ap)
{
}

static void mv_set_edma_ptrs(void __iomem *port_mmio,
			     struct mv_host_priv *hpriv,
			     struct mv_port_priv *pp)
{
	u32 index;

	/*
	 * initialize request queue
	 */
	index = (pp->req_idx & MV_MAX_Q_DEPTH_MASK) << EDMA_REQ_Q_PTR_SHIFT;

	WARN_ON(pp->crqb_dma & 0x3ff);
	writel((pp->crqb_dma >> 16) >> 16, port_mmio + EDMA_REQ_Q_BASE_HI_OFS);
	writelfl((pp->crqb_dma & EDMA_REQ_Q_BASE_LO_MASK) | index,
		 port_mmio + EDMA_REQ_Q_IN_PTR_OFS);

	if (hpriv->hp_flags & MV_HP_ERRATA_XX42A0)
		writelfl((pp->crqb_dma & 0xffffffff) | index,
			 port_mmio + EDMA_REQ_Q_OUT_PTR_OFS);
	else
		writelfl(index, port_mmio + EDMA_REQ_Q_OUT_PTR_OFS);

	/*
	 * initialize response queue
	 */
	index = (pp->resp_idx & MV_MAX_Q_DEPTH_MASK) << EDMA_RSP_Q_PTR_SHIFT;

	WARN_ON(pp->crpb_dma & 0xff);
	writel((pp->crpb_dma >> 16) >> 16, port_mmio + EDMA_RSP_Q_BASE_HI_OFS);

	if (hpriv->hp_flags & MV_HP_ERRATA_XX42A0)
		writelfl((pp->crpb_dma & 0xffffffff) | index,
			 port_mmio + EDMA_RSP_Q_IN_PTR_OFS);
	else
		writelfl(index, port_mmio + EDMA_RSP_Q_IN_PTR_OFS);

	writelfl((pp->crpb_dma & EDMA_RSP_Q_BASE_LO_MASK) | index,
		 port_mmio + EDMA_RSP_Q_OUT_PTR_OFS);
}
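/* Editor's note on the register encoding above: the IN/OUT pointer
 * registers double as the queue base address.  Because the CRQB ring
 * is 1KB-aligned (mask 0xfffffc00) and the ring index is shifted into
 * bits 9:5 (EDMA_REQ_Q_PTR_SHIFT == 5), base and index never overlap
 * and can simply be OR'd together.  For a hypothetical crqb_dma of
 * 0x1f000400 and req_idx of 3:
 *	(0x1f000400 & 0xfffffc00) | (3 << 5) == 0x1f000460
 * The response ring does the same with 256B alignment and shift 3.
 */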
/**
 * mv_start_dma - Enable eDMA engine
 * @base: port base address
 * @pp: port private data
 *
 * Verify the local cache of the eDMA state is accurate with a
 *
 * LOCKING:
 * Inherited from caller.
 */
static void mv_start_dma(void __iomem *port_mmio, struct mv_host_priv *hpriv,
			 struct mv_port_priv *pp)
{
	if (!(pp->pp_flags & MV_PP_FLAG_EDMA_EN)) {
		/* clear EDMA event indicators, if any */
		writelfl(0, port_mmio + EDMA_ERR_IRQ_CAUSE_OFS);

		mv_set_edma_ptrs(port_mmio, hpriv, pp);

		writelfl(EDMA_EN, port_mmio + EDMA_CMD_OFS);
		pp->pp_flags |= MV_PP_FLAG_EDMA_EN;
	}
	WARN_ON(!(EDMA_EN & readl(port_mmio + EDMA_CMD_OFS)));
}

/**
 * __mv_stop_dma - Disable eDMA engine
 * @ap: ATA channel to manipulate
 *
 * Verify the local cache of the eDMA state is accurate with a
 *
 * LOCKING:
 * Inherited from caller.
 */
static int __mv_stop_dma(struct ata_port *ap)
{
	void __iomem *port_mmio = mv_ap_base(ap);
	struct mv_port_priv *pp	= ap->private_data;
	u32 reg;
	int i, err = 0;

	if (pp->pp_flags & MV_PP_FLAG_EDMA_EN) {
		/* Disable EDMA if active.   The disable bit auto clears.
		 */
		writelfl(EDMA_DS, port_mmio + EDMA_CMD_OFS);
		pp->pp_flags &= ~MV_PP_FLAG_EDMA_EN;
	} else
		WARN_ON(EDMA_EN & readl(port_mmio + EDMA_CMD_OFS));

	/* now properly wait for the eDMA to stop */
	for (i = 1000; i > 0; i--) {
		reg = readl(port_mmio + EDMA_CMD_OFS);
		if (!(reg & EDMA_EN))
			break;

		udelay(100);
	}

	if (reg & EDMA_EN) {
		ata_port_printk(ap, KERN_ERR, "Unable to stop eDMA\n");
		err = -EIO;
	}

	return err;
}

static int mv_stop_dma(struct ata_port *ap)
{
	unsigned long flags;
	int rc;

	spin_lock_irqsave(&ap->host->lock, flags);
	rc = __mv_stop_dma(ap);
	spin_unlock_irqrestore(&ap->host->lock, flags);

	return rc;
}

static void mv_dump_mem(void __iomem *start, unsigned bytes)
{
	int b, w;

	for (b = 0; b < bytes; ) {
		DPRINTK("%p: ", start + b);
		for (w = 0; b < bytes && w < 4; w++) {
			printk("%08x ", readl(start + b));
			b += sizeof(u32);
		}
		printk("\n");
	}
}

static void mv_dump_pci_cfg(struct pci_dev *pdev, unsigned bytes)
{
	int b, w;
	u32 dw;

	for (b = 0; b < bytes; ) {
		DPRINTK("%02x: ", b);
		for (w = 0; b < bytes && w < 4; w++) {
			(void) pci_read_config_dword(pdev, b, &dw);
			printk("%08x ", dw);
			b += sizeof(u32);
		}
		printk("\n");
	}
}

static void mv_dump_all_regs(void __iomem *mmio_base, int port,
			     struct pci_dev *pdev)
{
	void __iomem *hc_base = mv_hc_base(mmio_base,
					   port >> MV_PORT_HC_SHIFT);
	void __iomem *port_base;
	int start_port, num_ports, p, start_hc, num_hcs, hc;

	if (port < 0) {
		start_hc = start_port = 0;
		num_ports = 8;		/* should be benign for 4 port devs */
	} else {
		start_hc = port >> MV_PORT_HC_SHIFT;
		start_port = port;
		num_ports = num_hcs = 1;
	}

	DPRINTK("All registers for port(s) %u-%u:\n", start_port,
		num_ports > 1 ? num_ports - 1 : start_port);

	DPRINTK("PCI config space regs:\n");
	mv_dump_pci_cfg(pdev, 0x68);

	DPRINTK("PCI regs:\n");
	mv_dump_mem(mmio_base+0xc00, 0x3c);
	mv_dump_mem(mmio_base+0xd00, 0x34);
	mv_dump_mem(mmio_base+0xf00, 0x4);
	mv_dump_mem(mmio_base+0x1d00, 0x6c);
	for (hc = start_hc; hc < start_hc + num_hcs; hc++) {
		hc_base = mv_hc_base(mmio_base, hc);
		DPRINTK("HC regs (HC %i):\n", hc);
		mv_dump_mem(hc_base, 0x1c);
	}
	for (p = start_port; p < start_port + num_ports; p++) {
		port_base = mv_port_base(mmio_base, p);
		DPRINTK("EDMA regs (port %i):\n", p);
		mv_dump_mem(port_base, 0x54);
		DPRINTK("SATA regs (port %i):\n", p);
		mv_dump_mem(port_base+0x300, 0x60);
	}
}
static unsigned int mv_scr_offset(unsigned int sc_reg_in)
{
	unsigned int ofs;

	switch (sc_reg_in) {
	case SCR_STATUS:
	case SCR_CONTROL:
	case SCR_ERROR:
		ofs = SATA_STATUS_OFS + (sc_reg_in * sizeof(u32));
		break;
	case SCR_ACTIVE:
		ofs = SATA_ACTIVE_OFS;   /* active is not with the others */
		break;
	default:
		ofs = 0xffffffffU;
		break;
	}
	return ofs;
}

static int mv_scr_read(struct ata_port *ap, unsigned int sc_reg_in, u32 *val)
{
	unsigned int ofs = mv_scr_offset(sc_reg_in);

	if (ofs != 0xffffffffU) {
		*val = readl(mv_ap_base(ap) + ofs);
		return 0;
	} else
		return -EINVAL;
}

static int mv_scr_write(struct ata_port *ap, unsigned int sc_reg_in, u32 val)
{
	unsigned int ofs = mv_scr_offset(sc_reg_in);

	if (ofs != 0xffffffffU) {
		writelfl(val, mv_ap_base(ap) + ofs);
		return 0;
	} else
		return -EINVAL;
}

static void mv_edma_cfg(struct ata_port *ap, struct mv_host_priv *hpriv,
			void __iomem *port_mmio)
{
	u32 cfg = readl(port_mmio + EDMA_CFG_OFS);

	/* set up non-NCQ EDMA configuration */
	cfg &= ~(1 << 9);	/* disable eQue */

	if (IS_GEN_I(hpriv)) {
		cfg &= ~0x1f;		/* clear queue depth */
		cfg |= (1 << 8);	/* enab config burst size mask */
	}

	else if (IS_GEN_II(hpriv)) {
		cfg &= ~0x1f;		/* clear queue depth */
		cfg |= EDMA_CFG_RD_BRST_EXT | EDMA_CFG_WR_BUFF_LEN;
		cfg &= ~(EDMA_CFG_NCQ | EDMA_CFG_NCQ_GO_ON_ERR); /* clear NCQ */
	}

	else if (IS_GEN_IIE(hpriv)) {
		cfg |= (1 << 23);	/* do not mask PM field in rx'd FIS */
		cfg |= (1 << 22);	/* enab 4-entry host queue cache */
		cfg &= ~(1 << 19);	/* dis 128-entry queue (for now?) */
		cfg |= (1 << 18);	/* enab early completion */
		cfg |= (1 << 17);	/* enab cut-through (dis stor&forwrd) */
		cfg &= ~(1 << 16);	/* dis FIS-based switching (for now) */
		cfg &= ~(EDMA_CFG_NCQ);	/* clear NCQ */
	}

	writelfl(cfg, port_mmio + EDMA_CFG_OFS);
}
/**
 * mv_port_start - Port specific init/start routine.
 * @ap: ATA channel to manipulate
 *
 * Allocate and point to DMA memory, init port private memory,
 *
 * LOCKING:
 * Inherited from caller.
 */
static int mv_port_start(struct ata_port *ap)
{
	struct device *dev = ap->host->dev;
	struct mv_host_priv *hpriv = ap->host->private_data;
	struct mv_port_priv *pp;
	void __iomem *port_mmio = mv_ap_base(ap);
	unsigned long flags;

	pp = devm_kzalloc(dev, sizeof(*pp), GFP_KERNEL);

	mem = dmam_alloc_coherent(dev, MV_PORT_PRIV_DMA_SZ, &mem_dma,

	memset(mem, 0, MV_PORT_PRIV_DMA_SZ);

	rc = ata_pad_alloc(ap, dev);

	/* First item in chunk of DMA memory:
	 * 32-slot command request table (CRQB), 32 bytes each in size
	 */
	pp->crqb_dma = mem_dma;
	mem += MV_CRQB_Q_SZ;
	mem_dma += MV_CRQB_Q_SZ;

	/*
	 * 32-slot command response table (CRPB), 8 bytes each in size
	 */
	pp->crpb_dma = mem_dma;
	mem += MV_CRPB_Q_SZ;
	mem_dma += MV_CRPB_Q_SZ;

	/*
	 * Table of scatter-gather descriptors (ePRD), 16 bytes each
	 */
	pp->sg_tbl_dma = mem_dma;

	spin_lock_irqsave(&ap->host->lock, flags);

	mv_edma_cfg(ap, hpriv, port_mmio);

	mv_set_edma_ptrs(port_mmio, hpriv, pp);

	spin_unlock_irqrestore(&ap->host->lock, flags);

	/* Don't turn on EDMA here...do it before DMA commands only.  Else
	 * we'll be unable to send non-data, PIO, etc due to restricted access
	 */
	ap->private_data = pp;
	return 0;
}

/**
 * mv_port_stop - Port specific cleanup/stop routine.
 * @ap: ATA channel to manipulate
 *
 * Stop DMA, cleanup port memory.
 *
 * LOCKING:
 * This routine uses the host lock to protect the DMA stop.
 */
static void mv_port_stop(struct ata_port *ap)
{
	mv_stop_dma(ap);
}

/**
 * mv_fill_sg - Fill out the Marvell ePRD (scatter gather) entries
 * @qc: queued command whose SG list to source from
 *
 * Populate the SG list and mark the last entry.
 *
 * LOCKING:
 * Inherited from caller.
 */
static void mv_fill_sg(struct ata_queued_cmd *qc)
{
	struct mv_port_priv *pp = qc->ap->private_data;
	struct scatterlist *sg;
	struct mv_sg *mv_sg, *last_sg = NULL;
	unsigned int si;

	mv_sg = pp->sg_tbl;
	for_each_sg(qc->sg, sg, qc->n_elem, si) {
		dma_addr_t addr = sg_dma_address(sg);
		u32 sg_len = sg_dma_len(sg);

		while (sg_len) {
			u32 offset = addr & 0xffff;
			u32 len = sg_len;

			if ((offset + sg_len > 0x10000))
				len = 0x10000 - offset;

			mv_sg->addr = cpu_to_le32(addr & 0xffffffff);
			mv_sg->addr_hi = cpu_to_le32((addr >> 16) >> 16);
			mv_sg->flags_size = cpu_to_le32(len & 0xffff);

			sg_len -= len;
			addr += len;

			last_sg = mv_sg;
			mv_sg++;
		}
	}

	if (likely(last_sg))
		last_sg->flags_size |= cpu_to_le32(EPRD_FLAG_END_OF_TBL);
}
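/* Editor's illustration of the splitting above (hypothetical values):
 * a segment of sg_len 0x18000 at addr 0x1000 has offset 0x1000, so
 * the first ePRD is trimmed to len = 0x10000 - 0x1000 = 0xf000 and
 * the remaining 0x9000 bytes fall into the next ePRD.  This keeps
 * each entry's 16-bit length field (flags_size & 0xffff) from
 * wrapping at the 64KB mark, per the MV_DMA_BOUNDARY note earlier.
 */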
static void mv_crqb_pack_cmd(__le16 *cmdw, u8 data, u8 addr, unsigned last)
{
	u16 tmp = data | (addr << CRQB_CMD_ADDR_SHIFT) | CRQB_CMD_CS |
		  (last ? CRQB_CMD_LAST : 0);
	*cmdw = cpu_to_le16(tmp);
}
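/* Editor's illustration of the packing above (hypothetical operands):
 * with data = 0xc8 (READ DMA opcode), addr = ATA_REG_CMD and last = 1,
 * the 16-bit word carries the data byte in bits 7:0, the shadow
 * register address from bit 8 upward, the 0x2 "CS" code contributed by
 * CRQB_CMD_CS, and bit 15 set to mark the final ata_cmd[] word of the
 * CRQB.
 */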
/**
 * mv_qc_prep - Host specific command preparation.
 * @qc: queued command to prepare
 *
 * This routine simply redirects to the general purpose routine
 * if command is not DMA.  Else, it handles prep of the CRQB
 * (command request block), does some sanity checking, and calls
 * the SG load routine.
 *
 * LOCKING:
 * Inherited from caller.
 */
static void mv_qc_prep(struct ata_queued_cmd *qc)
{
	struct ata_port *ap = qc->ap;
	struct mv_port_priv *pp = ap->private_data;
	__le16 *cw;
	struct ata_taskfile *tf;
	u16 flags = 0;
	unsigned in_index;

	if (qc->tf.protocol != ATA_PROT_DMA)
		return;

	/* Fill in command request block
	 */
	if (!(qc->tf.flags & ATA_TFLAG_WRITE))
		flags |= CRQB_FLAG_READ;
	WARN_ON(MV_MAX_Q_DEPTH <= qc->tag);
	flags |= qc->tag << CRQB_TAG_SHIFT;
	flags |= qc->tag << CRQB_IOID_SHIFT;	/* 50xx appears to ignore this*/

	/* get current queue index from software */
	in_index = pp->req_idx & MV_MAX_Q_DEPTH_MASK;

	pp->crqb[in_index].sg_addr =
		cpu_to_le32(pp->sg_tbl_dma & 0xffffffff);
	pp->crqb[in_index].sg_addr_hi =
		cpu_to_le32((pp->sg_tbl_dma >> 16) >> 16);
	pp->crqb[in_index].ctrl_flags = cpu_to_le16(flags);

	cw = &pp->crqb[in_index].ata_cmd[0];
	tf = &qc->tf;

	/* Sadly, the CRQB cannot accommodate all registers--there are
	 * only 11 bytes...so we must pick and choose required
	 * registers based on the command.  So, we drop feature and
	 * hob_feature for [RW] DMA commands, but they are needed for
	 * NCQ.  NCQ will drop hob_nsect.
	 */
	switch (tf->command) {
	case ATA_CMD_READ:
	case ATA_CMD_READ_EXT:
	case ATA_CMD_WRITE:
	case ATA_CMD_WRITE_EXT:
	case ATA_CMD_WRITE_FUA_EXT:
		mv_crqb_pack_cmd(cw++, tf->hob_nsect, ATA_REG_NSECT, 0);
		break;
#ifdef LIBATA_NCQ		/* FIXME: remove this line when NCQ added */
	case ATA_CMD_FPDMA_READ:
	case ATA_CMD_FPDMA_WRITE:
		mv_crqb_pack_cmd(cw++, tf->hob_feature, ATA_REG_FEATURE, 0);
		mv_crqb_pack_cmd(cw++, tf->feature, ATA_REG_FEATURE, 0);
		break;
#endif				/* FIXME: remove this line when NCQ added */
	default:
		/* The only other commands EDMA supports in non-queued and
		 * non-NCQ mode are: [RW] STREAM DMA and W DMA FUA EXT, none
		 * of which are defined/used by Linux.  If we get here, this
		 * driver needs work.
		 *
		 * FIXME: modify libata to give qc_prep a return value and
		 * return error here.
		 */
		BUG_ON(tf->command);
		break;
	}
	mv_crqb_pack_cmd(cw++, tf->nsect, ATA_REG_NSECT, 0);
	mv_crqb_pack_cmd(cw++, tf->hob_lbal, ATA_REG_LBAL, 0);
	mv_crqb_pack_cmd(cw++, tf->lbal, ATA_REG_LBAL, 0);
	mv_crqb_pack_cmd(cw++, tf->hob_lbam, ATA_REG_LBAM, 0);
	mv_crqb_pack_cmd(cw++, tf->lbam, ATA_REG_LBAM, 0);
	mv_crqb_pack_cmd(cw++, tf->hob_lbah, ATA_REG_LBAH, 0);
	mv_crqb_pack_cmd(cw++, tf->lbah, ATA_REG_LBAH, 0);
	mv_crqb_pack_cmd(cw++, tf->device, ATA_REG_DEVICE, 0);
	mv_crqb_pack_cmd(cw++, tf->command, ATA_REG_CMD, 1);	/* last */

	if (!(qc->flags & ATA_QCFLAG_DMAMAP))
		return;
	mv_fill_sg(qc);
}
/**
 * mv_qc_prep_iie - Host specific command preparation.
 * @qc: queued command to prepare
 *
 * This routine simply redirects to the general purpose routine
 * if command is not DMA.  Else, it handles prep of the CRQB
 * (command request block), does some sanity checking, and calls
 * the SG load routine.
 *
 * LOCKING:
 * Inherited from caller.
 */
static void mv_qc_prep_iie(struct ata_queued_cmd *qc)
{
	struct ata_port *ap = qc->ap;
	struct mv_port_priv *pp = ap->private_data;
	struct mv_crqb_iie *crqb;
	struct ata_taskfile *tf;
	unsigned in_index;
	u32 flags = 0;

	if (qc->tf.protocol != ATA_PROT_DMA)
		return;

	/* Fill in Gen IIE command request block
	 */
	if (!(qc->tf.flags & ATA_TFLAG_WRITE))
		flags |= CRQB_FLAG_READ;

	WARN_ON(MV_MAX_Q_DEPTH <= qc->tag);
	flags |= qc->tag << CRQB_TAG_SHIFT;
	flags |= qc->tag << CRQB_IOID_SHIFT;	/* "I/O Id" is -really-
						   what we use as our tag */

	/* get current queue index from software */
	in_index = pp->req_idx & MV_MAX_Q_DEPTH_MASK;

	crqb = (struct mv_crqb_iie *) &pp->crqb[in_index];
	crqb->addr = cpu_to_le32(pp->sg_tbl_dma & 0xffffffff);
	crqb->addr_hi = cpu_to_le32((pp->sg_tbl_dma >> 16) >> 16);
	crqb->flags = cpu_to_le32(flags);

	tf = &qc->tf;
	crqb->ata_cmd[0] = cpu_to_le32(
			(tf->command << 16) |
			(tf->feature << 24)
		);
	crqb->ata_cmd[1] = cpu_to_le32(
			(tf->lbal << 0) |
			(tf->lbam << 8) |
			(tf->lbah << 16) |
			(tf->device << 24)
		);
	crqb->ata_cmd[2] = cpu_to_le32(
			(tf->hob_lbal << 0) |
			(tf->hob_lbam << 8) |
			(tf->hob_lbah << 16) |
			(tf->hob_feature << 24)
		);
	crqb->ata_cmd[3] = cpu_to_le32(
			(tf->nsect << 0) |
			(tf->hob_nsect << 8)
		);

	if (!(qc->flags & ATA_QCFLAG_DMAMAP))
		return;
	mv_fill_sg(qc);
}

/**
 * mv_qc_issue - Initiate a command to the host
 * @qc: queued command to start
 *
 * This routine simply redirects to the general purpose routine
 * if command is not DMA.  Else, it sanity checks our local
 * caches of the request producer/consumer indices then enables
 * DMA and bumps the request producer index.
 *
 * LOCKING:
 * Inherited from caller.
 */
static unsigned int mv_qc_issue(struct ata_queued_cmd *qc)
{
	struct ata_port *ap = qc->ap;
	void __iomem *port_mmio = mv_ap_base(ap);
	struct mv_port_priv *pp = ap->private_data;
	struct mv_host_priv *hpriv = ap->host->private_data;
	u32 in_index;

	if (qc->tf.protocol != ATA_PROT_DMA) {
		/* We're about to send a non-EDMA capable command to the
		 * port.  Turn off EDMA so there won't be problems accessing
		 * shadow block, etc registers.
		 */
		__mv_stop_dma(ap);
		return ata_qc_issue_prot(qc);
	}

	mv_start_dma(port_mmio, hpriv, pp);

	in_index = pp->req_idx & MV_MAX_Q_DEPTH_MASK;

	/* until we do queuing, the queue should be empty at this point */
	WARN_ON(in_index != ((readl(port_mmio + EDMA_REQ_Q_OUT_PTR_OFS)
		>> EDMA_REQ_Q_PTR_SHIFT) & MV_MAX_Q_DEPTH_MASK));

	pp->req_idx++;

	in_index = (pp->req_idx & MV_MAX_Q_DEPTH_MASK) << EDMA_REQ_Q_PTR_SHIFT;

	/* and write the request in pointer to kick the EDMA to life */
	writelfl((pp->crqb_dma & EDMA_REQ_Q_BASE_LO_MASK) | in_index,
		 port_mmio + EDMA_REQ_Q_IN_PTR_OFS);

	return 0;
}
/**
 * mv_err_intr - Handle error interrupts on the port
 * @ap: ATA channel to manipulate
 * @reset_allowed: bool: 0 == don't trigger from reset here
 *
 * In most cases, just clear the interrupt and move on.  However,
 * some cases require an eDMA reset, which is done right before
 * the COMRESET in mv_phy_reset().  The SERR case requires a
 * clear of pending errors in the SATA SERROR register.  Finally,
 * if the port disabled DMA, update our cached copy to match.
 *
 * LOCKING:
 * Inherited from caller.
 */
static void mv_err_intr(struct ata_port *ap, struct ata_queued_cmd *qc)
{
	void __iomem *port_mmio = mv_ap_base(ap);
	u32 edma_err_cause, eh_freeze_mask, serr = 0;
	struct mv_port_priv *pp = ap->private_data;
	struct mv_host_priv *hpriv = ap->host->private_data;
	unsigned int edma_enabled = (pp->pp_flags & MV_PP_FLAG_EDMA_EN);
	unsigned int action = 0, err_mask = 0;
	struct ata_eh_info *ehi = &ap->link.eh_info;

	ata_ehi_clear_desc(ehi);

	if (!edma_enabled) {
		/* just a guess: do we need to do this? should we
		 * expand this, and do it in all cases?
		 */
		sata_scr_read(&ap->link, SCR_ERROR, &serr);
		sata_scr_write_flush(&ap->link, SCR_ERROR, serr);
	}

	edma_err_cause = readl(port_mmio + EDMA_ERR_IRQ_CAUSE_OFS);

	ata_ehi_push_desc(ehi, "edma_err 0x%08x", edma_err_cause);

	/*
	 * all generations share these EDMA error cause bits
	 */
	if (edma_err_cause & EDMA_ERR_DEV)
		err_mask |= AC_ERR_DEV;
	if (edma_err_cause & (EDMA_ERR_D_PAR | EDMA_ERR_PRD_PAR |
			EDMA_ERR_CRQB_PAR | EDMA_ERR_CRPB_PAR |
			EDMA_ERR_INTRL_PAR)) {
		err_mask |= AC_ERR_ATA_BUS;
		action |= ATA_EH_HARDRESET;
		ata_ehi_push_desc(ehi, "parity error");
	}
	if (edma_err_cause & (EDMA_ERR_DEV_DCON | EDMA_ERR_DEV_CON)) {
		ata_ehi_hotplugged(ehi);
		ata_ehi_push_desc(ehi, edma_err_cause & EDMA_ERR_DEV_DCON ?
			"dev disconnect" : "dev connect");
		action |= ATA_EH_HARDRESET;
	}

	if (IS_GEN_I(hpriv)) {
		eh_freeze_mask = EDMA_EH_FREEZE_5;

		if (edma_err_cause & EDMA_ERR_SELF_DIS_5) {
			struct mv_port_priv *pp	= ap->private_data;
			pp->pp_flags &= ~MV_PP_FLAG_EDMA_EN;
			ata_ehi_push_desc(ehi, "EDMA self-disable");
		}
	} else {
		eh_freeze_mask = EDMA_EH_FREEZE;

		if (edma_err_cause & EDMA_ERR_SELF_DIS) {
			struct mv_port_priv *pp	= ap->private_data;
			pp->pp_flags &= ~MV_PP_FLAG_EDMA_EN;
			ata_ehi_push_desc(ehi, "EDMA self-disable");
		}

		if (edma_err_cause & EDMA_ERR_SERR) {
			sata_scr_read(&ap->link, SCR_ERROR, &serr);
			sata_scr_write_flush(&ap->link, SCR_ERROR, serr);
			err_mask = AC_ERR_ATA_BUS;
			action |= ATA_EH_HARDRESET;
		}
	}

	/* Clear EDMA now that SERR cleanup done */
	writelfl(~edma_err_cause, port_mmio + EDMA_ERR_IRQ_CAUSE_OFS);

	if (!err_mask) {
		err_mask = AC_ERR_OTHER;
		action |= ATA_EH_HARDRESET;
	}

	ehi->serror |= serr;
	ehi->action |= action;

	if (qc)
		qc->err_mask |= err_mask;
	else
		ehi->err_mask |= err_mask;

	if (edma_err_cause & eh_freeze_mask)
		ata_port_freeze(ap);
}
static void mv_intr_pio(struct ata_port *ap)
{
	struct ata_queued_cmd *qc;
	u8 ata_status;

	/* ignore spurious intr if drive still BUSY */
	ata_status = readb(ap->ioaddr.status_addr);
	if (unlikely(ata_status & ATA_BUSY))
		return;

	/* get active ATA command */
	qc = ata_qc_from_tag(ap, ap->link.active_tag);
	if (unlikely(!qc))			/* no active tag */
		return;
	if (qc->tf.flags & ATA_TFLAG_POLLING)	/* polling; we don't own qc */
		return;

	/* and finally, complete the ATA command */
	qc->err_mask |= ac_err_mask(ata_status);
	ata_qc_complete(qc);
}

static void mv_intr_edma(struct ata_port *ap)
{
	void __iomem *port_mmio = mv_ap_base(ap);
	struct mv_host_priv *hpriv = ap->host->private_data;
	struct mv_port_priv *pp = ap->private_data;
	struct ata_queued_cmd *qc;
	u32 out_index, in_index;
	bool work_done = false;

	/* get h/w response queue pointer */
	in_index = (readl(port_mmio + EDMA_RSP_Q_IN_PTR_OFS)
			>> EDMA_RSP_Q_PTR_SHIFT) & MV_MAX_Q_DEPTH_MASK;

	while (1) {
		u16 status;
		unsigned int tag;

		/* get s/w response queue last-read pointer, and compare */
		out_index = pp->resp_idx & MV_MAX_Q_DEPTH_MASK;
		if (in_index == out_index)
			break;

		/* 50xx: get active ATA command */
		if (IS_GEN_I(hpriv))
			tag = ap->link.active_tag;

		/* Gen II/IIE: get active ATA command via tag, to enable
		 * support for queueing.  this works transparently for
		 * queued and non-queued modes.
		 */
		else if (IS_GEN_II(hpriv))
			tag = (le16_to_cpu(pp->crpb[out_index].id)
				>> CRPB_IOID_SHIFT_6) & 0x3f;

		else /* IS_GEN_IIE */
			tag = (le16_to_cpu(pp->crpb[out_index].id)
				>> CRPB_IOID_SHIFT_7) & 0x3f;

		qc = ata_qc_from_tag(ap, tag);

		/* lower 8 bits of status are EDMA_ERR_IRQ_CAUSE_OFS
		 * bits (WARNING: might not necessarily be associated
		 * with this command), which -should- be clear
		 */
		status = le16_to_cpu(pp->crpb[out_index].flags);
		if (unlikely(status & 0xff)) {
			mv_err_intr(ap, qc);
			return;
		}

		/* and finally, complete the ATA command */
		if (qc) {
			qc->err_mask |=
				ac_err_mask(status >> CRPB_FLAG_STATUS_SHIFT);
			ata_qc_complete(qc);
		}

		/* advance software response queue pointer, to
		 * indicate (after the loop completes) to hardware
		 * that we have consumed a response queue entry.
		 */
		work_done = true;
		pp->resp_idx++;
	}

	if (work_done)
		writelfl((pp->crpb_dma & EDMA_RSP_Q_BASE_LO_MASK) |
			 (out_index << EDMA_RSP_Q_PTR_SHIFT),
			 port_mmio + EDMA_RSP_Q_OUT_PTR_OFS);
}
/**
 * mv_host_intr - Handle all interrupts on the given host controller
 * @host: host specific structure
 * @relevant: port error bits relevant to this host controller
 * @hc: which host controller we're to look at
 *
 * Read then write clear the HC interrupt status then walk each
 * port connected to the HC and see if it needs servicing.  Port
 * success ints are reported in the HC interrupt status reg, the
 * port error ints are reported in the higher level main
 * interrupt status register and thus are passed in via the
 * 'relevant' argument.
 *
 * LOCKING:
 * Inherited from caller.
 */
static void mv_host_intr(struct ata_host *host, u32 relevant, unsigned int hc)
{
	void __iomem *mmio = host->iomap[MV_PRIMARY_BAR];
	void __iomem *hc_mmio = mv_hc_base(mmio, hc);
	u32 hc_irq_cause;
	int port, port0;

	if (hc == 0)
		port0 = 0;
	else
		port0 = MV_PORTS_PER_HC;

	/* we'll need the HC success int register in most cases */
	hc_irq_cause = readl(hc_mmio + HC_IRQ_CAUSE_OFS);
	if (!hc_irq_cause)
		return;

	writelfl(~hc_irq_cause, hc_mmio + HC_IRQ_CAUSE_OFS);

	VPRINTK("ENTER, hc%u relevant=0x%08x HC IRQ cause=0x%08x\n",
		hc, relevant, hc_irq_cause);

	for (port = port0; port < port0 + MV_PORTS_PER_HC; port++) {
		struct ata_port *ap = host->ports[port];
		struct mv_port_priv *pp = ap->private_data;
		int have_err_bits, hard_port, shift;

		if ((!ap) || (ap->flags & ATA_FLAG_DISABLED))
			continue;

		shift = port << 1;		/* (port * 2) */
		if (port >= MV_PORTS_PER_HC) {
			shift++;	/* skip bit 8 in the HC Main IRQ reg */
		}
		have_err_bits = ((PORT0_ERR << shift) & relevant);

		if (unlikely(have_err_bits)) {
			struct ata_queued_cmd *qc;

			qc = ata_qc_from_tag(ap, ap->link.active_tag);
			if (qc && (qc->tf.flags & ATA_TFLAG_POLLING))
				continue;

			mv_err_intr(ap, qc);
			continue;
		}

		hard_port = mv_hardport_from_port(port); /* range 0..3 */

		if (pp->pp_flags & MV_PP_FLAG_EDMA_EN) {
			if ((CRPB_DMA_DONE << hard_port) & hc_irq_cause)
				mv_intr_edma(ap);
		} else {
			if ((DEV_IRQ << hard_port) & hc_irq_cause)
				mv_intr_pio(ap);
		}
	}
}
static void mv_pci_error(struct ata_host *host, void __iomem *mmio)
{
	struct mv_host_priv *hpriv = host->private_data;
	struct ata_port *ap;
	struct ata_queued_cmd *qc;
	struct ata_eh_info *ehi;
	unsigned int i, err_mask, printed = 0;
	u32 err_cause;

	err_cause = readl(mmio + hpriv->irq_cause_ofs);

	dev_printk(KERN_ERR, host->dev, "PCI ERROR; PCI IRQ cause=0x%08x\n",
		   err_cause);

	DPRINTK("All regs @ PCI error\n");
	mv_dump_all_regs(mmio, -1, to_pci_dev(host->dev));

	writelfl(0, mmio + hpriv->irq_cause_ofs);

	for (i = 0; i < host->n_ports; i++) {
		ap = host->ports[i];
		if (!ata_link_offline(&ap->link)) {
			ehi = &ap->link.eh_info;
			ata_ehi_clear_desc(ehi);
			if (!printed++)
				ata_ehi_push_desc(ehi,
					"PCI err cause 0x%08x", err_cause);
			err_mask = AC_ERR_HOST_BUS;
			ehi->action = ATA_EH_HARDRESET;
			qc = ata_qc_from_tag(ap, ap->link.active_tag);
			if (qc)
				qc->err_mask |= err_mask;
			else
				ehi->err_mask |= err_mask;

			ata_port_freeze(ap);
		}
	}
}
/**
 * mv_interrupt - Main interrupt event handler
 * @dev_instance: private data; in this case the host structure
 *
 * Read the read only register to determine if any host
 * controllers have pending interrupts.  If so, call lower level
 * routine to handle.  Also check for PCI errors which are only
 *
 * LOCKING:
 * This routine holds the host lock while processing pending
 * interrupts.
 */
static irqreturn_t mv_interrupt(int irq, void *dev_instance)
{
	struct ata_host *host = dev_instance;
	unsigned int hc, handled = 0, n_hcs;
	void __iomem *mmio = host->iomap[MV_PRIMARY_BAR];
	u32 irq_stat, irq_mask;

	spin_lock(&host->lock);
	irq_stat = readl(mmio + HC_MAIN_IRQ_CAUSE_OFS);
	irq_mask = readl(mmio + HC_MAIN_IRQ_MASK_OFS);

	/* check the cases where we either have nothing pending or have read
	 * a bogus register value which can indicate HW removal or PCI fault
	 */
	if (!(irq_stat & irq_mask) || (0xffffffffU == irq_stat))
		goto out_unlock;

	n_hcs = mv_get_hc_count(host->ports[0]->flags);

	if (unlikely(irq_stat & PCI_ERR)) {
		mv_pci_error(host, mmio);
		handled = 1;
		goto out_unlock;	/* skip all other HC irq handling */
	}

	for (hc = 0; hc < n_hcs; hc++) {
		u32 relevant = irq_stat & (HC0_IRQ_PEND << (hc * HC_SHIFT));
		if (relevant) {
			mv_host_intr(host, relevant, hc);
			handled = 1;
		}
	}

out_unlock:
	spin_unlock(&host->lock);

	return IRQ_RETVAL(handled);
}
static void __iomem *mv5_phy_base(void __iomem *mmio, unsigned int port)
{
	void __iomem *hc_mmio = mv_hc_base_from_port(mmio, port);
	unsigned long ofs = (mv_hardport_from_port(port) + 1) * 0x100UL;

	return hc_mmio + ofs;
}
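/* Editor's note: on Gen-I parts the per-port PHY registers live in
 * 0x100-sized slots just above the HC base, so (illustratively) port 2,
 * i.e. hard port 2 of HC0, resolves to hc_mmio + (2 + 1) * 0x100 ==
 * hc_mmio + 0x300.
 */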
static unsigned int mv5_scr_offset(unsigned int sc_reg_in)
{
	unsigned int ofs;

	switch (sc_reg_in) {
	case SCR_STATUS:
	case SCR_ERROR:
	case SCR_CONTROL:
		ofs = sc_reg_in * sizeof(u32);
		break;
	default:
		ofs = 0xffffffffU;
		break;
	}
	return ofs;
}

static int mv5_scr_read(struct ata_port *ap, unsigned int sc_reg_in, u32 *val)
{
	void __iomem *mmio = ap->host->iomap[MV_PRIMARY_BAR];
	void __iomem *addr = mv5_phy_base(mmio, ap->port_no);
	unsigned int ofs = mv5_scr_offset(sc_reg_in);

	if (ofs != 0xffffffffU) {
		*val = readl(addr + ofs);
		return 0;
	} else
		return -EINVAL;
}

static int mv5_scr_write(struct ata_port *ap, unsigned int sc_reg_in, u32 val)
{
	void __iomem *mmio = ap->host->iomap[MV_PRIMARY_BAR];
	void __iomem *addr = mv5_phy_base(mmio, ap->port_no);
	unsigned int ofs = mv5_scr_offset(sc_reg_in);

	if (ofs != 0xffffffffU) {
		writelfl(val, addr + ofs);
		return 0;
	} else
		return -EINVAL;
}

static void mv5_reset_bus(struct pci_dev *pdev, void __iomem *mmio)
{
	int early_5080;

	early_5080 = (pdev->device == 0x5080) && (pdev->revision == 0);

	if (!early_5080) {
		u32 tmp = readl(mmio + MV_PCI_EXP_ROM_BAR_CTL);

		writel(tmp, mmio + MV_PCI_EXP_ROM_BAR_CTL);
	}

	mv_reset_pci_bus(pdev, mmio);
}

static void mv5_reset_flash(struct mv_host_priv *hpriv, void __iomem *mmio)
{
	writel(0x0fcfffff, mmio + MV_FLASH_CTL);
}

static void mv5_read_preamp(struct mv_host_priv *hpriv, int idx,
			    void __iomem *mmio)
{
	void __iomem *phy_mmio = mv5_phy_base(mmio, idx);
	u32 tmp;

	tmp = readl(phy_mmio + MV5_PHY_MODE);

	hpriv->signal[idx].pre = tmp & 0x1800;	/* bits 12:11 */
	hpriv->signal[idx].amps = tmp & 0xe0;	/* bits 7:5 */
}

static void mv5_enable_leds(struct mv_host_priv *hpriv, void __iomem *mmio)
{
	u32 tmp;

	writel(0, mmio + MV_GPIO_PORT_CTL);

	/* FIXME: handle MV_HP_ERRATA_50XXB2 errata */

	tmp = readl(mmio + MV_PCI_EXP_ROM_BAR_CTL);

	writel(tmp, mmio + MV_PCI_EXP_ROM_BAR_CTL);
}

static void mv5_phy_errata(struct mv_host_priv *hpriv, void __iomem *mmio,
			   unsigned int port)
{
	void __iomem *phy_mmio = mv5_phy_base(mmio, port);
	const u32 mask = (1<<12) | (1<<11) | (1<<7) | (1<<6) | (1<<5);
	u32 tmp;
	int fix_apm_sq = (hpriv->hp_flags & MV_HP_ERRATA_50XXB0);

	if (fix_apm_sq) {
		tmp = readl(phy_mmio + MV5_LT_MODE);

		writel(tmp, phy_mmio + MV5_LT_MODE);

		tmp = readl(phy_mmio + MV5_PHY_CTL);

		writel(tmp, phy_mmio + MV5_PHY_CTL);
	}

	tmp = readl(phy_mmio + MV5_PHY_MODE);

	tmp |= hpriv->signal[port].pre;
	tmp |= hpriv->signal[port].amps;
	writel(tmp, phy_mmio + MV5_PHY_MODE);
}
#define ZERO(reg) writel(0, port_mmio + (reg))
static void mv5_reset_hc_port(struct mv_host_priv *hpriv, void __iomem *mmio,
			      unsigned int port)
{
	void __iomem *port_mmio = mv_port_base(mmio, port);

	writelfl(EDMA_DS, port_mmio + EDMA_CMD_OFS);

	mv_channel_reset(hpriv, mmio, port);

	ZERO(0x028);	/* command */
	writel(0x11f, port_mmio + EDMA_CFG_OFS);
	ZERO(0x004);	/* timer */
	ZERO(0x008);	/* irq err cause */
	ZERO(0x00c);	/* irq err mask */
	ZERO(0x010);	/* rq bah */
	ZERO(0x014);	/* rq inp */
	ZERO(0x018);	/* rq outp */
	ZERO(0x01c);	/* respq bah */
	ZERO(0x024);	/* respq outp */
	ZERO(0x020);	/* respq inp */
	ZERO(0x02c);	/* test control */
	writel(0xbc, port_mmio + EDMA_IORDY_TMOUT);
}
#undef ZERO

#define ZERO(reg) writel(0, hc_mmio + (reg))
static void mv5_reset_one_hc(struct mv_host_priv *hpriv, void __iomem *mmio,
			     unsigned int hc)
{
	void __iomem *hc_mmio = mv_hc_base(mmio, hc);
	u32 tmp;

	tmp = readl(hc_mmio + 0x20);

	writel(tmp, hc_mmio + 0x20);
}
#undef ZERO

static int mv5_reset_hc(struct mv_host_priv *hpriv, void __iomem *mmio,
			unsigned int n_hc)
{
	unsigned int hc, port;

	for (hc = 0; hc < n_hc; hc++) {
		for (port = 0; port < MV_PORTS_PER_HC; port++)
			mv5_reset_hc_port(hpriv, mmio,
					  (hc * MV_PORTS_PER_HC) + port);

		mv5_reset_one_hc(hpriv, mmio, hc);
	}

	return 0;
}
#define ZERO(reg) writel(0, mmio + (reg))
static void mv_reset_pci_bus(struct pci_dev *pdev, void __iomem *mmio)
{
	struct ata_host *host = dev_get_drvdata(&pdev->dev);
	struct mv_host_priv *hpriv = host->private_data;
	u32 tmp;

	tmp = readl(mmio + MV_PCI_MODE);

	writel(tmp, mmio + MV_PCI_MODE);

	ZERO(MV_PCI_DISC_TIMER);
	ZERO(MV_PCI_MSI_TRIGGER);
	writel(0x000100ff, mmio + MV_PCI_XBAR_TMOUT);
	ZERO(HC_MAIN_IRQ_MASK_OFS);
	ZERO(MV_PCI_SERR_MASK);
	ZERO(hpriv->irq_cause_ofs);
	ZERO(hpriv->irq_mask_ofs);
	ZERO(MV_PCI_ERR_LOW_ADDRESS);
	ZERO(MV_PCI_ERR_HIGH_ADDRESS);
	ZERO(MV_PCI_ERR_ATTRIBUTE);
	ZERO(MV_PCI_ERR_COMMAND);
}
#undef ZERO

static void mv6_reset_flash(struct mv_host_priv *hpriv, void __iomem *mmio)
{
	u32 tmp;

	mv5_reset_flash(hpriv, mmio);

	tmp = readl(mmio + MV_GPIO_PORT_CTL);

	tmp |= (1 << 5) | (1 << 6);
	writel(tmp, mmio + MV_GPIO_PORT_CTL);
}

/**
 * mv6_reset_hc - Perform the 6xxx global soft reset
 * @mmio: base address of the HBA
 *
 * This routine only applies to 6xxx parts.
 *
 * LOCKING:
 * Inherited from caller.
 */
static int mv6_reset_hc(struct mv_host_priv *hpriv, void __iomem *mmio,
			unsigned int n_hc)
{
	void __iomem *reg = mmio + PCI_MAIN_CMD_STS_OFS;
	int i, rc = 0;
	u32 t;

	/* Following procedure defined in PCI "main command and status
	 * register" table.
	 */
	t = readl(reg);
	writel(t | STOP_PCI_MASTER, reg);

	for (i = 0; i < 1000; i++) {
		udelay(1);
		t = readl(reg);
		if (PCI_MASTER_EMPTY & t)
			break;
	}
	if (!(PCI_MASTER_EMPTY & t)) {
		printk(KERN_ERR DRV_NAME ": PCI master won't flush\n");
		rc = 1;
		goto done;
	}

	/* set reset */
	i = 5;
	do {
		writel(t | GLOB_SFT_RST, reg);
		t = readl(reg);
		udelay(1);
	} while (!(GLOB_SFT_RST & t) && (i-- > 0));

	if (!(GLOB_SFT_RST & t)) {
		printk(KERN_ERR DRV_NAME ": can't set global reset\n");
		rc = 1;
		goto done;
	}

	/* clear reset and *reenable the PCI master* (not mentioned in spec) */
	i = 5;
	do {
		writel(t & ~(GLOB_SFT_RST | STOP_PCI_MASTER), reg);
		t = readl(reg);
		udelay(1);
	} while ((GLOB_SFT_RST & t) && (i-- > 0));

	if (GLOB_SFT_RST & t) {
		printk(KERN_ERR DRV_NAME ": can't clear global reset\n");
		rc = 1;
	}
done:
	return rc;
}
static void mv6_read_preamp(struct mv_host_priv *hpriv, int idx,
			    void __iomem *mmio)
{
	void __iomem *port_mmio;
	u32 tmp;

	tmp = readl(mmio + MV_RESET_CFG);
	if ((tmp & (1 << 0)) == 0) {
		hpriv->signal[idx].amps = 0x7 << 8;
		hpriv->signal[idx].pre = 0x1 << 5;
		return;
	}

	port_mmio = mv_port_base(mmio, idx);
	tmp = readl(port_mmio + PHY_MODE2);

	hpriv->signal[idx].amps = tmp & 0x700;	/* bits 10:8 */
	hpriv->signal[idx].pre = tmp & 0xe0;	/* bits 7:5 */
}

static void mv6_enable_leds(struct mv_host_priv *hpriv, void __iomem *mmio)
{
	writel(0x00000060, mmio + MV_GPIO_PORT_CTL);
}

static void mv6_phy_errata(struct mv_host_priv *hpriv, void __iomem *mmio,
			   unsigned int port)
{
	void __iomem *port_mmio = mv_port_base(mmio, port);

	u32 hp_flags = hpriv->hp_flags;
	int fix_phy_mode2 =
		hp_flags & (MV_HP_ERRATA_60X1B2 | MV_HP_ERRATA_60X1C0);
	int fix_phy_mode4 =
		hp_flags & (MV_HP_ERRATA_60X1B2 | MV_HP_ERRATA_60X1C0);
	u32 m2, tmp;

	if (fix_phy_mode2) {
		m2 = readl(port_mmio + PHY_MODE2);

		writel(m2, port_mmio + PHY_MODE2);

		m2 = readl(port_mmio + PHY_MODE2);
		m2 &= ~((1 << 16) | (1 << 31));
		writel(m2, port_mmio + PHY_MODE2);
	}

	/* who knows what this magic does */
	tmp = readl(port_mmio + PHY_MODE3);

	writel(tmp, port_mmio + PHY_MODE3);

	if (fix_phy_mode4) {
		u32 m4;

		m4 = readl(port_mmio + PHY_MODE4);

		if (hp_flags & MV_HP_ERRATA_60X1B2)
			tmp = readl(port_mmio + 0x310);

		m4 = (m4 & ~(1 << 1)) | (1 << 0);

		writel(m4, port_mmio + PHY_MODE4);

		if (hp_flags & MV_HP_ERRATA_60X1B2)
			writel(tmp, port_mmio + 0x310);
	}

	/* Revert values of pre-emphasis and signal amps to the saved ones */
	m2 = readl(port_mmio + PHY_MODE2);

	m2 &= ~MV_M2_PREAMP_MASK;
	m2 |= hpriv->signal[port].amps;
	m2 |= hpriv->signal[port].pre;

	/* according to mvSata 3.6.1, some IIE values are fixed */
	if (IS_GEN_IIE(hpriv)) {

	}

	writel(m2, port_mmio + PHY_MODE2);
}

static void mv_channel_reset(struct mv_host_priv *hpriv, void __iomem *mmio,
			     unsigned int port_no)
{
	void __iomem *port_mmio = mv_port_base(mmio, port_no);

	writelfl(ATA_RST, port_mmio + EDMA_CMD_OFS);

	if (IS_GEN_II(hpriv)) {
		u32 ifctl = readl(port_mmio + SATA_INTERFACE_CTL);
		ifctl |= (1 << 7);		/* enable gen2i speed */
		ifctl = (ifctl & 0xfff) | 0x9b1000; /* from chip spec */
		writelfl(ifctl, port_mmio + SATA_INTERFACE_CTL);
	}

	udelay(25);		/* allow reset propagation */

	/* Spec never mentions clearing the bit.  Marvell's driver does
	 * clear the bit, however.
	 */
	writelfl(0, port_mmio + EDMA_CMD_OFS);

	hpriv->ops->phy_errata(hpriv, mmio, port_no);

	if (IS_GEN_I(hpriv))
		mdelay(1);
}
/**
 * mv_phy_reset - Perform eDMA reset followed by COMRESET
 * @ap: ATA channel to manipulate
 *
 * Part of this is taken from __sata_phy_reset and modified to
 * not sleep since this routine gets called from interrupt level.
 *
 * LOCKING:
 * Inherited from caller.  This is coded to be safe to call at
 * interrupt level, i.e. it does not sleep.
 */
2189 unsigned long deadline)
2191 struct mv_port_priv *pp = ap->private_data;
2192 struct mv_host_priv *hpriv = ap->host->private_data;
2193 void __iomem *port_mmio = mv_ap_base(ap);
2197 VPRINTK("ENTER, port %u, mmio 0x%p\n", ap->port_no, port_mmio);
2201 u32 sstatus, serror, scontrol;
2203 mv_scr_read(ap, SCR_STATUS, &sstatus);
2204 mv_scr_read(ap, SCR_ERROR, &serror);
2205 mv_scr_read(ap, SCR_CONTROL, &scontrol);
2206 DPRINTK("S-regs after ATA_RST: SStat 0x%08x SErr 0x%08x "
2207 "SCtrl 0x%08x\n", sstatus, serror, scontrol);
2211 /* Issue COMRESET via SControl */
2213 sata_scr_write_flush(&ap->link, SCR_CONTROL, 0x301);
2216 sata_scr_write_flush(&ap->link, SCR_CONTROL, 0x300);
2220 sata_scr_read(&ap->link, SCR_STATUS, &sstatus);
2221 if (((sstatus & 0x3) == 3) || ((sstatus & 0x3) == 0))
2225 } while (time_before(jiffies, deadline));
2227 /* work around errata */
2228 if (IS_GEN_II(hpriv) &&
2229 (sstatus != 0x0) && (sstatus != 0x113) && (sstatus != 0x123) &&
2231 goto comreset_retry;
2235 u32 sstatus, serror, scontrol;
2237 mv_scr_read(ap, SCR_STATUS, &sstatus);
2238 mv_scr_read(ap, SCR_ERROR, &serror);
2239 mv_scr_read(ap, SCR_CONTROL, &scontrol);
2240 DPRINTK("S-regs after PHY wake: SStat 0x%08x SErr 0x%08x "
2241 "SCtrl 0x%08x\n", sstatus, serror, scontrol);
2245 if (ata_link_offline(&ap->link)) {
2246 *class = ATA_DEV_NONE;
2250 /* even after SStatus reflects that device is ready,
2251 * it seems to take a while for link to be fully
2252 * established (and thus Status no longer 0x80/0x7F),
2253 * so we poll a bit for that, here.
2257 u8 drv_stat = ata_check_status(ap);
2258 if ((drv_stat != 0x80) && (drv_stat != 0x7f))
2263 if (time_after(jiffies, deadline))
2267 /* FIXME: if we passed the deadline, the following
2268 * code probably produces an invalid result
2271 /* finally, read device signature from TF registers */
2272 *class = ata_dev_try_classify(ap->link.device, 1, NULL);
2274 writelfl(0, port_mmio + EDMA_ERR_IRQ_CAUSE_OFS);
2276 WARN_ON(pp->pp_flags & MV_PP_FLAG_EDMA_EN);
static int mv_prereset(struct ata_link *link, unsigned long deadline)
{
	struct ata_port *ap = link->ap;
	struct mv_port_priv *pp	= ap->private_data;
	struct ata_eh_context *ehc = &link->eh_context;
	int rc;

	rc = mv_stop_dma(ap);
	if (rc)
		ehc->i.action |= ATA_EH_HARDRESET;

	if (!(pp->pp_flags & MV_PP_FLAG_HAD_A_RESET)) {
		pp->pp_flags |= MV_PP_FLAG_HAD_A_RESET;
		ehc->i.action |= ATA_EH_HARDRESET;
	}

	/* if we're about to do hardreset, nothing more to do */
	if (ehc->i.action & ATA_EH_HARDRESET)
		return 0;

	if (ata_link_online(link))
		rc = ata_wait_ready(ap, deadline);
	else
		rc = -ENODEV;

	return rc;
}
static int mv_hardreset(struct ata_link *link, unsigned int *class,
			unsigned long deadline)
{
	struct ata_port *ap = link->ap;
	struct mv_host_priv *hpriv = ap->host->private_data;
	void __iomem *mmio = ap->host->iomap[MV_PRIMARY_BAR];

	mv_stop_dma(ap);

	mv_channel_reset(hpriv, mmio, ap->port_no);

	mv_phy_reset(ap, class, deadline);

	return 0;
}
static void mv_postreset(struct ata_link *link, unsigned int *classes)
{
	struct ata_port *ap = link->ap;
	u32 serr;

	/* print link status */
	sata_print_link_status(link);

	/* clear SError */
	sata_scr_read(link, SCR_ERROR, &serr);
	sata_scr_write_flush(link, SCR_ERROR, serr);

	/* bail out if no device is present */
	if (classes[0] == ATA_DEV_NONE && classes[1] == ATA_DEV_NONE) {
		DPRINTK("EXIT, no device\n");
		return;
	}

	/* set up device control */
	iowrite8(ap->ctl, ap->ioaddr.ctl_addr);
}
static void mv_error_handler(struct ata_port *ap)
{
	ata_do_eh(ap, mv_prereset, ata_std_softreset,
		  mv_hardreset, mv_postreset);
}
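
/*
 * NOTE: ata_do_eh() runs the methods above in a fixed order: prereset
 * decides what kind of reset is needed, softreset/hardreset perform it,
 * and postreset cleans up.  Since mv_prereset forces ATA_EH_HARDRESET on
 * a port's first recovery (via MV_PP_FLAG_HAD_A_RESET), ata_std_softreset
 * is only reached on later, milder EH passes.
 */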
static void mv_post_int_cmd(struct ata_queued_cmd *qc)
{
	mv_stop_dma(qc->ap);
}
static void mv_eh_freeze(struct ata_port *ap)
{
	void __iomem *mmio = ap->host->iomap[MV_PRIMARY_BAR];
	unsigned int hc = (ap->port_no > 3) ? 1 : 0;
	u32 tmp, mask;
	unsigned int shift;

	/* FIXME: handle coalescing completion events properly */

	shift = ap->port_no * 2;
	if (hc > 0)
		shift++;

	mask = 0x3 << shift;

	/* disable assertion of portN err, done events */
	tmp = readl(mmio + HC_MAIN_IRQ_MASK_OFS);
	writelfl(tmp & ~mask, mmio + HC_MAIN_IRQ_MASK_OFS);
}
static void mv_eh_thaw(struct ata_port *ap)
{
	void __iomem *mmio = ap->host->iomap[MV_PRIMARY_BAR];
	unsigned int hc = (ap->port_no > 3) ? 1 : 0;
	void __iomem *hc_mmio = mv_hc_base(mmio, hc);
	void __iomem *port_mmio = mv_ap_base(ap);
	u32 tmp, mask, hc_irq_cause;
	unsigned int shift, hc_port_no = ap->port_no;

	/* FIXME: handle coalescing completion events properly */

	shift = ap->port_no * 2;
	if (hc > 0) {
		shift++;
		hc_port_no -= 4;
	}

	mask = 0x3 << shift;

	/* clear EDMA errors on this port */
	writel(0, port_mmio + EDMA_ERR_IRQ_CAUSE_OFS);

	/* clear pending irq events */
	hc_irq_cause = readl(hc_mmio + HC_IRQ_CAUSE_OFS);
	hc_irq_cause &= ~(1 << hc_port_no);	/* clear CRPB-done */
	hc_irq_cause &= ~(1 << (hc_port_no + 8)); /* clear Device int */
	writel(hc_irq_cause, hc_mmio + HC_IRQ_CAUSE_OFS);

	/* enable assertion of portN err, done events */
	tmp = readl(mmio + HC_MAIN_IRQ_MASK_OFS);
	writelfl(tmp | mask, mmio + HC_MAIN_IRQ_MASK_OFS);
}
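
/*
 * NOTE: mv_eh_freeze() and mv_eh_thaw() share the same mask arithmetic:
 * each port owns two adjacent bits (err and done) in HC_MAIN_IRQ_MASK_OFS,
 * and ports 4-7 sit one bit higher to skip the coalescing bit between the
 * two host controllers' fields.  Worked example for port 5:
 *
 *	shift = 5 * 2 = 10;  hc = 1, so shift++  ->  11
 *	mask  = 0x3 << 11    ->  0x1800
 *
 * freeze clears those two mask bits; thaw sets them again only after the
 * latched cause bits are cleared, so no stale event fires the moment the
 * port is unmasked.
 */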
/**
 *	mv_port_init - Perform some early initialization on a single port.
 *	@port: libata data structure storing shadow register addresses
 *	@port_mmio: base address of the port
 *
 *	Initialize shadow register mmio addresses, clear outstanding
 *	interrupts on the port, and unmask interrupts for the future
 *	start of the port.
 *
 *	LOCKING:
 *	Inherited from caller.
 */
static void mv_port_init(struct ata_ioports *port, void __iomem *port_mmio)
{
	void __iomem *shd_base = port_mmio + SHD_BLK_OFS;
	unsigned serr_ofs;

	/* PIO related setup
	 */
	port->data_addr = shd_base + (sizeof(u32) * ATA_REG_DATA);
	port->error_addr =
		port->feature_addr = shd_base + (sizeof(u32) * ATA_REG_ERR);
	port->nsect_addr = shd_base + (sizeof(u32) * ATA_REG_NSECT);
	port->lbal_addr = shd_base + (sizeof(u32) * ATA_REG_LBAL);
	port->lbam_addr = shd_base + (sizeof(u32) * ATA_REG_LBAM);
	port->lbah_addr = shd_base + (sizeof(u32) * ATA_REG_LBAH);
	port->device_addr = shd_base + (sizeof(u32) * ATA_REG_DEVICE);
	port->status_addr =
		port->command_addr = shd_base + (sizeof(u32) * ATA_REG_STATUS);
	/* special case: control/altstatus doesn't have ATA_REG_ address */
	port->altstatus_addr = port->ctl_addr = shd_base + SHD_CTL_AST_OFS;

	/* unused: */
	port->cmd_addr = port->bmdma_addr = port->scr_addr = NULL;

	/* Clear any currently outstanding port interrupt conditions */
	serr_ofs = mv_scr_offset(SCR_ERROR);
	writelfl(readl(port_mmio + serr_ofs), port_mmio + serr_ofs);
	writelfl(0, port_mmio + EDMA_ERR_IRQ_CAUSE_OFS);

	/* unmask all non-transient EDMA error interrupts */
	writelfl(~EDMA_ERR_IRQ_TRANSIENT, port_mmio + EDMA_ERR_IRQ_MASK_OFS);

	VPRINTK("EDMA cfg=0x%08x EDMA IRQ err cause/mask=0x%08x/0x%08x\n",
		readl(port_mmio + EDMA_CFG_OFS),
		readl(port_mmio + EDMA_ERR_IRQ_CAUSE_OFS),
		readl(port_mmio + EDMA_ERR_IRQ_MASK_OFS));
}
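
/*
 * NOTE: the shadow register block exposes the classic ATA taskfile as
 * 32-bit words, so each register lives at SHD_BLK_OFS + 4 * ATA_REG_<name>
 * rather than at the byte offsets used by legacy IDE ports.  For example,
 * with ATA_REG_LBAL == 3 in libata, the LBA-low shadow register ends up
 * at port_mmio + SHD_BLK_OFS + 0x0c.
 */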
static int mv_chip_id(struct ata_host *host, unsigned int board_idx)
{
	struct pci_dev *pdev = to_pci_dev(host->dev);
	struct mv_host_priv *hpriv = host->private_data;
	u32 hp_flags = hpriv->hp_flags;

	switch (board_idx) {
	case chip_5080:
		hpriv->ops = &mv5xxx_ops;
		hp_flags |= MV_HP_GEN_I;

		switch (pdev->revision) {
		case 0x1:
			hp_flags |= MV_HP_ERRATA_50XXB0;
			break;
		case 0x3:
			hp_flags |= MV_HP_ERRATA_50XXB2;
			break;
		default:
			dev_printk(KERN_WARNING, &pdev->dev,
				   "Applying 50XXB2 workarounds to unknown rev\n");
			hp_flags |= MV_HP_ERRATA_50XXB2;
			break;
		}
		break;

	case chip_504x:
	case chip_508x:
		hpriv->ops = &mv5xxx_ops;
		hp_flags |= MV_HP_GEN_I;

		switch (pdev->revision) {
		case 0x0:
			hp_flags |= MV_HP_ERRATA_50XXB0;
			break;
		case 0x3:
			hp_flags |= MV_HP_ERRATA_50XXB2;
			break;
		default:
			dev_printk(KERN_WARNING, &pdev->dev,
				   "Applying B2 workarounds to unknown rev\n");
			hp_flags |= MV_HP_ERRATA_50XXB2;
			break;
		}
		break;

	case chip_604x:
	case chip_608x:
		hpriv->ops = &mv6xxx_ops;
		hp_flags |= MV_HP_GEN_II;

		switch (pdev->revision) {
		case 0x7:
			hp_flags |= MV_HP_ERRATA_60X1B2;
			break;
		case 0x9:
			hp_flags |= MV_HP_ERRATA_60X1C0;
			break;
		default:
			dev_printk(KERN_WARNING, &pdev->dev,
				   "Applying B2 workarounds to unknown rev\n");
			hp_flags |= MV_HP_ERRATA_60X1B2;
			break;
		}
		break;

	case chip_7042:
		hp_flags |= MV_HP_PCIE;
		if (pdev->vendor == PCI_VENDOR_ID_TTI &&
		    (pdev->device == 0x2300 || pdev->device == 0x2310))
		{
			/*
			 * Highpoint RocketRAID PCIe 23xx series cards:
			 *
			 * Unconfigured drives are treated as "Legacy"
			 * by the BIOS, and it overwrites sector 8 with
			 * a "Lgcy" metadata block prior to Linux boot.
			 *
			 * Configured drives (RAID or JBOD) leave sector 8
			 * alone, but instead overwrite a high numbered
			 * sector for the RAID metadata.  This sector can
			 * be determined exactly, by truncating the physical
			 * drive capacity to a nice even GB value.
			 *
			 * RAID metadata is at: (dev->n_sectors & ~0xfffff)
			 *
			 * Warn the user, lest they think we're just buggy.
			 */
			printk(KERN_WARNING DRV_NAME ": Highpoint RocketRAID"
				" BIOS CORRUPTS DATA on all attached drives,"
				" regardless of if/how they are configured."
				" BEWARE!\n");
			printk(KERN_WARNING DRV_NAME ": For data safety, do not"
				" use sectors 8-9 on \"Legacy\" drives,"
				" and avoid the final two gigabytes on"
				" all RocketRAID BIOS initialized drives.\n");
		}
		/* fall through: the 7042 is otherwise handled as a 6042 */
	case chip_6042:
		hpriv->ops = &mv6xxx_ops;
		hp_flags |= MV_HP_GEN_IIE;

		switch (pdev->revision) {
		case 0x0:
			hp_flags |= MV_HP_ERRATA_XX42A0;
			break;
		case 0x1:
			hp_flags |= MV_HP_ERRATA_60X1C0;
			break;
		default:
			dev_printk(KERN_WARNING, &pdev->dev,
				   "Applying 60X1C0 workarounds to unknown rev\n");
			hp_flags |= MV_HP_ERRATA_60X1C0;
			break;
		}
		break;

	default:
		dev_printk(KERN_ERR, &pdev->dev,
			   "BUG: invalid board index %u\n", board_idx);
		return 1;
	}

	hpriv->hp_flags = hp_flags;
	if (hp_flags & MV_HP_PCIE) {
		hpriv->irq_cause_ofs = PCIE_IRQ_CAUSE_OFS;
		hpriv->irq_mask_ofs = PCIE_IRQ_MASK_OFS;
		hpriv->unmask_all_irqs = PCIE_UNMASK_ALL_IRQS;
	} else {
		hpriv->irq_cause_ofs = PCI_IRQ_CAUSE_OFS;
		hpriv->irq_mask_ofs = PCI_IRQ_MASK_OFS;
		hpriv->unmask_all_irqs = PCI_UNMASK_ALL_IRQS;
	}

	return 0;
}
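
/*
 * NOTE: board_idx arrives here from the driver_data field of the matching
 * mv_pci_tbl entry, and everything chosen above (hpriv->ops, the MV_HP_*
 * generation and errata flags, the IRQ cause/mask offsets) is consumed
 * later through IS_GEN_*() and the per-chip ops.  As an illustrative
 * example, a 6042 at revision 0x0 would leave this function with
 * (MV_HP_GEN_IIE | MV_HP_ERRATA_XX42A0) set in hp_flags.
 */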
/**
 *	mv_init_host - Perform some early initialization of the host.
 *	@host: ATA host to initialize
 *	@board_idx: controller index
 *
 *	If possible, do an early global reset of the host.  Then do
 *	our port init and clear/unmask all/relevant host interrupts.
 *
 *	LOCKING:
 *	Inherited from caller.
 */
static int mv_init_host(struct ata_host *host, unsigned int board_idx)
{
	int rc = 0, n_hc, port, hc;
	struct pci_dev *pdev = to_pci_dev(host->dev);
	void __iomem *mmio = host->iomap[MV_PRIMARY_BAR];
	struct mv_host_priv *hpriv = host->private_data;

	/* global interrupt mask */
	writel(0, mmio + HC_MAIN_IRQ_MASK_OFS);

	rc = mv_chip_id(host, board_idx);
	if (rc)
		goto done;

	n_hc = mv_get_hc_count(host->ports[0]->flags);

	for (port = 0; port < host->n_ports; port++)
		hpriv->ops->read_preamp(hpriv, port, mmio);

	rc = hpriv->ops->reset_hc(hpriv, mmio, n_hc);
	if (rc)
		goto done;

	hpriv->ops->reset_flash(hpriv, mmio);
	hpriv->ops->reset_bus(pdev, mmio);
	hpriv->ops->enable_leds(hpriv, mmio);

	for (port = 0; port < host->n_ports; port++) {
		if (IS_GEN_II(hpriv)) {
			void __iomem *port_mmio = mv_port_base(mmio, port);

			u32 ifctl = readl(port_mmio + SATA_INTERFACE_CTL);
			ifctl |= (1 << 7);		/* enable gen2i speed */
			ifctl = (ifctl & 0xfff) | 0x9b1000; /* from chip spec */
			writelfl(ifctl, port_mmio + SATA_INTERFACE_CTL);
		}

		hpriv->ops->phy_errata(hpriv, mmio, port);
	}

	for (port = 0; port < host->n_ports; port++) {
		struct ata_port *ap = host->ports[port];
		void __iomem *port_mmio = mv_port_base(mmio, port);
		unsigned int offset = port_mmio - mmio;

		mv_port_init(&ap->ioaddr, port_mmio);

		ata_port_pbar_desc(ap, MV_PRIMARY_BAR, -1, "mmio");
		ata_port_pbar_desc(ap, MV_PRIMARY_BAR, offset, "port");
	}

	for (hc = 0; hc < n_hc; hc++) {
		void __iomem *hc_mmio = mv_hc_base(mmio, hc);

		VPRINTK("HC%i: HC config=0x%08x HC IRQ cause "
			"(before clear)=0x%08x\n", hc,
			readl(hc_mmio + HC_CFG_OFS),
			readl(hc_mmio + HC_IRQ_CAUSE_OFS));

		/* Clear any currently outstanding hc interrupt conditions */
		writelfl(0, hc_mmio + HC_IRQ_CAUSE_OFS);
	}

	/* Clear any currently outstanding host interrupt conditions */
	writelfl(0, mmio + hpriv->irq_cause_ofs);

	/* and unmask interrupt generation for host regs */
	writelfl(hpriv->unmask_all_irqs, mmio + hpriv->irq_mask_ofs);

	if (IS_GEN_I(hpriv))
		writelfl(~HC_MAIN_MASKED_IRQS_5, mmio + HC_MAIN_IRQ_MASK_OFS);
	else
		writelfl(~HC_MAIN_MASKED_IRQS, mmio + HC_MAIN_IRQ_MASK_OFS);

	VPRINTK("HC MAIN IRQ cause/mask=0x%08x/0x%08x "
		"PCI int cause/mask=0x%08x/0x%08x\n",
		readl(mmio + HC_MAIN_IRQ_CAUSE_OFS),
		readl(mmio + HC_MAIN_IRQ_MASK_OFS),
		readl(mmio + hpriv->irq_cause_ofs),
		readl(mmio + hpriv->irq_mask_ofs));

done:
	return rc;
}
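
/*
 * NOTE: the ordering above matters: mask every interrupt first, identify
 * the chip so the right ops and errata flags are in place, sample the
 * per-port PHY signal values ahead of the global reset_hc(), and only
 * unmask interrupts once every port and host controller has had its
 * cause registers cleared.
 */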
/**
 *	mv_print_info - Dump key info to kernel log for perusal.
 *	@host: ATA host to print info about
 *
 *	FIXME: complete this.
 *
 *	LOCKING:
 *	Inherited from caller.
 */
static void mv_print_info(struct ata_host *host)
{
	struct pci_dev *pdev = to_pci_dev(host->dev);
	struct mv_host_priv *hpriv = host->private_data;
	u8 scc;
	const char *scc_s, *gen;

	/* Read the SCSI class code from PCI config space so we can report
	 * whether the chip presents itself as a SCSI or a RAID controller.
	 */
	pci_read_config_byte(pdev, PCI_CLASS_DEVICE, &scc);
	if (scc == 0)
		scc_s = "SCSI";
	else if (scc == 0x01)
		scc_s = "RAID";
	else
		scc_s = "?";

	if (IS_GEN_I(hpriv))
		gen = "I";
	else if (IS_GEN_II(hpriv))
		gen = "II";
	else if (IS_GEN_IIE(hpriv))
		gen = "IIE";
	else
		gen = "?";

	dev_printk(KERN_INFO, &pdev->dev,
		   "Gen-%s %u slots %u ports %s mode IRQ via %s\n",
		   gen, (unsigned)MV_MAX_Q_DEPTH, host->n_ports,
		   scc_s, (MV_HP_FLAG_MSI & hpriv->hp_flags) ? "MSI" : "INTx");
}
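
/*
 * NOTE: on a typical Gen-II adapter in SCSI mode without MSI, the
 * dev_printk() above would emit something like the following
 * (illustrative only, assuming MV_MAX_Q_DEPTH is 32 as defined earlier
 * in this file):
 *
 *	sata_mv 0000:02:00.0: Gen-II 32 slots 4 ports SCSI mode IRQ via INTx
 */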
/**
 *	mv_init_one - handle a positive probe of a Marvell host
 *	@pdev: PCI device found
 *	@ent: PCI device ID entry for the matched host
 *
 *	LOCKING:
 *	Inherited from caller.
 */
static int mv_init_one(struct pci_dev *pdev, const struct pci_device_id *ent)
{
	static int printed_version;
	unsigned int board_idx = (unsigned int)ent->driver_data;
	const struct ata_port_info *ppi[] = { &mv_port_info[board_idx], NULL };
	struct ata_host *host;
	struct mv_host_priv *hpriv;
	int n_ports, rc;

	if (!printed_version++)
		dev_printk(KERN_INFO, &pdev->dev, "version " DRV_VERSION "\n");

	/* allocate host */
	n_ports = mv_get_hc_count(ppi[0]->flags) * MV_PORTS_PER_HC;

	host = ata_host_alloc_pinfo(&pdev->dev, ppi, n_ports);
	hpriv = devm_kzalloc(&pdev->dev, sizeof(*hpriv), GFP_KERNEL);
	if (!host || !hpriv)
		return -ENOMEM;
	host->private_data = hpriv;

	/* acquire resources */
	rc = pcim_enable_device(pdev);
	if (rc)
		return rc;

	rc = pcim_iomap_regions(pdev, 1 << MV_PRIMARY_BAR, DRV_NAME);
	if (rc == -EBUSY)
		pcim_pin_device(pdev);
	if (rc)
		return rc;
	host->iomap = pcim_iomap_table(pdev);

	rc = pci_go_64(pdev);
	if (rc)
		return rc;

	/* initialize adapter */
	rc = mv_init_host(host, board_idx);
	if (rc)
		return rc;

	/* Enable interrupts */
	if (msi && pci_enable_msi(pdev))
		pci_intx(pdev, 1);

	mv_dump_pci_cfg(pdev, 0x68);
	mv_print_info(host);

	pci_set_master(pdev);
	pci_try_set_mwi(pdev);
	return ata_host_activate(host, pdev->irq, mv_interrupt, IRQF_SHARED,
				 IS_GEN_I(hpriv) ? &mv5_sht : &mv6_sht);
}
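
/*
 * NOTE: with the probe path above, the driver binds to every ID in
 * mv_pci_tbl at module load.  MSI is opt-in through the module parameter
 * declared below, e.g. (illustrative):
 *
 *	modprobe sata_mv msi=1
 */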
static int __init mv_init(void)
{
	return pci_register_driver(&mv_pci_driver);
}

static void __exit mv_exit(void)
{
	pci_unregister_driver(&mv_pci_driver);
}

MODULE_AUTHOR("Brett Russ");
MODULE_DESCRIPTION("SCSI low-level driver for Marvell SATA controllers");
MODULE_LICENSE("GPL");
MODULE_DEVICE_TABLE(pci, mv_pci_tbl);
MODULE_VERSION(DRV_VERSION);

module_param(msi, int, 0444);
MODULE_PARM_DESC(msi, "Enable use of PCI MSI (0=off, 1=on)");

module_init(mv_init);
module_exit(mv_exit);