/*
 *  sata_nv.c - NVIDIA nForce SATA
 *
 *  Copyright 2004 NVIDIA Corp.  All rights reserved.
 *  Copyright 2004 Andrew Chew
 *
 *  This program is free software; you can redistribute it and/or modify
 *  it under the terms of the GNU General Public License as published by
 *  the Free Software Foundation; either version 2, or (at your option)
 *  any later version.
 *
 *  This program is distributed in the hope that it will be useful,
 *  but WITHOUT ANY WARRANTY; without even the implied warranty of
 *  MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 *  GNU General Public License for more details.
 *
 *  You should have received a copy of the GNU General Public License
 *  along with this program; see the file COPYING.  If not, write to
 *  the Free Software Foundation, 675 Mass Ave, Cambridge, MA 02139, USA.
 *
 *  libata documentation is available via 'make {ps|pdf}docs',
 *  as Documentation/DocBook/libata.*
 *
 *  No hardware documentation available outside of NVIDIA.
 *  This driver programs the NVIDIA SATA controller in a similar
 *  fashion as with other PCI IDE BMDMA controllers, with a few
 *  NV-specific details such as register offsets, SATA phy location,
 *  etc.
 */
34 #include <linux/config.h>
35 #include <linux/kernel.h>
36 #include <linux/module.h>
37 #include <linux/pci.h>
38 #include <linux/init.h>
39 #include <linux/blkdev.h>
40 #include <linux/delay.h>
41 #include <linux/interrupt.h>
42 #include <linux/device.h>
43 #include <scsi/scsi_host.h>
44 #include <linux/libata.h>
/* Driver identity used for PCI region ownership and log messages. */
#define DRV_NAME			"sata_nv"
#define DRV_VERSION			"0.9"
enum {
	NV_PORTS			= 2,	/* every supported chip exposes two SATA ports */
	NV_PIO_MASK			= 0x1f,
	NV_MWDMA_MASK			= 0x07,
	NV_UDMA_MASK			= 0x7f,
	NV_PORT0_SCR_REG_OFFSET		= 0x00,
	NV_PORT1_SCR_REG_OFFSET		= 0x40,

	/* INT_STATUS/ENABLE */
	NV_INT_STATUS			= 0x10,
	NV_INT_ENABLE			= 0x11,
	NV_INT_STATUS_CK804		= 0x440,
	NV_INT_ENABLE_CK804		= 0x441,

	/* INT_STATUS/ENABLE bits */
	NV_INT_DEV			= 0x01,
	NV_INT_PM			= 0x02,
	NV_INT_ADDED			= 0x04,
	NV_INT_REMOVED			= 0x08,

	NV_INT_PORT_SHIFT		= 4,	/* each port occupies 4 bits */

	NV_INT_ALL			= 0x0f,
	NV_INT_MASK			= NV_INT_DEV,	/* only device interrupts are unmasked on thaw */

	/* INT_CONFIG */
	NV_INT_CONFIG			= 0x12,
	NV_INT_CONFIG_METHD		= 0x01, // 0 = INT, 1 = SMI

	// For PCI config register 20
	NV_MCP_SATA_CFG_20		= 0x50,
	NV_MCP_SATA_CFG_20_SATA_SPACE_EN = 0x04,
};
83 static int nv_init_one (struct pci_dev *pdev, const struct pci_device_id *ent);
84 static void nv_ck804_host_stop(struct ata_host_set *host_set);
85 static irqreturn_t nv_generic_interrupt(int irq, void *dev_instance,
86 struct pt_regs *regs);
87 static irqreturn_t nv_nf2_interrupt(int irq, void *dev_instance,
88 struct pt_regs *regs);
89 static irqreturn_t nv_ck804_interrupt(int irq, void *dev_instance,
90 struct pt_regs *regs);
91 static u32 nv_scr_read (struct ata_port *ap, unsigned int sc_reg);
92 static void nv_scr_write (struct ata_port *ap, unsigned int sc_reg, u32 val);
94 static void nv_nf2_freeze(struct ata_port *ap);
95 static void nv_nf2_thaw(struct ata_port *ap);
96 static void nv_ck804_freeze(struct ata_port *ap);
97 static void nv_ck804_thaw(struct ata_port *ap);
98 static void nv_error_handler(struct ata_port *ap);
/* Controller flavors; used as driver_data in nv_pci_tbl and as the
 * index into nv_port_info[].
 */
enum nv_host_type
{
	GENERIC,
	NFORCE2,
	NFORCE3 = NFORCE2,	/* NF2 == NF3 as far as sata_nv is concerned */
	CK804
};
108 static const struct pci_device_id nv_pci_tbl[] = {
109 { PCI_VENDOR_ID_NVIDIA, PCI_DEVICE_ID_NVIDIA_NFORCE2S_SATA,
110 PCI_ANY_ID, PCI_ANY_ID, 0, 0, NFORCE2 },
111 { PCI_VENDOR_ID_NVIDIA, PCI_DEVICE_ID_NVIDIA_NFORCE3S_SATA,
112 PCI_ANY_ID, PCI_ANY_ID, 0, 0, NFORCE3 },
113 { PCI_VENDOR_ID_NVIDIA, PCI_DEVICE_ID_NVIDIA_NFORCE3S_SATA2,
114 PCI_ANY_ID, PCI_ANY_ID, 0, 0, NFORCE3 },
115 { PCI_VENDOR_ID_NVIDIA, PCI_DEVICE_ID_NVIDIA_NFORCE_CK804_SATA,
116 PCI_ANY_ID, PCI_ANY_ID, 0, 0, CK804 },
117 { PCI_VENDOR_ID_NVIDIA, PCI_DEVICE_ID_NVIDIA_NFORCE_CK804_SATA2,
118 PCI_ANY_ID, PCI_ANY_ID, 0, 0, CK804 },
119 { PCI_VENDOR_ID_NVIDIA, PCI_DEVICE_ID_NVIDIA_NFORCE_MCP04_SATA,
120 PCI_ANY_ID, PCI_ANY_ID, 0, 0, CK804 },
121 { PCI_VENDOR_ID_NVIDIA, PCI_DEVICE_ID_NVIDIA_NFORCE_MCP04_SATA2,
122 PCI_ANY_ID, PCI_ANY_ID, 0, 0, CK804 },
123 { PCI_VENDOR_ID_NVIDIA, PCI_DEVICE_ID_NVIDIA_NFORCE_MCP51_SATA,
124 PCI_ANY_ID, PCI_ANY_ID, 0, 0, GENERIC },
125 { PCI_VENDOR_ID_NVIDIA, PCI_DEVICE_ID_NVIDIA_NFORCE_MCP51_SATA2,
126 PCI_ANY_ID, PCI_ANY_ID, 0, 0, GENERIC },
127 { PCI_VENDOR_ID_NVIDIA, PCI_DEVICE_ID_NVIDIA_NFORCE_MCP55_SATA,
128 PCI_ANY_ID, PCI_ANY_ID, 0, 0, GENERIC },
129 { PCI_VENDOR_ID_NVIDIA, PCI_DEVICE_ID_NVIDIA_NFORCE_MCP55_SATA2,
130 PCI_ANY_ID, PCI_ANY_ID, 0, 0, GENERIC },
131 { PCI_VENDOR_ID_NVIDIA, PCI_DEVICE_ID_NVIDIA_NFORCE_MCP61_SATA,
132 PCI_ANY_ID, PCI_ANY_ID, 0, 0, GENERIC },
133 { PCI_VENDOR_ID_NVIDIA, PCI_DEVICE_ID_NVIDIA_NFORCE_MCP61_SATA2,
134 PCI_ANY_ID, PCI_ANY_ID, 0, 0, GENERIC },
135 { PCI_VENDOR_ID_NVIDIA, PCI_DEVICE_ID_NVIDIA_NFORCE_MCP61_SATA3,
136 PCI_ANY_ID, PCI_ANY_ID, 0, 0, GENERIC },
137 { PCI_VENDOR_ID_NVIDIA, PCI_ANY_ID,
138 PCI_ANY_ID, PCI_ANY_ID,
139 PCI_CLASS_STORAGE_IDE<<8, 0xffff00, GENERIC },
140 { PCI_VENDOR_ID_NVIDIA, PCI_ANY_ID,
141 PCI_ANY_ID, PCI_ANY_ID,
142 PCI_CLASS_STORAGE_RAID<<8, 0xffff00, GENERIC },
143 { 0, } /* terminate list */
146 static struct pci_driver nv_pci_driver = {
148 .id_table = nv_pci_tbl,
149 .probe = nv_init_one,
150 .remove = ata_pci_remove_one,
153 static struct scsi_host_template nv_sht = {
154 .module = THIS_MODULE,
156 .ioctl = ata_scsi_ioctl,
157 .queuecommand = ata_scsi_queuecmd,
158 .can_queue = ATA_DEF_QUEUE,
159 .this_id = ATA_SHT_THIS_ID,
160 .sg_tablesize = LIBATA_MAX_PRD,
161 .cmd_per_lun = ATA_SHT_CMD_PER_LUN,
162 .emulated = ATA_SHT_EMULATED,
163 .use_clustering = ATA_SHT_USE_CLUSTERING,
164 .proc_name = DRV_NAME,
165 .dma_boundary = ATA_DMA_BOUNDARY,
166 .slave_configure = ata_scsi_slave_config,
167 .slave_destroy = ata_scsi_slave_destroy,
168 .bios_param = ata_std_bios_param,
171 static const struct ata_port_operations nv_generic_ops = {
172 .port_disable = ata_port_disable,
173 .tf_load = ata_tf_load,
174 .tf_read = ata_tf_read,
175 .exec_command = ata_exec_command,
176 .check_status = ata_check_status,
177 .dev_select = ata_std_dev_select,
178 .bmdma_setup = ata_bmdma_setup,
179 .bmdma_start = ata_bmdma_start,
180 .bmdma_stop = ata_bmdma_stop,
181 .bmdma_status = ata_bmdma_status,
182 .qc_prep = ata_qc_prep,
183 .qc_issue = ata_qc_issue_prot,
184 .freeze = ata_bmdma_freeze,
185 .thaw = ata_bmdma_thaw,
186 .error_handler = nv_error_handler,
187 .post_internal_cmd = ata_bmdma_post_internal_cmd,
188 .data_xfer = ata_pio_data_xfer,
189 .irq_handler = nv_generic_interrupt,
190 .irq_clear = ata_bmdma_irq_clear,
191 .scr_read = nv_scr_read,
192 .scr_write = nv_scr_write,
193 .port_start = ata_port_start,
194 .port_stop = ata_port_stop,
195 .host_stop = ata_pci_host_stop,
198 static const struct ata_port_operations nv_nf2_ops = {
199 .port_disable = ata_port_disable,
200 .tf_load = ata_tf_load,
201 .tf_read = ata_tf_read,
202 .exec_command = ata_exec_command,
203 .check_status = ata_check_status,
204 .dev_select = ata_std_dev_select,
205 .bmdma_setup = ata_bmdma_setup,
206 .bmdma_start = ata_bmdma_start,
207 .bmdma_stop = ata_bmdma_stop,
208 .bmdma_status = ata_bmdma_status,
209 .qc_prep = ata_qc_prep,
210 .qc_issue = ata_qc_issue_prot,
211 .freeze = nv_nf2_freeze,
213 .error_handler = nv_error_handler,
214 .post_internal_cmd = ata_bmdma_post_internal_cmd,
215 .data_xfer = ata_pio_data_xfer,
216 .irq_handler = nv_nf2_interrupt,
217 .irq_clear = ata_bmdma_irq_clear,
218 .scr_read = nv_scr_read,
219 .scr_write = nv_scr_write,
220 .port_start = ata_port_start,
221 .port_stop = ata_port_stop,
222 .host_stop = ata_pci_host_stop,
225 static const struct ata_port_operations nv_ck804_ops = {
226 .port_disable = ata_port_disable,
227 .tf_load = ata_tf_load,
228 .tf_read = ata_tf_read,
229 .exec_command = ata_exec_command,
230 .check_status = ata_check_status,
231 .dev_select = ata_std_dev_select,
232 .bmdma_setup = ata_bmdma_setup,
233 .bmdma_start = ata_bmdma_start,
234 .bmdma_stop = ata_bmdma_stop,
235 .bmdma_status = ata_bmdma_status,
236 .qc_prep = ata_qc_prep,
237 .qc_issue = ata_qc_issue_prot,
238 .freeze = nv_ck804_freeze,
239 .thaw = nv_ck804_thaw,
240 .error_handler = nv_error_handler,
241 .post_internal_cmd = ata_bmdma_post_internal_cmd,
242 .data_xfer = ata_pio_data_xfer,
243 .irq_handler = nv_ck804_interrupt,
244 .irq_clear = ata_bmdma_irq_clear,
245 .scr_read = nv_scr_read,
246 .scr_write = nv_scr_write,
247 .port_start = ata_port_start,
248 .port_stop = ata_port_stop,
249 .host_stop = nv_ck804_host_stop,
252 static struct ata_port_info nv_port_info[] = {
256 .host_flags = ATA_FLAG_SATA | ATA_FLAG_NO_LEGACY,
257 .pio_mask = NV_PIO_MASK,
258 .mwdma_mask = NV_MWDMA_MASK,
259 .udma_mask = NV_UDMA_MASK,
260 .port_ops = &nv_generic_ops,
265 .host_flags = ATA_FLAG_SATA | ATA_FLAG_NO_LEGACY,
266 .pio_mask = NV_PIO_MASK,
267 .mwdma_mask = NV_MWDMA_MASK,
268 .udma_mask = NV_UDMA_MASK,
269 .port_ops = &nv_nf2_ops,
274 .host_flags = ATA_FLAG_SATA | ATA_FLAG_NO_LEGACY,
275 .pio_mask = NV_PIO_MASK,
276 .mwdma_mask = NV_MWDMA_MASK,
277 .udma_mask = NV_UDMA_MASK,
278 .port_ops = &nv_ck804_ops,
282 MODULE_AUTHOR("NVIDIA");
283 MODULE_DESCRIPTION("low-level driver for NVIDIA nForce SATA controller");
284 MODULE_LICENSE("GPL");
285 MODULE_DEVICE_TABLE(pci, nv_pci_tbl);
286 MODULE_VERSION(DRV_VERSION);
288 static irqreturn_t nv_generic_interrupt(int irq, void *dev_instance,
289 struct pt_regs *regs)
291 struct ata_host_set *host_set = dev_instance;
293 unsigned int handled = 0;
296 spin_lock_irqsave(&host_set->lock, flags);
298 for (i = 0; i < host_set->n_ports; i++) {
301 ap = host_set->ports[i];
303 !(ap->flags & ATA_FLAG_DISABLED)) {
304 struct ata_queued_cmd *qc;
306 qc = ata_qc_from_tag(ap, ap->active_tag);
307 if (qc && (!(qc->tf.flags & ATA_TFLAG_POLLING)))
308 handled += ata_host_intr(ap, qc);
310 // No request pending? Clear interrupt status
311 // anyway, in case there's one pending.
312 ap->ops->check_status(ap);
317 spin_unlock_irqrestore(&host_set->lock, flags);
319 return IRQ_RETVAL(handled);
322 static int nv_host_intr(struct ata_port *ap, u8 irq_stat)
324 struct ata_queued_cmd *qc = ata_qc_from_tag(ap, ap->active_tag);
327 /* bail out if not our interrupt */
328 if (!(irq_stat & NV_INT_DEV))
331 /* DEV interrupt w/ no active qc? */
332 if (unlikely(!qc || (qc->tf.flags & ATA_TFLAG_POLLING))) {
333 ata_check_status(ap);
337 /* handle interrupt */
338 handled = ata_host_intr(ap, qc);
339 if (unlikely(!handled)) {
340 /* spurious, clear it */
341 ata_check_status(ap);
347 static irqreturn_t nv_do_interrupt(struct ata_host_set *host_set, u8 irq_stat)
351 for (i = 0; i < host_set->n_ports; i++) {
352 struct ata_port *ap = host_set->ports[i];
354 if (ap && !(ap->flags & ATA_FLAG_DISABLED))
355 handled += nv_host_intr(ap, irq_stat);
357 irq_stat >>= NV_INT_PORT_SHIFT;
360 return IRQ_RETVAL(handled);
363 static irqreturn_t nv_nf2_interrupt(int irq, void *dev_instance,
364 struct pt_regs *regs)
366 struct ata_host_set *host_set = dev_instance;
371 spin_lock_irqsave(&host_set->lock, flags);
372 irq_stat = inb(host_set->ports[0]->ioaddr.scr_addr + NV_INT_STATUS);
373 ret = nv_do_interrupt(host_set, irq_stat);
374 spin_unlock_irqrestore(&host_set->lock, flags);
379 static irqreturn_t nv_ck804_interrupt(int irq, void *dev_instance,
380 struct pt_regs *regs)
382 struct ata_host_set *host_set = dev_instance;
387 spin_lock_irqsave(&host_set->lock, flags);
388 irq_stat = readb(host_set->mmio_base + NV_INT_STATUS_CK804);
389 ret = nv_do_interrupt(host_set, irq_stat);
390 spin_unlock_irqrestore(&host_set->lock, flags);
395 static u32 nv_scr_read (struct ata_port *ap, unsigned int sc_reg)
397 if (sc_reg > SCR_CONTROL)
400 return ioread32((void __iomem *)ap->ioaddr.scr_addr + (sc_reg * 4));
403 static void nv_scr_write (struct ata_port *ap, unsigned int sc_reg, u32 val)
405 if (sc_reg > SCR_CONTROL)
408 iowrite32(val, (void __iomem *)ap->ioaddr.scr_addr + (sc_reg * 4));
411 static void nv_nf2_freeze(struct ata_port *ap)
413 unsigned long scr_addr = ap->host_set->ports[0]->ioaddr.scr_addr;
414 int shift = ap->port_no * NV_INT_PORT_SHIFT;
417 mask = inb(scr_addr + NV_INT_ENABLE);
418 mask &= ~(NV_INT_ALL << shift);
419 outb(mask, scr_addr + NV_INT_ENABLE);
422 static void nv_nf2_thaw(struct ata_port *ap)
424 unsigned long scr_addr = ap->host_set->ports[0]->ioaddr.scr_addr;
425 int shift = ap->port_no * NV_INT_PORT_SHIFT;
428 outb(NV_INT_ALL << shift, scr_addr + NV_INT_STATUS);
430 mask = inb(scr_addr + NV_INT_ENABLE);
431 mask |= (NV_INT_MASK << shift);
432 outb(mask, scr_addr + NV_INT_ENABLE);
435 static void nv_ck804_freeze(struct ata_port *ap)
437 void __iomem *mmio_base = ap->host_set->mmio_base;
438 int shift = ap->port_no * NV_INT_PORT_SHIFT;
441 mask = readb(mmio_base + NV_INT_ENABLE_CK804);
442 mask &= ~(NV_INT_ALL << shift);
443 writeb(mask, mmio_base + NV_INT_ENABLE_CK804);
446 static void nv_ck804_thaw(struct ata_port *ap)
448 void __iomem *mmio_base = ap->host_set->mmio_base;
449 int shift = ap->port_no * NV_INT_PORT_SHIFT;
452 writeb(NV_INT_ALL << shift, mmio_base + NV_INT_STATUS_CK804);
454 mask = readb(mmio_base + NV_INT_ENABLE_CK804);
455 mask |= (NV_INT_MASK << shift);
456 writeb(mask, mmio_base + NV_INT_ENABLE_CK804);
static int nv_hardreset(struct ata_port *ap, unsigned int *class)
{
	unsigned int dummy;

	/* SATA hardreset fails to retrieve proper device signature on
	 * some controllers.  Don't classify on hardreset.  For more
	 * info, see http://bugme.osdl.org/show_bug.cgi?id=3352
	 * (@class is deliberately ignored; a throwaway is passed instead.)
	 */
	return sata_std_hardreset(ap, &dummy);
}
470 static void nv_error_handler(struct ata_port *ap)
472 ata_bmdma_drive_eh(ap, ata_std_prereset, ata_std_softreset,
473 nv_hardreset, ata_std_postreset);
476 static int nv_init_one (struct pci_dev *pdev, const struct pci_device_id *ent)
478 static int printed_version = 0;
479 struct ata_port_info *ppi;
480 struct ata_probe_ent *probe_ent;
481 int pci_dev_busy = 0;
486 // Make sure this is a SATA controller by counting the number of bars
487 // (NVIDIA SATA controllers will always have six bars). Otherwise,
488 // it's an IDE controller and we ignore it.
489 for (bar=0; bar<6; bar++)
490 if (pci_resource_start(pdev, bar) == 0)
493 if (!printed_version++)
494 dev_printk(KERN_DEBUG, &pdev->dev, "version " DRV_VERSION "\n");
496 rc = pci_enable_device(pdev);
500 rc = pci_request_regions(pdev, DRV_NAME);
503 goto err_out_disable;
506 rc = pci_set_dma_mask(pdev, ATA_DMA_MASK);
508 goto err_out_regions;
509 rc = pci_set_consistent_dma_mask(pdev, ATA_DMA_MASK);
511 goto err_out_regions;
515 ppi = &nv_port_info[ent->driver_data];
516 probe_ent = ata_pci_init_native_mode(pdev, &ppi, ATA_PORT_PRIMARY | ATA_PORT_SECONDARY);
518 goto err_out_regions;
520 probe_ent->mmio_base = pci_iomap(pdev, 5, 0);
521 if (!probe_ent->mmio_base) {
523 goto err_out_free_ent;
526 base = (unsigned long)probe_ent->mmio_base;
528 probe_ent->port[0].scr_addr = base + NV_PORT0_SCR_REG_OFFSET;
529 probe_ent->port[1].scr_addr = base + NV_PORT1_SCR_REG_OFFSET;
531 /* enable SATA space for CK804 */
532 if (ent->driver_data == CK804) {
535 pci_read_config_byte(pdev, NV_MCP_SATA_CFG_20, ®val);
536 regval |= NV_MCP_SATA_CFG_20_SATA_SPACE_EN;
537 pci_write_config_byte(pdev, NV_MCP_SATA_CFG_20, regval);
540 pci_set_master(pdev);
542 rc = ata_device_add(probe_ent);
544 goto err_out_iounmap;
551 pci_iounmap(pdev, probe_ent->mmio_base);
555 pci_release_regions(pdev);
558 pci_disable_device(pdev);
563 static void nv_ck804_host_stop(struct ata_host_set *host_set)
565 struct pci_dev *pdev = to_pci_dev(host_set->dev);
568 /* disable SATA space for CK804 */
569 pci_read_config_byte(pdev, NV_MCP_SATA_CFG_20, ®val);
570 regval &= ~NV_MCP_SATA_CFG_20_SATA_SPACE_EN;
571 pci_write_config_byte(pdev, NV_MCP_SATA_CFG_20, regval);
573 ata_pci_host_stop(host_set);
576 static int __init nv_init(void)
578 return pci_module_init(&nv_pci_driver);
581 static void __exit nv_exit(void)
583 pci_unregister_driver(&nv_pci_driver);
module_init(nv_init);
module_exit(nv_exit);