/*
 *  sata_nv.c - NVIDIA nForce SATA
 *
 *  Copyright 2004 NVIDIA Corp.  All rights reserved.
 *  Copyright 2004 Andrew Chew
 *
 *
 *  This program is free software; you can redistribute it and/or modify
 *  it under the terms of the GNU General Public License as published by
 *  the Free Software Foundation; either version 2, or (at your option)
 *  any later version.
 *
 *  This program is distributed in the hope that it will be useful,
 *  but WITHOUT ANY WARRANTY; without even the implied warranty of
 *  MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 *  GNU General Public License for more details.
 *
 *  You should have received a copy of the GNU General Public License
 *  along with this program; see the file COPYING.  If not, write to
 *  the Free Software Foundation, 675 Mass Ave, Cambridge, MA 02139, USA.
 *
 *
 *  libata documentation is available via 'make {ps|pdf}docs',
 *  as Documentation/DocBook/libata.*
 *
 *  No hardware documentation available outside of NVIDIA.
 *  This driver programs the NVIDIA SATA controller in a similar
 *  fashion as with other PCI IDE BMDMA controllers, with a few
 *  NV-specific details such as register offsets, SATA phy location,
 *  hotplug info, etc.
 *
 *  CK804/MCP04 controllers support an alternate programming interface
 *  similar to the ADMA specification (with some modifications).
 *  This allows the use of NCQ. Non-DMA-mapped ATA commands are still
 *  sent through the legacy interface.
 *
 */

#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/pci.h>
#include <linux/init.h>
#include <linux/blkdev.h>
#include <linux/delay.h>
#include <linux/interrupt.h>
#include <linux/device.h>
#include <scsi/scsi_host.h>
#include <scsi/scsi_device.h>
#include <linux/libata.h>

#define DRV_NAME			"sata_nv"
#define DRV_VERSION			"3.3"

#define NV_ADMA_DMA_BOUNDARY		0xffffffffUL
enum {
	NV_MMIO_BAR			= 5,

	NV_PORTS			= 2,
	NV_PIO_MASK			= 0x1f,
	NV_MWDMA_MASK			= 0x07,
	NV_UDMA_MASK			= 0x7f,
	NV_PORT0_SCR_REG_OFFSET		= 0x00,
	NV_PORT1_SCR_REG_OFFSET		= 0x40,

	/* INT_STATUS/ENABLE */
	NV_INT_STATUS			= 0x10,
	NV_INT_ENABLE			= 0x11,
	NV_INT_STATUS_CK804		= 0x440,
	NV_INT_ENABLE_CK804		= 0x441,

	/* INT_STATUS/ENABLE bits */
	NV_INT_DEV			= 0x01,
	NV_INT_PM			= 0x02,
	NV_INT_ADDED			= 0x04,
	NV_INT_REMOVED			= 0x08,

	NV_INT_PORT_SHIFT		= 4,	/* each port occupies 4 bits */

	NV_INT_ALL			= 0x0f,
	NV_INT_MASK			= NV_INT_DEV |
					  NV_INT_ADDED | NV_INT_REMOVED,

	/* INT_CONFIG */
	NV_INT_CONFIG			= 0x12,
	NV_INT_CONFIG_METHD		= 0x01, // 0 = INT, 1 = SMI

	// For PCI config register 20
	NV_MCP_SATA_CFG_20		= 0x50,
	NV_MCP_SATA_CFG_20_SATA_SPACE_EN = 0x04,
	NV_MCP_SATA_CFG_20_PORT0_EN	= (1 << 17),
	NV_MCP_SATA_CFG_20_PORT1_EN	= (1 << 16),
	NV_MCP_SATA_CFG_20_PORT0_PWB_EN	= (1 << 14),
	NV_MCP_SATA_CFG_20_PORT1_PWB_EN	= (1 << 12),

	NV_ADMA_MAX_CPBS		= 32,
	NV_ADMA_CPB_SZ			= 128,
	NV_ADMA_APRD_SZ			= 16,
	NV_ADMA_SGTBL_LEN		= (1024 - NV_ADMA_CPB_SZ) /
					   NV_ADMA_APRD_SZ,
	NV_ADMA_SGTBL_TOTAL_LEN		= NV_ADMA_SGTBL_LEN + 5,
	NV_ADMA_SGTBL_SZ		= NV_ADMA_SGTBL_LEN * NV_ADMA_APRD_SZ,
	NV_ADMA_PORT_PRIV_DMA_SZ	= NV_ADMA_MAX_CPBS *
					  (NV_ADMA_CPB_SZ + NV_ADMA_SGTBL_SZ),
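	/*
	 * Sizing note (added commentary; arithmetic follows from the
	 * constants above and the struct offsets below): struct nv_adma_cpb
	 * spans offsets 0-127 (128 bytes) and struct nv_adma_prd is 16
	 * bytes, so NV_ADMA_SGTBL_LEN = (1024 - 128) / 16 = 56 external
	 * scatter/gather entries per tag, 61 in total once the 5 in-CPB
	 * entries are counted, and the per-port DMA area comes to
	 * 32 * (128 + 56 * 16) = 32KB.
	 */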
	/* BAR5 offset to ADMA general registers */
	NV_ADMA_GEN			= 0x400,
	NV_ADMA_GEN_CTL			= 0x00,
	NV_ADMA_NOTIFIER_CLEAR		= 0x30,

	/* BAR5 offset to ADMA ports */
	NV_ADMA_PORT			= 0x480,

	/* size of ADMA port register space */
	NV_ADMA_PORT_SIZE		= 0x100,

	/* ADMA port registers */
	NV_ADMA_CTL			= 0x40,
	NV_ADMA_CPB_COUNT		= 0x42,
	NV_ADMA_NEXT_CPB_IDX		= 0x43,
	NV_ADMA_STAT			= 0x44,
	NV_ADMA_CPB_BASE_LOW		= 0x48,
	NV_ADMA_CPB_BASE_HIGH		= 0x4C,
	NV_ADMA_APPEND			= 0x50,
	NV_ADMA_NOTIFIER		= 0x68,
	NV_ADMA_NOTIFIER_ERROR		= 0x6C,

	/* NV_ADMA_CTL register bits */
	NV_ADMA_CTL_HOTPLUG_IEN		= (1 << 0),
	NV_ADMA_CTL_CHANNEL_RESET	= (1 << 5),
	NV_ADMA_CTL_GO			= (1 << 7),
	NV_ADMA_CTL_AIEN		= (1 << 8),
	NV_ADMA_CTL_READ_NON_COHERENT	= (1 << 11),
	NV_ADMA_CTL_WRITE_NON_COHERENT	= (1 << 12),

	/* CPB response flag bits */
	NV_CPB_RESP_DONE		= (1 << 0),
	NV_CPB_RESP_ATA_ERR		= (1 << 3),
	NV_CPB_RESP_CMD_ERR		= (1 << 4),
	NV_CPB_RESP_CPB_ERR		= (1 << 7),

	/* CPB control flag bits */
	NV_CPB_CTL_CPB_VALID		= (1 << 0),
	NV_CPB_CTL_QUEUE		= (1 << 1),
	NV_CPB_CTL_APRD_VALID		= (1 << 2),
	NV_CPB_CTL_IEN			= (1 << 3),
	NV_CPB_CTL_FPDMA		= (1 << 4),

	/* APRD flags */
	NV_APRD_WRITE			= (1 << 1),
	NV_APRD_END			= (1 << 2),
	NV_APRD_CONT			= (1 << 3),

	/* NV_ADMA_STAT flags */
	NV_ADMA_STAT_TIMEOUT		= (1 << 0),
	NV_ADMA_STAT_HOTUNPLUG		= (1 << 1),
	NV_ADMA_STAT_HOTPLUG		= (1 << 2),
	NV_ADMA_STAT_CPBERR		= (1 << 4),
	NV_ADMA_STAT_SERROR		= (1 << 5),
	NV_ADMA_STAT_CMD_COMPLETE	= (1 << 6),
	NV_ADMA_STAT_IDLE		= (1 << 8),
	NV_ADMA_STAT_LEGACY		= (1 << 9),
	NV_ADMA_STAT_STOPPED		= (1 << 10),
	NV_ADMA_STAT_DONE		= (1 << 12),
	NV_ADMA_STAT_ERR		= NV_ADMA_STAT_CPBERR |
					  NV_ADMA_STAT_TIMEOUT,

	/* port flags */
	NV_ADMA_PORT_REGISTER_MODE	= (1 << 0),
	NV_ADMA_ATAPI_SETUP_COMPLETE	= (1 << 1),
};

/* ADMA Physical Region Descriptor - one SG segment */
struct nv_adma_prd {
	__le64			addr;
	__le32			len;
	u8			flags;
	u8			packet_len;
	__le16			reserved;
};
enum nv_adma_regbits {
	CMDEND	= (1 << 15),		/* end of command list */
	WNB	= (1 << 14),		/* wait-not-BSY */
	IGN	= (1 << 13),		/* ignore this entry */
	CS1n	= (1 << (4 + 8)),	/* std. PATA signals follow... */
	DA2	= (1 << (2 + 8)),
	DA1	= (1 << (1 + 8)),
	DA0	= (1 << (0 + 8)),
};
/* ADMA Command Parameter Block
   The first 5 SG segments are stored inside the Command Parameter Block itself.
   If there are more than 5 segments the remainder are stored in a separate
   memory area indicated by next_aprd. */
struct nv_adma_cpb {
	u8			resp_flags;    /* 0 */
	u8			reserved1;     /* 1 */
	u8			ctl_flags;     /* 2 */
	/* len is length of taskfile in 64 bit words */
	u8			len;           /* 3 */
	u8			tag;           /* 4 */
	u8			next_cpb_idx;  /* 5 */
	__le16			reserved2;     /* 6-7 */
	__le16			tf[12];        /* 8-31 */
	struct nv_adma_prd	aprd[5];       /* 32-111 */
	__le64			next_aprd;     /* 112-119 */
	__le64			reserved3;     /* 120-127 */
};
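
/*
 * Note (added commentary): tf[12] leaves room for up to twelve 16-bit
 * register-write entries, which is exactly what nv_adma_tf_to_cpb() below
 * emits for an LBA48 command: six feature/hob writes, four count/address
 * writes, the device register, and the command itself; shorter taskfiles
 * are padded out with IGN entries.
 */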

struct nv_adma_port_priv {
	struct nv_adma_cpb	*cpb;
	dma_addr_t		cpb_dma;
	struct nv_adma_prd	*aprd;
	dma_addr_t		aprd_dma;
	void __iomem		*ctl_block;
	void __iomem		*gen_block;
	void __iomem		*notifier_clear_block;
	u8			flags;
	int			last_issue_ncq;
};

struct nv_host_priv {
	unsigned long		type;
};

#define NV_ADMA_CHECK_INTR(GCTL, PORT) ((GCTL) & (1 << (19 + (12 * (PORT)))))
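
/*
 * Illustrative expansion (added commentary): the ADMA interrupt bit for
 * port 0 is bit 19 of the general control/status word and port 1's is
 * bit 31 (19 + 12), so an interrupt handler can test
 *
 *	if (NV_ADMA_CHECK_INTR(gen_ctl, ap->port_no))
 *		...	(this port has an ADMA interrupt pending)
 */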

static int nv_init_one (struct pci_dev *pdev, const struct pci_device_id *ent);
static void nv_remove_one (struct pci_dev *pdev);
static int nv_pci_device_resume(struct pci_dev *pdev);
static void nv_ck804_host_stop(struct ata_host *host);
static irqreturn_t nv_generic_interrupt(int irq, void *dev_instance);
static irqreturn_t nv_nf2_interrupt(int irq, void *dev_instance);
static irqreturn_t nv_ck804_interrupt(int irq, void *dev_instance);
static u32 nv_scr_read (struct ata_port *ap, unsigned int sc_reg);
static void nv_scr_write (struct ata_port *ap, unsigned int sc_reg, u32 val);

static void nv_nf2_freeze(struct ata_port *ap);
static void nv_nf2_thaw(struct ata_port *ap);
static void nv_ck804_freeze(struct ata_port *ap);
static void nv_ck804_thaw(struct ata_port *ap);
static void nv_error_handler(struct ata_port *ap);
static int nv_adma_slave_config(struct scsi_device *sdev);
static int nv_adma_check_atapi_dma(struct ata_queued_cmd *qc);
static void nv_adma_qc_prep(struct ata_queued_cmd *qc);
static unsigned int nv_adma_qc_issue(struct ata_queued_cmd *qc);
static irqreturn_t nv_adma_interrupt(int irq, void *dev_instance);
static void nv_adma_irq_clear(struct ata_port *ap);
static int nv_adma_port_start(struct ata_port *ap);
static void nv_adma_port_stop(struct ata_port *ap);
static int nv_adma_port_suspend(struct ata_port *ap, pm_message_t mesg);
static int nv_adma_port_resume(struct ata_port *ap);
static void nv_adma_error_handler(struct ata_port *ap);
static void nv_adma_host_stop(struct ata_host *host);
static void nv_adma_post_internal_cmd(struct ata_queued_cmd *qc);

enum nv_host_type
{
	GENERIC,
	NFORCE2,
	NFORCE3 = NFORCE2,	/* NF2 == NF3 as far as sata_nv is concerned */
	CK804,
	ADMA
};

static const struct pci_device_id nv_pci_tbl[] = {
	{ PCI_VDEVICE(NVIDIA, PCI_DEVICE_ID_NVIDIA_NFORCE2S_SATA), NFORCE2 },
	{ PCI_VDEVICE(NVIDIA, PCI_DEVICE_ID_NVIDIA_NFORCE3S_SATA), NFORCE3 },
	{ PCI_VDEVICE(NVIDIA, PCI_DEVICE_ID_NVIDIA_NFORCE3S_SATA2), NFORCE3 },
	{ PCI_VDEVICE(NVIDIA, PCI_DEVICE_ID_NVIDIA_NFORCE_CK804_SATA), CK804 },
	{ PCI_VDEVICE(NVIDIA, PCI_DEVICE_ID_NVIDIA_NFORCE_CK804_SATA2), CK804 },
	{ PCI_VDEVICE(NVIDIA, PCI_DEVICE_ID_NVIDIA_NFORCE_MCP04_SATA), CK804 },
	{ PCI_VDEVICE(NVIDIA, PCI_DEVICE_ID_NVIDIA_NFORCE_MCP04_SATA2), CK804 },
	{ PCI_VDEVICE(NVIDIA, PCI_DEVICE_ID_NVIDIA_NFORCE_MCP51_SATA), GENERIC },
	{ PCI_VDEVICE(NVIDIA, PCI_DEVICE_ID_NVIDIA_NFORCE_MCP51_SATA2), GENERIC },
	{ PCI_VDEVICE(NVIDIA, PCI_DEVICE_ID_NVIDIA_NFORCE_MCP55_SATA), GENERIC },
	{ PCI_VDEVICE(NVIDIA, PCI_DEVICE_ID_NVIDIA_NFORCE_MCP55_SATA2), GENERIC },
	{ PCI_VDEVICE(NVIDIA, PCI_DEVICE_ID_NVIDIA_NFORCE_MCP61_SATA), GENERIC },
	{ PCI_VDEVICE(NVIDIA, PCI_DEVICE_ID_NVIDIA_NFORCE_MCP61_SATA2), GENERIC },
	{ PCI_VDEVICE(NVIDIA, PCI_DEVICE_ID_NVIDIA_NFORCE_MCP61_SATA3), GENERIC },
	{ PCI_VENDOR_ID_NVIDIA, PCI_ANY_ID,
		PCI_ANY_ID, PCI_ANY_ID,
		PCI_CLASS_STORAGE_IDE<<8, 0xffff00, GENERIC },
	{ PCI_VENDOR_ID_NVIDIA, PCI_ANY_ID,
		PCI_ANY_ID, PCI_ANY_ID,
		PCI_CLASS_STORAGE_RAID<<8, 0xffff00, GENERIC },

	{ } /* terminate list */
};

static struct pci_driver nv_pci_driver = {
	.name			= DRV_NAME,
	.id_table		= nv_pci_tbl,
	.probe			= nv_init_one,
	.suspend		= ata_pci_device_suspend,
	.resume			= nv_pci_device_resume,
	.remove			= nv_remove_one,
};

static struct scsi_host_template nv_sht = {
	.module			= THIS_MODULE,
	.name			= DRV_NAME,
	.ioctl			= ata_scsi_ioctl,
	.queuecommand		= ata_scsi_queuecmd,
	.can_queue		= ATA_DEF_QUEUE,
	.this_id		= ATA_SHT_THIS_ID,
	.sg_tablesize		= LIBATA_MAX_PRD,
	.cmd_per_lun		= ATA_SHT_CMD_PER_LUN,
	.emulated		= ATA_SHT_EMULATED,
	.use_clustering		= ATA_SHT_USE_CLUSTERING,
	.proc_name		= DRV_NAME,
	.dma_boundary		= ATA_DMA_BOUNDARY,
	.slave_configure	= ata_scsi_slave_config,
	.slave_destroy		= ata_scsi_slave_destroy,
	.bios_param		= ata_std_bios_param,
	.suspend		= ata_scsi_device_suspend,
	.resume			= ata_scsi_device_resume,
};

static struct scsi_host_template nv_adma_sht = {
	.module			= THIS_MODULE,
	.name			= DRV_NAME,
	.ioctl			= ata_scsi_ioctl,
	.queuecommand		= ata_scsi_queuecmd,
	.can_queue		= NV_ADMA_MAX_CPBS,
	.this_id		= ATA_SHT_THIS_ID,
	.sg_tablesize		= NV_ADMA_SGTBL_TOTAL_LEN,
	.cmd_per_lun		= ATA_SHT_CMD_PER_LUN,
	.emulated		= ATA_SHT_EMULATED,
	.use_clustering		= ATA_SHT_USE_CLUSTERING,
	.proc_name		= DRV_NAME,
	.dma_boundary		= NV_ADMA_DMA_BOUNDARY,
	.slave_configure	= nv_adma_slave_config,
	.slave_destroy		= ata_scsi_slave_destroy,
	.bios_param		= ata_std_bios_param,
	.suspend		= ata_scsi_device_suspend,
	.resume			= ata_scsi_device_resume,
};

static const struct ata_port_operations nv_generic_ops = {
	.port_disable		= ata_port_disable,
	.tf_load		= ata_tf_load,
	.tf_read		= ata_tf_read,
	.exec_command		= ata_exec_command,
	.check_status		= ata_check_status,
	.dev_select		= ata_std_dev_select,
	.bmdma_setup		= ata_bmdma_setup,
	.bmdma_start		= ata_bmdma_start,
	.bmdma_stop		= ata_bmdma_stop,
	.bmdma_status		= ata_bmdma_status,
	.qc_prep		= ata_qc_prep,
	.qc_issue		= ata_qc_issue_prot,
	.freeze			= ata_bmdma_freeze,
	.thaw			= ata_bmdma_thaw,
	.error_handler		= nv_error_handler,
	.post_internal_cmd	= ata_bmdma_post_internal_cmd,
	.data_xfer		= ata_data_xfer,
	.irq_handler		= nv_generic_interrupt,
	.irq_clear		= ata_bmdma_irq_clear,
	.irq_on			= ata_irq_on,
	.irq_ack		= ata_irq_ack,
	.scr_read		= nv_scr_read,
	.scr_write		= nv_scr_write,
	.port_start		= ata_port_start,
};

static const struct ata_port_operations nv_nf2_ops = {
	.port_disable		= ata_port_disable,
	.tf_load		= ata_tf_load,
	.tf_read		= ata_tf_read,
	.exec_command		= ata_exec_command,
	.check_status		= ata_check_status,
	.dev_select		= ata_std_dev_select,
	.bmdma_setup		= ata_bmdma_setup,
	.bmdma_start		= ata_bmdma_start,
	.bmdma_stop		= ata_bmdma_stop,
	.bmdma_status		= ata_bmdma_status,
	.qc_prep		= ata_qc_prep,
	.qc_issue		= ata_qc_issue_prot,
	.freeze			= nv_nf2_freeze,
	.thaw			= nv_nf2_thaw,
	.error_handler		= nv_error_handler,
	.post_internal_cmd	= ata_bmdma_post_internal_cmd,
	.data_xfer		= ata_data_xfer,
	.irq_handler		= nv_nf2_interrupt,
	.irq_clear		= ata_bmdma_irq_clear,
	.irq_on			= ata_irq_on,
	.irq_ack		= ata_irq_ack,
	.scr_read		= nv_scr_read,
	.scr_write		= nv_scr_write,
	.port_start		= ata_port_start,
};

static const struct ata_port_operations nv_ck804_ops = {
	.port_disable		= ata_port_disable,
	.tf_load		= ata_tf_load,
	.tf_read		= ata_tf_read,
	.exec_command		= ata_exec_command,
	.check_status		= ata_check_status,
	.dev_select		= ata_std_dev_select,
	.bmdma_setup		= ata_bmdma_setup,
	.bmdma_start		= ata_bmdma_start,
	.bmdma_stop		= ata_bmdma_stop,
	.bmdma_status		= ata_bmdma_status,
	.qc_prep		= ata_qc_prep,
	.qc_issue		= ata_qc_issue_prot,
	.freeze			= nv_ck804_freeze,
	.thaw			= nv_ck804_thaw,
	.error_handler		= nv_error_handler,
	.post_internal_cmd	= ata_bmdma_post_internal_cmd,
	.data_xfer		= ata_data_xfer,
	.irq_handler		= nv_ck804_interrupt,
	.irq_clear		= ata_bmdma_irq_clear,
	.irq_on			= ata_irq_on,
	.irq_ack		= ata_irq_ack,
	.scr_read		= nv_scr_read,
	.scr_write		= nv_scr_write,
	.port_start		= ata_port_start,
	.host_stop		= nv_ck804_host_stop,
};

static const struct ata_port_operations nv_adma_ops = {
	.port_disable		= ata_port_disable,
	.tf_load		= ata_tf_load,
	.tf_read		= ata_tf_read,
	.check_atapi_dma	= nv_adma_check_atapi_dma,
	.exec_command		= ata_exec_command,
	.check_status		= ata_check_status,
	.dev_select		= ata_std_dev_select,
	.bmdma_setup		= ata_bmdma_setup,
	.bmdma_start		= ata_bmdma_start,
	.bmdma_stop		= ata_bmdma_stop,
	.bmdma_status		= ata_bmdma_status,
	.qc_prep		= nv_adma_qc_prep,
	.qc_issue		= nv_adma_qc_issue,
	.freeze			= nv_ck804_freeze,
	.thaw			= nv_ck804_thaw,
	.error_handler		= nv_adma_error_handler,
	.post_internal_cmd	= nv_adma_post_internal_cmd,
	.data_xfer		= ata_data_xfer,
	.irq_handler		= nv_adma_interrupt,
	.irq_clear		= nv_adma_irq_clear,
	.irq_on			= ata_irq_on,
	.irq_ack		= ata_irq_ack,
	.scr_read		= nv_scr_read,
	.scr_write		= nv_scr_write,
	.port_start		= nv_adma_port_start,
	.port_stop		= nv_adma_port_stop,
	.port_suspend		= nv_adma_port_suspend,
	.port_resume		= nv_adma_port_resume,
	.host_stop		= nv_adma_host_stop,
};

static struct ata_port_info nv_port_info[] = {
	/* generic */
	{
		.sht		= &nv_sht,
		.flags		= ATA_FLAG_SATA | ATA_FLAG_NO_LEGACY |
				  ATA_FLAG_HRST_TO_RESUME,
		.pio_mask	= NV_PIO_MASK,
		.mwdma_mask	= NV_MWDMA_MASK,
		.udma_mask	= NV_UDMA_MASK,
		.port_ops	= &nv_generic_ops,
	},
	/* nforce2/3 */
	{
		.sht		= &nv_sht,
		.flags		= ATA_FLAG_SATA | ATA_FLAG_NO_LEGACY |
				  ATA_FLAG_HRST_TO_RESUME,
		.pio_mask	= NV_PIO_MASK,
		.mwdma_mask	= NV_MWDMA_MASK,
		.udma_mask	= NV_UDMA_MASK,
		.port_ops	= &nv_nf2_ops,
	},
	/* CK804 */
	{
		.sht		= &nv_sht,
		.flags		= ATA_FLAG_SATA | ATA_FLAG_NO_LEGACY |
				  ATA_FLAG_HRST_TO_RESUME,
		.pio_mask	= NV_PIO_MASK,
		.mwdma_mask	= NV_MWDMA_MASK,
		.udma_mask	= NV_UDMA_MASK,
		.port_ops	= &nv_ck804_ops,
	},
	/* ADMA */
	{
		.sht		= &nv_adma_sht,
		.flags		= ATA_FLAG_SATA | ATA_FLAG_NO_LEGACY |
				  ATA_FLAG_HRST_TO_RESUME |
				  ATA_FLAG_MMIO | ATA_FLAG_NCQ,
		.pio_mask	= NV_PIO_MASK,
		.mwdma_mask	= NV_MWDMA_MASK,
		.udma_mask	= NV_UDMA_MASK,
		.port_ops	= &nv_adma_ops,
	},
};

MODULE_AUTHOR("NVIDIA");
MODULE_DESCRIPTION("low-level driver for NVIDIA nForce SATA controller");
MODULE_LICENSE("GPL");
MODULE_DEVICE_TABLE(pci, nv_pci_tbl);
MODULE_VERSION(DRV_VERSION);

static int adma_enabled = 1;

static void nv_adma_register_mode(struct ata_port *ap)
{
	struct nv_adma_port_priv *pp = ap->private_data;
	void __iomem *mmio = pp->ctl_block;
	u16 tmp, status;
	int count = 0;

	if (pp->flags & NV_ADMA_PORT_REGISTER_MODE)
		return;

	status = readw(mmio + NV_ADMA_STAT);
	while(!(status & NV_ADMA_STAT_IDLE) && count < 20) {
		ndelay(50);
		status = readw(mmio + NV_ADMA_STAT);
		count++;
	}
	if(count == 20)
		ata_port_printk(ap, KERN_WARNING,
			"timeout waiting for ADMA IDLE, stat=0x%hx\n",
			status);

	tmp = readw(mmio + NV_ADMA_CTL);
	writew(tmp & ~NV_ADMA_CTL_GO, mmio + NV_ADMA_CTL);

	count = 0;
	status = readw(mmio + NV_ADMA_STAT);
	while(!(status & NV_ADMA_STAT_LEGACY) && count < 20) {
		ndelay(50);
		status = readw(mmio + NV_ADMA_STAT);
		count++;
	}
	if(count == 20)
		ata_port_printk(ap, KERN_WARNING,
			"timeout waiting for ADMA LEGACY, stat=0x%hx\n",
			status);

	pp->flags |= NV_ADMA_PORT_REGISTER_MODE;
}

static void nv_adma_mode(struct ata_port *ap)
{
	struct nv_adma_port_priv *pp = ap->private_data;
	void __iomem *mmio = pp->ctl_block;
	u16 tmp, status;
	int count = 0;

	if (!(pp->flags & NV_ADMA_PORT_REGISTER_MODE))
		return;

	WARN_ON(pp->flags & NV_ADMA_ATAPI_SETUP_COMPLETE);

	tmp = readw(mmio + NV_ADMA_CTL);
	writew(tmp | NV_ADMA_CTL_GO, mmio + NV_ADMA_CTL);

	status = readw(mmio + NV_ADMA_STAT);
	while(((status & NV_ADMA_STAT_LEGACY) ||
	      !(status & NV_ADMA_STAT_IDLE)) && count < 20) {
		ndelay(50);
		status = readw(mmio + NV_ADMA_STAT);
		count++;
	}
	if(count == 20)
		ata_port_printk(ap, KERN_WARNING,
			"timeout waiting for ADMA LEGACY clear and IDLE, stat=0x%hx\n",
			status);

	pp->flags &= ~NV_ADMA_PORT_REGISTER_MODE;
}

static int nv_adma_slave_config(struct scsi_device *sdev)
{
	struct ata_port *ap = ata_shost_to_port(sdev->host);
	struct nv_adma_port_priv *pp = ap->private_data;
	struct pci_dev *pdev = to_pci_dev(ap->host->dev);
	u64 bounce_limit;
	unsigned long segment_boundary;
	unsigned short sg_tablesize;
	int rc;
	int adma_enable;
	u32 current_reg, new_reg, config_mask;

	rc = ata_scsi_slave_config(sdev);

	if (sdev->id >= ATA_MAX_DEVICES || sdev->channel || sdev->lun)
		/* Not a proper libata device, ignore */
		return rc;

	if (ap->device[sdev->id].class == ATA_DEV_ATAPI) {
		/*
		 * NVIDIA reports that ADMA mode does not support ATAPI commands.
		 * Therefore ATAPI commands are sent through the legacy interface.
		 * However, the legacy interface only supports 32-bit DMA.
		 * Restrict DMA parameters as required by the legacy interface
		 * when an ATAPI device is connected.
		 */
		bounce_limit = ATA_DMA_MASK;
		segment_boundary = ATA_DMA_BOUNDARY;
		/* Subtract 1 since an extra entry may be needed for padding, see
		   libata-scsi.c */
		sg_tablesize = LIBATA_MAX_PRD - 1;

		/* Since the legacy DMA engine is in use, we need to disable ADMA
		   on the port. */
		adma_enable = 0;
		nv_adma_register_mode(ap);
	} else {
		bounce_limit = *ap->dev->dma_mask;
		segment_boundary = NV_ADMA_DMA_BOUNDARY;
		sg_tablesize = NV_ADMA_SGTBL_TOTAL_LEN;
		adma_enable = 1;
	}

	pci_read_config_dword(pdev, NV_MCP_SATA_CFG_20, &current_reg);

	if(ap->port_no == 1)
		config_mask = NV_MCP_SATA_CFG_20_PORT1_EN |
			      NV_MCP_SATA_CFG_20_PORT1_PWB_EN;
	else
		config_mask = NV_MCP_SATA_CFG_20_PORT0_EN |
			      NV_MCP_SATA_CFG_20_PORT0_PWB_EN;

	if(adma_enable) {
		new_reg = current_reg | config_mask;
		pp->flags &= ~NV_ADMA_ATAPI_SETUP_COMPLETE;
	} else {
		new_reg = current_reg & ~config_mask;
		pp->flags |= NV_ADMA_ATAPI_SETUP_COMPLETE;
	}

	if(current_reg != new_reg)
		pci_write_config_dword(pdev, NV_MCP_SATA_CFG_20, new_reg);

	blk_queue_bounce_limit(sdev->request_queue, bounce_limit);
	blk_queue_segment_boundary(sdev->request_queue, segment_boundary);
	blk_queue_max_hw_segments(sdev->request_queue, sg_tablesize);
	ata_port_printk(ap, KERN_INFO,
		"bounce limit 0x%llX, segment boundary 0x%lX, hw segs %hu\n",
		(unsigned long long)bounce_limit, segment_boundary, sg_tablesize);
	return rc;
}

static int nv_adma_check_atapi_dma(struct ata_queued_cmd *qc)
{
	struct nv_adma_port_priv *pp = qc->ap->private_data;
	return !(pp->flags & NV_ADMA_ATAPI_SETUP_COMPLETE);
}

static unsigned int nv_adma_tf_to_cpb(struct ata_taskfile *tf, __le16 *cpb)
{
	unsigned int idx = 0;

	if(tf->flags & ATA_TFLAG_ISADDR) {
		if (tf->flags & ATA_TFLAG_LBA48) {
			cpb[idx++] = cpu_to_le16((ATA_REG_ERR << 8) | tf->hob_feature | WNB);
			cpb[idx++] = cpu_to_le16((ATA_REG_NSECT << 8) | tf->hob_nsect);
			cpb[idx++] = cpu_to_le16((ATA_REG_LBAL << 8) | tf->hob_lbal);
			cpb[idx++] = cpu_to_le16((ATA_REG_LBAM << 8) | tf->hob_lbam);
			cpb[idx++] = cpu_to_le16((ATA_REG_LBAH << 8) | tf->hob_lbah);
			cpb[idx++] = cpu_to_le16((ATA_REG_ERR << 8) | tf->feature);
		} else
			cpb[idx++] = cpu_to_le16((ATA_REG_ERR << 8) | tf->feature | WNB);

		cpb[idx++] = cpu_to_le16((ATA_REG_NSECT << 8) | tf->nsect);
		cpb[idx++] = cpu_to_le16((ATA_REG_LBAL << 8) | tf->lbal);
		cpb[idx++] = cpu_to_le16((ATA_REG_LBAM << 8) | tf->lbam);
		cpb[idx++] = cpu_to_le16((ATA_REG_LBAH << 8) | tf->lbah);
	}

	if(tf->flags & ATA_TFLAG_DEVICE)
		cpb[idx++] = cpu_to_le16((ATA_REG_DEVICE << 8) | tf->device);

	cpb[idx++] = cpu_to_le16((ATA_REG_CMD << 8) | tf->command | CMDEND);

	while(idx < 12)
		cpb[idx++] = cpu_to_le16(IGN);

	return idx;
}
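
/*
 * Encoding sketch (added commentary): each entry above is a 16-bit word
 * with the taskfile register index in the high byte, the value in the low
 * byte, and control bits from enum nv_adma_regbits mixed in. A non-LBA48
 * read of one sector, for instance, would produce roughly
 *
 *	(ATA_REG_ERR    << 8) | feature | WNB
 *	(ATA_REG_NSECT  << 8) | 1
 *	(ATA_REG_LBAL   << 8) | lbal		(likewise LBAM/LBAH)
 *	(ATA_REG_DEVICE << 8) | device
 *	(ATA_REG_CMD    << 8) | command | CMDEND
 *
 * with the remaining tf[12] slots padded out with IGN.
 */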

static int nv_adma_check_cpb(struct ata_port *ap, int cpb_num, int force_err)
{
	struct nv_adma_port_priv *pp = ap->private_data;
	u8 flags = pp->cpb[cpb_num].resp_flags;

	VPRINTK("CPB %d, flags=0x%x\n", cpb_num, flags);

	if (unlikely((force_err ||
		     flags & (NV_CPB_RESP_ATA_ERR |
			      NV_CPB_RESP_CMD_ERR |
			      NV_CPB_RESP_CPB_ERR)))) {
		struct ata_eh_info *ehi = &ap->eh_info;

		ata_ehi_clear_desc(ehi);
		ata_ehi_push_desc(ehi, "CPB resp_flags 0x%x", flags);
		if (flags & NV_CPB_RESP_ATA_ERR) {
			ata_ehi_push_desc(ehi, ": ATA error");
			ehi->err_mask |= AC_ERR_DEV;
		} else if (flags & NV_CPB_RESP_CMD_ERR) {
			ata_ehi_push_desc(ehi, ": CMD error");
			ehi->err_mask |= AC_ERR_DEV;
		} else if (flags & NV_CPB_RESP_CPB_ERR) {
			ata_ehi_push_desc(ehi, ": CPB error");
			ehi->err_mask |= AC_ERR_SYSTEM;
		} else {
			/* notifier error, but no error in CPB flags? */
			ehi->err_mask |= AC_ERR_OTHER;
		}
		/* Kill all commands. EH will determine what actually failed. */
		ata_port_freeze(ap);
		return 1;
	}

	if (flags & NV_CPB_RESP_DONE) {
		struct ata_queued_cmd *qc = ata_qc_from_tag(ap, cpb_num);
		VPRINTK("CPB flags done, flags=0x%x\n", flags);
		if (likely(qc)) {
			/* Grab the ATA port status for non-NCQ commands.
			   For NCQ commands the current status may have nothing to do with
			   the command just completed. */
			if (qc->tf.protocol != ATA_PROT_NCQ) {
				u8 ata_status = readb(pp->ctl_block + (ATA_REG_STATUS * 4));
				qc->err_mask |= ac_err_mask(ata_status);
			}
			DPRINTK("Completing qc from tag %d with err_mask %u\n", cpb_num,
				qc->err_mask);
			ata_qc_complete(qc);
		} else {
			struct ata_eh_info *ehi = &ap->eh_info;
			/* Notifier bits set without a command may indicate the drive
			   is misbehaving. Raise host state machine violation on this
			   condition. */
			ata_port_printk(ap, KERN_ERR, "notifier for tag %d with no command?\n",
				cpb_num);
			ehi->err_mask |= AC_ERR_HSM;
			ehi->action |= ATA_EH_SOFTRESET;
			ata_port_freeze(ap);
			return 1;
		}
	}
	return 0;
}

static int nv_host_intr(struct ata_port *ap, u8 irq_stat)
{
	struct ata_queued_cmd *qc = ata_qc_from_tag(ap, ap->active_tag);

	/* freeze if hotplugged */
	if (unlikely(irq_stat & (NV_INT_ADDED | NV_INT_REMOVED))) {
		ata_port_freeze(ap);
		return 1;
	}

	/* bail out if not our interrupt */
	if (!(irq_stat & NV_INT_DEV))
		return 0;

	/* DEV interrupt w/ no active qc? */
	if (unlikely(!qc || (qc->tf.flags & ATA_TFLAG_POLLING))) {
		ata_check_status(ap);
		return 1;
	}

	/* handle interrupt */
	return ata_host_intr(ap, qc);
}

static irqreturn_t nv_adma_interrupt(int irq, void *dev_instance)
{
	struct ata_host *host = dev_instance;
	int i, handled = 0;
	u32 notifier_clears[2];

	spin_lock(&host->lock);

	for (i = 0; i < host->n_ports; i++) {
		struct ata_port *ap = host->ports[i];
		notifier_clears[i] = 0;

		if (ap && !(ap->flags & ATA_FLAG_DISABLED)) {
			struct nv_adma_port_priv *pp = ap->private_data;
			void __iomem *mmio = pp->ctl_block;
			u16 status;
			u32 gen_ctl;
			u32 notifier, notifier_error;

			/* if in ATA register mode, use standard ata interrupt handler */
			if (pp->flags & NV_ADMA_PORT_REGISTER_MODE) {
				u8 irq_stat = readb(host->iomap[NV_MMIO_BAR] + NV_INT_STATUS_CK804)
					>> (NV_INT_PORT_SHIFT * i);
				if(ata_tag_valid(ap->active_tag))
					/* NV_INT_DEV indication seems unreliable at times
					   at least in ADMA mode. Force it on always when a
					   command is active, to prevent losing interrupts. */
					irq_stat |= NV_INT_DEV;
				handled += nv_host_intr(ap, irq_stat);
				continue;
			}

			notifier = readl(mmio + NV_ADMA_NOTIFIER);
			notifier_error = readl(mmio + NV_ADMA_NOTIFIER_ERROR);
			notifier_clears[i] = notifier | notifier_error;

			gen_ctl = readl(pp->gen_block + NV_ADMA_GEN_CTL);

			if( !NV_ADMA_CHECK_INTR(gen_ctl, ap->port_no) && !notifier &&
			    !notifier_error)
				/* Nothing to do */
				continue;

			status = readw(mmio + NV_ADMA_STAT);

			/* Clear status. Ensure the controller sees the clearing before we start
			   looking at any of the CPB statuses, so that any CPB completions after
			   this point in the handler will raise another interrupt. */
			writew(status, mmio + NV_ADMA_STAT);
			readw(mmio + NV_ADMA_STAT); /* flush posted write */
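			/*
			 * Note (added commentary): the readw() back of
			 * NV_ADMA_STAT is the usual MMIO idiom for flushing a
			 * posted write; the read cannot complete until the
			 * write above has reached the device, which is what
			 * makes the guarantee in the preceding comment hold.
			 */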
			handled++; /* irq handled if we got here */

			/* freeze if hotplugged or controller error */
			if (unlikely(status & (NV_ADMA_STAT_HOTPLUG |
					       NV_ADMA_STAT_HOTUNPLUG |
					       NV_ADMA_STAT_TIMEOUT |
					       NV_ADMA_STAT_SERROR))) {
				struct ata_eh_info *ehi = &ap->eh_info;

				ata_ehi_clear_desc(ehi);
				ata_ehi_push_desc(ehi, "ADMA status 0x%08x", status);
				if (status & NV_ADMA_STAT_TIMEOUT) {
					ehi->err_mask |= AC_ERR_SYSTEM;
					ata_ehi_push_desc(ehi, ": timeout");
				} else if (status & NV_ADMA_STAT_HOTPLUG) {
					ata_ehi_hotplugged(ehi);
					ata_ehi_push_desc(ehi, ": hotplug");
				} else if (status & NV_ADMA_STAT_HOTUNPLUG) {
					ata_ehi_hotplugged(ehi);
					ata_ehi_push_desc(ehi, ": hot unplug");
				} else if (status & NV_ADMA_STAT_SERROR) {
					/* let libata analyze SError and figure out the cause */
					ata_ehi_push_desc(ehi, ": SError");
				}
				ata_port_freeze(ap);
				continue;
			}

			if (status & (NV_ADMA_STAT_DONE |
				      NV_ADMA_STAT_CPBERR)) {
				u32 check_commands = notifier | notifier_error;
				int pos, error = 0;

				/* Check CPBs for completed commands */
				while ((pos = ffs(check_commands)) && !error) {
					pos--;
					error = nv_adma_check_cpb(ap, pos,
						notifier_error & (1 << pos));
					check_commands &= ~(1 << pos);
				}
			}
		}
	}

	if(notifier_clears[0] || notifier_clears[1]) {
		/* Note: Both notifier clear registers must be written
		   if either is set, even if one is zero, according to NVIDIA. */
		struct nv_adma_port_priv *pp = host->ports[0]->private_data;
		writel(notifier_clears[0], pp->notifier_clear_block);
		pp = host->ports[1]->private_data;
		writel(notifier_clears[1], pp->notifier_clear_block);
	}

	spin_unlock(&host->lock);

	return IRQ_RETVAL(handled);
}

static void nv_adma_irq_clear(struct ata_port *ap)
{
	struct nv_adma_port_priv *pp = ap->private_data;
	void __iomem *mmio = pp->ctl_block;
	u16 status = readw(mmio + NV_ADMA_STAT);
	u32 notifier = readl(mmio + NV_ADMA_NOTIFIER);
	u32 notifier_error = readl(mmio + NV_ADMA_NOTIFIER_ERROR);
	void __iomem *dma_stat_addr = ap->ioaddr.bmdma_addr + ATA_DMA_STATUS;

	/* clear ADMA status */
	writew(status, mmio + NV_ADMA_STAT);
	writel(notifier | notifier_error,
	       pp->notifier_clear_block);

	/* clear legacy status */
	iowrite8(ioread8(dma_stat_addr), dma_stat_addr);
}

static void nv_adma_post_internal_cmd(struct ata_queued_cmd *qc)
{
	struct nv_adma_port_priv *pp = qc->ap->private_data;

	if(pp->flags & NV_ADMA_PORT_REGISTER_MODE)
		ata_bmdma_post_internal_cmd(qc);
}

static int nv_adma_port_start(struct ata_port *ap)
{
	struct device *dev = ap->host->dev;
	struct nv_adma_port_priv *pp;
	int rc;
	void *mem;
	dma_addr_t mem_dma;
	void __iomem *mmio;
	u16 tmp;

	VPRINTK("ENTER\n");

	rc = ata_port_start(ap);
	if (rc)
		return rc;

	pp = devm_kzalloc(dev, sizeof(*pp), GFP_KERNEL);
	if (!pp)
		return -ENOMEM;

	mmio = ap->host->iomap[NV_MMIO_BAR] + NV_ADMA_PORT +
	       ap->port_no * NV_ADMA_PORT_SIZE;
	pp->ctl_block = mmio;
	pp->gen_block = ap->host->iomap[NV_MMIO_BAR] + NV_ADMA_GEN;
	pp->notifier_clear_block = pp->gen_block +
	       NV_ADMA_NOTIFIER_CLEAR + (4 * ap->port_no);

	mem = dmam_alloc_coherent(dev, NV_ADMA_PORT_PRIV_DMA_SZ,
				  &mem_dma, GFP_KERNEL);
	if (!mem)
		return -ENOMEM;
	memset(mem, 0, NV_ADMA_PORT_PRIV_DMA_SZ);

	/*
	 * First item in chunk of DMA memory:
	 * 128-byte command parameter block (CPB)
	 * one for each command tag
	 */
	pp->cpb     = mem;
	pp->cpb_dma = mem_dma;

	writel(mem_dma & 0xFFFFFFFF,	mmio + NV_ADMA_CPB_BASE_LOW);
	writel((mem_dma >> 16) >> 16,	mmio + NV_ADMA_CPB_BASE_HIGH);
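	/*
	 * Note (added commentary): splitting the high-half shift as
	 * (mem_dma >> 16) >> 16 keeps the expression well-defined when
	 * dma_addr_t is only 32 bits wide (a single >> 32 would be
	 * undefined behavior there); with 32-bit DMA addresses it simply
	 * stores zero.
	 */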
	mem     += NV_ADMA_MAX_CPBS * NV_ADMA_CPB_SZ;
	mem_dma += NV_ADMA_MAX_CPBS * NV_ADMA_CPB_SZ;

	/*
	 * Second item: block of ADMA_SGTBL_LEN s/g entries
	 */
	pp->aprd     = mem;
	pp->aprd_dma = mem_dma;

	ap->private_data = pp;

	/* clear any outstanding interrupt conditions */
	writew(0xffff, mmio + NV_ADMA_STAT);

	/* initialize port variables */
	pp->flags = NV_ADMA_PORT_REGISTER_MODE;

	/* clear CPB fetch count */
	writew(0, mmio + NV_ADMA_CPB_COUNT);

	/* clear GO for register mode, enable interrupt */
	tmp = readw(mmio + NV_ADMA_CTL);
	writew((tmp & ~NV_ADMA_CTL_GO) | NV_ADMA_CTL_AIEN |
	       NV_ADMA_CTL_HOTPLUG_IEN, mmio + NV_ADMA_CTL);

	tmp = readw(mmio + NV_ADMA_CTL);
	writew(tmp | NV_ADMA_CTL_CHANNEL_RESET, mmio + NV_ADMA_CTL);
	readw(mmio + NV_ADMA_CTL);	/* flush posted write */
	udelay(1);
	writew(tmp & ~NV_ADMA_CTL_CHANNEL_RESET, mmio + NV_ADMA_CTL);
	readw(mmio + NV_ADMA_CTL);	/* flush posted write */

	return 0;
}

static void nv_adma_port_stop(struct ata_port *ap)
{
	struct nv_adma_port_priv *pp = ap->private_data;
	void __iomem *mmio = pp->ctl_block;

	VPRINTK("ENTER\n");
	writew(0, mmio + NV_ADMA_CTL);
}

static int nv_adma_port_suspend(struct ata_port *ap, pm_message_t mesg)
{
	struct nv_adma_port_priv *pp = ap->private_data;
	void __iomem *mmio = pp->ctl_block;

	/* Go to register mode - clears GO */
	nv_adma_register_mode(ap);

	/* clear CPB fetch count */
	writew(0, mmio + NV_ADMA_CPB_COUNT);

	/* disable interrupt, shut down port */
	writew(0, mmio + NV_ADMA_CTL);

	return 0;
}

static int nv_adma_port_resume(struct ata_port *ap)
{
	struct nv_adma_port_priv *pp = ap->private_data;
	void __iomem *mmio = pp->ctl_block;
	u16 tmp;

	/* set CPB block location */
	writel(pp->cpb_dma & 0xFFFFFFFF,	mmio + NV_ADMA_CPB_BASE_LOW);
	writel((pp->cpb_dma >> 16) >> 16,	mmio + NV_ADMA_CPB_BASE_HIGH);

	/* clear any outstanding interrupt conditions */
	writew(0xffff, mmio + NV_ADMA_STAT);

	/* initialize port variables */
	pp->flags |= NV_ADMA_PORT_REGISTER_MODE;

	/* clear CPB fetch count */
	writew(0, mmio + NV_ADMA_CPB_COUNT);

	/* clear GO for register mode, enable interrupt */
	tmp = readw(mmio + NV_ADMA_CTL);
	writew((tmp & ~NV_ADMA_CTL_GO) | NV_ADMA_CTL_AIEN |
	       NV_ADMA_CTL_HOTPLUG_IEN, mmio + NV_ADMA_CTL);

	tmp = readw(mmio + NV_ADMA_CTL);
	writew(tmp | NV_ADMA_CTL_CHANNEL_RESET, mmio + NV_ADMA_CTL);
	readw(mmio + NV_ADMA_CTL);	/* flush posted write */
	udelay(1);
	writew(tmp & ~NV_ADMA_CTL_CHANNEL_RESET, mmio + NV_ADMA_CTL);
	readw(mmio + NV_ADMA_CTL);	/* flush posted write */

	return 0;
}

static void nv_adma_setup_port(struct ata_probe_ent *probe_ent, unsigned int port)
{
	void __iomem *mmio = probe_ent->iomap[NV_MMIO_BAR];
	struct ata_ioports *ioport = &probe_ent->port[port];

	VPRINTK("ENTER\n");

	mmio += NV_ADMA_PORT + port * NV_ADMA_PORT_SIZE;

	ioport->cmd_addr	= mmio;
	ioport->data_addr	= mmio + (ATA_REG_DATA * 4);
	ioport->error_addr	=
	ioport->feature_addr	= mmio + (ATA_REG_ERR * 4);
	ioport->nsect_addr	= mmio + (ATA_REG_NSECT * 4);
	ioport->lbal_addr	= mmio + (ATA_REG_LBAL * 4);
	ioport->lbam_addr	= mmio + (ATA_REG_LBAM * 4);
	ioport->lbah_addr	= mmio + (ATA_REG_LBAH * 4);
	ioport->device_addr	= mmio + (ATA_REG_DEVICE * 4);
	ioport->status_addr	=
	ioport->command_addr	= mmio + (ATA_REG_STATUS * 4);
	ioport->altstatus_addr	=
	ioport->ctl_addr	= mmio + 0x20;
}
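
/*
 * Note (added commentary): in register mode the ADMA block mirrors the
 * legacy taskfile at 32-bit strides, hence the (ATA_REG_xxx * 4) offsets
 * above; e.g. with the constants defined earlier, port 1's status register
 * sits at BAR5 + NV_ADMA_PORT + NV_ADMA_PORT_SIZE + ATA_REG_STATUS * 4.
 */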

static int nv_adma_host_init(struct ata_probe_ent *probe_ent)
{
	struct pci_dev *pdev = to_pci_dev(probe_ent->dev);
	unsigned int i;
	u32 tmp32;

	VPRINTK("ENTER\n");

	/* enable ADMA on the ports */
	pci_read_config_dword(pdev, NV_MCP_SATA_CFG_20, &tmp32);
	tmp32 |= NV_MCP_SATA_CFG_20_PORT0_EN |
		 NV_MCP_SATA_CFG_20_PORT0_PWB_EN |
		 NV_MCP_SATA_CFG_20_PORT1_EN |
		 NV_MCP_SATA_CFG_20_PORT1_PWB_EN;

	pci_write_config_dword(pdev, NV_MCP_SATA_CFG_20, tmp32);

	for (i = 0; i < probe_ent->n_ports; i++)
		nv_adma_setup_port(probe_ent, i);

	return 0;
}

static void nv_adma_fill_aprd(struct ata_queued_cmd *qc,
			      struct scatterlist *sg,
			      int idx,
			      struct nv_adma_prd *aprd)
{
	u8 flags = 0;

	if (qc->tf.flags & ATA_TFLAG_WRITE)
		flags |= NV_APRD_WRITE;
	if (idx == qc->n_elem - 1)
		flags |= NV_APRD_END;
	else if (idx != 4)
		flags |= NV_APRD_CONT;

	aprd->addr  = cpu_to_le64(((u64)sg_dma_address(sg)));
	aprd->len   = cpu_to_le32(((u32)sg_dma_len(sg))); /* len in bytes */
	aprd->flags = flags;
	aprd->packet_len = 0;
}

static void nv_adma_fill_sg(struct ata_queued_cmd *qc, struct nv_adma_cpb *cpb)
{
	struct nv_adma_port_priv *pp = qc->ap->private_data;
	unsigned int idx;
	struct nv_adma_prd *aprd;
	struct scatterlist *sg;

	VPRINTK("ENTER\n");

	idx = 0;

	ata_for_each_sg(sg, qc) {
		aprd = (idx < 5) ? &cpb->aprd[idx] :
			&pp->aprd[NV_ADMA_SGTBL_LEN * qc->tag + (idx-5)];
		nv_adma_fill_aprd(qc, sg, idx, aprd);
		idx++;
	}
	if (idx > 5)
		cpb->next_aprd = cpu_to_le64(((u64)(pp->aprd_dma + NV_ADMA_SGTBL_SZ * qc->tag)));
	else
		cpb->next_aprd = cpu_to_le64(0);
}
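
/*
 * Layout sketch (added commentary): the external APRD tables live back to
 * back in the per-port allocation, NV_ADMA_SGTBL_SZ bytes per tag; e.g.
 * tag 3's table starts at pp->aprd_dma + 3 * NV_ADMA_SGTBL_SZ in bus space
 * and at pp->aprd[NV_ADMA_SGTBL_LEN * 3] in CPU space, matching the
 * indexing used in nv_adma_fill_sg() above.
 */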

static int nv_adma_use_reg_mode(struct ata_queued_cmd *qc)
{
	struct nv_adma_port_priv *pp = qc->ap->private_data;

	/* ADMA engine can only be used for non-ATAPI DMA commands,
	   or interrupt-driven no-data commands. */
	if((pp->flags & NV_ADMA_ATAPI_SETUP_COMPLETE) ||
	   (qc->tf.flags & ATA_TFLAG_POLLING))
		return 1;

	if((qc->flags & ATA_QCFLAG_DMAMAP) ||
	   (qc->tf.protocol == ATA_PROT_NODATA))
		return 0;

	return 1;
}
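
/*
 * Decision summary (added commentary): taken together, the two tests above
 * route a command through ADMA only when the port has no ATAPI setup, the
 * command is not polled, and it is either DMA-mapped or a no-data protocol;
 * PIO data transfers, polled commands and ATAPI all fall back to register
 * mode.
 */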

static void nv_adma_qc_prep(struct ata_queued_cmd *qc)
{
	struct nv_adma_port_priv *pp = qc->ap->private_data;
	struct nv_adma_cpb *cpb = &pp->cpb[qc->tag];
	u8 ctl_flags = NV_CPB_CTL_CPB_VALID |
		       NV_CPB_CTL_IEN;

	if (nv_adma_use_reg_mode(qc)) {
		nv_adma_register_mode(qc->ap);
		ata_qc_prep(qc);
		return;
	}

	cpb->resp_flags = NV_CPB_RESP_DONE;
	wmb();
	cpb->ctl_flags = 0;
	wmb();

	cpb->len		= 3;
	cpb->tag		= qc->tag;
	cpb->next_cpb_idx	= 0;

	/* turn on NCQ flags for NCQ commands */
	if (qc->tf.protocol == ATA_PROT_NCQ)
		ctl_flags |= NV_CPB_CTL_QUEUE | NV_CPB_CTL_FPDMA;

	VPRINTK("qc->flags = 0x%lx\n", qc->flags);

	nv_adma_tf_to_cpb(&qc->tf, cpb->tf);

	if(qc->flags & ATA_QCFLAG_DMAMAP) {
		nv_adma_fill_sg(qc, cpb);
		ctl_flags |= NV_CPB_CTL_APRD_VALID;
	} else
		memset(&cpb->aprd[0], 0, sizeof(struct nv_adma_prd) * 5);

	/* Be paranoid and don't let the device see NV_CPB_CTL_CPB_VALID until we are
	   finished filling in all of the contents */
	wmb();
	cpb->ctl_flags = ctl_flags;
	wmb();
	cpb->resp_flags = 0;
}

static unsigned int nv_adma_qc_issue(struct ata_queued_cmd *qc)
{
	struct nv_adma_port_priv *pp = qc->ap->private_data;
	void __iomem *mmio = pp->ctl_block;
	int curr_ncq = (qc->tf.protocol == ATA_PROT_NCQ);

	VPRINTK("ENTER\n");

	if (nv_adma_use_reg_mode(qc)) {
		/* use ATA register mode */
		VPRINTK("using ATA register mode: 0x%lx\n", qc->flags);
		nv_adma_register_mode(qc->ap);
		return ata_qc_issue_prot(qc);
	} else
		nv_adma_mode(qc->ap);

	/* write append register, command tag in lower 8 bits
	   and (number of cpbs to append -1) in top 8 bits */
	wmb();

	if(curr_ncq != pp->last_issue_ncq) {
		/* Seems to need some delay before switching between NCQ and non-NCQ
		   commands, else we get command timeouts and such. */
		udelay(20);
		pp->last_issue_ncq = curr_ncq;
	}

	writew(qc->tag, mmio + NV_ADMA_APPEND);

	DPRINTK("Issued tag %u\n", qc->tag);

	return 0;
}
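
/*
 * Encoding sketch (added commentary): per the append-register comment
 * above, a 16-bit value of 0x0005 appends a single CPB for tag 5
 * (count-1 = 0 in the top byte), while 0x0105 would append two
 * consecutive CPBs starting at tag 5; this driver only ever appends one
 * CPB per issue.
 */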

static irqreturn_t nv_generic_interrupt(int irq, void *dev_instance)
{
	struct ata_host *host = dev_instance;
	unsigned int i;
	unsigned int handled = 0;
	unsigned long flags;

	spin_lock_irqsave(&host->lock, flags);

	for (i = 0; i < host->n_ports; i++) {
		struct ata_port *ap;

		ap = host->ports[i];
		if (ap &&
		    !(ap->flags & ATA_FLAG_DISABLED)) {
			struct ata_queued_cmd *qc;

			qc = ata_qc_from_tag(ap, ap->active_tag);
			if (qc && (!(qc->tf.flags & ATA_TFLAG_POLLING)))
				handled += ata_host_intr(ap, qc);
			else
				// No request pending?  Clear interrupt status
				// anyway, in case there's one pending.
				ap->ops->check_status(ap);
		}
	}

	spin_unlock_irqrestore(&host->lock, flags);

	return IRQ_RETVAL(handled);
}

static irqreturn_t nv_do_interrupt(struct ata_host *host, u8 irq_stat)
{
	int i, handled = 0;

	for (i = 0; i < host->n_ports; i++) {
		struct ata_port *ap = host->ports[i];

		if (ap && !(ap->flags & ATA_FLAG_DISABLED))
			handled += nv_host_intr(ap, irq_stat);

		irq_stat >>= NV_INT_PORT_SHIFT;
	}

	return IRQ_RETVAL(handled);
}

static irqreturn_t nv_nf2_interrupt(int irq, void *dev_instance)
{
	struct ata_host *host = dev_instance;
	u8 irq_stat;
	irqreturn_t ret;

	spin_lock(&host->lock);
	irq_stat = ioread8(host->ports[0]->ioaddr.scr_addr + NV_INT_STATUS);
	ret = nv_do_interrupt(host, irq_stat);
	spin_unlock(&host->lock);

	return ret;
}

static irqreturn_t nv_ck804_interrupt(int irq, void *dev_instance)
{
	struct ata_host *host = dev_instance;
	u8 irq_stat;
	irqreturn_t ret;

	spin_lock(&host->lock);
	irq_stat = readb(host->iomap[NV_MMIO_BAR] + NV_INT_STATUS_CK804);
	ret = nv_do_interrupt(host, irq_stat);
	spin_unlock(&host->lock);

	return ret;
}

static u32 nv_scr_read (struct ata_port *ap, unsigned int sc_reg)
{
	if (sc_reg > SCR_CONTROL)
		return 0xffffffffU;

	return ioread32(ap->ioaddr.scr_addr + (sc_reg * 4));
}

static void nv_scr_write (struct ata_port *ap, unsigned int sc_reg, u32 val)
{
	if (sc_reg > SCR_CONTROL)
		return;

	iowrite32(val, ap->ioaddr.scr_addr + (sc_reg * 4));
}

static void nv_nf2_freeze(struct ata_port *ap)
{
	void __iomem *scr_addr = ap->host->ports[0]->ioaddr.scr_addr;
	int shift = ap->port_no * NV_INT_PORT_SHIFT;
	u8 mask;

	mask = ioread8(scr_addr + NV_INT_ENABLE);
	mask &= ~(NV_INT_ALL << shift);
	iowrite8(mask, scr_addr + NV_INT_ENABLE);
}

static void nv_nf2_thaw(struct ata_port *ap)
{
	void __iomem *scr_addr = ap->host->ports[0]->ioaddr.scr_addr;
	int shift = ap->port_no * NV_INT_PORT_SHIFT;
	u8 mask;

	iowrite8(NV_INT_ALL << shift, scr_addr + NV_INT_STATUS);

	mask = ioread8(scr_addr + NV_INT_ENABLE);
	mask |= (NV_INT_MASK << shift);
	iowrite8(mask, scr_addr + NV_INT_ENABLE);
}
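
/*
 * Illustrative arithmetic (added commentary): each port owns a 4-bit field
 * in the INT_STATUS/INT_ENABLE registers (NV_INT_PORT_SHIFT), so port 0
 * uses bits 0-3 and port 1 bits 4-7; freezing port 1 clears
 * NV_INT_ALL << 4 (0xf0) from the enable mask while leaving port 0's bits
 * untouched.
 */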

static void nv_ck804_freeze(struct ata_port *ap)
{
	void __iomem *mmio_base = ap->host->iomap[NV_MMIO_BAR];
	int shift = ap->port_no * NV_INT_PORT_SHIFT;
	u8 mask;

	mask = readb(mmio_base + NV_INT_ENABLE_CK804);
	mask &= ~(NV_INT_ALL << shift);
	writeb(mask, mmio_base + NV_INT_ENABLE_CK804);
}

static void nv_ck804_thaw(struct ata_port *ap)
{
	void __iomem *mmio_base = ap->host->iomap[NV_MMIO_BAR];
	int shift = ap->port_no * NV_INT_PORT_SHIFT;
	u8 mask;

	writeb(NV_INT_ALL << shift, mmio_base + NV_INT_STATUS_CK804);

	mask = readb(mmio_base + NV_INT_ENABLE_CK804);
	mask |= (NV_INT_MASK << shift);
	writeb(mask, mmio_base + NV_INT_ENABLE_CK804);
}

static int nv_hardreset(struct ata_port *ap, unsigned int *class)
{
	unsigned int dummy;

	/* SATA hardreset fails to retrieve proper device signature on
	 * some controllers.  Don't classify on hardreset.  For more
	 * info, see http://bugme.osdl.org/show_bug.cgi?id=3352
	 */
	return sata_std_hardreset(ap, &dummy);
}

static void nv_error_handler(struct ata_port *ap)
{
	ata_bmdma_drive_eh(ap, ata_std_prereset, ata_std_softreset,
			   nv_hardreset, ata_std_postreset);
}

static void nv_adma_error_handler(struct ata_port *ap)
{
	struct nv_adma_port_priv *pp = ap->private_data;
	if(!(pp->flags & NV_ADMA_PORT_REGISTER_MODE)) {
		void __iomem *mmio = pp->ctl_block;
		int i;
		u16 tmp;

		if(ata_tag_valid(ap->active_tag) || ap->sactive) {
			u32 notifier = readl(mmio + NV_ADMA_NOTIFIER);
			u32 notifier_error = readl(mmio + NV_ADMA_NOTIFIER_ERROR);
			u32 gen_ctl = readl(pp->gen_block + NV_ADMA_GEN_CTL);
			u32 status = readw(mmio + NV_ADMA_STAT);
			u8 cpb_count = readb(mmio + NV_ADMA_CPB_COUNT);
			u8 next_cpb_idx = readb(mmio + NV_ADMA_NEXT_CPB_IDX);

			ata_port_printk(ap, KERN_ERR, "EH in ADMA mode, notifier 0x%X "
				"notifier_error 0x%X gen_ctl 0x%X status 0x%X "
				"next cpb count 0x%X next cpb idx 0x%x\n",
				notifier, notifier_error, gen_ctl, status,
				cpb_count, next_cpb_idx);

			for( i=0;i<NV_ADMA_MAX_CPBS;i++) {
				struct nv_adma_cpb *cpb = &pp->cpb[i];
				if( (ata_tag_valid(ap->active_tag) && i == ap->active_tag) ||
				    ap->sactive & (1 << i) )
					ata_port_printk(ap, KERN_ERR,
						"CPB %d: ctl_flags 0x%x, resp_flags 0x%x\n",
						i, cpb->ctl_flags, cpb->resp_flags);
			}
		}

		/* Push us back into port register mode for error handling. */
		nv_adma_register_mode(ap);

		/* Mark all of the CPBs as invalid to prevent them from being executed */
		for( i=0;i<NV_ADMA_MAX_CPBS;i++)
			pp->cpb[i].ctl_flags &= ~NV_CPB_CTL_CPB_VALID;

		/* clear CPB fetch count */
		writew(0, mmio + NV_ADMA_CPB_COUNT);

		/* Reset channel */
		tmp = readw(mmio + NV_ADMA_CTL);
		writew(tmp | NV_ADMA_CTL_CHANNEL_RESET, mmio + NV_ADMA_CTL);
		readw(mmio + NV_ADMA_CTL);	/* flush posted write */
		udelay(1);
		writew(tmp & ~NV_ADMA_CTL_CHANNEL_RESET, mmio + NV_ADMA_CTL);
		readw(mmio + NV_ADMA_CTL);	/* flush posted write */
	}

	ata_bmdma_drive_eh(ap, ata_std_prereset, ata_std_softreset,
			   nv_hardreset, ata_std_postreset);
}

static int nv_init_one (struct pci_dev *pdev, const struct pci_device_id *ent)
{
	static int printed_version = 0;
	struct ata_port_info *ppi[2];
	struct ata_probe_ent *probe_ent;
	struct nv_host_priv *hpriv;
	int rc;
	u32 bar;
	void __iomem *base;
	unsigned long type = ent->driver_data;
	int mask_set = 0;

	// Make sure this is a SATA controller by counting the number of bars
	// (NVIDIA SATA controllers will always have six bars).  Otherwise,
	// it's an IDE controller and we ignore it.
	for (bar=0; bar<6; bar++)
		if (pci_resource_start(pdev, bar) == 0)
			return -ENODEV;

	if (!printed_version++)
		dev_printk(KERN_DEBUG, &pdev->dev, "version " DRV_VERSION "\n");

	rc = pcim_enable_device(pdev);
	if (rc)
		return rc;

	rc = pci_request_regions(pdev, DRV_NAME);
	if (rc) {
		pcim_pin_device(pdev);
		return rc;
	}

	if(type >= CK804 && adma_enabled) {
		dev_printk(KERN_NOTICE, &pdev->dev, "Using ADMA mode\n");
		type = ADMA;
		if(!pci_set_dma_mask(pdev, DMA_64BIT_MASK) &&
		   !pci_set_consistent_dma_mask(pdev, DMA_64BIT_MASK))
			mask_set = 1;
	}
	if(!mask_set) {
		rc = pci_set_dma_mask(pdev, ATA_DMA_MASK);
		if (rc)
			return rc;
		rc = pci_set_consistent_dma_mask(pdev, ATA_DMA_MASK);
		if (rc)
			return rc;
	}

	hpriv = devm_kzalloc(&pdev->dev, sizeof(*hpriv), GFP_KERNEL);
	if (!hpriv)
		return -ENOMEM;

	ppi[0] = ppi[1] = &nv_port_info[type];
	probe_ent = ata_pci_init_native_mode(pdev, ppi, ATA_PORT_PRIMARY | ATA_PORT_SECONDARY);
	if (!probe_ent)
		return -ENOMEM;

	if (!pcim_iomap(pdev, NV_MMIO_BAR, 0))
		return -EIO;
	probe_ent->iomap = pcim_iomap_table(pdev);

	probe_ent->private_data = hpriv;
	hpriv->type = type;

	base = probe_ent->iomap[NV_MMIO_BAR];
	probe_ent->port[0].scr_addr = base + NV_PORT0_SCR_REG_OFFSET;
	probe_ent->port[1].scr_addr = base + NV_PORT1_SCR_REG_OFFSET;

	/* enable SATA space for CK804 */
	if (type >= CK804) {
		u8 regval;

		pci_read_config_byte(pdev, NV_MCP_SATA_CFG_20, &regval);
		regval |= NV_MCP_SATA_CFG_20_SATA_SPACE_EN;
		pci_write_config_byte(pdev, NV_MCP_SATA_CFG_20, regval);
	}

	pci_set_master(pdev);
	if (type == ADMA) {
		rc = nv_adma_host_init(probe_ent);
		if (rc)
			return rc;
	}

	rc = ata_device_add(probe_ent);
	if (rc != NV_PORTS)
		return -ENODEV;

	devm_kfree(&pdev->dev, probe_ent);
	return 0;
}

static void nv_remove_one (struct pci_dev *pdev)
{
	struct ata_host *host = dev_get_drvdata(&pdev->dev);
	struct nv_host_priv *hpriv = host->private_data;

	/* hpriv is devm-allocated and freed automatically on detach */
	(void)hpriv;
	ata_pci_remove_one(pdev);
}

static int nv_pci_device_resume(struct pci_dev *pdev)
{
	struct ata_host *host = dev_get_drvdata(&pdev->dev);
	struct nv_host_priv *hpriv = host->private_data;
	int rc;

	rc = ata_pci_device_do_resume(pdev);
	if(rc)
		return rc;

	if (pdev->dev.power.power_state.event == PM_EVENT_SUSPEND) {
		if(hpriv->type >= CK804) {
			u8 regval;

			pci_read_config_byte(pdev, NV_MCP_SATA_CFG_20, &regval);
			regval |= NV_MCP_SATA_CFG_20_SATA_SPACE_EN;
			pci_write_config_byte(pdev, NV_MCP_SATA_CFG_20, regval);
		}
		if(hpriv->type == ADMA) {
			u32 tmp32;
			struct nv_adma_port_priv *pp;
			/* enable/disable ADMA on the ports appropriately */
			pci_read_config_dword(pdev, NV_MCP_SATA_CFG_20, &tmp32);

			pp = host->ports[0]->private_data;
			if(pp->flags & NV_ADMA_ATAPI_SETUP_COMPLETE)
				tmp32 &= ~(NV_MCP_SATA_CFG_20_PORT0_EN |
					   NV_MCP_SATA_CFG_20_PORT0_PWB_EN);
			else
				tmp32 |= (NV_MCP_SATA_CFG_20_PORT0_EN |
					  NV_MCP_SATA_CFG_20_PORT0_PWB_EN);
			pp = host->ports[1]->private_data;
			if(pp->flags & NV_ADMA_ATAPI_SETUP_COMPLETE)
				tmp32 &= ~(NV_MCP_SATA_CFG_20_PORT1_EN |
					   NV_MCP_SATA_CFG_20_PORT1_PWB_EN);
			else
				tmp32 |= (NV_MCP_SATA_CFG_20_PORT1_EN |
					  NV_MCP_SATA_CFG_20_PORT1_PWB_EN);

			pci_write_config_dword(pdev, NV_MCP_SATA_CFG_20, tmp32);
		}
	}

	ata_host_resume(host);

	return 0;
}

static void nv_ck804_host_stop(struct ata_host *host)
{
	struct pci_dev *pdev = to_pci_dev(host->dev);
	u8 regval;

	/* disable SATA space for CK804 */
	pci_read_config_byte(pdev, NV_MCP_SATA_CFG_20, &regval);
	regval &= ~NV_MCP_SATA_CFG_20_SATA_SPACE_EN;
	pci_write_config_byte(pdev, NV_MCP_SATA_CFG_20, regval);
}

static void nv_adma_host_stop(struct ata_host *host)
{
	struct pci_dev *pdev = to_pci_dev(host->dev);
	u32 tmp32;

	/* disable ADMA on the ports */
	pci_read_config_dword(pdev, NV_MCP_SATA_CFG_20, &tmp32);
	tmp32 &= ~(NV_MCP_SATA_CFG_20_PORT0_EN |
		   NV_MCP_SATA_CFG_20_PORT0_PWB_EN |
		   NV_MCP_SATA_CFG_20_PORT1_EN |
		   NV_MCP_SATA_CFG_20_PORT1_PWB_EN);

	pci_write_config_dword(pdev, NV_MCP_SATA_CFG_20, tmp32);

	nv_ck804_host_stop(host);
}

static int __init nv_init(void)
{
	return pci_register_driver(&nv_pci_driver);
}

static void __exit nv_exit(void)
{
	pci_unregister_driver(&nv_pci_driver);
}

module_init(nv_init);
module_exit(nv_exit);
module_param_named(adma, adma_enabled, bool, 0444);
MODULE_PARM_DESC(adma, "Enable use of ADMA (Default: true)");
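
/*
 * Usage sketch (added commentary, not from the original source): ADMA can
 * be disabled at module load time, e.g.
 *
 *	modprobe sata_nv adma=0
 *
 * which keeps CK804/MCP04 controllers on the legacy BMDMA interface.
 */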