/*
 *  sata_nv.c - NVIDIA nForce SATA
 *
 *  Copyright 2004 NVIDIA Corp.  All rights reserved.
 *  Copyright 2004 Andrew Chew
 *
 *  This program is free software; you can redistribute it and/or modify
 *  it under the terms of the GNU General Public License as published by
 *  the Free Software Foundation; either version 2, or (at your option)
 *  any later version.
 *
 *  This program is distributed in the hope that it will be useful,
 *  but WITHOUT ANY WARRANTY; without even the implied warranty of
 *  MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 *  GNU General Public License for more details.
 *
 *  You should have received a copy of the GNU General Public License
 *  along with this program; see the file COPYING.  If not, write to
 *  the Free Software Foundation, 675 Mass Ave, Cambridge, MA 02139, USA.
 *
 *  libata documentation is available via 'make {ps|pdf}docs',
 *  as Documentation/DocBook/libata.*
 *
 *  No hardware documentation available outside of NVIDIA.
 *  This driver programs the NVIDIA SATA controller in a similar
 *  fashion as with other PCI IDE BMDMA controllers, with a few
 *  NV-specific details such as register offsets, SATA phy location,
 *  hotplug info, etc.
 *
 *  CK804/MCP04 controllers support an alternate programming interface
 *  similar to the ADMA specification (with some modifications).
 *  This allows the use of NCQ. Non-DMA-mapped ATA commands are still
 *  sent through the legacy interface.
 */
#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/pci.h>
#include <linux/init.h>
#include <linux/blkdev.h>
#include <linux/delay.h>
#include <linux/interrupt.h>
#include <linux/device.h>
#include <scsi/scsi_host.h>
#include <scsi/scsi_device.h>
#include <linux/libata.h>
#define DRV_NAME "sata_nv"
#define DRV_VERSION "3.5"

#define NV_ADMA_DMA_BOUNDARY 0xffffffffUL
enum {
        NV_MMIO_BAR = 5,

        NV_PORT0_SCR_REG_OFFSET = 0x00,
        NV_PORT1_SCR_REG_OFFSET = 0x40,

        /* INT_STATUS/ENABLE */
        NV_INT_STATUS = 0x10,
        NV_INT_ENABLE = 0x11,
        NV_INT_STATUS_CK804 = 0x440,
        NV_INT_ENABLE_CK804 = 0x441,

        /* INT_STATUS/ENABLE bits */
        NV_INT_DEV = 0x01,
        NV_INT_PM = 0x02,
        NV_INT_ADDED = 0x04,
        NV_INT_REMOVED = 0x08,

        NV_INT_PORT_SHIFT = 4,  /* each port occupies 4 bits */

        NV_INT_ALL = 0x0f,
        NV_INT_MASK = NV_INT_DEV |
                      NV_INT_ADDED | NV_INT_REMOVED,

        /* INT_CONFIG */
        NV_INT_CONFIG = 0x12,
        NV_INT_CONFIG_METHD = 0x01, // 0 = INT, 1 = SMI

        // For PCI config register 20
        NV_MCP_SATA_CFG_20 = 0x50,
        NV_MCP_SATA_CFG_20_SATA_SPACE_EN = 0x04,
        NV_MCP_SATA_CFG_20_PORT0_EN = (1 << 17),
        NV_MCP_SATA_CFG_20_PORT1_EN = (1 << 16),
        NV_MCP_SATA_CFG_20_PORT0_PWB_EN = (1 << 14),
        NV_MCP_SATA_CFG_20_PORT1_PWB_EN = (1 << 12),

        NV_ADMA_MAX_CPBS = 32,
        NV_ADMA_CPB_SZ = 128,
        NV_ADMA_APRD_SZ = 16,
        NV_ADMA_SGTBL_LEN = (1024 - NV_ADMA_CPB_SZ) /
                            NV_ADMA_APRD_SZ,
        NV_ADMA_SGTBL_TOTAL_LEN = NV_ADMA_SGTBL_LEN + 5,
        NV_ADMA_SGTBL_SZ = NV_ADMA_SGTBL_LEN * NV_ADMA_APRD_SZ,
        NV_ADMA_PORT_PRIV_DMA_SZ = NV_ADMA_MAX_CPBS *
                                   (NV_ADMA_CPB_SZ + NV_ADMA_SGTBL_SZ),

        /* BAR5 offset to ADMA general registers */
        NV_ADMA_GEN = 0x400,
        NV_ADMA_GEN_CTL = 0x00,
        NV_ADMA_NOTIFIER_CLEAR = 0x30,

        /* BAR5 offset to ADMA ports */
        NV_ADMA_PORT = 0x480,

        /* size of ADMA port register space */
        NV_ADMA_PORT_SIZE = 0x100,

        /* ADMA port registers */
        NV_ADMA_CTL = 0x40,
        NV_ADMA_CPB_COUNT = 0x42,
        NV_ADMA_NEXT_CPB_IDX = 0x43,
        NV_ADMA_STAT = 0x44,
        NV_ADMA_CPB_BASE_LOW = 0x48,
        NV_ADMA_CPB_BASE_HIGH = 0x4C,
        NV_ADMA_APPEND = 0x50,
        NV_ADMA_NOTIFIER = 0x68,
        NV_ADMA_NOTIFIER_ERROR = 0x6C,

        /* NV_ADMA_CTL register bits */
        NV_ADMA_CTL_HOTPLUG_IEN = (1 << 0),
        NV_ADMA_CTL_CHANNEL_RESET = (1 << 5),
        NV_ADMA_CTL_GO = (1 << 7),
        NV_ADMA_CTL_AIEN = (1 << 8),
        NV_ADMA_CTL_READ_NON_COHERENT = (1 << 11),
        NV_ADMA_CTL_WRITE_NON_COHERENT = (1 << 12),

        /* CPB response flag bits */
        NV_CPB_RESP_DONE = (1 << 0),
        NV_CPB_RESP_ATA_ERR = (1 << 3),
        NV_CPB_RESP_CMD_ERR = (1 << 4),
        NV_CPB_RESP_CPB_ERR = (1 << 7),

        /* CPB control flag bits */
        NV_CPB_CTL_CPB_VALID = (1 << 0),
        NV_CPB_CTL_QUEUE = (1 << 1),
        NV_CPB_CTL_APRD_VALID = (1 << 2),
        NV_CPB_CTL_IEN = (1 << 3),
        NV_CPB_CTL_FPDMA = (1 << 4),

        /* APRD flags */
        NV_APRD_WRITE = (1 << 1),
        NV_APRD_END = (1 << 2),
        NV_APRD_CONT = (1 << 3),

        /* NV_ADMA_STAT flags */
        NV_ADMA_STAT_TIMEOUT = (1 << 0),
        NV_ADMA_STAT_HOTUNPLUG = (1 << 1),
        NV_ADMA_STAT_HOTPLUG = (1 << 2),
        NV_ADMA_STAT_CPBERR = (1 << 4),
        NV_ADMA_STAT_SERROR = (1 << 5),
        NV_ADMA_STAT_CMD_COMPLETE = (1 << 6),
        NV_ADMA_STAT_IDLE = (1 << 8),
        NV_ADMA_STAT_LEGACY = (1 << 9),
        NV_ADMA_STAT_STOPPED = (1 << 10),
        NV_ADMA_STAT_DONE = (1 << 12),
        NV_ADMA_STAT_ERR = NV_ADMA_STAT_CPBERR |
                           NV_ADMA_STAT_TIMEOUT,

        /* port flags */
        NV_ADMA_PORT_REGISTER_MODE = (1 << 0),
        NV_ADMA_ATAPI_SETUP_COMPLETE = (1 << 1),

        /* MCP55 reg offset */
        NV_CTL_MCP55 = 0x400,
        NV_INT_STATUS_MCP55 = 0x440,
        NV_INT_ENABLE_MCP55 = 0x444,
        NV_NCQ_REG_MCP55 = 0x448,

        /* MCP55 interrupt bits */
        NV_INT_ALL_MCP55 = 0xffff,
        NV_INT_PORT_SHIFT_MCP55 = 16,   /* each port occupies 16 bits */
        NV_INT_MASK_MCP55 = NV_INT_ALL_MCP55 & 0xfffd,

        /* SWNCQ ENABLE BITS */
        NV_CTL_PRI_SWNCQ = 0x02,
        NV_CTL_SEC_SWNCQ = 0x04,

        /* SW NCQ status bits */
        NV_SWNCQ_IRQ_DEV = (1 << 0),
        NV_SWNCQ_IRQ_PM = (1 << 1),
        NV_SWNCQ_IRQ_ADDED = (1 << 2),
        NV_SWNCQ_IRQ_REMOVED = (1 << 3),

        NV_SWNCQ_IRQ_BACKOUT = (1 << 4),
        NV_SWNCQ_IRQ_SDBFIS = (1 << 5),
        NV_SWNCQ_IRQ_DHREGFIS = (1 << 6),
        NV_SWNCQ_IRQ_DMASETUP = (1 << 7),

        NV_SWNCQ_IRQ_HOTPLUG = NV_SWNCQ_IRQ_ADDED |
                               NV_SWNCQ_IRQ_REMOVED,
};
/* ADMA Physical Region Descriptor - one SG segment */
struct nv_adma_prd {
        __le64 addr;
        __le32 len;
        u8 flags;
        u8 packet_len;
        __le16 reserved;
};
enum nv_adma_regbits {
        CMDEND = (1 << 15),             /* end of command list */
        WNB = (1 << 14),                /* wait-not-BSY */
        IGN = (1 << 13),                /* ignore this entry */
        CS1n = (1 << (4 + 8)),          /* std. PATA signals follow... */
        DA2 = (1 << (2 + 8)),
        DA1 = (1 << (1 + 8)),
        DA0 = (1 << (0 + 8)),
};
/* ADMA Command Parameter Block
   The first 5 SG segments are stored inside the Command Parameter Block itself.
   If there are more than 5 segments the remainder are stored in a separate
   memory area indicated by next_aprd. */
struct nv_adma_cpb {
        u8 resp_flags;                  /* 0 */
        u8 reserved1;                   /* 1 */
        u8 ctl_flags;                   /* 2 */
        /* len is length of taskfile in 64 bit words */
        u8 len;                         /* 3 */
        u8 tag;                         /* 4 */
        u8 next_cpb_idx;                /* 5 */
        __le16 reserved2;               /* 6-7 */
        __le16 tf[12];                  /* 8-31 */
        struct nv_adma_prd aprd[5];     /* 32-111 */
        __le64 next_aprd;               /* 112-119 */
        __le64 reserved3;               /* 120-127 */
};
struct nv_adma_port_priv {
        struct nv_adma_cpb *cpb;
        dma_addr_t cpb_dma;
        struct nv_adma_prd *aprd;
        dma_addr_t aprd_dma;
        void __iomem *ctl_block;
        void __iomem *gen_block;
        void __iomem *notifier_clear_block;
        u64 adma_dma_mask;
        u8 flags;
        int last_issue_ncq;
};
struct nv_host_priv {
        unsigned long type;
};

struct defer_queue {
        u32 defer_bits;
        unsigned int head;
        unsigned int tail;
        unsigned int tag[ATA_MAX_QUEUE];
};
enum ncq_saw_flag_list {
        ncq_saw_d2h = (1U << 0),
        ncq_saw_dmas = (1U << 1),
        ncq_saw_sdb = (1U << 2),
        ncq_saw_backout = (1U << 3),
};
struct nv_swncq_port_priv {
        struct ata_prd *prd;    /* our SG list */
        dma_addr_t prd_dma;     /* and its DMA mapping */
        void __iomem *sactive_block;
        void __iomem *irq_block;
        void __iomem *tag_block;
        u32 qc_active;

        unsigned int last_issue_tag;

        /* fifo circular queue to store deferred commands */
        struct defer_queue defer_queue;

        /* for NCQ interrupt analysis */
        u32 dhfis_bits;
        u32 dmafis_bits;
        u32 sdbfis_bits;

        unsigned int ncq_flags;
};
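/*
 * Each port owns one ADMA interrupt bit in the general control register;
 * per the macro below, port 0's bit is 19 and port 1's is 31 (12 apart).
 */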
#define NV_ADMA_CHECK_INTR(GCTL, PORT) ((GCTL) & (1 << (19 + (12 * (PORT)))))
static int nv_init_one(struct pci_dev *pdev, const struct pci_device_id *ent);
#ifdef CONFIG_PM
static int nv_pci_device_resume(struct pci_dev *pdev);
#endif
static void nv_ck804_host_stop(struct ata_host *host);
static irqreturn_t nv_generic_interrupt(int irq, void *dev_instance);
static irqreturn_t nv_nf2_interrupt(int irq, void *dev_instance);
static irqreturn_t nv_ck804_interrupt(int irq, void *dev_instance);
static int nv_scr_read(struct ata_port *ap, unsigned int sc_reg, u32 *val);
static int nv_scr_write(struct ata_port *ap, unsigned int sc_reg, u32 val);

static void nv_nf2_freeze(struct ata_port *ap);
static void nv_nf2_thaw(struct ata_port *ap);
static void nv_ck804_freeze(struct ata_port *ap);
static void nv_ck804_thaw(struct ata_port *ap);
static void nv_error_handler(struct ata_port *ap);
static int nv_adma_slave_config(struct scsi_device *sdev);
static int nv_adma_check_atapi_dma(struct ata_queued_cmd *qc);
static void nv_adma_qc_prep(struct ata_queued_cmd *qc);
static unsigned int nv_adma_qc_issue(struct ata_queued_cmd *qc);
static irqreturn_t nv_adma_interrupt(int irq, void *dev_instance);
static void nv_adma_irq_clear(struct ata_port *ap);
static int nv_adma_port_start(struct ata_port *ap);
static void nv_adma_port_stop(struct ata_port *ap);
#ifdef CONFIG_PM
static int nv_adma_port_suspend(struct ata_port *ap, pm_message_t mesg);
static int nv_adma_port_resume(struct ata_port *ap);
#endif
static void nv_adma_freeze(struct ata_port *ap);
static void nv_adma_thaw(struct ata_port *ap);
static void nv_adma_error_handler(struct ata_port *ap);
static void nv_adma_host_stop(struct ata_host *host);
static void nv_adma_post_internal_cmd(struct ata_queued_cmd *qc);
static void nv_adma_tf_read(struct ata_port *ap, struct ata_taskfile *tf);

static void nv_mcp55_thaw(struct ata_port *ap);
static void nv_mcp55_freeze(struct ata_port *ap);
static void nv_swncq_error_handler(struct ata_port *ap);
static int nv_swncq_slave_config(struct scsi_device *sdev);
static int nv_swncq_port_start(struct ata_port *ap);
static void nv_swncq_qc_prep(struct ata_queued_cmd *qc);
static void nv_swncq_fill_sg(struct ata_queued_cmd *qc);
static unsigned int nv_swncq_qc_issue(struct ata_queued_cmd *qc);
static void nv_swncq_irq_clear(struct ata_port *ap, u16 fis);
static irqreturn_t nv_swncq_interrupt(int irq, void *dev_instance);
#ifdef CONFIG_PM
static int nv_swncq_port_suspend(struct ata_port *ap, pm_message_t mesg);
static int nv_swncq_port_resume(struct ata_port *ap);
#endif
enum nv_host_type
{
        GENERIC,
        NFORCE2,
        NFORCE3 = NFORCE2,      /* NF2 == NF3 as far as sata_nv is concerned */
        CK804,
        ADMA,
        SWNCQ,
};
static const struct pci_device_id nv_pci_tbl[] = {
        { PCI_VDEVICE(NVIDIA, PCI_DEVICE_ID_NVIDIA_NFORCE2S_SATA), NFORCE2 },
        { PCI_VDEVICE(NVIDIA, PCI_DEVICE_ID_NVIDIA_NFORCE3S_SATA), NFORCE3 },
        { PCI_VDEVICE(NVIDIA, PCI_DEVICE_ID_NVIDIA_NFORCE3S_SATA2), NFORCE3 },
        { PCI_VDEVICE(NVIDIA, PCI_DEVICE_ID_NVIDIA_NFORCE_CK804_SATA), CK804 },
        { PCI_VDEVICE(NVIDIA, PCI_DEVICE_ID_NVIDIA_NFORCE_CK804_SATA2), CK804 },
        { PCI_VDEVICE(NVIDIA, PCI_DEVICE_ID_NVIDIA_NFORCE_MCP04_SATA), CK804 },
        { PCI_VDEVICE(NVIDIA, PCI_DEVICE_ID_NVIDIA_NFORCE_MCP04_SATA2), CK804 },
        { PCI_VDEVICE(NVIDIA, PCI_DEVICE_ID_NVIDIA_NFORCE_MCP51_SATA), SWNCQ },
        { PCI_VDEVICE(NVIDIA, PCI_DEVICE_ID_NVIDIA_NFORCE_MCP51_SATA2), SWNCQ },
        { PCI_VDEVICE(NVIDIA, PCI_DEVICE_ID_NVIDIA_NFORCE_MCP55_SATA), SWNCQ },
        { PCI_VDEVICE(NVIDIA, PCI_DEVICE_ID_NVIDIA_NFORCE_MCP55_SATA2), SWNCQ },
        { PCI_VDEVICE(NVIDIA, PCI_DEVICE_ID_NVIDIA_NFORCE_MCP61_SATA), GENERIC },
        { PCI_VDEVICE(NVIDIA, PCI_DEVICE_ID_NVIDIA_NFORCE_MCP61_SATA2), GENERIC },
        { PCI_VDEVICE(NVIDIA, PCI_DEVICE_ID_NVIDIA_NFORCE_MCP61_SATA3), GENERIC },

        { } /* terminate list */
};
static struct pci_driver nv_pci_driver = {
        .name           = DRV_NAME,
        .id_table       = nv_pci_tbl,
        .probe          = nv_init_one,
#ifdef CONFIG_PM
        .suspend        = ata_pci_device_suspend,
        .resume         = nv_pci_device_resume,
#endif
        .remove         = ata_pci_remove_one,
};
static struct scsi_host_template nv_sht = {
        ATA_BMDMA_SHT(DRV_NAME),
};

static struct scsi_host_template nv_adma_sht = {
        ATA_NCQ_SHT(DRV_NAME),
        .can_queue              = NV_ADMA_MAX_CPBS,
        .sg_tablesize           = NV_ADMA_SGTBL_TOTAL_LEN,
        .dma_boundary           = NV_ADMA_DMA_BOUNDARY,
        .slave_configure        = nv_adma_slave_config,
};

static struct scsi_host_template nv_swncq_sht = {
        ATA_NCQ_SHT(DRV_NAME),
        .can_queue              = ATA_MAX_QUEUE,
        .sg_tablesize           = LIBATA_MAX_PRD,
        .dma_boundary           = ATA_DMA_BOUNDARY,
        .slave_configure        = nv_swncq_slave_config,
};
static struct ata_port_operations nv_generic_ops = {
        .inherits               = &ata_bmdma_port_ops,
        .error_handler          = nv_error_handler,
        .scr_read               = nv_scr_read,
        .scr_write              = nv_scr_write,
};

static struct ata_port_operations nv_nf2_ops = {
        .inherits               = &nv_generic_ops,
        .freeze                 = nv_nf2_freeze,
        .thaw                   = nv_nf2_thaw,
};

static struct ata_port_operations nv_ck804_ops = {
        .inherits               = &nv_generic_ops,
        .freeze                 = nv_ck804_freeze,
        .thaw                   = nv_ck804_thaw,
        .host_stop              = nv_ck804_host_stop,
};

static struct ata_port_operations nv_adma_ops = {
        .inherits               = &nv_generic_ops,

        .check_atapi_dma        = nv_adma_check_atapi_dma,
        .tf_read                = nv_adma_tf_read,
        .qc_defer               = ata_std_qc_defer,
        .qc_prep                = nv_adma_qc_prep,
        .qc_issue               = nv_adma_qc_issue,
        .irq_clear              = nv_adma_irq_clear,

        .freeze                 = nv_adma_freeze,
        .thaw                   = nv_adma_thaw,
        .error_handler          = nv_adma_error_handler,
        .post_internal_cmd      = nv_adma_post_internal_cmd,

        .port_start             = nv_adma_port_start,
        .port_stop              = nv_adma_port_stop,
#ifdef CONFIG_PM
        .port_suspend           = nv_adma_port_suspend,
        .port_resume            = nv_adma_port_resume,
#endif
        .host_stop              = nv_adma_host_stop,
};

static struct ata_port_operations nv_swncq_ops = {
        .inherits               = &nv_generic_ops,

        .qc_defer               = ata_std_qc_defer,
        .qc_prep                = nv_swncq_qc_prep,
        .qc_issue               = nv_swncq_qc_issue,

        .freeze                 = nv_mcp55_freeze,
        .thaw                   = nv_mcp55_thaw,
        .error_handler          = nv_swncq_error_handler,

#ifdef CONFIG_PM
        .port_suspend           = nv_swncq_port_suspend,
        .port_resume            = nv_swncq_port_resume,
#endif
        .port_start             = nv_swncq_port_start,
};
struct nv_pi_priv {
        irq_handler_t irq_handler;
        struct scsi_host_template *sht;
};
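/*
 * Build a pointer to an anonymous compound-literal nv_pi_priv so each
 * ata_port_info below can stash its irq handler and sht in private_data.
 */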
#define NV_PI_PRIV(_irq_handler, _sht) \
        &(struct nv_pi_priv){ .irq_handler = _irq_handler, .sht = _sht }
static const struct ata_port_info nv_port_info[] = {
        /* generic */
        {
                .flags          = ATA_FLAG_SATA | ATA_FLAG_NO_LEGACY,
                .pio_mask       = NV_PIO_MASK,
                .mwdma_mask     = NV_MWDMA_MASK,
                .udma_mask      = NV_UDMA_MASK,
                .port_ops       = &nv_generic_ops,
                .private_data   = NV_PI_PRIV(nv_generic_interrupt, &nv_sht),
        },
        /* nforce2/3 */
        {
                .flags          = ATA_FLAG_SATA | ATA_FLAG_NO_LEGACY,
                .pio_mask       = NV_PIO_MASK,
                .mwdma_mask     = NV_MWDMA_MASK,
                .udma_mask      = NV_UDMA_MASK,
                .port_ops       = &nv_nf2_ops,
                .private_data   = NV_PI_PRIV(nv_nf2_interrupt, &nv_sht),
        },
        /* ck804 */
        {
                .flags          = ATA_FLAG_SATA | ATA_FLAG_NO_LEGACY,
                .pio_mask       = NV_PIO_MASK,
                .mwdma_mask     = NV_MWDMA_MASK,
                .udma_mask      = NV_UDMA_MASK,
                .port_ops       = &nv_ck804_ops,
                .private_data   = NV_PI_PRIV(nv_ck804_interrupt, &nv_sht),
        },
        /* ADMA */
        {
                .flags          = ATA_FLAG_SATA | ATA_FLAG_NO_LEGACY |
                                  ATA_FLAG_MMIO | ATA_FLAG_NCQ,
                .pio_mask       = NV_PIO_MASK,
                .mwdma_mask     = NV_MWDMA_MASK,
                .udma_mask      = NV_UDMA_MASK,
                .port_ops       = &nv_adma_ops,
                .private_data   = NV_PI_PRIV(nv_adma_interrupt, &nv_adma_sht),
        },
        /* SWNCQ */
        {
                .flags          = ATA_FLAG_SATA | ATA_FLAG_NO_LEGACY |
                                  ATA_FLAG_NCQ,
                .pio_mask       = NV_PIO_MASK,
                .mwdma_mask     = NV_MWDMA_MASK,
                .udma_mask      = NV_UDMA_MASK,
                .port_ops       = &nv_swncq_ops,
                .private_data   = NV_PI_PRIV(nv_swncq_interrupt, &nv_swncq_sht),
        },
};
MODULE_AUTHOR("NVIDIA");
MODULE_DESCRIPTION("low-level driver for NVIDIA nForce SATA controller");
MODULE_LICENSE("GPL");
MODULE_DEVICE_TABLE(pci, nv_pci_tbl);
MODULE_VERSION(DRV_VERSION);
static int adma_enabled = 1;
static int swncq_enabled;
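/*
 * Switch a port from ADMA operation back to legacy register mode: wait
 * (bounded) for the engine to go idle, clear the GO bit, then wait for
 * the LEGACY status bit before recording the mode in pp->flags.
 */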
static void nv_adma_register_mode(struct ata_port *ap)
{
        struct nv_adma_port_priv *pp = ap->private_data;
        void __iomem *mmio = pp->ctl_block;
        u16 tmp, status;
        int count = 0;

        if (pp->flags & NV_ADMA_PORT_REGISTER_MODE)
                return;

        status = readw(mmio + NV_ADMA_STAT);
        while (!(status & NV_ADMA_STAT_IDLE) && count < 20) {
                ndelay(50);
                status = readw(mmio + NV_ADMA_STAT);
                count++;
        }
        if (count == 20)
                ata_port_printk(ap, KERN_WARNING,
                        "timeout waiting for ADMA IDLE, stat=0x%hx\n",
                        status);

        tmp = readw(mmio + NV_ADMA_CTL);
        writew(tmp & ~NV_ADMA_CTL_GO, mmio + NV_ADMA_CTL);

        count = 0;
        status = readw(mmio + NV_ADMA_STAT);
        while (!(status & NV_ADMA_STAT_LEGACY) && count < 20) {
                ndelay(50);
                status = readw(mmio + NV_ADMA_STAT);
                count++;
        }
        if (count == 20)
                ata_port_printk(ap, KERN_WARNING,
                        "timeout waiting for ADMA LEGACY, stat=0x%hx\n",
                        status);

        pp->flags |= NV_ADMA_PORT_REGISTER_MODE;
}
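/*
 * The inverse of nv_adma_register_mode(): set the GO bit and wait
 * (bounded) for the engine to leave legacy mode and become idle.
 */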
static void nv_adma_mode(struct ata_port *ap)
{
        struct nv_adma_port_priv *pp = ap->private_data;
        void __iomem *mmio = pp->ctl_block;
        u16 tmp, status;
        int count = 0;

        if (!(pp->flags & NV_ADMA_PORT_REGISTER_MODE))
                return;

        WARN_ON(pp->flags & NV_ADMA_ATAPI_SETUP_COMPLETE);

        tmp = readw(mmio + NV_ADMA_CTL);
        writew(tmp | NV_ADMA_CTL_GO, mmio + NV_ADMA_CTL);

        status = readw(mmio + NV_ADMA_STAT);
        while (((status & NV_ADMA_STAT_LEGACY) ||
                !(status & NV_ADMA_STAT_IDLE)) && count < 20) {
                ndelay(50);
                status = readw(mmio + NV_ADMA_STAT);
                count++;
        }
        if (count == 20)
                ata_port_printk(ap, KERN_WARNING,
                        "timeout waiting for ADMA LEGACY clear and IDLE, stat=0x%hx\n",
                        status);

        pp->flags &= ~NV_ADMA_PORT_REGISTER_MODE;
}
static int nv_adma_slave_config(struct scsi_device *sdev)
{
        struct ata_port *ap = ata_shost_to_port(sdev->host);
        struct nv_adma_port_priv *pp = ap->private_data;
        struct nv_adma_port_priv *port0, *port1;
        struct scsi_device *sdev0, *sdev1;
        struct pci_dev *pdev = to_pci_dev(ap->host->dev);
        unsigned long segment_boundary, flags;
        unsigned short sg_tablesize;
        int rc;
        int adma_enable;
        u32 current_reg, new_reg, config_mask;

        rc = ata_scsi_slave_config(sdev);

        if (sdev->id >= ATA_MAX_DEVICES || sdev->channel || sdev->lun)
                /* Not a proper libata device, ignore */
                return rc;

        spin_lock_irqsave(ap->lock, flags);

        if (ap->link.device[sdev->id].class == ATA_DEV_ATAPI) {
                /*
                 * NVIDIA reports that ADMA mode does not support ATAPI commands.
                 * Therefore ATAPI commands are sent through the legacy interface.
                 * However, the legacy interface only supports 32-bit DMA.
                 * Restrict DMA parameters as required by the legacy interface
                 * when an ATAPI device is connected.
                 */
                segment_boundary = ATA_DMA_BOUNDARY;
                /* Subtract 1 since an extra entry may be needed for padding, see
                   libata-scsi.c */
                sg_tablesize = LIBATA_MAX_PRD - 1;

                /* Since the legacy DMA engine is in use, we need to disable ADMA
                   on the port. */
                adma_enable = 0;
                nv_adma_register_mode(ap);
        } else {
                segment_boundary = NV_ADMA_DMA_BOUNDARY;
                sg_tablesize = NV_ADMA_SGTBL_TOTAL_LEN;
                adma_enable = 1;
        }

        pci_read_config_dword(pdev, NV_MCP_SATA_CFG_20, &current_reg);

        if (ap->port_no == 1)
                config_mask = NV_MCP_SATA_CFG_20_PORT1_EN |
                              NV_MCP_SATA_CFG_20_PORT1_PWB_EN;
        else
                config_mask = NV_MCP_SATA_CFG_20_PORT0_EN |
                              NV_MCP_SATA_CFG_20_PORT0_PWB_EN;

        if (adma_enable) {
                new_reg = current_reg | config_mask;
                pp->flags &= ~NV_ADMA_ATAPI_SETUP_COMPLETE;
        } else {
                new_reg = current_reg & ~config_mask;
                pp->flags |= NV_ADMA_ATAPI_SETUP_COMPLETE;
        }

        if (current_reg != new_reg)
                pci_write_config_dword(pdev, NV_MCP_SATA_CFG_20, new_reg);

        port0 = ap->host->ports[0]->private_data;
        port1 = ap->host->ports[1]->private_data;
        sdev0 = ap->host->ports[0]->link.device[0].sdev;
        sdev1 = ap->host->ports[1]->link.device[0].sdev;
        if ((port0->flags & NV_ADMA_ATAPI_SETUP_COMPLETE) ||
            (port1->flags & NV_ADMA_ATAPI_SETUP_COMPLETE)) {
                /** We have to set the DMA mask to 32-bit if either port is in
                    ATAPI mode, since they are on the same PCI device which is
                    used for DMA mapping. If we set the mask we also need to set
                    the bounce limit on both ports to ensure that the block
                    layer doesn't feed addresses that cause DMA mapping to
                    choke. If either SCSI device is not allocated yet, it's OK
                    since that port will discover its correct setting when it
                    does get allocated.
                    Note: Setting 32-bit mask should not fail. */
                if (sdev0)
                        blk_queue_bounce_limit(sdev0->request_queue,
                                               ATA_DMA_MASK);
                if (sdev1)
                        blk_queue_bounce_limit(sdev1->request_queue,
                                               ATA_DMA_MASK);

                pci_set_dma_mask(pdev, ATA_DMA_MASK);
        } else {
                /** This shouldn't fail as it was set to this value before */
                pci_set_dma_mask(pdev, pp->adma_dma_mask);
                if (sdev0)
                        blk_queue_bounce_limit(sdev0->request_queue,
                                               pp->adma_dma_mask);
                if (sdev1)
                        blk_queue_bounce_limit(sdev1->request_queue,
                                               pp->adma_dma_mask);
        }

        blk_queue_segment_boundary(sdev->request_queue, segment_boundary);
        blk_queue_max_hw_segments(sdev->request_queue, sg_tablesize);
        ata_port_printk(ap, KERN_INFO,
                "DMA mask 0x%llX, segment boundary 0x%lX, hw segs %hu\n",
                (unsigned long long)*ap->host->dev->dma_mask,
                segment_boundary, sg_tablesize);

        spin_unlock_irqrestore(ap->lock, flags);

        return rc;
}
static int nv_adma_check_atapi_dma(struct ata_queued_cmd *qc)
{
        struct nv_adma_port_priv *pp = qc->ap->private_data;
        return !(pp->flags & NV_ADMA_ATAPI_SETUP_COMPLETE);
}
static void nv_adma_tf_read(struct ata_port *ap, struct ata_taskfile *tf)
{
        /* Other than when internal or pass-through commands are executed,
           the only time this function will be called in ADMA mode will be
           if a command fails. In the failure case we don't care about going
           into register mode with ADMA commands pending, as the commands will
           all shortly be aborted anyway. We assume that NCQ commands are not
           issued via passthrough, which is the only way that switching into
           ADMA mode could abort outstanding commands. */
        nv_adma_register_mode(ap);

        ata_tf_read(ap, tf);
}
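/*
 * Encode a taskfile into CPB form: each 16-bit entry carries a shadow
 * register address in the high byte and the data byte in the low byte,
 * plus the WNB/CMDEND/IGN control bits from enum nv_adma_regbits above.
 */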
static unsigned int nv_adma_tf_to_cpb(struct ata_taskfile *tf, __le16 *cpb)
{
        unsigned int idx = 0;

        if (tf->flags & ATA_TFLAG_ISADDR) {
                if (tf->flags & ATA_TFLAG_LBA48) {
                        cpb[idx++] = cpu_to_le16((ATA_REG_ERR << 8) | tf->hob_feature | WNB);
                        cpb[idx++] = cpu_to_le16((ATA_REG_NSECT << 8) | tf->hob_nsect);
                        cpb[idx++] = cpu_to_le16((ATA_REG_LBAL << 8) | tf->hob_lbal);
                        cpb[idx++] = cpu_to_le16((ATA_REG_LBAM << 8) | tf->hob_lbam);
                        cpb[idx++] = cpu_to_le16((ATA_REG_LBAH << 8) | tf->hob_lbah);
                        cpb[idx++] = cpu_to_le16((ATA_REG_ERR << 8) | tf->feature);
                } else
                        cpb[idx++] = cpu_to_le16((ATA_REG_ERR << 8) | tf->feature | WNB);

                cpb[idx++] = cpu_to_le16((ATA_REG_NSECT << 8) | tf->nsect);
                cpb[idx++] = cpu_to_le16((ATA_REG_LBAL << 8) | tf->lbal);
                cpb[idx++] = cpu_to_le16((ATA_REG_LBAM << 8) | tf->lbam);
                cpb[idx++] = cpu_to_le16((ATA_REG_LBAH << 8) | tf->lbah);
        }

        if (tf->flags & ATA_TFLAG_DEVICE)
                cpb[idx++] = cpu_to_le16((ATA_REG_DEVICE << 8) | tf->device);

        cpb[idx++] = cpu_to_le16((ATA_REG_CMD << 8) | tf->command | CMDEND);

        while (idx < 12)
                cpb[idx++] = cpu_to_le16(IGN);

        return idx;
}
static int nv_adma_check_cpb(struct ata_port *ap, int cpb_num, int force_err)
{
        struct nv_adma_port_priv *pp = ap->private_data;
        u8 flags = pp->cpb[cpb_num].resp_flags;

        VPRINTK("CPB %d, flags=0x%x\n", cpb_num, flags);

        if (unlikely((force_err ||
                     flags & (NV_CPB_RESP_ATA_ERR |
                              NV_CPB_RESP_CMD_ERR |
                              NV_CPB_RESP_CPB_ERR)))) {
                struct ata_eh_info *ehi = &ap->link.eh_info;
                int freeze = 0;

                ata_ehi_clear_desc(ehi);
                __ata_ehi_push_desc(ehi, "CPB resp_flags 0x%x: ", flags);
                if (flags & NV_CPB_RESP_ATA_ERR) {
                        ata_ehi_push_desc(ehi, "ATA error");
                        ehi->err_mask |= AC_ERR_DEV;
                } else if (flags & NV_CPB_RESP_CMD_ERR) {
                        ata_ehi_push_desc(ehi, "CMD error");
                        ehi->err_mask |= AC_ERR_DEV;
                } else if (flags & NV_CPB_RESP_CPB_ERR) {
                        ata_ehi_push_desc(ehi, "CPB error");
                        ehi->err_mask |= AC_ERR_SYSTEM;
                        freeze = 1;
                } else {
                        /* notifier error, but no error in CPB flags? */
                        ata_ehi_push_desc(ehi, "unknown");
                        ehi->err_mask |= AC_ERR_OTHER;
                        freeze = 1;
                }
                /* Kill all commands. EH will determine what actually failed. */
                if (freeze)
                        ata_port_freeze(ap);
                else
                        ata_port_abort(ap);
                return 1;
        }

        if (likely(flags & NV_CPB_RESP_DONE)) {
                struct ata_queued_cmd *qc = ata_qc_from_tag(ap, cpb_num);
                VPRINTK("CPB flags done, flags=0x%x\n", flags);
                if (likely(qc)) {
                        DPRINTK("Completing qc from tag %d\n", cpb_num);
                        ata_qc_complete(qc);
                } else {
                        struct ata_eh_info *ehi = &ap->link.eh_info;
                        /* Notifier bits set without a command may indicate the drive
                           is misbehaving. Raise host state machine violation on this
                           condition. */
                        ata_port_printk(ap, KERN_ERR,
                                        "notifier for tag %d with no cmd?\n",
                                        cpb_num);
                        ehi->err_mask |= AC_ERR_HSM;
                        ehi->action |= ATA_EH_RESET;
                        ata_port_freeze(ap);
                        return 1;
                }
        }
        return 0;
}
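/*
 * Legacy-mode per-port interrupt: freeze on hotplug events, ignore
 * interrupts that are not for us, and pass genuine device interrupts
 * to ata_host_intr().
 */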
static int nv_host_intr(struct ata_port *ap, u8 irq_stat)
{
        struct ata_queued_cmd *qc = ata_qc_from_tag(ap, ap->link.active_tag);

        /* freeze if hotplugged */
        if (unlikely(irq_stat & (NV_INT_ADDED | NV_INT_REMOVED))) {
                ata_port_freeze(ap);
                return 1;
        }

        /* bail out if not our interrupt */
        if (!(irq_stat & NV_INT_DEV))
                return 0;

        /* DEV interrupt w/ no active qc? */
        if (unlikely(!qc || (qc->tf.flags & ATA_TFLAG_POLLING))) {
                ata_check_status(ap);
                return 1;
        }

        /* handle interrupt */
        return ata_host_intr(ap, qc);
}
static irqreturn_t nv_adma_interrupt(int irq, void *dev_instance)
{
        struct ata_host *host = dev_instance;
        int i, handled = 0;
        u32 notifier_clears[2];

        spin_lock(&host->lock);

        for (i = 0; i < host->n_ports; i++) {
                struct ata_port *ap = host->ports[i];
                notifier_clears[i] = 0;

                if (ap && !(ap->flags & ATA_FLAG_DISABLED)) {
                        struct nv_adma_port_priv *pp = ap->private_data;
                        void __iomem *mmio = pp->ctl_block;
                        u16 status;
                        u32 gen_ctl;
                        u32 notifier, notifier_error;

                        /* if ADMA is disabled, use standard ata interrupt handler */
                        if (pp->flags & NV_ADMA_ATAPI_SETUP_COMPLETE) {
                                u8 irq_stat = readb(host->iomap[NV_MMIO_BAR] + NV_INT_STATUS_CK804)
                                        >> (NV_INT_PORT_SHIFT * i);
                                handled += nv_host_intr(ap, irq_stat);
                                continue;
                        }

                        /* if in ATA register mode, check for standard interrupts */
                        if (pp->flags & NV_ADMA_PORT_REGISTER_MODE) {
                                u8 irq_stat = readb(host->iomap[NV_MMIO_BAR] + NV_INT_STATUS_CK804)
                                        >> (NV_INT_PORT_SHIFT * i);
                                if (ata_tag_valid(ap->link.active_tag))
                                        /** NV_INT_DEV indication seems unreliable at times
                                            at least in ADMA mode. Force it on always when a
                                            command is active, to prevent losing interrupts. */
                                        irq_stat |= NV_INT_DEV;
                                handled += nv_host_intr(ap, irq_stat);
                        }

                        notifier = readl(mmio + NV_ADMA_NOTIFIER);
                        notifier_error = readl(mmio + NV_ADMA_NOTIFIER_ERROR);
                        notifier_clears[i] = notifier | notifier_error;

                        gen_ctl = readl(pp->gen_block + NV_ADMA_GEN_CTL);

                        if (!NV_ADMA_CHECK_INTR(gen_ctl, ap->port_no) && !notifier &&
                            !notifier_error)
                                /* Nothing to do */
                                continue;

                        status = readw(mmio + NV_ADMA_STAT);

                        /* Clear status. Ensure the controller sees the clearing before we start
                           looking at any of the CPB statuses, so that any CPB completions after
                           this point in the handler will raise another interrupt. */
                        writew(status, mmio + NV_ADMA_STAT);
                        readw(mmio + NV_ADMA_STAT); /* flush posted write */

                        handled++; /* irq handled if we got here */

                        /* freeze if hotplugged or controller error */
                        if (unlikely(status & (NV_ADMA_STAT_HOTPLUG |
                                               NV_ADMA_STAT_HOTUNPLUG |
                                               NV_ADMA_STAT_TIMEOUT |
                                               NV_ADMA_STAT_SERROR))) {
                                struct ata_eh_info *ehi = &ap->link.eh_info;

                                ata_ehi_clear_desc(ehi);
                                __ata_ehi_push_desc(ehi, "ADMA status 0x%08x: ", status);
                                if (status & NV_ADMA_STAT_TIMEOUT) {
                                        ehi->err_mask |= AC_ERR_SYSTEM;
                                        ata_ehi_push_desc(ehi, "timeout");
                                } else if (status & NV_ADMA_STAT_HOTPLUG) {
                                        ata_ehi_hotplugged(ehi);
                                        ata_ehi_push_desc(ehi, "hotplug");
                                } else if (status & NV_ADMA_STAT_HOTUNPLUG) {
                                        ata_ehi_hotplugged(ehi);
                                        ata_ehi_push_desc(ehi, "hot unplug");
                                } else if (status & NV_ADMA_STAT_SERROR) {
                                        /* let libata analyze SError and figure out the cause */
                                        ata_ehi_push_desc(ehi, "SError");
                                } else
                                        ata_ehi_push_desc(ehi, "unknown");

                                ata_port_freeze(ap);
                                continue;
                        }

                        if (status & (NV_ADMA_STAT_DONE |
                                      NV_ADMA_STAT_CPBERR |
                                      NV_ADMA_STAT_CMD_COMPLETE)) {
                                u32 check_commands = notifier_clears[i];
                                int pos, error = 0;

                                if (status & NV_ADMA_STAT_CPBERR) {
                                        /* Check all active commands */
                                        if (ata_tag_valid(ap->link.active_tag))
                                                check_commands = 1 <<
                                                        ap->link.active_tag;
                                        else
                                                check_commands = ap->
                                                        link.sactive;
                                }

                                /** Check CPBs for completed commands */
                                while ((pos = ffs(check_commands)) && !error) {
                                        pos--;
                                        error = nv_adma_check_cpb(ap, pos,
                                                notifier_error & (1 << pos));
                                        check_commands &= ~(1 << pos);
                                }
                        }
                }
        }

        if (notifier_clears[0] || notifier_clears[1]) {
                /* Note: Both notifier clear registers must be written
                   if either is set, even if one is zero, according to NVIDIA. */
                struct nv_adma_port_priv *pp = host->ports[0]->private_data;
                writel(notifier_clears[0], pp->notifier_clear_block);
                pp = host->ports[1]->private_data;
                writel(notifier_clears[1], pp->notifier_clear_block);
        }

        spin_unlock(&host->lock);

        return IRQ_RETVAL(handled);
}
static void nv_adma_freeze(struct ata_port *ap)
{
        struct nv_adma_port_priv *pp = ap->private_data;
        void __iomem *mmio = pp->ctl_block;
        u16 tmp;

        nv_ck804_freeze(ap);

        if (pp->flags & NV_ADMA_ATAPI_SETUP_COMPLETE)
                return;

        /* clear any outstanding CK804 notifications */
        writeb(NV_INT_ALL << (ap->port_no * NV_INT_PORT_SHIFT),
               ap->host->iomap[NV_MMIO_BAR] + NV_INT_STATUS_CK804);

        /* Disable interrupt */
        tmp = readw(mmio + NV_ADMA_CTL);
        writew(tmp & ~(NV_ADMA_CTL_AIEN | NV_ADMA_CTL_HOTPLUG_IEN),
               mmio + NV_ADMA_CTL);
        readw(mmio + NV_ADMA_CTL);      /* flush posted write */
}
static void nv_adma_thaw(struct ata_port *ap)
{
        struct nv_adma_port_priv *pp = ap->private_data;
        void __iomem *mmio = pp->ctl_block;
        u16 tmp;

        nv_ck804_thaw(ap);

        if (pp->flags & NV_ADMA_ATAPI_SETUP_COMPLETE)
                return;

        /* Enable interrupt */
        tmp = readw(mmio + NV_ADMA_CTL);
        writew(tmp | (NV_ADMA_CTL_AIEN | NV_ADMA_CTL_HOTPLUG_IEN),
               mmio + NV_ADMA_CTL);
        readw(mmio + NV_ADMA_CTL);      /* flush posted write */
}
static void nv_adma_irq_clear(struct ata_port *ap)
{
        struct nv_adma_port_priv *pp = ap->private_data;
        void __iomem *mmio = pp->ctl_block;
        u32 notifier_clears[2];

        if (pp->flags & NV_ADMA_ATAPI_SETUP_COMPLETE) {
                ata_bmdma_irq_clear(ap);
                return;
        }

        /* clear any outstanding CK804 notifications */
        writeb(NV_INT_ALL << (ap->port_no * NV_INT_PORT_SHIFT),
               ap->host->iomap[NV_MMIO_BAR] + NV_INT_STATUS_CK804);

        /* clear ADMA status */
        writew(0xffff, mmio + NV_ADMA_STAT);

        /* clear notifiers - note both ports need to be written with
           something even though we are only clearing on one */
        if (ap->port_no == 0) {
                notifier_clears[0] = 0xFFFFFFFF;
                notifier_clears[1] = 0;
        } else {
                notifier_clears[0] = 0;
                notifier_clears[1] = 0xFFFFFFFF;
        }
        pp = ap->host->ports[0]->private_data;
        writel(notifier_clears[0], pp->notifier_clear_block);
        pp = ap->host->ports[1]->private_data;
        writel(notifier_clears[1], pp->notifier_clear_block);
}
static void nv_adma_post_internal_cmd(struct ata_queued_cmd *qc)
{
        struct nv_adma_port_priv *pp = qc->ap->private_data;

        if (pp->flags & NV_ADMA_PORT_REGISTER_MODE)
                ata_bmdma_post_internal_cmd(qc);
}
static int nv_adma_port_start(struct ata_port *ap)
{
        struct device *dev = ap->host->dev;
        struct nv_adma_port_priv *pp;
        int rc;
        void *mem;
        dma_addr_t mem_dma;
        void __iomem *mmio;
        struct pci_dev *pdev = to_pci_dev(dev);
        u16 tmp;

        VPRINTK("ENTER\n");

        /* Ensure DMA mask is set to 32-bit before allocating legacy PRD and
           pad buffers */
        rc = pci_set_dma_mask(pdev, DMA_BIT_MASK(32));
        if (rc)
                return rc;
        rc = pci_set_consistent_dma_mask(pdev, DMA_BIT_MASK(32));
        if (rc)
                return rc;

        rc = ata_port_start(ap);
        if (rc)
                return rc;

        pp = devm_kzalloc(dev, sizeof(*pp), GFP_KERNEL);
        if (!pp)
                return -ENOMEM;

        mmio = ap->host->iomap[NV_MMIO_BAR] + NV_ADMA_PORT +
               ap->port_no * NV_ADMA_PORT_SIZE;
        pp->ctl_block = mmio;
        pp->gen_block = ap->host->iomap[NV_MMIO_BAR] + NV_ADMA_GEN;
        pp->notifier_clear_block = pp->gen_block +
               NV_ADMA_NOTIFIER_CLEAR + (4 * ap->port_no);

        /* Now that the legacy PRD and padding buffer are allocated we can
           safely raise the DMA mask to allocate the CPB/APRD table.
           These are allowed to fail since we store the value that ends up
           being used as the bounce limit in slave_config later if needed. */
        pci_set_dma_mask(pdev, DMA_BIT_MASK(64));
        pci_set_consistent_dma_mask(pdev, DMA_BIT_MASK(64));
        pp->adma_dma_mask = *dev->dma_mask;

        mem = dmam_alloc_coherent(dev, NV_ADMA_PORT_PRIV_DMA_SZ,
                                  &mem_dma, GFP_KERNEL);
        if (!mem)
                return -ENOMEM;
        memset(mem, 0, NV_ADMA_PORT_PRIV_DMA_SZ);

        /*
         * First item in chunk of DMA memory:
         * 128-byte command parameter block (CPB)
         * one for each command tag
         */
        pp->cpb = mem;
        pp->cpb_dma = mem_dma;

        writel(mem_dma & 0xFFFFFFFF,    mmio + NV_ADMA_CPB_BASE_LOW);
        writel((mem_dma >> 16) >> 16,   mmio + NV_ADMA_CPB_BASE_HIGH);

        mem     += NV_ADMA_MAX_CPBS * NV_ADMA_CPB_SZ;
        mem_dma += NV_ADMA_MAX_CPBS * NV_ADMA_CPB_SZ;

        /*
         * Second item: block of ADMA_SGTBL_LEN s/g entries
         */
        pp->aprd = mem;
        pp->aprd_dma = mem_dma;

        ap->private_data = pp;

        /* clear any outstanding interrupt conditions */
        writew(0xffff, mmio + NV_ADMA_STAT);

        /* initialize port variables */
        pp->flags = NV_ADMA_PORT_REGISTER_MODE;

        /* clear CPB fetch count */
        writew(0, mmio + NV_ADMA_CPB_COUNT);

        /* clear GO for register mode, enable interrupt */
        tmp = readw(mmio + NV_ADMA_CTL);
        writew((tmp & ~NV_ADMA_CTL_GO) | NV_ADMA_CTL_AIEN |
                NV_ADMA_CTL_HOTPLUG_IEN, mmio + NV_ADMA_CTL);

        tmp = readw(mmio + NV_ADMA_CTL);
        writew(tmp | NV_ADMA_CTL_CHANNEL_RESET, mmio + NV_ADMA_CTL);
        readw(mmio + NV_ADMA_CTL);      /* flush posted write */
        udelay(1);
        writew(tmp & ~NV_ADMA_CTL_CHANNEL_RESET, mmio + NV_ADMA_CTL);
        readw(mmio + NV_ADMA_CTL);      /* flush posted write */

        return 0;
}
static void nv_adma_port_stop(struct ata_port *ap)
{
        struct nv_adma_port_priv *pp = ap->private_data;
        void __iomem *mmio = pp->ctl_block;

        VPRINTK("ENTER\n");
        writew(0, mmio + NV_ADMA_CTL);
}
#ifdef CONFIG_PM
static int nv_adma_port_suspend(struct ata_port *ap, pm_message_t mesg)
{
        struct nv_adma_port_priv *pp = ap->private_data;
        void __iomem *mmio = pp->ctl_block;

        /* Go to register mode - clears GO */
        nv_adma_register_mode(ap);

        /* clear CPB fetch count */
        writew(0, mmio + NV_ADMA_CPB_COUNT);

        /* disable interrupt, shut down port */
        writew(0, mmio + NV_ADMA_CTL);

        return 0;
}
static int nv_adma_port_resume(struct ata_port *ap)
{
        struct nv_adma_port_priv *pp = ap->private_data;
        void __iomem *mmio = pp->ctl_block;
        u16 tmp;

        /* set CPB block location */
        writel(pp->cpb_dma & 0xFFFFFFFF,        mmio + NV_ADMA_CPB_BASE_LOW);
        writel((pp->cpb_dma >> 16) >> 16,       mmio + NV_ADMA_CPB_BASE_HIGH);

        /* clear any outstanding interrupt conditions */
        writew(0xffff, mmio + NV_ADMA_STAT);

        /* initialize port variables */
        pp->flags |= NV_ADMA_PORT_REGISTER_MODE;

        /* clear CPB fetch count */
        writew(0, mmio + NV_ADMA_CPB_COUNT);

        /* clear GO for register mode, enable interrupt */
        tmp = readw(mmio + NV_ADMA_CTL);
        writew((tmp & ~NV_ADMA_CTL_GO) | NV_ADMA_CTL_AIEN |
                NV_ADMA_CTL_HOTPLUG_IEN, mmio + NV_ADMA_CTL);

        tmp = readw(mmio + NV_ADMA_CTL);
        writew(tmp | NV_ADMA_CTL_CHANNEL_RESET, mmio + NV_ADMA_CTL);
        readw(mmio + NV_ADMA_CTL);      /* flush posted write */
        udelay(1);
        writew(tmp & ~NV_ADMA_CTL_CHANNEL_RESET, mmio + NV_ADMA_CTL);
        readw(mmio + NV_ADMA_CTL);      /* flush posted write */

        return 0;
}
#endif
static void nv_adma_setup_port(struct ata_port *ap)
{
        void __iomem *mmio = ap->host->iomap[NV_MMIO_BAR];
        struct ata_ioports *ioport = &ap->ioaddr;

        VPRINTK("ENTER\n");

        mmio += NV_ADMA_PORT + ap->port_no * NV_ADMA_PORT_SIZE;

        ioport->cmd_addr        = mmio;
        ioport->data_addr       = mmio + (ATA_REG_DATA * 4);
        ioport->error_addr      =
        ioport->feature_addr    = mmio + (ATA_REG_ERR * 4);
        ioport->nsect_addr      = mmio + (ATA_REG_NSECT * 4);
        ioport->lbal_addr       = mmio + (ATA_REG_LBAL * 4);
        ioport->lbam_addr       = mmio + (ATA_REG_LBAM * 4);
        ioport->lbah_addr       = mmio + (ATA_REG_LBAH * 4);
        ioport->device_addr     = mmio + (ATA_REG_DEVICE * 4);
        ioport->status_addr     =
        ioport->command_addr    = mmio + (ATA_REG_STATUS * 4);
        ioport->altstatus_addr  =
        ioport->ctl_addr        = mmio + 0x20;
}
static int nv_adma_host_init(struct ata_host *host)
{
        struct pci_dev *pdev = to_pci_dev(host->dev);
        unsigned int i;
        u32 tmp32;

        VPRINTK("ENTER\n");

        /* enable ADMA on the ports */
        pci_read_config_dword(pdev, NV_MCP_SATA_CFG_20, &tmp32);
        tmp32 |= NV_MCP_SATA_CFG_20_PORT0_EN |
                 NV_MCP_SATA_CFG_20_PORT0_PWB_EN |
                 NV_MCP_SATA_CFG_20_PORT1_EN |
                 NV_MCP_SATA_CFG_20_PORT1_PWB_EN;

        pci_write_config_dword(pdev, NV_MCP_SATA_CFG_20, tmp32);

        for (i = 0; i < host->n_ports; i++)
                nv_adma_setup_port(host->ports[i]);

        return 0;
}
static void nv_adma_fill_aprd(struct ata_queued_cmd *qc,
                              struct scatterlist *sg,
                              int idx,
                              struct nv_adma_prd *aprd)
{
        u8 flags = 0;
        if (qc->tf.flags & ATA_TFLAG_WRITE)
                flags |= NV_APRD_WRITE;
        if (idx == qc->n_elem - 1)
                flags |= NV_APRD_END;
        else if (idx != 4)
                flags |= NV_APRD_CONT;

        aprd->addr  = cpu_to_le64(((u64)sg_dma_address(sg)));
        aprd->len   = cpu_to_le32(((u32)sg_dma_len(sg))); /* len in bytes */
        aprd->flags = flags;
        aprd->packet_len = 0;
}
static void nv_adma_fill_sg(struct ata_queued_cmd *qc, struct nv_adma_cpb *cpb)
{
        struct nv_adma_port_priv *pp = qc->ap->private_data;
        struct nv_adma_prd *aprd;
        struct scatterlist *sg;
        unsigned int si;

        VPRINTK("ENTER\n");

        for_each_sg(qc->sg, sg, qc->n_elem, si) {
                aprd = (si < 5) ? &cpb->aprd[si] :
                        &pp->aprd[NV_ADMA_SGTBL_LEN * qc->tag + (si-5)];
                nv_adma_fill_aprd(qc, sg, si, aprd);
        }
        if (si > 5)
                cpb->next_aprd = cpu_to_le64(((u64)(pp->aprd_dma + NV_ADMA_SGTBL_SZ * qc->tag)));
        else
                cpb->next_aprd = cpu_to_le64(0);
}
static int nv_adma_use_reg_mode(struct ata_queued_cmd *qc)
{
        struct nv_adma_port_priv *pp = qc->ap->private_data;

        /* ADMA engine can only be used for non-ATAPI DMA commands,
           or interrupt-driven no-data commands. */
        if ((pp->flags & NV_ADMA_ATAPI_SETUP_COMPLETE) ||
            (qc->tf.flags & ATA_TFLAG_POLLING))
                return 1;

        if ((qc->flags & ATA_QCFLAG_DMAMAP) ||
            (qc->tf.protocol == ATA_PROT_NODATA))
                return 0;

        return 1;
}
static void nv_adma_qc_prep(struct ata_queued_cmd *qc)
{
        struct nv_adma_port_priv *pp = qc->ap->private_data;
        struct nv_adma_cpb *cpb = &pp->cpb[qc->tag];
        u8 ctl_flags = NV_CPB_CTL_CPB_VALID |
                       NV_CPB_CTL_IEN;

        if (nv_adma_use_reg_mode(qc)) {
                BUG_ON(!(pp->flags & NV_ADMA_ATAPI_SETUP_COMPLETE) &&
                        (qc->flags & ATA_QCFLAG_DMAMAP));
                nv_adma_register_mode(qc->ap);
                ata_qc_prep(qc);
                return;
        }

        cpb->resp_flags = NV_CPB_RESP_DONE;
        wmb();
        cpb->ctl_flags = 0;
        wmb();

        cpb->len                = 3;
        cpb->tag                = qc->tag;
        cpb->next_cpb_idx       = 0;

        /* turn on NCQ flags for NCQ commands */
        if (qc->tf.protocol == ATA_PROT_NCQ)
                ctl_flags |= NV_CPB_CTL_QUEUE | NV_CPB_CTL_FPDMA;

        VPRINTK("qc->flags = 0x%lx\n", qc->flags);

        nv_adma_tf_to_cpb(&qc->tf, cpb->tf);

        if (qc->flags & ATA_QCFLAG_DMAMAP) {
                nv_adma_fill_sg(qc, cpb);
                ctl_flags |= NV_CPB_CTL_APRD_VALID;
        } else
                memset(&cpb->aprd[0], 0, sizeof(struct nv_adma_prd) * 5);

        /* Be paranoid and don't let the device see NV_CPB_CTL_CPB_VALID
           until we are finished filling in all of the contents */
        wmb();
        cpb->ctl_flags = ctl_flags;
        wmb();
        cpb->resp_flags = 0;
}
static unsigned int nv_adma_qc_issue(struct ata_queued_cmd *qc)
{
        struct nv_adma_port_priv *pp = qc->ap->private_data;
        void __iomem *mmio = pp->ctl_block;
        int curr_ncq = (qc->tf.protocol == ATA_PROT_NCQ);

        VPRINTK("ENTER\n");

        /* We can't handle result taskfile with NCQ commands, since
           retrieving the taskfile switches us out of ADMA mode and would abort
           existing commands. */
        if (unlikely(qc->tf.protocol == ATA_PROT_NCQ &&
                     (qc->flags & ATA_QCFLAG_RESULT_TF))) {
                ata_dev_printk(qc->dev, KERN_ERR,
                        "NCQ w/ RESULT_TF not allowed\n");
                return AC_ERR_SYSTEM;
        }

        if (nv_adma_use_reg_mode(qc)) {
                /* use ATA register mode */
                VPRINTK("using ATA register mode: 0x%lx\n", qc->flags);
                BUG_ON(!(pp->flags & NV_ADMA_ATAPI_SETUP_COMPLETE) &&
                        (qc->flags & ATA_QCFLAG_DMAMAP));
                nv_adma_register_mode(qc->ap);
                return ata_qc_issue_prot(qc);
        } else
                nv_adma_mode(qc->ap);

        /* write append register, command tag in lower 8 bits
           and (number of cpbs to append -1) in top 8 bits */
        wmb();

        if (curr_ncq != pp->last_issue_ncq) {
                /* Seems to need some delay before switching between NCQ and
                   non-NCQ commands, else we get command timeouts and such. */
                udelay(20);
                pp->last_issue_ncq = curr_ncq;
        }

        writew(qc->tag, mmio + NV_ADMA_APPEND);

        DPRINTK("Issued tag %u\n", qc->tag);

        return 0;
}
static irqreturn_t nv_generic_interrupt(int irq, void *dev_instance)
{
        struct ata_host *host = dev_instance;
        unsigned int i;
        unsigned int handled = 0;
        unsigned long flags;

        spin_lock_irqsave(&host->lock, flags);

        for (i = 0; i < host->n_ports; i++) {
                struct ata_port *ap;

                ap = host->ports[i];
                if (ap &&
                    !(ap->flags & ATA_FLAG_DISABLED)) {
                        struct ata_queued_cmd *qc;

                        qc = ata_qc_from_tag(ap, ap->link.active_tag);
                        if (qc && (!(qc->tf.flags & ATA_TFLAG_POLLING)))
                                handled += ata_host_intr(ap, qc);
                        else
                                // No request pending?  Clear interrupt status
                                // anyway, in case there's one pending.
                                ap->ops->check_status(ap);
                }
        }

        spin_unlock_irqrestore(&host->lock, flags);

        return IRQ_RETVAL(handled);
}
static irqreturn_t nv_do_interrupt(struct ata_host *host, u8 irq_stat)
{
        int i, handled = 0;

        for (i = 0; i < host->n_ports; i++) {
                struct ata_port *ap = host->ports[i];

                if (ap && !(ap->flags & ATA_FLAG_DISABLED))
                        handled += nv_host_intr(ap, irq_stat);

                irq_stat >>= NV_INT_PORT_SHIFT;
        }

        return IRQ_RETVAL(handled);
}
static irqreturn_t nv_nf2_interrupt(int irq, void *dev_instance)
{
        struct ata_host *host = dev_instance;
        u8 irq_stat;
        irqreturn_t ret;

        spin_lock(&host->lock);
        irq_stat = ioread8(host->ports[0]->ioaddr.scr_addr + NV_INT_STATUS);
        ret = nv_do_interrupt(host, irq_stat);
        spin_unlock(&host->lock);

        return ret;
}

static irqreturn_t nv_ck804_interrupt(int irq, void *dev_instance)
{
        struct ata_host *host = dev_instance;
        u8 irq_stat;
        irqreturn_t ret;

        spin_lock(&host->lock);
        irq_stat = readb(host->iomap[NV_MMIO_BAR] + NV_INT_STATUS_CK804);
        ret = nv_do_interrupt(host, irq_stat);
        spin_unlock(&host->lock);

        return ret;
}
static int nv_scr_read(struct ata_port *ap, unsigned int sc_reg, u32 *val)
{
        if (sc_reg > SCR_CONTROL)
                return -EINVAL;

        *val = ioread32(ap->ioaddr.scr_addr + (sc_reg * 4));
        return 0;
}

static int nv_scr_write(struct ata_port *ap, unsigned int sc_reg, u32 val)
{
        if (sc_reg > SCR_CONTROL)
                return -EINVAL;

        iowrite32(val, ap->ioaddr.scr_addr + (sc_reg * 4));
        return 0;
}
static void nv_nf2_freeze(struct ata_port *ap)
{
        void __iomem *scr_addr = ap->host->ports[0]->ioaddr.scr_addr;
        int shift = ap->port_no * NV_INT_PORT_SHIFT;
        u8 mask;

        mask = ioread8(scr_addr + NV_INT_ENABLE);
        mask &= ~(NV_INT_ALL << shift);
        iowrite8(mask, scr_addr + NV_INT_ENABLE);
}

static void nv_nf2_thaw(struct ata_port *ap)
{
        void __iomem *scr_addr = ap->host->ports[0]->ioaddr.scr_addr;
        int shift = ap->port_no * NV_INT_PORT_SHIFT;
        u8 mask;

        iowrite8(NV_INT_ALL << shift, scr_addr + NV_INT_STATUS);

        mask = ioread8(scr_addr + NV_INT_ENABLE);
        mask |= (NV_INT_MASK << shift);
        iowrite8(mask, scr_addr + NV_INT_ENABLE);
}

static void nv_ck804_freeze(struct ata_port *ap)
{
        void __iomem *mmio_base = ap->host->iomap[NV_MMIO_BAR];
        int shift = ap->port_no * NV_INT_PORT_SHIFT;
        u8 mask;

        mask = readb(mmio_base + NV_INT_ENABLE_CK804);
        mask &= ~(NV_INT_ALL << shift);
        writeb(mask, mmio_base + NV_INT_ENABLE_CK804);
}

static void nv_ck804_thaw(struct ata_port *ap)
{
        void __iomem *mmio_base = ap->host->iomap[NV_MMIO_BAR];
        int shift = ap->port_no * NV_INT_PORT_SHIFT;
        u8 mask;

        writeb(NV_INT_ALL << shift, mmio_base + NV_INT_STATUS_CK804);

        mask = readb(mmio_base + NV_INT_ENABLE_CK804);
        mask |= (NV_INT_MASK << shift);
        writeb(mask, mmio_base + NV_INT_ENABLE_CK804);
}

static void nv_mcp55_freeze(struct ata_port *ap)
{
        void __iomem *mmio_base = ap->host->iomap[NV_MMIO_BAR];
        int shift = ap->port_no * NV_INT_PORT_SHIFT_MCP55;
        u32 mask;

        writel(NV_INT_ALL_MCP55 << shift, mmio_base + NV_INT_STATUS_MCP55);

        mask = readl(mmio_base + NV_INT_ENABLE_MCP55);
        mask &= ~(NV_INT_ALL_MCP55 << shift);
        writel(mask, mmio_base + NV_INT_ENABLE_MCP55);
        ata_bmdma_freeze(ap);
}

static void nv_mcp55_thaw(struct ata_port *ap)
{
        void __iomem *mmio_base = ap->host->iomap[NV_MMIO_BAR];
        int shift = ap->port_no * NV_INT_PORT_SHIFT_MCP55;
        u32 mask;

        writel(NV_INT_ALL_MCP55 << shift, mmio_base + NV_INT_STATUS_MCP55);

        mask = readl(mmio_base + NV_INT_ENABLE_MCP55);
        mask |= (NV_INT_MASK_MCP55 << shift);
        writel(mask, mmio_base + NV_INT_ENABLE_MCP55);
        ata_bmdma_thaw(ap);
}
static int nv_hardreset(struct ata_link *link, unsigned int *class,
                        unsigned long deadline)
{
        unsigned int dummy;

        /* SATA hardreset fails to retrieve proper device signature on
         * some controllers. Don't classify on hardreset. For more
         * info, see http://bugzilla.kernel.org/show_bug.cgi?id=3352
         */
        return sata_std_hardreset(link, &dummy, deadline);
}

static void nv_error_handler(struct ata_port *ap)
{
        ata_bmdma_drive_eh(ap, ata_std_prereset, ata_std_softreset,
                           nv_hardreset, ata_std_postreset);
}
static void nv_adma_error_handler(struct ata_port *ap)
{
        struct nv_adma_port_priv *pp = ap->private_data;
        if (!(pp->flags & NV_ADMA_PORT_REGISTER_MODE)) {
                void __iomem *mmio = pp->ctl_block;
                int i;
                u16 tmp;

                if (ata_tag_valid(ap->link.active_tag) || ap->link.sactive) {
                        u32 notifier = readl(mmio + NV_ADMA_NOTIFIER);
                        u32 notifier_error = readl(mmio + NV_ADMA_NOTIFIER_ERROR);
                        u32 gen_ctl = readl(pp->gen_block + NV_ADMA_GEN_CTL);
                        u32 status = readw(mmio + NV_ADMA_STAT);
                        u8 cpb_count = readb(mmio + NV_ADMA_CPB_COUNT);
                        u8 next_cpb_idx = readb(mmio + NV_ADMA_NEXT_CPB_IDX);

                        ata_port_printk(ap, KERN_ERR,
                                "EH in ADMA mode, notifier 0x%X "
                                "notifier_error 0x%X gen_ctl 0x%X status 0x%X "
                                "next cpb count 0x%X next cpb idx 0x%x\n",
                                notifier, notifier_error, gen_ctl, status,
                                cpb_count, next_cpb_idx);

                        for (i = 0; i < NV_ADMA_MAX_CPBS; i++) {
                                struct nv_adma_cpb *cpb = &pp->cpb[i];
                                if ((ata_tag_valid(ap->link.active_tag) && i == ap->link.active_tag) ||
                                    ap->link.sactive & (1 << i))
                                        ata_port_printk(ap, KERN_ERR,
                                                "CPB %d: ctl_flags 0x%x, resp_flags 0x%x\n",
                                                i, cpb->ctl_flags, cpb->resp_flags);
                        }
                }

                /* Push us back into port register mode for error handling. */
                nv_adma_register_mode(ap);

                /* Mark all of the CPBs as invalid to prevent them from
                   being executed */
                for (i = 0; i < NV_ADMA_MAX_CPBS; i++)
                        pp->cpb[i].ctl_flags &= ~NV_CPB_CTL_CPB_VALID;

                /* clear CPB fetch count */
                writew(0, mmio + NV_ADMA_CPB_COUNT);

                /* Reset channel */
                tmp = readw(mmio + NV_ADMA_CTL);
                writew(tmp | NV_ADMA_CTL_CHANNEL_RESET, mmio + NV_ADMA_CTL);
                readw(mmio + NV_ADMA_CTL);      /* flush posted write */
                udelay(1);
                writew(tmp & ~NV_ADMA_CTL_CHANNEL_RESET, mmio + NV_ADMA_CTL);
                readw(mmio + NV_ADMA_CTL);      /* flush posted write */
        }

        ata_bmdma_drive_eh(ap, ata_std_prereset, ata_std_softreset,
                           nv_hardreset, ata_std_postreset);
}
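/*
 * The defer queue is a small FIFO of tags for NCQ commands accepted
 * while another command still owns the bus; see nv_swncq_qc_issue().
 */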
static void nv_swncq_qc_to_dq(struct ata_port *ap, struct ata_queued_cmd *qc)
{
        struct nv_swncq_port_priv *pp = ap->private_data;
        struct defer_queue *dq = &pp->defer_queue;

        /* queue is full */
        WARN_ON(dq->tail - dq->head == ATA_MAX_QUEUE);
        dq->defer_bits |= (1 << qc->tag);
        dq->tag[dq->tail++ & (ATA_MAX_QUEUE - 1)] = qc->tag;
}

static struct ata_queued_cmd *nv_swncq_qc_from_dq(struct ata_port *ap)
{
        struct nv_swncq_port_priv *pp = ap->private_data;
        struct defer_queue *dq = &pp->defer_queue;
        unsigned int tag;

        if (dq->head == dq->tail)       /* null queue */
                return NULL;

        tag = dq->tag[dq->head & (ATA_MAX_QUEUE - 1)];
        dq->tag[dq->head++ & (ATA_MAX_QUEUE - 1)] = ATA_TAG_POISON;
        WARN_ON(!(dq->defer_bits & (1 << tag)));
        dq->defer_bits &= ~(1 << tag);

        return ata_qc_from_tag(ap, tag);
}
static void nv_swncq_fis_reinit(struct ata_port *ap)
{
        struct nv_swncq_port_priv *pp = ap->private_data;

        pp->dhfis_bits = 0;
        pp->dmafis_bits = 0;
        pp->sdbfis_bits = 0;
        pp->ncq_flags = 0;
}

static void nv_swncq_pp_reinit(struct ata_port *ap)
{
        struct nv_swncq_port_priv *pp = ap->private_data;
        struct defer_queue *dq = &pp->defer_queue;

        dq->head = 0;
        dq->tail = 0;
        dq->defer_bits = 0;
        pp->qc_active = 0;
        pp->last_issue_tag = ATA_TAG_POISON;
        nv_swncq_fis_reinit(ap);
}

static void nv_swncq_irq_clear(struct ata_port *ap, u16 fis)
{
        struct nv_swncq_port_priv *pp = ap->private_data;

        writew(fis, pp->irq_block);
}
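/*
 * ata_bmdma_stop() takes a qc but only uses qc->ap, so fake up a
 * minimal qc on the stack for callers that only have the port.
 */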
static void __ata_bmdma_stop(struct ata_port *ap)
{
        struct ata_queued_cmd qc;

        qc.ap = ap;
        ata_bmdma_stop(&qc);
}
static void nv_swncq_ncq_stop(struct ata_port *ap)
{
        struct nv_swncq_port_priv *pp = ap->private_data;
        unsigned int i;
        u32 sactive;
        u32 done_mask;

        ata_port_printk(ap, KERN_ERR,
                        "EH in SWNCQ mode, QC: qc_active 0x%X sactive 0x%X\n",
                        ap->qc_active, ap->link.sactive);
        ata_port_printk(ap, KERN_ERR,
                "SWNCQ: qc_active 0x%X defer_bits 0x%X last_issue_tag 0x%x\n  "
                "dhfis 0x%X dmafis 0x%X sdbfis 0x%X\n",
                pp->qc_active, pp->defer_queue.defer_bits, pp->last_issue_tag,
                pp->dhfis_bits, pp->dmafis_bits, pp->sdbfis_bits);

        ata_port_printk(ap, KERN_ERR, "ATA_REG 0x%X ERR_REG 0x%X\n",
                        ap->ops->check_status(ap),
                        ioread8(ap->ioaddr.error_addr));

        sactive = readl(pp->sactive_block);
        done_mask = pp->qc_active ^ sactive;

        ata_port_printk(ap, KERN_ERR, "tag : dhfis dmafis sdbfis sactive\n");
        for (i = 0; i < ATA_MAX_QUEUE; i++) {
                u8 err = 0;
                if (pp->qc_active & (1 << i))
                        err = 0;
                else if (done_mask & (1 << i))
                        err = 1;
                else
                        continue;

                ata_port_printk(ap, KERN_ERR,
                                "tag 0x%x: %01x %01x %01x %01x %s\n", i,
                                (pp->dhfis_bits >> i) & 0x1,
                                (pp->dmafis_bits >> i) & 0x1,
                                (pp->sdbfis_bits >> i) & 0x1,
                                (sactive >> i) & 0x1,
                                (err ? "error! tag doesn't exist" : " "));
        }

        nv_swncq_pp_reinit(ap);
        ap->ops->irq_clear(ap);
        __ata_bmdma_stop(ap);
        nv_swncq_irq_clear(ap, 0xffff);
}
static void nv_swncq_error_handler(struct ata_port *ap)
{
        struct ata_eh_context *ehc = &ap->link.eh_context;

        if (ap->link.sactive) {
                nv_swncq_ncq_stop(ap);
                ehc->i.action |= ATA_EH_RESET;
        }

        ata_bmdma_drive_eh(ap, ata_std_prereset, ata_std_softreset,
                           nv_hardreset, ata_std_postreset);
}
#ifdef CONFIG_PM
static int nv_swncq_port_suspend(struct ata_port *ap, pm_message_t mesg)
{
        void __iomem *mmio = ap->host->iomap[NV_MMIO_BAR];
        u32 tmp;

        /* clear irq */
        writel(~0, mmio + NV_INT_STATUS_MCP55);

        /* disable irq */
        writel(0, mmio + NV_INT_ENABLE_MCP55);

        /* disable swncq */
        tmp = readl(mmio + NV_CTL_MCP55);
        tmp &= ~(NV_CTL_PRI_SWNCQ | NV_CTL_SEC_SWNCQ);
        writel(tmp, mmio + NV_CTL_MCP55);

        return 0;
}

static int nv_swncq_port_resume(struct ata_port *ap)
{
        void __iomem *mmio = ap->host->iomap[NV_MMIO_BAR];
        u32 tmp;

        /* clear irq */
        writel(~0, mmio + NV_INT_STATUS_MCP55);

        /* enable irq */
        writel(0x00fd00fd, mmio + NV_INT_ENABLE_MCP55);

        /* enable swncq */
        tmp = readl(mmio + NV_CTL_MCP55);
        writel(tmp | NV_CTL_PRI_SWNCQ | NV_CTL_SEC_SWNCQ, mmio + NV_CTL_MCP55);

        return 0;
}
#endif
static void nv_swncq_host_init(struct ata_host *host)
{
        u32 tmp;
        void __iomem *mmio = host->iomap[NV_MMIO_BAR];
        struct pci_dev *pdev = to_pci_dev(host->dev);
        u8 regval;

        /* disable ECO 398 */
        pci_read_config_byte(pdev, 0x7f, &regval);
        regval &= ~(1 << 7);
        pci_write_config_byte(pdev, 0x7f, regval);

        /* enable swncq */
        tmp = readl(mmio + NV_CTL_MCP55);
        VPRINTK("HOST_CTL:0x%X\n", tmp);
        writel(tmp | NV_CTL_PRI_SWNCQ | NV_CTL_SEC_SWNCQ, mmio + NV_CTL_MCP55);

        /* enable irq intr */
        tmp = readl(mmio + NV_INT_ENABLE_MCP55);
        VPRINTK("HOST_ENABLE:0x%X\n", tmp);
        writel(tmp | 0x00fd00fd, mmio + NV_INT_ENABLE_MCP55);

        /* clear port irq */
        writel(~0x0, mmio + NV_INT_STATUS_MCP55);
}
static int nv_swncq_slave_config(struct scsi_device *sdev)
{
        struct ata_port *ap = ata_shost_to_port(sdev->host);
        struct pci_dev *pdev = to_pci_dev(ap->host->dev);
        struct ata_device *dev;
        int rc;
        u8 rev;
        u8 check_maxtor = 0;
        unsigned char model_num[ATA_ID_PROD_LEN + 1];

        rc = ata_scsi_slave_config(sdev);
        if (sdev->id >= ATA_MAX_DEVICES || sdev->channel || sdev->lun)
                /* Not a proper libata device, ignore */
                return rc;

        dev = &ap->link.device[sdev->id];
        if (!(ap->flags & ATA_FLAG_NCQ) || dev->class == ATA_DEV_ATAPI)
                return rc;

        /* if MCP51 and Maxtor, then disable ncq */
        if (pdev->device == PCI_DEVICE_ID_NVIDIA_NFORCE_MCP51_SATA ||
            pdev->device == PCI_DEVICE_ID_NVIDIA_NFORCE_MCP51_SATA2)
                check_maxtor = 1;

        /* if MCP55 and rev <= a2 and Maxtor, then disable ncq */
        if (pdev->device == PCI_DEVICE_ID_NVIDIA_NFORCE_MCP55_SATA ||
            pdev->device == PCI_DEVICE_ID_NVIDIA_NFORCE_MCP55_SATA2) {
                pci_read_config_byte(pdev, 0x8, &rev);
                if (rev <= 0xa2)
                        check_maxtor = 1;
        }

        if (!check_maxtor)
                return rc;

        ata_id_c_string(dev->id, model_num, ATA_ID_PROD, sizeof(model_num));

        if (strncmp(model_num, "Maxtor", 6) == 0) {
                ata_scsi_change_queue_depth(sdev, 1);
                ata_dev_printk(dev, KERN_NOTICE,
                        "Disabling SWNCQ mode (depth %x)\n", sdev->queue_depth);
        }

        return rc;
}
static int nv_swncq_port_start(struct ata_port *ap)
{
        struct device *dev = ap->host->dev;
        void __iomem *mmio = ap->host->iomap[NV_MMIO_BAR];
        struct nv_swncq_port_priv *pp;
        int rc;

        rc = ata_port_start(ap);
        if (rc)
                return rc;

        pp = devm_kzalloc(dev, sizeof(*pp), GFP_KERNEL);
        if (!pp)
                return -ENOMEM;

        pp->prd = dmam_alloc_coherent(dev, ATA_PRD_TBL_SZ * ATA_MAX_QUEUE,
                                      &pp->prd_dma, GFP_KERNEL);
        if (!pp->prd)
                return -ENOMEM;
        memset(pp->prd, 0, ATA_PRD_TBL_SZ * ATA_MAX_QUEUE);

        ap->private_data = pp;
        pp->sactive_block = ap->ioaddr.scr_addr + 4 * SCR_ACTIVE;
        pp->irq_block = mmio + NV_INT_STATUS_MCP55 + ap->port_no * 2;
        pp->tag_block = mmio + NV_NCQ_REG_MCP55 + ap->port_no * 2;

        return 0;
}
static void nv_swncq_qc_prep(struct ata_queued_cmd *qc)
{
        if (qc->tf.protocol != ATA_PROT_NCQ) {
                ata_qc_prep(qc);
                return;
        }

        if (!(qc->flags & ATA_QCFLAG_DMAMAP))
                return;

        nv_swncq_fill_sg(qc);
}
static void nv_swncq_fill_sg(struct ata_queued_cmd *qc)
{
        struct ata_port *ap = qc->ap;
        struct scatterlist *sg;
        struct nv_swncq_port_priv *pp = ap->private_data;
        struct ata_prd *prd;
        unsigned int si, idx;

        prd = pp->prd + ATA_MAX_PRD * qc->tag;
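        /*
         * A legacy PRD entry must not cross a 64K boundary, so any segment
         * that straddles one is split across multiple entries below.
         */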
        idx = 0;
        for_each_sg(qc->sg, sg, qc->n_elem, si) {
                u32 addr, offset;
                u32 sg_len, len;

                addr = (u32)sg_dma_address(sg);
                sg_len = sg_dma_len(sg);

                while (sg_len) {
                        offset = addr & 0xffff;
                        len = sg_len;
                        if ((offset + sg_len) > 0x10000)
                                len = 0x10000 - offset;

                        prd[idx].addr = cpu_to_le32(addr);
                        prd[idx].flags_len = cpu_to_le32(len & 0xffff);

                        idx++;
                        sg_len -= len;
                        addr += len;
                }
        }

        prd[idx - 1].flags_len |= cpu_to_le32(ATA_PRD_EOT);
}
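/*
 * Issue one NCQ command: set its bit in SActive, update the driver's
 * bookkeeping bitmaps, then load and send the taskfile.
 */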
static unsigned int nv_swncq_issue_atacmd(struct ata_port *ap,
                                          struct ata_queued_cmd *qc)
{
        struct nv_swncq_port_priv *pp = ap->private_data;

        if (qc == NULL)
                return 0;

        DPRINTK("Enter\n");

        writel((1 << qc->tag), pp->sactive_block);
        pp->last_issue_tag = qc->tag;
        pp->dhfis_bits &= ~(1 << qc->tag);
        pp->dmafis_bits &= ~(1 << qc->tag);
        pp->qc_active |= (0x1 << qc->tag);

        ap->ops->tf_load(ap, &qc->tf);  /* load tf registers */
        ap->ops->exec_command(ap, &qc->tf);

        DPRINTK("Issued tag %u\n", qc->tag);

        return 0;
}
static unsigned int nv_swncq_qc_issue(struct ata_queued_cmd *qc)
{
        struct ata_port *ap = qc->ap;
        struct nv_swncq_port_priv *pp = ap->private_data;

        if (qc->tf.protocol != ATA_PROT_NCQ)
                return ata_qc_issue_prot(qc);

        DPRINTK("Enter\n");

        if (!pp->qc_active)
                nv_swncq_issue_atacmd(ap, qc);
        else
                nv_swncq_qc_to_dq(ap, qc);      /* add qc to defer queue */

        return 0;
}
static void nv_swncq_hotplug(struct ata_port *ap, u32 fis)
{
        u32 serror;
        struct ata_eh_info *ehi = &ap->link.eh_info;

        ata_ehi_clear_desc(ehi);

        /* AHCI needs SError cleared; otherwise, it might lock up */
        sata_scr_read(&ap->link, SCR_ERROR, &serror);
        sata_scr_write(&ap->link, SCR_ERROR, serror);

        /* analyze @irq_stat */
        if (fis & NV_SWNCQ_IRQ_ADDED)
                ata_ehi_push_desc(ehi, "hot plug");
        else if (fis & NV_SWNCQ_IRQ_REMOVED)
                ata_ehi_push_desc(ehi, "hot unplug");

        ata_ehi_hotplugged(ehi);

        /* okay, let's hand over to EH */
        ehi->serror |= serror;

        ata_port_freeze(ap);
}
static int nv_swncq_sdbfis(struct ata_port *ap)
{
        struct ata_queued_cmd *qc;
        struct nv_swncq_port_priv *pp = ap->private_data;
        struct ata_eh_info *ehi = &ap->link.eh_info;
        u32 sactive;
        int nr_done = 0;
        u32 done_mask;
        int i;
        u8 host_stat;
        u8 lack_dhfis = 0;

        host_stat = ap->ops->bmdma_status(ap);
        if (unlikely(host_stat & ATA_DMA_ERR)) {
                /* error when transferring data to/from memory */
                ata_ehi_clear_desc(ehi);
                ata_ehi_push_desc(ehi, "BMDMA stat 0x%x", host_stat);
                ehi->err_mask |= AC_ERR_HOST_BUS;
                ehi->action |= ATA_EH_RESET;
                return -EINVAL;
        }

        ap->ops->irq_clear(ap);
        __ata_bmdma_stop(ap);

        sactive = readl(pp->sactive_block);
        done_mask = pp->qc_active ^ sactive;

        if (unlikely(done_mask & sactive)) {
                ata_ehi_clear_desc(ehi);
                ata_ehi_push_desc(ehi, "illegal SWNCQ:qc_active transition"
                                  "(%08x->%08x)", pp->qc_active, sactive);
                ehi->err_mask |= AC_ERR_HSM;
                ehi->action |= ATA_EH_RESET;
                return -EINVAL;
        }
        for (i = 0; i < ATA_MAX_QUEUE; i++) {
                if (!(done_mask & (1 << i)))
                        continue;

                qc = ata_qc_from_tag(ap, i);
                if (qc) {
                        ata_qc_complete(qc);
                        pp->qc_active &= ~(1 << i);
                        pp->dhfis_bits &= ~(1 << i);
                        pp->dmafis_bits &= ~(1 << i);
                        pp->sdbfis_bits |= (1 << i);
                        nr_done++;
                }
        }

        if (!ap->qc_active) {
                DPRINTK("over\n");
                nv_swncq_pp_reinit(ap);
                return nr_done;
        }

        if (pp->qc_active & pp->dhfis_bits)
                return nr_done;

        if ((pp->ncq_flags & ncq_saw_backout) ||
            (pp->qc_active ^ pp->dhfis_bits))
                /* if the controller can't get a device-to-host register FIS,
                 * the driver needs to reissue the new command.
                 */
                lack_dhfis = 1;

        DPRINTK("id 0x%x QC: qc_active 0x%x,"
                "SWNCQ:qc_active 0x%X defer_bits %X "
                "dhfis 0x%X dmafis 0x%X last_issue_tag %x\n",
                ap->print_id, ap->qc_active, pp->qc_active,
                pp->defer_queue.defer_bits, pp->dhfis_bits,
                pp->dmafis_bits, pp->last_issue_tag);

        nv_swncq_fis_reinit(ap);

        if (lack_dhfis) {
                qc = ata_qc_from_tag(ap, pp->last_issue_tag);
                nv_swncq_issue_atacmd(ap, qc);
                return nr_done;
        }

        if (pp->defer_queue.defer_bits) {
                /* send deferred queue command */
                qc = nv_swncq_qc_from_dq(ap);
                WARN_ON(qc == NULL);
                nv_swncq_issue_atacmd(ap, qc);
        }

        return nr_done;
}
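/*
 * The controller latches the tag of the command referenced by the most
 * recent DMA Setup FIS in bits 2..6 of the tag register.
 */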
static inline u32 nv_swncq_tag(struct ata_port *ap)
{
        struct nv_swncq_port_priv *pp = ap->private_data;
        u32 tag;

        tag = readb(pp->tag_block) >> 2;
        return (tag & 0x1f);
}
static int nv_swncq_dmafis(struct ata_port *ap)
{
	struct ata_queued_cmd *qc;
	unsigned int rw;
	u8 dmactl;
	u32 tag;
	struct nv_swncq_port_priv *pp = ap->private_data;

	__ata_bmdma_stop(ap);
	tag = nv_swncq_tag(ap);

	DPRINTK("dma setup tag 0x%x\n", tag);
	qc = ata_qc_from_tag(ap, tag);
	if (unlikely(!qc))
		return 0;

	rw = qc->tf.flags & ATA_TFLAG_WRITE;

	/* load PRD table addr. */
	iowrite32(pp->prd_dma + ATA_PRD_TBL_SZ * qc->tag,
		  ap->ioaddr.bmdma_addr + ATA_DMA_TABLE_OFS);

	/* specify data direction, triple-check start bit is clear */
	dmactl = ioread8(ap->ioaddr.bmdma_addr + ATA_DMA_CMD);
	dmactl &= ~ATA_DMA_WR;
	if (!rw)
		dmactl |= ATA_DMA_WR;

	iowrite8(dmactl | ATA_DMA_START, ap->ioaddr.bmdma_addr + ATA_DMA_CMD);

	return 1;
}
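
/* Per-port NCQ interrupt handler: ack the FIS bits, hand hotplug events
 * and device errors to EH, and walk the SDB / D2H Register / DMA Setup
 * events to advance the software NCQ state machine.
 */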
static void nv_swncq_host_interrupt(struct ata_port *ap, u16 fis)
{
	struct nv_swncq_port_priv *pp = ap->private_data;
	struct ata_queued_cmd *qc;
	struct ata_eh_info *ehi = &ap->link.eh_info;
	u32 serror;
	u8 ata_stat;
	int rc = 0;

	ata_stat = ap->ops->check_status(ap);
	nv_swncq_irq_clear(ap, fis);
	if (!fis)
		return;

	if (ap->pflags & ATA_PFLAG_FROZEN)
		return;

	if (fis & NV_SWNCQ_IRQ_HOTPLUG) {
		nv_swncq_hotplug(ap, fis);
		return;
	}

	if (!pp->qc_active)
		return;

	if (ap->ops->scr_read(ap, SCR_ERROR, &serror))
		return;
	ap->ops->scr_write(ap, SCR_ERROR, serror);

	if (ata_stat & ATA_ERR) {
		ata_ehi_clear_desc(ehi);
		ata_ehi_push_desc(ehi, "ATA error. fis:0x%X", fis);
		ehi->err_mask |= AC_ERR_DEV;
		ehi->serror |= serror;
		ehi->action |= ATA_EH_RESET;
		ata_port_freeze(ap);
		return;
	}

	if (fis & NV_SWNCQ_IRQ_BACKOUT) {
		/* If the IRQ is a backout, the driver must issue
		 * the command again some time later.
		 */
		pp->ncq_flags |= ncq_saw_backout;
	}

	if (fis & NV_SWNCQ_IRQ_SDBFIS) {
		pp->ncq_flags |= ncq_saw_sdb;
		DPRINTK("id 0x%x SWNCQ: qc_active 0x%X "
			"dhfis 0x%X dmafis 0x%X sactive 0x%X\n",
			ap->print_id, pp->qc_active, pp->dhfis_bits,
			pp->dmafis_bits, readl(pp->sactive_block));
		rc = nv_swncq_sdbfis(ap);
		if (rc < 0)
			goto irq_error;
	}

	if (fis & NV_SWNCQ_IRQ_DHREGFIS) {
		/* The interrupt indicates the new command
		 * was transmitted correctly to the drive.
		 */
		pp->dhfis_bits |= (0x1 << pp->last_issue_tag);
		pp->ncq_flags |= ncq_saw_d2h;
		if (pp->ncq_flags & (ncq_saw_sdb | ncq_saw_backout)) {
			ata_ehi_push_desc(ehi, "illegal fis transaction");
			ehi->err_mask |= AC_ERR_HSM;
			ehi->action |= ATA_EH_RESET;
			goto irq_error;
		}

		if (!(fis & NV_SWNCQ_IRQ_DMASETUP) &&
		    !(pp->ncq_flags & ncq_saw_dmas)) {
			ata_stat = ap->ops->check_status(ap);
			if (ata_stat & ATA_BUSY)
				goto irq_exit;

			if (pp->defer_queue.defer_bits) {
				DPRINTK("send next command\n");
				qc = nv_swncq_qc_from_dq(ap);
				nv_swncq_issue_atacmd(ap, qc);
			}
		}
	}

	if (fis & NV_SWNCQ_IRQ_DMASETUP) {
		/* program the DMA controller with appropriate PRD buffers
		 * and start the DMA transfer for the requested command.
		 */
		pp->dmafis_bits |= (0x1 << nv_swncq_tag(ap));
		pp->ncq_flags |= ncq_saw_dmas;
		rc = nv_swncq_dmafis(ap);
	}

irq_exit:
	return;
irq_error:
	ata_ehi_push_desc(ehi, "fis:0x%x", fis);
	ata_port_freeze(ap);
	return;
}
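
/* Top-level SWNCQ IRQ handler: read the shared MCP55 status register
 * once, then dispatch each port's slice of it, taking the NCQ path when
 * the port has NCQ commands in flight and the legacy path otherwise.
 */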
static irqreturn_t nv_swncq_interrupt(int irq, void *dev_instance)
{
	struct ata_host *host = dev_instance;
	unsigned int i;
	unsigned int handled = 0;
	unsigned long flags;
	u32 irq_stat;

	spin_lock_irqsave(&host->lock, flags);

	irq_stat = readl(host->iomap[NV_MMIO_BAR] + NV_INT_STATUS_MCP55);

	for (i = 0; i < host->n_ports; i++) {
		struct ata_port *ap = host->ports[i];

		if (ap && !(ap->flags & ATA_FLAG_DISABLED)) {
			if (ap->link.sactive) {
				nv_swncq_host_interrupt(ap, (u16)irq_stat);
				handled = 1;
			} else {
				if (irq_stat)	/* preserve hotplug bits */
					nv_swncq_irq_clear(ap, 0xfff0);

				handled += nv_host_intr(ap, (u8)irq_stat);
			}
		}
		irq_stat >>= NV_INT_PORT_SHIFT_MCP55;
	}

	spin_unlock_irqrestore(&host->lock, flags);

	return IRQ_RETVAL(handled);
}
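
/* PCI probe: weed out IDE-mode controllers, pick the programming
 * interface (ADMA, SWNCQ, or generic) from the device type and the
 * module parameters, map BAR5, and activate the host.
 */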
static int nv_init_one(struct pci_dev *pdev, const struct pci_device_id *ent)
{
	static int printed_version;
	const struct ata_port_info *ppi[] = { NULL, NULL };
	struct nv_pi_priv *ipriv;
	struct ata_host *host;
	struct nv_host_priv *hpriv;
	int rc;
	u32 bar;
	void __iomem *base;
	unsigned long type = ent->driver_data;

	// Make sure this is a SATA controller by counting the number of bars
	// (NVIDIA SATA controllers will always have six bars).  Otherwise,
	// it's an IDE controller and we ignore it.
	for (bar = 0; bar < 6; bar++)
		if (pci_resource_start(pdev, bar) == 0)
			return -ENODEV;

	if (!printed_version++)
		dev_printk(KERN_DEBUG, &pdev->dev, "version " DRV_VERSION "\n");

	rc = pcim_enable_device(pdev);
	if (rc)
		return rc;

	/* determine type and allocate host */
	if (type == CK804 && adma_enabled) {
		dev_printk(KERN_NOTICE, &pdev->dev, "Using ADMA mode\n");
		type = ADMA;
	}

	if (type == SWNCQ) {
		if (swncq_enabled)
			dev_printk(KERN_NOTICE, &pdev->dev,
				   "Using SWNCQ mode\n");
		else
			type = GENERIC;
	}

	ppi[0] = &nv_port_info[type];
	ipriv = ppi[0]->private_data;
	rc = ata_pci_prepare_sff_host(pdev, ppi, &host);
	if (rc)
		return rc;

	hpriv = devm_kzalloc(&pdev->dev, sizeof(*hpriv), GFP_KERNEL);
	if (!hpriv)
		return -ENOMEM;
	hpriv->type = type;
	host->private_data = hpriv;

	/* request and iomap NV_MMIO_BAR */
	rc = pcim_iomap_regions(pdev, 1 << NV_MMIO_BAR, DRV_NAME);
	if (rc)
		return rc;

	/* configure SCR access */
	base = host->iomap[NV_MMIO_BAR];
	host->ports[0]->ioaddr.scr_addr = base + NV_PORT0_SCR_REG_OFFSET;
	host->ports[1]->ioaddr.scr_addr = base + NV_PORT1_SCR_REG_OFFSET;

	/* enable SATA space for CK804 */
	if (type >= CK804) {
		u8 regval;

		pci_read_config_byte(pdev, NV_MCP_SATA_CFG_20, &regval);
		regval |= NV_MCP_SATA_CFG_20_SATA_SPACE_EN;
		pci_write_config_byte(pdev, NV_MCP_SATA_CFG_20, regval);
	}

	/* init ADMA */
	if (type == ADMA) {
		rc = nv_adma_host_init(host);
		if (rc)
			return rc;
	} else if (type == SWNCQ)
		nv_swncq_host_init(host);

	pci_set_master(pdev);
	return ata_host_activate(host, pdev->irq, ipriv->irq_handler,
				 IRQF_SHARED, ipriv->sht);
}
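
/* Resume from a full suspend: the chip comes back with its config
 * space at defaults, so redo the probe-time setup by re-enabling the
 * extended SATA register space and reprogramming the per-port ADMA
 * enables to match each port's current ATAPI state.
 */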
static int nv_pci_device_resume(struct pci_dev *pdev)
{
	struct ata_host *host = dev_get_drvdata(&pdev->dev);
	struct nv_host_priv *hpriv = host->private_data;
	int rc;

	rc = ata_pci_device_do_resume(pdev);
	if (rc)
		return rc;

	if (pdev->dev.power.power_state.event == PM_EVENT_SUSPEND) {
		if (hpriv->type >= CK804) {
			u8 regval;

			pci_read_config_byte(pdev, NV_MCP_SATA_CFG_20, &regval);
			regval |= NV_MCP_SATA_CFG_20_SATA_SPACE_EN;
			pci_write_config_byte(pdev, NV_MCP_SATA_CFG_20, regval);
		}
		if (hpriv->type == ADMA) {
			u32 tmp32;
			struct nv_adma_port_priv *pp;
			/* enable/disable ADMA on the ports appropriately */
			pci_read_config_dword(pdev, NV_MCP_SATA_CFG_20, &tmp32);

			pp = host->ports[0]->private_data;
			if (pp->flags & NV_ADMA_ATAPI_SETUP_COMPLETE)
				tmp32 &= ~(NV_MCP_SATA_CFG_20_PORT0_EN |
					   NV_MCP_SATA_CFG_20_PORT0_PWB_EN);
			else
				tmp32 |= (NV_MCP_SATA_CFG_20_PORT0_EN |
					  NV_MCP_SATA_CFG_20_PORT0_PWB_EN);
			pp = host->ports[1]->private_data;
			if (pp->flags & NV_ADMA_ATAPI_SETUP_COMPLETE)
				tmp32 &= ~(NV_MCP_SATA_CFG_20_PORT1_EN |
					   NV_MCP_SATA_CFG_20_PORT1_PWB_EN);
			else
				tmp32 |= (NV_MCP_SATA_CFG_20_PORT1_EN |
					  NV_MCP_SATA_CFG_20_PORT1_PWB_EN);

			pci_write_config_dword(pdev, NV_MCP_SATA_CFG_20, tmp32);
		}
	}

	ata_host_resume(host);

	return 0;
}
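
/* Host-stop for CK804: undo the probe-time config by hiding the
 * extended SATA register space again.
 */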
static void nv_ck804_host_stop(struct ata_host *host)
{
	struct pci_dev *pdev = to_pci_dev(host->dev);
	u8 regval;

	/* disable SATA space for CK804 */
	pci_read_config_byte(pdev, NV_MCP_SATA_CFG_20, &regval);
	regval &= ~NV_MCP_SATA_CFG_20_SATA_SPACE_EN;
	pci_write_config_byte(pdev, NV_MCP_SATA_CFG_20, regval);
}
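
/* Host-stop for ADMA: turn off ADMA on both ports, then fall through
 * to the CK804 teardown to disable the SATA register space.
 */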
static void nv_adma_host_stop(struct ata_host *host)
{
	struct pci_dev *pdev = to_pci_dev(host->dev);
	u32 tmp32;

	/* disable ADMA on the ports */
	pci_read_config_dword(pdev, NV_MCP_SATA_CFG_20, &tmp32);
	tmp32 &= ~(NV_MCP_SATA_CFG_20_PORT0_EN |
		   NV_MCP_SATA_CFG_20_PORT0_PWB_EN |
		   NV_MCP_SATA_CFG_20_PORT1_EN |
		   NV_MCP_SATA_CFG_20_PORT1_PWB_EN);
	pci_write_config_dword(pdev, NV_MCP_SATA_CFG_20, tmp32);

	nv_ck804_host_stop(host);
}
static int __init nv_init(void)
{
	return pci_register_driver(&nv_pci_driver);
}

static void __exit nv_exit(void)
{
	pci_unregister_driver(&nv_pci_driver);
}

module_init(nv_init);
module_exit(nv_exit);
module_param_named(adma, adma_enabled, bool, 0444);
MODULE_PARM_DESC(adma, "Enable use of ADMA (Default: true)");
module_param_named(swncq, swncq_enabled, bool, 0444);
MODULE_PARM_DESC(swncq, "Enable use of SWNCQ (Default: false)");