2 * sata_nv.c - NVIDIA nForce SATA
4 * Copyright 2004 NVIDIA Corp. All rights reserved.
5 * Copyright 2004 Andrew Chew
8 * This program is free software; you can redistribute it and/or modify
9 * it under the terms of the GNU General Public License as published by
10 * the Free Software Foundation; either version 2, or (at your option)
13 * This program is distributed in the hope that it will be useful,
14 * but WITHOUT ANY WARRANTY; without even the implied warranty of
15 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
16 * GNU General Public License for more details.
18 * You should have received a copy of the GNU General Public License
19 * along with this program; see the file COPYING. If not, write to
20 * the Free Software Foundation, 675 Mass Ave, Cambridge, MA 02139, USA.
23 * libata documentation is available via 'make {ps|pdf}docs',
24 * as Documentation/DocBook/libata.*
26 * No hardware documentation available outside of NVIDIA.
27 * This driver programs the NVIDIA SATA controller in a similar
28 * fashion as with other PCI IDE BMDMA controllers, with a few
29 * NV-specific details such as register offsets, SATA phy location,
32 * CK804/MCP04 controllers support an alternate programming interface
33 * similar to the ADMA specification (with some modifications).
34 * This allows the use of NCQ. Non-DMA-mapped ATA commands are still
35 * sent through the legacy interface.
39 #include <linux/kernel.h>
40 #include <linux/module.h>
41 #include <linux/pci.h>
42 #include <linux/init.h>
43 #include <linux/blkdev.h>
44 #include <linux/delay.h>
45 #include <linux/interrupt.h>
46 #include <linux/device.h>
47 #include <scsi/scsi_host.h>
48 #include <scsi/scsi_device.h>
49 #include <linux/libata.h>
/* Driver identity: used for /proc naming, log prefixes and MODULE_VERSION. */
51 #define DRV_NAME "sata_nv"
52 #define DRV_VERSION "3.3"
/* ADMA can cross any 32-bit boundary within a segment. */
54 #define NV_ADMA_DMA_BOUNDARY 0xffffffffUL
/* BAR5 offsets of the per-port SCR (SATA status/control) register banks. */
63 NV_PORT0_SCR_REG_OFFSET = 0x00,
64 NV_PORT1_SCR_REG_OFFSET = 0x40,
66 /* INT_STATUS/ENABLE */
69 NV_INT_STATUS_CK804 = 0x440,
70 NV_INT_ENABLE_CK804 = 0x441,
72 /* INT_STATUS/ENABLE bits */
76 NV_INT_REMOVED = 0x08,
78 NV_INT_PORT_SHIFT = 4, /* each port occupies 4 bits */
81 NV_INT_MASK = NV_INT_DEV |
82 NV_INT_ADDED | NV_INT_REMOVED,
86 NV_INT_CONFIG_METHD = 0x01, // 0 = INT, 1 = SMI
88 // For PCI config register 20
89 NV_MCP_SATA_CFG_20 = 0x50,
90 NV_MCP_SATA_CFG_20_SATA_SPACE_EN = 0x04,
91 NV_MCP_SATA_CFG_20_PORT0_EN = (1 << 17),
92 NV_MCP_SATA_CFG_20_PORT1_EN = (1 << 16),
93 NV_MCP_SATA_CFG_20_PORT0_PWB_EN = (1 << 14),
94 NV_MCP_SATA_CFG_20_PORT1_PWB_EN = (1 << 12),
/* CPB/SG-table sizing: one CPB per command tag plus a per-tag external
 * scatter/gather table, all carved out of a single DMA allocation. */
96 NV_ADMA_MAX_CPBS = 32,
99 NV_ADMA_SGTBL_LEN = (1024 - NV_ADMA_CPB_SZ) /
/* +5: the first five APRDs live inside the CPB itself. */
101 NV_ADMA_SGTBL_TOTAL_LEN = NV_ADMA_SGTBL_LEN + 5,
102 NV_ADMA_SGTBL_SZ = NV_ADMA_SGTBL_LEN * NV_ADMA_APRD_SZ,
103 NV_ADMA_PORT_PRIV_DMA_SZ = NV_ADMA_MAX_CPBS *
104 (NV_ADMA_CPB_SZ + NV_ADMA_SGTBL_SZ),
106 /* BAR5 offset to ADMA general registers */
108 NV_ADMA_GEN_CTL = 0x00,
109 NV_ADMA_NOTIFIER_CLEAR = 0x30,
111 /* BAR5 offset to ADMA ports */
112 NV_ADMA_PORT = 0x480,
114 /* size of ADMA port register space */
115 NV_ADMA_PORT_SIZE = 0x100,
117 /* ADMA port registers */
119 NV_ADMA_CPB_COUNT = 0x42,
120 NV_ADMA_NEXT_CPB_IDX = 0x43,
122 NV_ADMA_CPB_BASE_LOW = 0x48,
123 NV_ADMA_CPB_BASE_HIGH = 0x4C,
124 NV_ADMA_APPEND = 0x50,
125 NV_ADMA_NOTIFIER = 0x68,
126 NV_ADMA_NOTIFIER_ERROR = 0x6C,
128 /* NV_ADMA_CTL register bits */
129 NV_ADMA_CTL_HOTPLUG_IEN = (1 << 0),
130 NV_ADMA_CTL_CHANNEL_RESET = (1 << 5),
131 NV_ADMA_CTL_GO = (1 << 7),
132 NV_ADMA_CTL_AIEN = (1 << 8),
133 NV_ADMA_CTL_READ_NON_COHERENT = (1 << 11),
134 NV_ADMA_CTL_WRITE_NON_COHERENT = (1 << 12),
136 /* CPB response flag bits */
137 NV_CPB_RESP_DONE = (1 << 0),
138 NV_CPB_RESP_ATA_ERR = (1 << 3),
139 NV_CPB_RESP_CMD_ERR = (1 << 4),
140 NV_CPB_RESP_CPB_ERR = (1 << 7),
142 /* CPB control flag bits */
143 NV_CPB_CTL_CPB_VALID = (1 << 0),
144 NV_CPB_CTL_QUEUE = (1 << 1),
145 NV_CPB_CTL_APRD_VALID = (1 << 2),
146 NV_CPB_CTL_IEN = (1 << 3),
147 NV_CPB_CTL_FPDMA = (1 << 4),
/* APRD (ADMA physical region descriptor) flag bits. */
150 NV_APRD_WRITE = (1 << 1),
151 NV_APRD_END = (1 << 2),
152 NV_APRD_CONT = (1 << 3),
154 /* NV_ADMA_STAT flags */
155 NV_ADMA_STAT_TIMEOUT = (1 << 0),
156 NV_ADMA_STAT_HOTUNPLUG = (1 << 1),
157 NV_ADMA_STAT_HOTPLUG = (1 << 2),
158 NV_ADMA_STAT_CPBERR = (1 << 4),
159 NV_ADMA_STAT_SERROR = (1 << 5),
160 NV_ADMA_STAT_CMD_COMPLETE = (1 << 6),
161 NV_ADMA_STAT_IDLE = (1 << 8),
162 NV_ADMA_STAT_LEGACY = (1 << 9),
163 NV_ADMA_STAT_STOPPED = (1 << 10),
164 NV_ADMA_STAT_DONE = (1 << 12),
165 NV_ADMA_STAT_ERR = NV_ADMA_STAT_CPBERR |
166 NV_ADMA_STAT_TIMEOUT,
/* Per-port software state bits kept in nv_adma_port_priv.flags. */
169 NV_ADMA_PORT_REGISTER_MODE = (1 << 0),
170 NV_ADMA_ATAPI_SETUP_COMPLETE = (1 << 1),
174 /* ADMA Physical Region Descriptor - one SG segment */
/* Bit layout of one 16-bit taskfile entry inside a CPB: value in the low
 * byte, register/control bits in the high byte (see nv_adma_tf_to_cpb). */
183 enum nv_adma_regbits {
184 CMDEND = (1 << 15), /* end of command list */
185 WNB = (1 << 14), /* wait-not-BSY */
186 IGN = (1 << 13), /* ignore this entry */
187 CS1n = (1 << (4 + 8)), /* std. PATA signals follow... */
188 DA2 = (1 << (2 + 8)),
189 DA1 = (1 << (1 + 8)),
190 DA0 = (1 << (0 + 8)),
193 /* ADMA Command Parameter Block
194 The first 5 SG segments are stored inside the Command Parameter Block itself.
195 If there are more than 5 segments the remainder are stored in a separate
196 memory area indicated by next_aprd. */
198 u8 resp_flags; /* 0 */
199 u8 reserved1; /* 1 */
200 u8 ctl_flags; /* 2 */
201 /* len is length of taskfile in 64 bit words */
204 u8 next_cpb_idx; /* 5 */
205 __le16 reserved2; /* 6-7 */
206 __le16 tf[12]; /* 8-31 */
207 struct nv_adma_prd aprd[5]; /* 32-111 */
208 __le64 next_aprd; /* 112-119 */
209 __le64 reserved3; /* 120-127 */
/* Per-port driver-private state: CPB array, external APRD table and
 * cached iomem pointers into BAR5 (control, general, notifier-clear). */
213 struct nv_adma_port_priv {
214 struct nv_adma_cpb *cpb;
216 struct nv_adma_prd *aprd;
218 void __iomem * ctl_block;
219 void __iomem * gen_block;
220 void __iomem * notifier_clear_block;
224 struct nv_host_priv {
/* True when the ADMA general-control word flags an interrupt for PORT:
 * port 0 uses bit 19, port 1 bit 31 (12 bits apart). */
228 #define NV_ADMA_CHECK_INTR(GCTL, PORT) ((GCTL) & ( 1 << (19 + (12 * (PORT)))))
/* Forward declarations: PCI probe/remove/resume, per-flavor interrupt
 * handlers and freeze/thaw hooks, SCR accessors, and the ADMA-specific
 * port operations implemented further down in this file. */
230 static int nv_init_one (struct pci_dev *pdev, const struct pci_device_id *ent);
231 static void nv_remove_one (struct pci_dev *pdev);
232 static int nv_pci_device_resume(struct pci_dev *pdev);
233 static void nv_ck804_host_stop(struct ata_host *host);
234 static irqreturn_t nv_generic_interrupt(int irq, void *dev_instance);
235 static irqreturn_t nv_nf2_interrupt(int irq, void *dev_instance);
236 static irqreturn_t nv_ck804_interrupt(int irq, void *dev_instance);
237 static u32 nv_scr_read (struct ata_port *ap, unsigned int sc_reg);
238 static void nv_scr_write (struct ata_port *ap, unsigned int sc_reg, u32 val);
240 static void nv_nf2_freeze(struct ata_port *ap);
241 static void nv_nf2_thaw(struct ata_port *ap);
242 static void nv_ck804_freeze(struct ata_port *ap);
243 static void nv_ck804_thaw(struct ata_port *ap);
244 static void nv_error_handler(struct ata_port *ap);
245 static int nv_adma_slave_config(struct scsi_device *sdev);
246 static int nv_adma_check_atapi_dma(struct ata_queued_cmd *qc);
247 static void nv_adma_qc_prep(struct ata_queued_cmd *qc);
248 static unsigned int nv_adma_qc_issue(struct ata_queued_cmd *qc);
249 static irqreturn_t nv_adma_interrupt(int irq, void *dev_instance);
250 static void nv_adma_irq_clear(struct ata_port *ap);
251 static int nv_adma_port_start(struct ata_port *ap);
252 static void nv_adma_port_stop(struct ata_port *ap);
253 static int nv_adma_port_suspend(struct ata_port *ap, pm_message_t mesg);
254 static int nv_adma_port_resume(struct ata_port *ap);
255 static void nv_adma_error_handler(struct ata_port *ap);
256 static void nv_adma_host_stop(struct ata_host *host);
257 static void nv_adma_bmdma_setup(struct ata_queued_cmd *qc);
258 static void nv_adma_bmdma_start(struct ata_queued_cmd *qc);
259 static void nv_adma_bmdma_stop(struct ata_queued_cmd *qc);
260 static u8 nv_adma_bmdma_status(struct ata_port *ap);
/* Chip flavors indexing nv_port_info[]; NF3 is programmed like NF2. */
266 NFORCE3 = NFORCE2, /* NF2 == NF3 as far as sata_nv is concerned */
/* PCI ID table binding each known device to a chip flavor. The two
 * trailing class-match entries catch any other NVIDIA IDE- or RAID-class
 * device and drive it as GENERIC (legacy BMDMA only). */
271 static const struct pci_device_id nv_pci_tbl[] = {
272 { PCI_VDEVICE(NVIDIA, PCI_DEVICE_ID_NVIDIA_NFORCE2S_SATA), NFORCE2 },
273 { PCI_VDEVICE(NVIDIA, PCI_DEVICE_ID_NVIDIA_NFORCE3S_SATA), NFORCE3 },
274 { PCI_VDEVICE(NVIDIA, PCI_DEVICE_ID_NVIDIA_NFORCE3S_SATA2), NFORCE3 },
275 { PCI_VDEVICE(NVIDIA, PCI_DEVICE_ID_NVIDIA_NFORCE_CK804_SATA), CK804 },
276 { PCI_VDEVICE(NVIDIA, PCI_DEVICE_ID_NVIDIA_NFORCE_CK804_SATA2), CK804 },
277 { PCI_VDEVICE(NVIDIA, PCI_DEVICE_ID_NVIDIA_NFORCE_MCP04_SATA), CK804 },
278 { PCI_VDEVICE(NVIDIA, PCI_DEVICE_ID_NVIDIA_NFORCE_MCP04_SATA2), CK804 },
279 { PCI_VDEVICE(NVIDIA, PCI_DEVICE_ID_NVIDIA_NFORCE_MCP51_SATA), GENERIC },
280 { PCI_VDEVICE(NVIDIA, PCI_DEVICE_ID_NVIDIA_NFORCE_MCP51_SATA2), GENERIC },
281 { PCI_VDEVICE(NVIDIA, PCI_DEVICE_ID_NVIDIA_NFORCE_MCP55_SATA), GENERIC },
282 { PCI_VDEVICE(NVIDIA, PCI_DEVICE_ID_NVIDIA_NFORCE_MCP55_SATA2), GENERIC },
283 { PCI_VDEVICE(NVIDIA, PCI_DEVICE_ID_NVIDIA_NFORCE_MCP61_SATA), GENERIC },
284 { PCI_VDEVICE(NVIDIA, PCI_DEVICE_ID_NVIDIA_NFORCE_MCP61_SATA2), GENERIC },
285 { PCI_VDEVICE(NVIDIA, PCI_DEVICE_ID_NVIDIA_NFORCE_MCP61_SATA3), GENERIC },
286 { PCI_VENDOR_ID_NVIDIA, PCI_ANY_ID,
287 PCI_ANY_ID, PCI_ANY_ID,
288 PCI_CLASS_STORAGE_IDE<<8, 0xffff00, GENERIC },
289 { PCI_VENDOR_ID_NVIDIA, PCI_ANY_ID,
290 PCI_ANY_ID, PCI_ANY_ID,
291 PCI_CLASS_STORAGE_RAID<<8, 0xffff00, GENERIC },
293 { } /* terminate list */
/* PCI driver glue. Suspend uses the generic libata helper; resume is
 * NV-specific so ADMA configuration can be restored first. */
296 static struct pci_driver nv_pci_driver = {
298 .id_table = nv_pci_tbl,
299 .probe = nv_init_one,
300 .suspend = ata_pci_device_suspend,
301 .resume = nv_pci_device_resume,
302 .remove = nv_remove_one,
/* SCSI host template for the legacy (non-ADMA) flavors: standard libata
 * queue depth, PRD table size and DMA boundary. */
305 static struct scsi_host_template nv_sht = {
306 .module = THIS_MODULE,
308 .ioctl = ata_scsi_ioctl,
309 .queuecommand = ata_scsi_queuecmd,
310 .can_queue = ATA_DEF_QUEUE,
311 .this_id = ATA_SHT_THIS_ID,
312 .sg_tablesize = LIBATA_MAX_PRD,
313 .cmd_per_lun = ATA_SHT_CMD_PER_LUN,
314 .emulated = ATA_SHT_EMULATED,
315 .use_clustering = ATA_SHT_USE_CLUSTERING,
316 .proc_name = DRV_NAME,
317 .dma_boundary = ATA_DMA_BOUNDARY,
318 .slave_configure = ata_scsi_slave_config,
319 .slave_destroy = ata_scsi_slave_destroy,
320 .bios_param = ata_std_bios_param,
321 .suspend = ata_scsi_device_suspend,
322 .resume = ata_scsi_device_resume,
/* SCSI host template for ADMA: deeper queue (one per CPB tag), larger SG
 * table, wider DMA boundary, and an NV-specific slave_configure that
 * switches limits when an ATAPI device is attached. */
325 static struct scsi_host_template nv_adma_sht = {
326 .module = THIS_MODULE,
328 .ioctl = ata_scsi_ioctl,
329 .queuecommand = ata_scsi_queuecmd,
330 .can_queue = NV_ADMA_MAX_CPBS,
331 .this_id = ATA_SHT_THIS_ID,
332 .sg_tablesize = NV_ADMA_SGTBL_TOTAL_LEN,
333 .cmd_per_lun = ATA_SHT_CMD_PER_LUN,
334 .emulated = ATA_SHT_EMULATED,
335 .use_clustering = ATA_SHT_USE_CLUSTERING,
336 .proc_name = DRV_NAME,
337 .dma_boundary = NV_ADMA_DMA_BOUNDARY,
338 .slave_configure = nv_adma_slave_config,
339 .slave_destroy = ata_scsi_slave_destroy,
340 .bios_param = ata_std_bios_param,
341 .suspend = ata_scsi_device_suspend,
342 .resume = ata_scsi_device_resume,
/* Legacy BMDMA port operations. The three tables below differ only in
 * freeze/thaw hooks, interrupt handler and (for CK804) host_stop;
 * everything else is the stock libata BMDMA path. */
345 static const struct ata_port_operations nv_generic_ops = {
346 .port_disable = ata_port_disable,
347 .tf_load = ata_tf_load,
348 .tf_read = ata_tf_read,
349 .exec_command = ata_exec_command,
350 .check_status = ata_check_status,
351 .dev_select = ata_std_dev_select,
352 .bmdma_setup = ata_bmdma_setup,
353 .bmdma_start = ata_bmdma_start,
354 .bmdma_stop = ata_bmdma_stop,
355 .bmdma_status = ata_bmdma_status,
356 .qc_prep = ata_qc_prep,
357 .qc_issue = ata_qc_issue_prot,
358 .freeze = ata_bmdma_freeze,
359 .thaw = ata_bmdma_thaw,
360 .error_handler = nv_error_handler,
361 .post_internal_cmd = ata_bmdma_post_internal_cmd,
362 .data_xfer = ata_data_xfer,
363 .irq_handler = nv_generic_interrupt,
364 .irq_clear = ata_bmdma_irq_clear,
365 .scr_read = nv_scr_read,
366 .scr_write = nv_scr_write,
367 .port_start = ata_port_start,
/* nForce2/3: NV-specific freeze and interrupt handling. */
370 static const struct ata_port_operations nv_nf2_ops = {
371 .port_disable = ata_port_disable,
372 .tf_load = ata_tf_load,
373 .tf_read = ata_tf_read,
374 .exec_command = ata_exec_command,
375 .check_status = ata_check_status,
376 .dev_select = ata_std_dev_select,
377 .bmdma_setup = ata_bmdma_setup,
378 .bmdma_start = ata_bmdma_start,
379 .bmdma_stop = ata_bmdma_stop,
380 .bmdma_status = ata_bmdma_status,
381 .qc_prep = ata_qc_prep,
382 .qc_issue = ata_qc_issue_prot,
383 .freeze = nv_nf2_freeze,
385 .error_handler = nv_error_handler,
386 .post_internal_cmd = ata_bmdma_post_internal_cmd,
387 .data_xfer = ata_data_xfer,
388 .irq_handler = nv_nf2_interrupt,
389 .irq_clear = ata_bmdma_irq_clear,
390 .scr_read = nv_scr_read,
391 .scr_write = nv_scr_write,
392 .port_start = ata_port_start,
/* CK804/MCP04: NV freeze/thaw/interrupt plus a host_stop that undoes
 * the controller setup done at probe time. */
395 static const struct ata_port_operations nv_ck804_ops = {
396 .port_disable = ata_port_disable,
397 .tf_load = ata_tf_load,
398 .tf_read = ata_tf_read,
399 .exec_command = ata_exec_command,
400 .check_status = ata_check_status,
401 .dev_select = ata_std_dev_select,
402 .bmdma_setup = ata_bmdma_setup,
403 .bmdma_start = ata_bmdma_start,
404 .bmdma_stop = ata_bmdma_stop,
405 .bmdma_status = ata_bmdma_status,
406 .qc_prep = ata_qc_prep,
407 .qc_issue = ata_qc_issue_prot,
408 .freeze = nv_ck804_freeze,
409 .thaw = nv_ck804_thaw,
410 .error_handler = nv_error_handler,
411 .post_internal_cmd = ata_bmdma_post_internal_cmd,
412 .data_xfer = ata_data_xfer,
413 .irq_handler = nv_ck804_interrupt,
414 .irq_clear = ata_bmdma_irq_clear,
415 .scr_read = nv_scr_read,
416 .scr_write = nv_scr_write,
417 .port_start = ata_port_start,
418 .host_stop = nv_ck804_host_stop,
/* ADMA port operations: taskfile access stays on the legacy helpers,
 * but qc prep/issue, BMDMA, interrupt handling, error handling and port
 * lifecycle (start/stop/suspend/resume) are all ADMA-aware. */
421 static const struct ata_port_operations nv_adma_ops = {
422 .port_disable = ata_port_disable,
423 .tf_load = ata_tf_load,
424 .tf_read = ata_tf_read,
/* Vetoes ATAPI DMA until legacy-mode setup is complete; see below. */
425 .check_atapi_dma = nv_adma_check_atapi_dma,
426 .exec_command = ata_exec_command,
427 .check_status = ata_check_status,
428 .dev_select = ata_std_dev_select,
429 .bmdma_setup = nv_adma_bmdma_setup,
430 .bmdma_start = nv_adma_bmdma_start,
431 .bmdma_stop = nv_adma_bmdma_stop,
432 .bmdma_status = nv_adma_bmdma_status,
433 .qc_prep = nv_adma_qc_prep,
434 .qc_issue = nv_adma_qc_issue,
435 .freeze = nv_ck804_freeze,
436 .thaw = nv_ck804_thaw,
437 .error_handler = nv_adma_error_handler,
438 .post_internal_cmd = nv_adma_bmdma_stop,
439 .data_xfer = ata_data_xfer,
440 .irq_handler = nv_adma_interrupt,
441 .irq_clear = nv_adma_irq_clear,
442 .scr_read = nv_scr_read,
443 .scr_write = nv_scr_write,
444 .port_start = nv_adma_port_start,
445 .port_stop = nv_adma_port_stop,
446 .port_suspend = nv_adma_port_suspend,
447 .port_resume = nv_adma_port_resume,
448 .host_stop = nv_adma_host_stop,
/* Per-flavor port info, indexed by the chip-flavor enum. All share the
 * same transfer-mode masks; only the ADMA entry adds MMIO + NCQ flags
 * and the ADMA ops table. */
451 static struct ata_port_info nv_port_info[] = {
455 .flags = ATA_FLAG_SATA | ATA_FLAG_NO_LEGACY |
456 ATA_FLAG_HRST_TO_RESUME,
457 .pio_mask = NV_PIO_MASK,
458 .mwdma_mask = NV_MWDMA_MASK,
459 .udma_mask = NV_UDMA_MASK,
460 .port_ops = &nv_generic_ops,
465 .flags = ATA_FLAG_SATA | ATA_FLAG_NO_LEGACY |
466 ATA_FLAG_HRST_TO_RESUME,
467 .pio_mask = NV_PIO_MASK,
468 .mwdma_mask = NV_MWDMA_MASK,
469 .udma_mask = NV_UDMA_MASK,
470 .port_ops = &nv_nf2_ops,
475 .flags = ATA_FLAG_SATA | ATA_FLAG_NO_LEGACY |
476 ATA_FLAG_HRST_TO_RESUME,
477 .pio_mask = NV_PIO_MASK,
478 .mwdma_mask = NV_MWDMA_MASK,
479 .udma_mask = NV_UDMA_MASK,
480 .port_ops = &nv_ck804_ops,
485 .flags = ATA_FLAG_SATA | ATA_FLAG_NO_LEGACY |
486 ATA_FLAG_HRST_TO_RESUME |
487 ATA_FLAG_MMIO | ATA_FLAG_NCQ,
488 .pio_mask = NV_PIO_MASK,
489 .mwdma_mask = NV_MWDMA_MASK,
490 .udma_mask = NV_UDMA_MASK,
491 .port_ops = &nv_adma_ops,
495 MODULE_AUTHOR("NVIDIA");
496 MODULE_DESCRIPTION("low-level driver for NVIDIA nForce SATA controller");
497 MODULE_LICENSE("GPL");
498 MODULE_DEVICE_TABLE(pci, nv_pci_tbl);
499 MODULE_VERSION(DRV_VERSION);
/* Module knob: non-zero enables the ADMA interface on capable chips. */
501 static int adma_enabled = 1;
/* Switch the port into legacy ATA register mode: clear the GO bit in
 * NV_ADMA_CTL and record the mode in pp->flags. Required before any
 * command is sent through the classic taskfile/BMDMA interface. */
503 static void nv_adma_register_mode(struct ata_port *ap)
505 struct nv_adma_port_priv *pp = ap->private_data;
506 void __iomem *mmio = pp->ctl_block;
/* Already in register mode -- nothing to do. */
509 if (pp->flags & NV_ADMA_PORT_REGISTER_MODE)
512 tmp = readw(mmio + NV_ADMA_CTL);
513 writew(tmp & ~NV_ADMA_CTL_GO, mmio + NV_ADMA_CTL);
515 pp->flags |= NV_ADMA_PORT_REGISTER_MODE;
/* Switch the port into ADMA mode: set the GO bit and clear the
 * register-mode flag. Must never be entered while an ATAPI device is
 * configured (ADMA does not carry ATAPI), hence the WARN_ON. */
518 static void nv_adma_mode(struct ata_port *ap)
520 struct nv_adma_port_priv *pp = ap->private_data;
521 void __iomem *mmio = pp->ctl_block;
/* Already in ADMA mode -- nothing to do. */
524 if (!(pp->flags & NV_ADMA_PORT_REGISTER_MODE))
527 WARN_ON(pp->flags & NV_ADMA_ATAPI_SETUP_COMPLETE);
529 tmp = readw(mmio + NV_ADMA_CTL);
530 writew(tmp | NV_ADMA_CTL_GO, mmio + NV_ADMA_CTL);
532 pp->flags &= ~NV_ADMA_PORT_REGISTER_MODE;
/* SCSI slave_configure hook for ADMA ports.
 *
 * Chooses block-layer DMA limits based on the attached device class:
 * ATAPI devices are forced down the legacy 32-bit interface (tight
 * bounce limit, boundary and SG count, port dropped to register mode),
 * while ATA devices get the full ADMA limits. Also flips the per-port
 * enable/PWB bits in PCI config register NV_MCP_SATA_CFG_20 so the
 * hardware path matches, and records the ATAPI state in pp->flags. */
535 static int nv_adma_slave_config(struct scsi_device *sdev)
537 struct ata_port *ap = ata_shost_to_port(sdev->host);
538 struct nv_adma_port_priv *pp = ap->private_data;
539 struct pci_dev *pdev = to_pci_dev(ap->host->dev);
541 unsigned long segment_boundary;
542 unsigned short sg_tablesize;
545 u32 current_reg, new_reg, config_mask;
547 rc = ata_scsi_slave_config(sdev);
549 if (sdev->id >= ATA_MAX_DEVICES || sdev->channel || sdev->lun)
550 /* Not a proper libata device, ignore */
553 if (ap->device[sdev->id].class == ATA_DEV_ATAPI) {
555 * NVIDIA reports that ADMA mode does not support ATAPI commands.
556 * Therefore ATAPI commands are sent through the legacy interface.
557 * However, the legacy interface only supports 32-bit DMA.
558 * Restrict DMA parameters as required by the legacy interface
559 * when an ATAPI device is connected.
561 bounce_limit = ATA_DMA_MASK;
562 segment_boundary = ATA_DMA_BOUNDARY;
563 /* Subtract 1 since an extra entry may be needed for padding, see
565 sg_tablesize = LIBATA_MAX_PRD - 1;
567 /* Since the legacy DMA engine is in use, we need to disable ADMA
570 nv_adma_register_mode(ap);
/* ATA device: use the full ADMA limits (device's own DMA mask). */
573 bounce_limit = *ap->dev->dma_mask;
574 segment_boundary = NV_ADMA_DMA_BOUNDARY;
575 sg_tablesize = NV_ADMA_SGTBL_TOTAL_LEN;
/* NOTE(review): "¤t_reg" below is mojibake for "&current_reg"
 * (HTML-entity corruption of the source); repair the file encoding. */
579 pci_read_config_dword(pdev, NV_MCP_SATA_CFG_20, ¤t_reg);
582 config_mask = NV_MCP_SATA_CFG_20_PORT1_EN |
583 NV_MCP_SATA_CFG_20_PORT1_PWB_EN;
585 config_mask = NV_MCP_SATA_CFG_20_PORT0_EN |
586 NV_MCP_SATA_CFG_20_PORT0_PWB_EN;
/* ATA: enable this port's ADMA bits; ATAPI: disable them. */
589 new_reg = current_reg | config_mask;
590 pp->flags &= ~NV_ADMA_ATAPI_SETUP_COMPLETE;
593 new_reg = current_reg & ~config_mask;
594 pp->flags |= NV_ADMA_ATAPI_SETUP_COMPLETE;
/* Avoid a config-space write when nothing changed. */
597 if(current_reg != new_reg)
598 pci_write_config_dword(pdev, NV_MCP_SATA_CFG_20, new_reg);
600 blk_queue_bounce_limit(sdev->request_queue, bounce_limit);
601 blk_queue_segment_boundary(sdev->request_queue, segment_boundary);
602 blk_queue_max_hw_segments(sdev->request_queue, sg_tablesize);
603 ata_port_printk(ap, KERN_INFO,
604 "bounce limit 0x%llX, segment boundary 0x%lX, hw segs %hu\n",
605 (unsigned long long)bounce_limit, segment_boundary, sg_tablesize);
/* check_atapi_dma hook: returns non-zero (veto DMA) unless ATAPI setup
 * has completed, i.e. the port has been dropped to the legacy path. */
609 static int nv_adma_check_atapi_dma(struct ata_queued_cmd *qc)
611 struct nv_adma_port_priv *pp = qc->ap->private_data;
612 return !(pp->flags & NV_ADMA_ATAPI_SETUP_COMPLETE);
/* Encode a taskfile into the CPB's array of 16-bit entries: each entry
 * is (ATA register << 8) | value, with control bits from nv_adma_regbits.
 * The device entry carries WNB (wait-not-BSY); the command entry carries
 * CMDEND to terminate the list. Non-LBA48 commands pad the five HOB
 * slots with IGN entries so the entry count stays fixed. */
615 static unsigned int nv_adma_tf_to_cpb(struct ata_taskfile *tf, __le16 *cpb)
617 unsigned int idx = 0;
619 cpb[idx++] = cpu_to_le16((ATA_REG_DEVICE << 8) | tf->device | WNB);
621 if ((tf->flags & ATA_TFLAG_LBA48) == 0) {
622 cpb[idx++] = cpu_to_le16(IGN);
623 cpb[idx++] = cpu_to_le16(IGN);
624 cpb[idx++] = cpu_to_le16(IGN);
625 cpb[idx++] = cpu_to_le16(IGN);
626 cpb[idx++] = cpu_to_le16(IGN);
/* LBA48: write the HOB (previous) register values first. */
629 cpb[idx++] = cpu_to_le16((ATA_REG_ERR << 8) | tf->hob_feature);
630 cpb[idx++] = cpu_to_le16((ATA_REG_NSECT << 8) | tf->hob_nsect);
631 cpb[idx++] = cpu_to_le16((ATA_REG_LBAL << 8) | tf->hob_lbal);
632 cpb[idx++] = cpu_to_le16((ATA_REG_LBAM << 8) | tf->hob_lbam);
633 cpb[idx++] = cpu_to_le16((ATA_REG_LBAH << 8) | tf->hob_lbah);
635 cpb[idx++] = cpu_to_le16((ATA_REG_ERR << 8) | tf->feature);
636 cpb[idx++] = cpu_to_le16((ATA_REG_NSECT << 8) | tf->nsect);
637 cpb[idx++] = cpu_to_le16((ATA_REG_LBAL << 8) | tf->lbal);
638 cpb[idx++] = cpu_to_le16((ATA_REG_LBAM << 8) | tf->lbam);
639 cpb[idx++] = cpu_to_le16((ATA_REG_LBAH << 8) | tf->lbah);
641 cpb[idx++] = cpu_to_le16((ATA_REG_CMD << 8) | tf->command | CMDEND);
/* Inspect one CPB's response flags and, if it finished (or force_err
 * demands it), complete the matching queued command with an error mask
 * derived from the ATA status / error indications. */
646 static void nv_adma_check_cpb(struct ata_port *ap, int cpb_num, int force_err)
648 struct nv_adma_port_priv *pp = ap->private_data;
649 int complete = 0, have_err = 0;
650 u8 flags = pp->cpb[cpb_num].resp_flags;
652 VPRINTK("CPB %d, flags=0x%x\n", cpb_num, flags);
654 if (flags & NV_CPB_RESP_DONE) {
655 VPRINTK("CPB flags done, flags=0x%x\n", flags);
658 if (flags & NV_CPB_RESP_ATA_ERR) {
659 ata_port_printk(ap, KERN_ERR, "CPB flags ATA err, flags=0x%x\n", flags);
663 if (flags & NV_CPB_RESP_CMD_ERR) {
664 ata_port_printk(ap, KERN_ERR, "CPB flags CMD err, flags=0x%x\n", flags);
668 if (flags & NV_CPB_RESP_CPB_ERR) {
669 ata_port_printk(ap, KERN_ERR, "CPB flags CPB err, flags=0x%x\n", flags);
673 if(complete || force_err)
675 struct ata_queued_cmd *qc = ata_qc_from_tag(ap, cpb_num);
678 /* Only use the ATA port status for non-NCQ commands.
679 For NCQ commands the current status may have nothing to do with
680 the command just completed. */
681 if(qc->tf.protocol != ATA_PROT_NCQ)
682 ata_status = readb(pp->ctl_block + (ATA_REG_STATUS * 4));
/* Any error indication forces ATA_ERR into the completion status. */
684 if(have_err || force_err)
685 ata_status |= ATA_ERR;
687 qc->err_mask |= ac_err_mask(ata_status);
688 DPRINTK("Completing qc from tag %d with err_mask %u\n",cpb_num,
/* Handle a legacy-mode interrupt for one port given its 4-bit slice of
 * the NV interrupt status byte. Freezes the port on hotplug events,
 * ignores interrupts that are not ours, and otherwise delegates to the
 * generic libata host interrupt handler for the active command. */
695 static int nv_host_intr(struct ata_port *ap, u8 irq_stat)
697 struct ata_queued_cmd *qc = ata_qc_from_tag(ap, ap->active_tag);
699 /* freeze if hotplugged */
700 if (unlikely(irq_stat & (NV_INT_ADDED | NV_INT_REMOVED))) {
705 /* bail out if not our interrupt */
706 if (!(irq_stat & NV_INT_DEV))
709 /* DEV interrupt w/ no active qc? */
710 if (unlikely(!qc || (qc->tf.flags & ATA_TFLAG_POLLING))) {
/* Read status to ack the spurious device interrupt. */
711 ata_check_status(ap);
715 /* handle interrupt */
716 return ata_host_intr(ap, qc);
/* Main ADMA interrupt handler.
 *
 * For each enabled port: if it is in register mode, fall back to the
 * legacy per-port handler (forcing NV_INT_DEV on when a command is
 * active, because the hardware's DEV indication is unreliable there);
 * otherwise read and clear the ADMA notifier/status registers, freeze
 * on hotplug, note global errors, and walk the completed CPBs (single
 * active tag for non-NCQ, sactive bitmap for NCQ). Both ports'
 * notifier-clear registers are written together at the end, as NVIDIA
 * requires. Runs under the host lock. */
719 static irqreturn_t nv_adma_interrupt(int irq, void *dev_instance)
721 struct ata_host *host = dev_instance;
723 u32 notifier_clears[2];
725 spin_lock(&host->lock);
727 for (i = 0; i < host->n_ports; i++) {
728 struct ata_port *ap = host->ports[i];
729 notifier_clears[i] = 0;
731 if (ap && !(ap->flags & ATA_FLAG_DISABLED)) {
732 struct nv_adma_port_priv *pp = ap->private_data;
733 void __iomem *mmio = pp->ctl_block;
736 int have_global_err = 0;
737 u32 notifier, notifier_error;
739 /* if in ATA register mode, use standard ata interrupt handler */
740 if (pp->flags & NV_ADMA_PORT_REGISTER_MODE) {
741 u8 irq_stat = readb(host->iomap[NV_MMIO_BAR] + NV_INT_STATUS_CK804)
742 >> (NV_INT_PORT_SHIFT * i);
743 if(ata_tag_valid(ap->active_tag))
744 /** NV_INT_DEV indication seems unreliable at times
745 at least in ADMA mode. Force it on always when a
746 command is active, to prevent losing interrupts. */
747 irq_stat |= NV_INT_DEV;
748 handled += nv_host_intr(ap, irq_stat);
752 notifier = readl(mmio + NV_ADMA_NOTIFIER);
753 notifier_error = readl(mmio + NV_ADMA_NOTIFIER_ERROR);
754 notifier_clears[i] = notifier | notifier_error;
756 gen_ctl = readl(pp->gen_block + NV_ADMA_GEN_CTL);
/* Nothing pending for this port: skip it. */
758 if( !NV_ADMA_CHECK_INTR(gen_ctl, ap->port_no) && !notifier &&
763 status = readw(mmio + NV_ADMA_STAT);
765 /* Clear status. Ensure the controller sees the clearing before we start
766 looking at any of the CPB statuses, so that any CPB completions after
767 this point in the handler will raise another interrupt. */
768 writew(status, mmio + NV_ADMA_STAT);
769 readw(mmio + NV_ADMA_STAT); /* flush posted write */
772 /* freeze if hotplugged */
773 if (unlikely(status & (NV_ADMA_STAT_HOTPLUG | NV_ADMA_STAT_HOTUNPLUG))) {
774 ata_port_printk(ap, KERN_NOTICE, "Hotplug event, freezing\n");
780 if (status & NV_ADMA_STAT_TIMEOUT) {
781 ata_port_printk(ap, KERN_ERR, "timeout, stat=0x%x\n", status);
784 if (status & NV_ADMA_STAT_CPBERR) {
785 ata_port_printk(ap, KERN_ERR, "CPB error, stat=0x%x\n", status);
788 if ((status & NV_ADMA_STAT_DONE) || have_global_err) {
789 /** Check CPBs for completed commands */
791 if(ata_tag_valid(ap->active_tag))
792 /* Non-NCQ command */
793 nv_adma_check_cpb(ap, ap->active_tag, have_global_err ||
794 (notifier_error & (1 << ap->active_tag)));
/* NCQ: walk every tag set in the sactive bitmap. */
797 u32 active = ap->sactive;
798 while( (pos = ffs(active)) ) {
800 nv_adma_check_cpb(ap, pos, have_global_err ||
801 (notifier_error & (1 << pos)) );
802 active &= ~(1 << pos );
807 handled++; /* irq handled if we got here */
811 if(notifier_clears[0] || notifier_clears[1]) {
812 /* Note: Both notifier clear registers must be written
813 if either is set, even if one is zero, according to NVIDIA. */
814 struct nv_adma_port_priv *pp = host->ports[0]->private_data;
815 writel(notifier_clears[0], pp->notifier_clear_block);
816 pp = host->ports[1]->private_data;
817 writel(notifier_clears[1], pp->notifier_clear_block);
820 spin_unlock(&host->lock);
822 return IRQ_RETVAL(handled);
/* irq_clear hook: acknowledge everything -- write back the ADMA status
 * word, clear both notifier registers, and clear the legacy BMDMA
 * interrupt bit by writing the DMA status register to itself. */
825 static void nv_adma_irq_clear(struct ata_port *ap)
827 struct nv_adma_port_priv *pp = ap->private_data;
828 void __iomem *mmio = pp->ctl_block;
829 u16 status = readw(mmio + NV_ADMA_STAT);
830 u32 notifier = readl(mmio + NV_ADMA_NOTIFIER);
831 u32 notifier_error = readl(mmio + NV_ADMA_NOTIFIER_ERROR);
832 void __iomem *dma_stat_addr = ap->ioaddr.bmdma_addr + ATA_DMA_STATUS;
834 /* clear ADMA status */
835 writew(status, mmio + NV_ADMA_STAT);
836 writel(notifier | notifier_error,
837 pp->notifier_clear_block);
839 /** clear legacy status */
840 iowrite8(ioread8(dma_stat_addr), dma_stat_addr);
/* bmdma_setup hook: standard BMDMA setup (PRD table address, direction,
 * command issue), valid only while the port is in register mode -- the
 * initial check guards against being called in ADMA mode. */
843 static void nv_adma_bmdma_setup(struct ata_queued_cmd *qc)
845 struct ata_port *ap = qc->ap;
846 unsigned int rw = (qc->tf.flags & ATA_TFLAG_WRITE);
847 struct nv_adma_port_priv *pp = ap->private_data;
850 if(!(pp->flags & NV_ADMA_PORT_REGISTER_MODE)) {
855 /* load PRD table addr. */
856 iowrite32(ap->prd_dma, ap->ioaddr.bmdma_addr + ATA_DMA_TABLE_OFS);
858 /* specify data direction, triple-check start bit is clear */
859 dmactl = ioread8(ap->ioaddr.bmdma_addr + ATA_DMA_CMD);
860 dmactl &= ~(ATA_DMA_WR | ATA_DMA_START);
862 dmactl |= ATA_DMA_WR;
864 iowrite8(dmactl, ap->ioaddr.bmdma_addr + ATA_DMA_CMD);
866 /* issue r/w command */
867 ata_exec_command(ap, &qc->tf);
/* bmdma_start hook: set the BMDMA START bit; like bmdma_setup, only
 * legal while the port is in register mode. */
870 static void nv_adma_bmdma_start(struct ata_queued_cmd *qc)
872 struct ata_port *ap = qc->ap;
873 struct nv_adma_port_priv *pp = ap->private_data;
876 if(!(pp->flags & NV_ADMA_PORT_REGISTER_MODE)) {
881 /* start host DMA transaction */
882 dmactl = ioread8(ap->ioaddr.bmdma_addr + ATA_DMA_CMD);
883 iowrite8(dmactl | ATA_DMA_START,
884 ap->ioaddr.bmdma_addr + ATA_DMA_CMD);
/* bmdma_stop hook (also used as post_internal_cmd): clear the BMDMA
 * START bit and do the spec-mandated altstatus dummy read. Skipped
 * entirely when the port is in ADMA mode. */
887 static void nv_adma_bmdma_stop(struct ata_queued_cmd *qc)
889 struct ata_port *ap = qc->ap;
890 struct nv_adma_port_priv *pp = ap->private_data;
892 if(!(pp->flags & NV_ADMA_PORT_REGISTER_MODE))
895 /* clear start/stop bit */
896 iowrite8(ioread8(ap->ioaddr.bmdma_addr + ATA_DMA_CMD) & ~ATA_DMA_START,
897 ap->ioaddr.bmdma_addr + ATA_DMA_CMD);
899 /* one-PIO-cycle guaranteed wait, per spec, for HDMA1:0 transition */
900 ata_altstatus(ap); /* dummy read */
/* bmdma_status hook: plain BMDMA status read; warns if called while
 * the port is not in register mode. */
903 static u8 nv_adma_bmdma_status(struct ata_port *ap)
905 struct nv_adma_port_priv *pp = ap->private_data;
907 WARN_ON(!(pp->flags & NV_ADMA_PORT_REGISTER_MODE));
909 return ioread8(ap->ioaddr.bmdma_addr + ATA_DMA_STATUS);
/* port_start hook for ADMA ports.
 *
 * Allocates the per-port private struct (devm-managed) and one coherent
 * DMA chunk holding all 32 CPBs followed by the per-tag external APRD
 * tables, points the controller at the CPB base, clears stale status
 * and the fetch count, then takes the channel through a reset with the
 * port left in register mode and interrupts enabled. */
912 static int nv_adma_port_start(struct ata_port *ap)
914 struct device *dev = ap->host->dev;
915 struct nv_adma_port_priv *pp;
924 rc = ata_port_start(ap);
928 pp = devm_kzalloc(dev, sizeof(*pp), GFP_KERNEL);
/* Per-port slice of the BAR5 ADMA register space. */
932 mmio = ap->host->iomap[NV_MMIO_BAR] + NV_ADMA_PORT +
933 ap->port_no * NV_ADMA_PORT_SIZE;
934 pp->ctl_block = mmio;
935 pp->gen_block = ap->host->iomap[NV_MMIO_BAR] + NV_ADMA_GEN;
936 pp->notifier_clear_block = pp->gen_block +
937 NV_ADMA_NOTIFIER_CLEAR + (4 * ap->port_no);
939 mem = dmam_alloc_coherent(dev, NV_ADMA_PORT_PRIV_DMA_SZ,
940 &mem_dma, GFP_KERNEL);
943 memset(mem, 0, NV_ADMA_PORT_PRIV_DMA_SZ);
946 * First item in chunk of DMA memory:
947 * 128-byte command parameter block (CPB)
948 * one for each command tag
951 pp->cpb_dma = mem_dma;
953 writel(mem_dma & 0xFFFFFFFF, mmio + NV_ADMA_CPB_BASE_LOW);
/* Double 16-bit shift avoids UB when dma_addr_t is only 32 bits wide. */
954 writel((mem_dma >> 16 ) >> 16, mmio + NV_ADMA_CPB_BASE_HIGH);
956 mem += NV_ADMA_MAX_CPBS * NV_ADMA_CPB_SZ;
957 mem_dma += NV_ADMA_MAX_CPBS * NV_ADMA_CPB_SZ;
960 * Second item: block of ADMA_SGTBL_LEN s/g entries
963 pp->aprd_dma = mem_dma;
965 ap->private_data = pp;
967 /* clear any outstanding interrupt conditions */
968 writew(0xffff, mmio + NV_ADMA_STAT);
970 /* initialize port variables */
971 pp->flags = NV_ADMA_PORT_REGISTER_MODE;
973 /* clear CPB fetch count */
974 writew(0, mmio + NV_ADMA_CPB_COUNT);
976 /* clear GO for register mode, enable interrupt */
977 tmp = readw(mmio + NV_ADMA_CTL);
978 writew( (tmp & ~NV_ADMA_CTL_GO) | NV_ADMA_CTL_AIEN, mmio + NV_ADMA_CTL);
/* Pulse CHANNEL_RESET: set, flush, clear, flush. */
980 tmp = readw(mmio + NV_ADMA_CTL);
981 writew(tmp | NV_ADMA_CTL_CHANNEL_RESET, mmio + NV_ADMA_CTL);
982 readl( mmio + NV_ADMA_CTL ); /* flush posted write */
984 writew(tmp & ~NV_ADMA_CTL_CHANNEL_RESET, mmio + NV_ADMA_CTL);
985 readl( mmio + NV_ADMA_CTL ); /* flush posted write */
/* port_stop hook: quiesce the channel by zeroing NV_ADMA_CTL (clears
 * GO and interrupt enables). Memory is devm-managed, nothing to free. */
990 static void nv_adma_port_stop(struct ata_port *ap)
992 struct nv_adma_port_priv *pp = ap->private_data;
993 void __iomem *mmio = pp->ctl_block;
996 writew(0, mmio + NV_ADMA_CTL);
/* port_suspend hook: drop to register mode (clears GO), zero the CPB
 * fetch count, and shut the port down by clearing NV_ADMA_CTL. */
999 static int nv_adma_port_suspend(struct ata_port *ap, pm_message_t mesg)
1001 struct nv_adma_port_priv *pp = ap->private_data;
1002 void __iomem *mmio = pp->ctl_block;
1004 /* Go to register mode - clears GO */
1005 nv_adma_register_mode(ap);
1007 /* clear CPB fetch count */
1008 writew(0, mmio + NV_ADMA_CPB_COUNT);
1010 /* disable interrupt, shut down port */
1011 writew(0, mmio + NV_ADMA_CTL);
/* port_resume hook: re-program the CPB base address and redo the same
 * init sequence as port_start (clear status, register mode, zero fetch
 * count, AIEN on, channel reset pulse) using the saved pp->cpb_dma. */
1016 static int nv_adma_port_resume(struct ata_port *ap)
1018 struct nv_adma_port_priv *pp = ap->private_data;
1019 void __iomem *mmio = pp->ctl_block;
1022 /* set CPB block location */
1023 writel(pp->cpb_dma & 0xFFFFFFFF, mmio + NV_ADMA_CPB_BASE_LOW);
1024 writel((pp->cpb_dma >> 16 ) >> 16, mmio + NV_ADMA_CPB_BASE_HIGH);
1026 /* clear any outstanding interrupt conditions */
1027 writew(0xffff, mmio + NV_ADMA_STAT);
1029 /* initialize port variables */
1030 pp->flags |= NV_ADMA_PORT_REGISTER_MODE;
1032 /* clear CPB fetch count */
1033 writew(0, mmio + NV_ADMA_CPB_COUNT);
1035 /* clear GO for register mode, enable interrupt */
1036 tmp = readw(mmio + NV_ADMA_CTL);
1037 writew((tmp & ~NV_ADMA_CTL_GO) | NV_ADMA_CTL_AIEN, mmio + NV_ADMA_CTL);
/* Pulse CHANNEL_RESET: set, flush, clear, flush. */
1039 tmp = readw(mmio + NV_ADMA_CTL);
1040 writew(tmp | NV_ADMA_CTL_CHANNEL_RESET, mmio + NV_ADMA_CTL);
1041 readl( mmio + NV_ADMA_CTL ); /* flush posted write */
1043 writew(tmp & ~NV_ADMA_CTL_CHANNEL_RESET, mmio + NV_ADMA_CTL);
1044 readl( mmio + NV_ADMA_CTL ); /* flush posted write */
/* Point one port's taskfile I/O addresses into its ADMA register slice
 * of BAR5: each shadow register sits at (ATA register index * 4), with
 * the control/altstatus register at fixed offset 0x20. */
1049 static void nv_adma_setup_port(struct ata_probe_ent *probe_ent, unsigned int port)
1051 void __iomem *mmio = probe_ent->iomap[NV_MMIO_BAR];
1052 struct ata_ioports *ioport = &probe_ent->port[port];
1056 mmio += NV_ADMA_PORT + port * NV_ADMA_PORT_SIZE;
1058 ioport->cmd_addr = mmio;
1059 ioport->data_addr = mmio + (ATA_REG_DATA * 4);
1060 ioport->error_addr =
1061 ioport->feature_addr = mmio + (ATA_REG_ERR * 4);
1062 ioport->nsect_addr = mmio + (ATA_REG_NSECT * 4);
1063 ioport->lbal_addr = mmio + (ATA_REG_LBAL * 4);
1064 ioport->lbam_addr = mmio + (ATA_REG_LBAM * 4);
1065 ioport->lbah_addr = mmio + (ATA_REG_LBAH * 4);
1066 ioport->device_addr = mmio + (ATA_REG_DEVICE * 4);
1067 ioport->status_addr =
1068 ioport->command_addr = mmio + (ATA_REG_STATUS * 4);
1069 ioport->altstatus_addr =
1070 ioport->ctl_addr = mmio + 0x20;
/* One-time host init for ADMA: turn on the enable and PWB bits for both
 * ports in PCI config register NV_MCP_SATA_CFG_20, then route every
 * port's taskfile addresses into the ADMA register space. */
1073 static int nv_adma_host_init(struct ata_probe_ent *probe_ent)
1075 struct pci_dev *pdev = to_pci_dev(probe_ent->dev);
1081 /* enable ADMA on the ports */
1082 pci_read_config_dword(pdev, NV_MCP_SATA_CFG_20, &tmp32);
1083 tmp32 |= NV_MCP_SATA_CFG_20_PORT0_EN |
1084 NV_MCP_SATA_CFG_20_PORT0_PWB_EN |
1085 NV_MCP_SATA_CFG_20_PORT1_EN |
1086 NV_MCP_SATA_CFG_20_PORT1_PWB_EN;
1088 pci_write_config_dword(pdev, NV_MCP_SATA_CFG_20, tmp32);
1090 for (i = 0; i < probe_ent->n_ports; i++)
1091 nv_adma_setup_port(probe_ent, i);
/* Fill one APRD from a scatterlist entry: DMA address, byte length and
 * flag bits (WRITE from the taskfile direction, END on the final
 * segment, CONT otherwise). */
1096 static void nv_adma_fill_aprd(struct ata_queued_cmd *qc,
1097 struct scatterlist *sg,
1099 struct nv_adma_prd *aprd)
1103 memset(aprd, 0, sizeof(struct nv_adma_prd));
1106 if (qc->tf.flags & ATA_TFLAG_WRITE)
1107 flags |= NV_APRD_WRITE;
1108 if (idx == qc->n_elem - 1)
1109 flags |= NV_APRD_END;
1111 flags |= NV_APRD_CONT;
1113 aprd->addr = cpu_to_le64(((u64)sg_dma_address(sg)));
1114 aprd->len = cpu_to_le32(((u32)sg_dma_len(sg))); /* len in bytes */
1115 aprd->flags = flags;
/* Build the scatter/gather list for a CPB: the first five APRDs go in
 * the CPB itself, the rest into this tag's slice of the external APRD
 * table, which next_aprd then points at. */
1118 static void nv_adma_fill_sg(struct ata_queued_cmd *qc, struct nv_adma_cpb *cpb)
1120 struct nv_adma_port_priv *pp = qc->ap->private_data;
1122 struct nv_adma_prd *aprd;
1123 struct scatterlist *sg;
1129 ata_for_each_sg(sg, qc) {
1130 aprd = (idx < 5) ? &cpb->aprd[idx] : &pp->aprd[NV_ADMA_SGTBL_LEN * qc->tag + (idx-5)];
1131 nv_adma_fill_aprd(qc, sg, idx, aprd);
1135 cpb->next_aprd = cpu_to_le64(((u64)(pp->aprd_dma + NV_ADMA_SGTBL_SZ * qc->tag)));
/* Prepare the CPB (command parameter block) for a queued command.
 * Non-DMA-mapped and ATAPI commands cannot use ADMA; for those, drop
 * back to legacy register mode and let the standard path issue them.
 * Otherwise the taskfile and scatter/gather list are written into the
 * CPB, and ctl_flags is stored last so the controller never sees a
 * half-built CPB marked valid. */
1138 static void nv_adma_qc_prep(struct ata_queued_cmd *qc)
1140 struct nv_adma_port_priv *pp = qc->ap->private_data;
1141 struct nv_adma_cpb *cpb = &pp->cpb[qc->tag];
1142 u8 ctl_flags = NV_CPB_CTL_CPB_VALID |
1143 NV_CPB_CTL_APRD_VALID |
/* not DMA-mapped, or port set up for ATAPI: use register mode instead */
1146 if (!(qc->flags & ATA_QCFLAG_DMAMAP) ||
1147 (pp->flags & NV_ADMA_ATAPI_SETUP_COMPLETE)) {
1148 nv_adma_register_mode(qc->ap);
1153 memset(cpb, 0, sizeof(struct nv_adma_cpb));
1157 cpb->next_cpb_idx = 0;
1159 /* turn on NCQ flags for NCQ commands */
1160 if (qc->tf.protocol == ATA_PROT_NCQ)
1161 ctl_flags |= NV_CPB_CTL_QUEUE | NV_CPB_CTL_FPDMA;
1163 VPRINTK("qc->flags = 0x%lx\n", qc->flags);
1165 nv_adma_tf_to_cpb(&qc->tf, cpb->tf);
1167 nv_adma_fill_sg(qc, cpb);
1169 /* Be paranoid and don't let the device see NV_CPB_CTL_CPB_VALID until we are
1170 finished filling in all of the contents */
1172 cpb->ctl_flags = ctl_flags;
/* Issue a prepared command. Commands that qc_prep routed to register
 * mode (no DMA mapping, or ATAPI) go through the standard libata issue
 * path; everything else is handed to the ADMA engine by writing the
 * command's tag to the APPEND register. */
1175 static unsigned int nv_adma_qc_issue(struct ata_queued_cmd *qc)
1177 struct nv_adma_port_priv *pp = qc->ap->private_data;
1178 void __iomem *mmio = pp->ctl_block;
1182 if (!(qc->flags & ATA_QCFLAG_DMAMAP) ||
1183 (pp->flags & NV_ADMA_ATAPI_SETUP_COMPLETE)) {
1184 /* use ATA register mode */
1185 VPRINTK("no dmamap or ATAPI, using ATA register mode: 0x%lx\n", qc->flags);
1186 nv_adma_register_mode(qc->ap);
1187 return ata_qc_issue_prot(qc);
/* ensure the controller is in ADMA mode before appending the CPB */
1189 nv_adma_mode(qc->ap);
1191 /* write append register, command tag in lower 8 bits
1192 and (number of cpbs to append -1) in top 8 bits */
1194 writew(qc->tag, mmio + NV_ADMA_APPEND);
1196 DPRINTK("Issued tag %u\n",qc->tag);
/* Interrupt handler for the generic (pre-nForce2) flavor: walk every
 * port under the host lock and, if a non-polled command is active,
 * let ata_host_intr() service it; otherwise read the status register
 * purely to acknowledge any spurious interrupt. */
1201 static irqreturn_t nv_generic_interrupt(int irq, void *dev_instance)
1203 struct ata_host *host = dev_instance;
1205 unsigned int handled = 0;
1206 unsigned long flags;
1208 spin_lock_irqsave(&host->lock, flags);
1210 for (i = 0; i < host->n_ports; i++) {
1211 struct ata_port *ap;
1213 ap = host->ports[i];
1215 !(ap->flags & ATA_FLAG_DISABLED)) {
1216 struct ata_queued_cmd *qc;
1218 qc = ata_qc_from_tag(ap, ap->active_tag);
/* polled commands are completed by the polling task, not here */
1219 if (qc && (!(qc->tf.flags & ATA_TFLAG_POLLING)))
1220 handled += ata_host_intr(ap, qc);
1222 // No request pending? Clear interrupt status
1223 // anyway, in case there's one pending.
1224 ap->ops->check_status(ap);
1229 spin_unlock_irqrestore(&host->lock, flags);
1231 return IRQ_RETVAL(handled);
/* Shared interrupt core for nForce2/3 and CK804: dispatch the per-port
 * slice of irq_stat to each enabled port, shifting by NV_INT_PORT_SHIFT
 * after each port so nv_host_intr() always sees its bits at bit 0.
 * Caller must hold host->lock. */
1234 static irqreturn_t nv_do_interrupt(struct ata_host *host, u8 irq_stat)
1238 for (i = 0; i < host->n_ports; i++) {
1239 struct ata_port *ap = host->ports[i];
1241 if (ap && !(ap->flags & ATA_FLAG_DISABLED))
1242 handled += nv_host_intr(ap, irq_stat);
/* expose the next port's status bits at the bottom of irq_stat */
1244 irq_stat >>= NV_INT_PORT_SHIFT;
1247 return IRQ_RETVAL(handled);
/* nForce2/3 IRQ entry point: the interrupt status byte lives in the
 * BAR5 SCR window at offset NV_INT_STATUS; read it and hand off to the
 * shared dispatcher under the host lock. */
1250 static irqreturn_t nv_nf2_interrupt(int irq, void *dev_instance)
1252 struct ata_host *host = dev_instance;
1256 spin_lock(&host->lock);
1257 irq_stat = ioread8(host->ports[0]->ioaddr.scr_addr + NV_INT_STATUS);
1258 ret = nv_do_interrupt(host, irq_stat);
1259 spin_unlock(&host->lock);
/* CK804/MCP04 IRQ entry point: same flow as nForce2, but the status
 * byte is at NV_INT_STATUS_CK804 in the MMIO BAR rather than the SCR
 * window. */
1264 static irqreturn_t nv_ck804_interrupt(int irq, void *dev_instance)
1266 struct ata_host *host = dev_instance;
1270 spin_lock(&host->lock);
1271 irq_stat = readb(host->iomap[NV_MMIO_BAR] + NV_INT_STATUS_CK804);
1272 ret = nv_do_interrupt(host, irq_stat);
1273 spin_unlock(&host->lock);
1278 static u32 nv_scr_read (struct ata_port *ap, unsigned int sc_reg)
1280 if (sc_reg > SCR_CONTROL)
1283 return ioread32(ap->ioaddr.scr_addr + (sc_reg * 4));
1286 static void nv_scr_write (struct ata_port *ap, unsigned int sc_reg, u32 val)
1288 if (sc_reg > SCR_CONTROL)
1291 iowrite32(val, ap->ioaddr.scr_addr + (sc_reg * 4));
/* EH "freeze": mask this port's interrupt sources. The nForce2/3
 * interrupt-enable byte lives in port 0's SCR window; each port owns a
 * NV_INT_PORT_SHIFT-wide slice of it. */
1294 static void nv_nf2_freeze(struct ata_port *ap)
1296 void __iomem *scr_addr = ap->host->ports[0]->ioaddr.scr_addr;
1297 int shift = ap->port_no * NV_INT_PORT_SHIFT;
/* read-modify-write: clear only this port's enable bits */
1300 mask = ioread8(scr_addr + NV_INT_ENABLE);
1301 mask &= ~(NV_INT_ALL << shift);
1302 iowrite8(mask, scr_addr + NV_INT_ENABLE);
/* EH "thaw": acknowledge any latched interrupt status for this port,
 * then re-enable its interrupt sources (NV_INT_MASK subset). */
1305 static void nv_nf2_thaw(struct ata_port *ap)
1307 void __iomem *scr_addr = ap->host->ports[0]->ioaddr.scr_addr;
1308 int shift = ap->port_no * NV_INT_PORT_SHIFT;
/* write-1-to-clear any pending status bits before unmasking */
1311 iowrite8(NV_INT_ALL << shift, scr_addr + NV_INT_STATUS);
1313 mask = ioread8(scr_addr + NV_INT_ENABLE);
1314 mask |= (NV_INT_MASK << shift);
1315 iowrite8(mask, scr_addr + NV_INT_ENABLE);
/* CK804 variant of freeze: identical logic to nv_nf2_freeze, but the
 * interrupt-enable byte is at NV_INT_ENABLE_CK804 in the MMIO BAR. */
1318 static void nv_ck804_freeze(struct ata_port *ap)
1320 void __iomem *mmio_base = ap->host->iomap[NV_MMIO_BAR];
1321 int shift = ap->port_no * NV_INT_PORT_SHIFT;
1324 mask = readb(mmio_base + NV_INT_ENABLE_CK804);
1325 mask &= ~(NV_INT_ALL << shift);
1326 writeb(mask, mmio_base + NV_INT_ENABLE_CK804);
/* CK804 variant of thaw: clear latched status, then unmask, using the
 * CK804 register locations in the MMIO BAR. */
1329 static void nv_ck804_thaw(struct ata_port *ap)
1331 void __iomem *mmio_base = ap->host->iomap[NV_MMIO_BAR];
1332 int shift = ap->port_no * NV_INT_PORT_SHIFT;
/* write-1-to-clear pending status before re-enabling */
1335 writeb(NV_INT_ALL << shift, mmio_base + NV_INT_STATUS_CK804);
1337 mask = readb(mmio_base + NV_INT_ENABLE_CK804);
1338 mask |= (NV_INT_MASK << shift);
1339 writeb(mask, mmio_base + NV_INT_ENABLE_CK804);
/* Hardreset wrapper: perform the standard SATA hardreset but discard
 * the device classification into a dummy, for the reason below. */
1342 static int nv_hardreset(struct ata_port *ap, unsigned int *class)
1346 /* SATA hardreset fails to retrieve proper device signature on
1347 * some controllers. Don't classify on hardreset. For more
1348 * info, see http://bugme.osdl.org/show_bug.cgi?id=3352
1350 return sata_std_hardreset(ap, &dummy);
/* Standard BMDMA error handler, using the driver's classification-free
 * hardreset (see nv_hardreset) in place of the stock one. */
1353 static void nv_error_handler(struct ata_port *ap)
1355 ata_bmdma_drive_eh(ap, ata_std_prereset, ata_std_softreset,
1356 nv_hardreset, ata_std_postreset);
/* ADMA-aware error handler. If the port is still in ADMA mode, dump
 * the engine state (notifier/error/gen_ctl/status and every non-idle
 * CPB) for diagnosis, drop back to register mode, invalidate all CPBs
 * so none can execute after reset, zero the CPB fetch count, and pulse
 * NV_ADMA_CTL_CHANNEL_RESET. Then run the standard BMDMA EH.
 * NOTE: the register write/readback ordering here is deliberate
 * (readl flushes each posted write) — do not reorder. */
1359 static void nv_adma_error_handler(struct ata_port *ap)
1361 struct nv_adma_port_priv *pp = ap->private_data;
1362 if(!(pp->flags & NV_ADMA_PORT_REGISTER_MODE)) {
1363 void __iomem *mmio = pp->ctl_block;
/* snapshot engine state for the diagnostic dump below */
1367 u32 notifier = readl(mmio + NV_ADMA_NOTIFIER);
1368 u32 notifier_error = readl(mmio + NV_ADMA_NOTIFIER_ERROR);
1369 u32 gen_ctl = readl(pp->gen_block + NV_ADMA_GEN_CTL);
1370 u32 status = readw(mmio + NV_ADMA_STAT);
1372 ata_port_printk(ap, KERN_ERR, "EH in ADMA mode, notifier 0x%X "
1373 "notifier_error 0x%X gen_ctl 0x%X status 0x%X\n",
1374 notifier, notifier_error, gen_ctl, status);
/* report every CPB that is still marked valid or has a response */
1376 for( i=0;i<NV_ADMA_MAX_CPBS;i++) {
1377 struct nv_adma_cpb *cpb = &pp->cpb[i];
1378 if( cpb->ctl_flags || cpb->resp_flags )
1379 ata_port_printk(ap, KERN_ERR,
1380 "CPB %d: ctl_flags 0x%x, resp_flags 0x%x\n",
1381 i, cpb->ctl_flags, cpb->resp_flags);
1384 /* Push us back into port register mode for error handling. */
1385 nv_adma_register_mode(ap);
1387 ata_port_printk(ap, KERN_ERR, "Resetting port\n");
1389 /* Mark all of the CPBs as invalid to prevent them from being executed */
1390 for( i=0;i<NV_ADMA_MAX_CPBS;i++)
1391 pp->cpb[i].ctl_flags &= ~NV_CPB_CTL_CPB_VALID;
1393 /* clear CPB fetch count */
1394 writew(0, mmio + NV_ADMA_CPB_COUNT);
/* pulse the channel-reset bit: assert, flush, deassert, flush */
1397 tmp = readw(mmio + NV_ADMA_CTL);
1398 writew(tmp | NV_ADMA_CTL_CHANNEL_RESET, mmio + NV_ADMA_CTL);
1399 readl( mmio + NV_ADMA_CTL ); /* flush posted write */
1401 writew(tmp & ~NV_ADMA_CTL_CHANNEL_RESET, mmio + NV_ADMA_CTL);
1402 readl( mmio + NV_ADMA_CTL ); /* flush posted write */
1405 ata_bmdma_drive_eh(ap, ata_std_prereset, ata_std_softreset,
1406 nv_hardreset, ata_std_postreset);
/* PCI probe entry point. Identifies SATA (vs. IDE) function by its BAR
 * count, enables the device and regions via managed (devm/pcim) APIs,
 * selects ADMA mode with a 64-bit DMA mask on CK804+ when the "adma"
 * module parameter allows, maps BAR5 for the SCR/ADMA registers,
 * enables the SATA register space on CK804+, and registers the host.
 * (Fix: "&regval" had been mangled into the "(R)" mojibake character
 * in the config-read call below.) */
1409 static int nv_init_one (struct pci_dev *pdev, const struct pci_device_id *ent)
1411 static int printed_version = 0;
1412 struct ata_port_info *ppi[2];
1413 struct ata_probe_ent *probe_ent;
1414 struct nv_host_priv *hpriv;
1418 unsigned long type = ent->driver_data;
1421 // Make sure this is a SATA controller by counting the number of bars
1422 // (NVIDIA SATA controllers will always have six bars). Otherwise,
1423 // it's an IDE controller and we ignore it.
1424 for (bar=0; bar<6; bar++)
1425 if (pci_resource_start(pdev, bar) == 0)
1428 if (!printed_version++)
1429 dev_printk(KERN_DEBUG, &pdev->dev, "version " DRV_VERSION "\n");
/* managed enables: resources are released automatically on detach */
1431 rc = pcim_enable_device(pdev);
1435 rc = pci_request_regions(pdev, DRV_NAME);
1437 pcim_pin_device(pdev);
/* prefer ADMA (with NCQ) on CK804/MCP04+ when the module param allows */
1441 if(type >= CK804 && adma_enabled) {
1442 dev_printk(KERN_NOTICE, &pdev->dev, "Using ADMA mode\n");
/* ADMA wants a 64-bit DMA mask; fall back below if unavailable */
1444 if(!pci_set_dma_mask(pdev, DMA_64BIT_MASK) &&
1445 !pci_set_consistent_dma_mask(pdev, DMA_64BIT_MASK))
1450 rc = pci_set_dma_mask(pdev, ATA_DMA_MASK);
1453 rc = pci_set_consistent_dma_mask(pdev, ATA_DMA_MASK);
1460 hpriv = devm_kzalloc(&pdev->dev, sizeof(*hpriv), GFP_KERNEL);
/* both ports share the same port_info for this controller type */
1464 ppi[0] = ppi[1] = &nv_port_info[type];
1465 probe_ent = ata_pci_init_native_mode(pdev, ppi, ATA_PORT_PRIMARY | ATA_PORT_SECONDARY);
1469 if (!pcim_iomap(pdev, NV_MMIO_BAR, 0))
1471 probe_ent->iomap = pcim_iomap_table(pdev);
1473 probe_ent->private_data = hpriv;
/* SCR (SStatus/SError/SControl) windows live in the MMIO BAR */
1476 base = probe_ent->iomap[NV_MMIO_BAR];
1477 probe_ent->port[0].scr_addr = base + NV_PORT0_SCR_REG_OFFSET;
1478 probe_ent->port[1].scr_addr = base + NV_PORT1_SCR_REG_OFFSET;
1480 /* enable SATA space for CK804 */
1481 if (type >= CK804) {
1484 pci_read_config_byte(pdev, NV_MCP_SATA_CFG_20, &regval);
1485 regval |= NV_MCP_SATA_CFG_20_SATA_SPACE_EN;
1486 pci_write_config_byte(pdev, NV_MCP_SATA_CFG_20, regval);
1489 pci_set_master(pdev);
1492 rc = nv_adma_host_init(probe_ent);
1497 rc = ata_device_add(probe_ent);
/* probe_ent is only needed during registration; free it now */
1501 devm_kfree(&pdev->dev, probe_ent);
/* PCI remove: delegate teardown to the generic libata PCI remover.
 * hpriv is fetched before removal; it is devm-allocated so it is freed
 * with the device. */
1505 static void nv_remove_one (struct pci_dev *pdev)
1507 struct ata_host *host = dev_get_drvdata(&pdev->dev);
1508 struct nv_host_priv *hpriv = host->private_data;
1510 ata_pci_remove_one(pdev);
/* PCI resume. After the generic resume, config-space state lost across
 * a real suspend must be reprogrammed: re-enable the SATA register
 * space on CK804+, and on ADMA controllers re-apply each port's
 * ADMA-enable bits — disabled for ports that completed ATAPI setup
 * (ATAPI runs in register mode), enabled otherwise.
 * (Fix: "&regval" had been mangled into the "(R)" mojibake character
 * in the config-read call below.) */
1514 static int nv_pci_device_resume(struct pci_dev *pdev)
1516 struct ata_host *host = dev_get_drvdata(&pdev->dev);
1517 struct nv_host_priv *hpriv = host->private_data;
1519 ata_pci_device_do_resume(pdev);
/* only needed after a true suspend, not e.g. a freeze */
1521 if (pdev->dev.power.power_state.event == PM_EVENT_SUSPEND) {
1522 if(hpriv->type >= CK804) {
1525 pci_read_config_byte(pdev, NV_MCP_SATA_CFG_20, &regval);
1526 regval |= NV_MCP_SATA_CFG_20_SATA_SPACE_EN;
1527 pci_write_config_byte(pdev, NV_MCP_SATA_CFG_20, regval);
1529 if(hpriv->type == ADMA) {
1531 struct nv_adma_port_priv *pp;
1532 /* enable/disable ADMA on the ports appropriately */
1533 pci_read_config_dword(pdev, NV_MCP_SATA_CFG_20, &tmp32);
1535 pp = host->ports[0]->private_data;
1536 if(pp->flags & NV_ADMA_ATAPI_SETUP_COMPLETE)
1537 tmp32 &= ~(NV_MCP_SATA_CFG_20_PORT0_EN |
1538 NV_MCP_SATA_CFG_20_PORT0_PWB_EN);
1540 tmp32 |= (NV_MCP_SATA_CFG_20_PORT0_EN |
1541 NV_MCP_SATA_CFG_20_PORT0_PWB_EN);
1542 pp = host->ports[1]->private_data;
1543 if(pp->flags & NV_ADMA_ATAPI_SETUP_COMPLETE)
1544 tmp32 &= ~(NV_MCP_SATA_CFG_20_PORT1_EN |
1545 NV_MCP_SATA_CFG_20_PORT1_PWB_EN);
1547 tmp32 |= (NV_MCP_SATA_CFG_20_PORT1_EN |
1548 NV_MCP_SATA_CFG_20_PORT1_PWB_EN);
1550 pci_write_config_dword(pdev, NV_MCP_SATA_CFG_20, tmp32);
1554 ata_host_resume(host);
1559 static void nv_ck804_host_stop(struct ata_host *host)
1561 struct pci_dev *pdev = to_pci_dev(host->dev);
1564 /* disable SATA space for CK804 */
1565 pci_read_config_byte(pdev, NV_MCP_SATA_CFG_20, ®val);
1566 regval &= ~NV_MCP_SATA_CFG_20_SATA_SPACE_EN;
1567 pci_write_config_byte(pdev, NV_MCP_SATA_CFG_20, regval);
/* ADMA host teardown: clear both ports' ADMA-enable and posted-write
 * bits in NV_MCP_SATA_CFG_20 (inverse of nv_adma_host_init), then run
 * the common CK804 stop to disable the SATA register space. */
1570 static void nv_adma_host_stop(struct ata_host *host)
1572 struct pci_dev *pdev = to_pci_dev(host->dev);
1575 /* disable ADMA on the ports */
1576 pci_read_config_dword(pdev, NV_MCP_SATA_CFG_20, &tmp32);
1577 tmp32 &= ~(NV_MCP_SATA_CFG_20_PORT0_EN |
1578 NV_MCP_SATA_CFG_20_PORT0_PWB_EN |
1579 NV_MCP_SATA_CFG_20_PORT1_EN |
1580 NV_MCP_SATA_CFG_20_PORT1_PWB_EN);
1582 pci_write_config_dword(pdev, NV_MCP_SATA_CFG_20, tmp32);
1584 nv_ck804_host_stop(host);
/* Module init: register the PCI driver. */
1587 static int __init nv_init(void)
1589 return pci_register_driver(&nv_pci_driver);
/* Module exit: unregister the PCI driver. */
1592 static void __exit nv_exit(void)
1594 pci_unregister_driver(&nv_pci_driver);
/* Module glue: entry/exit hooks plus the read-only "adma" parameter
 * consulted in nv_init_one to decide whether to use ADMA mode. */
1597 module_init(nv_init);
1598 module_exit(nv_exit);
1599 module_param_named(adma, adma_enabled, bool, 0444);
1600 MODULE_PARM_DESC(adma, "Enable use of ADMA (Default: true)");