/*
 * sata_nv.c - NVIDIA nForce SATA
 *
 * Copyright 2004 NVIDIA Corp.  All rights reserved.
 * Copyright 2004 Andrew Chew
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License as published by
 * the Free Software Foundation; either version 2, or (at your option)
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program; see the file COPYING.  If not, write to
 * the Free Software Foundation, 675 Mass Ave, Cambridge, MA 02139, USA.
 *
 * libata documentation is available via 'make {ps|pdf}docs',
 * as Documentation/DocBook/libata.*
 *
 * No hardware documentation available outside of NVIDIA.
 * This driver programs the NVIDIA SATA controller in a similar
 * fashion as with other PCI IDE BMDMA controllers, with a few
 * NV-specific details such as register offsets, SATA phy location,
 *
 * CK804/MCP04 controllers support an alternate programming interface
 * similar to the ADMA specification (with some modifications).
 * This allows the use of NCQ. Non-DMA-mapped ATA commands are still
 * sent through the legacy interface.
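 *
 * (In ADMA mode, commands are described by Command Parameter Blocks (CPBs)
 * and issued by writing the command's tag to the port's APPEND register;
 * see nv_adma_qc_prep() and nv_adma_qc_issue() below.)
 */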
#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/pci.h>
#include <linux/init.h>
#include <linux/blkdev.h>
#include <linux/delay.h>
#include <linux/interrupt.h>
#include <linux/device.h>
#include <scsi/scsi_host.h>
#include <scsi/scsi_device.h>
#include <linux/libata.h>

#define DRV_NAME		"sata_nv"
#define DRV_VERSION		"3.3"

#define NV_ADMA_DMA_BOUNDARY	0xffffffffUL
        NV_PORT0_SCR_REG_OFFSET = 0x00,
        NV_PORT1_SCR_REG_OFFSET = 0x40,

        /* INT_STATUS/ENABLE */
        NV_INT_STATUS_CK804 = 0x440,
        NV_INT_ENABLE_CK804 = 0x441,

        /* INT_STATUS/ENABLE bits */
        NV_INT_REMOVED = 0x08,

        NV_INT_PORT_SHIFT = 4, /* each port occupies 4 bits */

        NV_INT_MASK = NV_INT_DEV |
                      NV_INT_ADDED | NV_INT_REMOVED,

        NV_INT_CONFIG_METHD = 0x01, // 0 = INT, 1 = SMI

        // For PCI config register 20
        NV_MCP_SATA_CFG_20 = 0x50,
        NV_MCP_SATA_CFG_20_SATA_SPACE_EN = 0x04,
        NV_MCP_SATA_CFG_20_PORT0_EN = (1 << 17),
        NV_MCP_SATA_CFG_20_PORT1_EN = (1 << 16),
        NV_MCP_SATA_CFG_20_PORT0_PWB_EN = (1 << 14),
        NV_MCP_SATA_CFG_20_PORT1_PWB_EN = (1 << 12),

        NV_ADMA_MAX_CPBS = 32,
        NV_ADMA_SGTBL_LEN = (1024 - NV_ADMA_CPB_SZ) /
        NV_ADMA_SGTBL_TOTAL_LEN = NV_ADMA_SGTBL_LEN + 5,
        NV_ADMA_SGTBL_SZ = NV_ADMA_SGTBL_LEN * NV_ADMA_APRD_SZ,
        NV_ADMA_PORT_PRIV_DMA_SZ = NV_ADMA_MAX_CPBS *
                                   (NV_ADMA_CPB_SZ + NV_ADMA_SGTBL_SZ),

        /* BAR5 offset to ADMA general registers */
        NV_ADMA_GEN_CTL = 0x00,
        NV_ADMA_NOTIFIER_CLEAR = 0x30,

        /* BAR5 offset to ADMA ports */
        NV_ADMA_PORT = 0x480,

        /* size of ADMA port register space */
        NV_ADMA_PORT_SIZE = 0x100,

        /* ADMA port registers */
        NV_ADMA_CPB_COUNT = 0x42,
        NV_ADMA_NEXT_CPB_IDX = 0x43,
        NV_ADMA_CPB_BASE_LOW = 0x48,
        NV_ADMA_CPB_BASE_HIGH = 0x4C,
        NV_ADMA_APPEND = 0x50,
        NV_ADMA_NOTIFIER = 0x68,
        NV_ADMA_NOTIFIER_ERROR = 0x6C,

        /* NV_ADMA_CTL register bits */
        NV_ADMA_CTL_HOTPLUG_IEN = (1 << 0),
        NV_ADMA_CTL_CHANNEL_RESET = (1 << 5),
        NV_ADMA_CTL_GO = (1 << 7),
        NV_ADMA_CTL_AIEN = (1 << 8),
        NV_ADMA_CTL_READ_NON_COHERENT = (1 << 11),
        NV_ADMA_CTL_WRITE_NON_COHERENT = (1 << 12),

        /* CPB response flag bits */
        NV_CPB_RESP_DONE = (1 << 0),
        NV_CPB_RESP_ATA_ERR = (1 << 3),
        NV_CPB_RESP_CMD_ERR = (1 << 4),
        NV_CPB_RESP_CPB_ERR = (1 << 7),

        /* CPB control flag bits */
        NV_CPB_CTL_CPB_VALID = (1 << 0),
        NV_CPB_CTL_QUEUE = (1 << 1),
        NV_CPB_CTL_APRD_VALID = (1 << 2),
        NV_CPB_CTL_IEN = (1 << 3),
        NV_CPB_CTL_FPDMA = (1 << 4),

        NV_APRD_WRITE = (1 << 1),
        NV_APRD_END = (1 << 2),
        NV_APRD_CONT = (1 << 3),

        /* NV_ADMA_STAT flags */
        NV_ADMA_STAT_TIMEOUT = (1 << 0),
        NV_ADMA_STAT_HOTUNPLUG = (1 << 1),
        NV_ADMA_STAT_HOTPLUG = (1 << 2),
        NV_ADMA_STAT_CPBERR = (1 << 4),
        NV_ADMA_STAT_SERROR = (1 << 5),
        NV_ADMA_STAT_CMD_COMPLETE = (1 << 6),
        NV_ADMA_STAT_IDLE = (1 << 8),
        NV_ADMA_STAT_LEGACY = (1 << 9),
        NV_ADMA_STAT_STOPPED = (1 << 10),
        NV_ADMA_STAT_DONE = (1 << 12),
        NV_ADMA_STAT_ERR = NV_ADMA_STAT_CPBERR |
                           NV_ADMA_STAT_TIMEOUT,

        NV_ADMA_PORT_REGISTER_MODE = (1 << 0),
        NV_ADMA_ATAPI_SETUP_COMPLETE = (1 << 1),
/* ADMA Physical Region Descriptor - one SG segment */

enum nv_adma_regbits {
        CMDEND = (1 << 15), /* end of command list */
        WNB = (1 << 14), /* wait-not-BSY */
        IGN = (1 << 13), /* ignore this entry */
        CS1n = (1 << (4 + 8)), /* std. PATA signals follow... */
        DA2 = (1 << (2 + 8)),
        DA1 = (1 << (1 + 8)),
        DA0 = (1 << (0 + 8)),

/* ADMA Command Parameter Block
   The first 5 SG segments are stored inside the Command Parameter Block itself.
   If there are more than 5 segments the remainder are stored in a separate
   memory area indicated by next_aprd. */
        u8 resp_flags;              /* 0 */
        u8 reserved1;               /* 1 */
        u8 ctl_flags;               /* 2 */
        /* len is length of taskfile in 64 bit words */
        u8 next_cpb_idx;            /* 5 */
        __le16 reserved2;           /* 6-7 */
        __le16 tf[12];              /* 8-31 */
        struct nv_adma_prd aprd[5]; /* 32-111 */
        __le64 next_aprd;           /* 112-119 */
        __le64 reserved3;           /* 120-127 */
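        /* (Offsets 0-127 above: each CPB is one 128-byte block, one per command tag.) */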
struct nv_adma_port_priv {
        struct nv_adma_cpb *cpb;
        struct nv_adma_prd *aprd;
        void __iomem * ctl_block;
        void __iomem * gen_block;
        void __iomem * notifier_clear_block;

struct nv_host_priv {

#define NV_ADMA_CHECK_INTR(GCTL, PORT) ((GCTL) & ( 1 << (19 + (12 * (PORT)))))
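/* The ADMA general control/status word carries one interrupt-pending bit per
 * port at bit (19 + 12 * port): bit 19 for port 0 and bit 31 for port 1,
 * which is what NV_ADMA_CHECK_INTR() tests. */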
static int nv_init_one (struct pci_dev *pdev, const struct pci_device_id *ent);
static void nv_remove_one (struct pci_dev *pdev);
static int nv_pci_device_resume(struct pci_dev *pdev);
static void nv_ck804_host_stop(struct ata_host *host);
static irqreturn_t nv_generic_interrupt(int irq, void *dev_instance);
static irqreturn_t nv_nf2_interrupt(int irq, void *dev_instance);
static irqreturn_t nv_ck804_interrupt(int irq, void *dev_instance);
static u32 nv_scr_read (struct ata_port *ap, unsigned int sc_reg);
static void nv_scr_write (struct ata_port *ap, unsigned int sc_reg, u32 val);

static void nv_nf2_freeze(struct ata_port *ap);
static void nv_nf2_thaw(struct ata_port *ap);
static void nv_ck804_freeze(struct ata_port *ap);
static void nv_ck804_thaw(struct ata_port *ap);
static void nv_error_handler(struct ata_port *ap);
static int nv_adma_slave_config(struct scsi_device *sdev);
static int nv_adma_check_atapi_dma(struct ata_queued_cmd *qc);
static void nv_adma_qc_prep(struct ata_queued_cmd *qc);
static unsigned int nv_adma_qc_issue(struct ata_queued_cmd *qc);
static irqreturn_t nv_adma_interrupt(int irq, void *dev_instance);
static void nv_adma_irq_clear(struct ata_port *ap);
static int nv_adma_port_start(struct ata_port *ap);
static void nv_adma_port_stop(struct ata_port *ap);
static int nv_adma_port_suspend(struct ata_port *ap, pm_message_t mesg);
static int nv_adma_port_resume(struct ata_port *ap);
static void nv_adma_error_handler(struct ata_port *ap);
static void nv_adma_host_stop(struct ata_host *host);
static void nv_adma_bmdma_setup(struct ata_queued_cmd *qc);
static void nv_adma_bmdma_start(struct ata_queued_cmd *qc);
static void nv_adma_bmdma_stop(struct ata_queued_cmd *qc);
static u8 nv_adma_bmdma_status(struct ata_port *ap);
        NFORCE3 = NFORCE2, /* NF2 == NF3 as far as sata_nv is concerned */

static const struct pci_device_id nv_pci_tbl[] = {
        { PCI_VDEVICE(NVIDIA, PCI_DEVICE_ID_NVIDIA_NFORCE2S_SATA), NFORCE2 },
        { PCI_VDEVICE(NVIDIA, PCI_DEVICE_ID_NVIDIA_NFORCE3S_SATA), NFORCE3 },
        { PCI_VDEVICE(NVIDIA, PCI_DEVICE_ID_NVIDIA_NFORCE3S_SATA2), NFORCE3 },
        { PCI_VDEVICE(NVIDIA, PCI_DEVICE_ID_NVIDIA_NFORCE_CK804_SATA), CK804 },
        { PCI_VDEVICE(NVIDIA, PCI_DEVICE_ID_NVIDIA_NFORCE_CK804_SATA2), CK804 },
        { PCI_VDEVICE(NVIDIA, PCI_DEVICE_ID_NVIDIA_NFORCE_MCP04_SATA), CK804 },
        { PCI_VDEVICE(NVIDIA, PCI_DEVICE_ID_NVIDIA_NFORCE_MCP04_SATA2), CK804 },
        { PCI_VDEVICE(NVIDIA, PCI_DEVICE_ID_NVIDIA_NFORCE_MCP51_SATA), GENERIC },
        { PCI_VDEVICE(NVIDIA, PCI_DEVICE_ID_NVIDIA_NFORCE_MCP51_SATA2), GENERIC },
        { PCI_VDEVICE(NVIDIA, PCI_DEVICE_ID_NVIDIA_NFORCE_MCP55_SATA), GENERIC },
        { PCI_VDEVICE(NVIDIA, PCI_DEVICE_ID_NVIDIA_NFORCE_MCP55_SATA2), GENERIC },
        { PCI_VDEVICE(NVIDIA, PCI_DEVICE_ID_NVIDIA_NFORCE_MCP61_SATA), GENERIC },
        { PCI_VDEVICE(NVIDIA, PCI_DEVICE_ID_NVIDIA_NFORCE_MCP61_SATA2), GENERIC },
        { PCI_VDEVICE(NVIDIA, PCI_DEVICE_ID_NVIDIA_NFORCE_MCP61_SATA3), GENERIC },
        { PCI_VENDOR_ID_NVIDIA, PCI_ANY_ID,
                PCI_ANY_ID, PCI_ANY_ID,
                PCI_CLASS_STORAGE_IDE<<8, 0xffff00, GENERIC },
        { PCI_VENDOR_ID_NVIDIA, PCI_ANY_ID,
                PCI_ANY_ID, PCI_ANY_ID,
                PCI_CLASS_STORAGE_RAID<<8, 0xffff00, GENERIC },

        { } /* terminate list */
static struct pci_driver nv_pci_driver = {
        .id_table = nv_pci_tbl,
        .probe = nv_init_one,
        .suspend = ata_pci_device_suspend,
        .resume = nv_pci_device_resume,
        .remove = nv_remove_one,

static struct scsi_host_template nv_sht = {
        .module = THIS_MODULE,
        .ioctl = ata_scsi_ioctl,
        .queuecommand = ata_scsi_queuecmd,
        .can_queue = ATA_DEF_QUEUE,
        .this_id = ATA_SHT_THIS_ID,
        .sg_tablesize = LIBATA_MAX_PRD,
        .cmd_per_lun = ATA_SHT_CMD_PER_LUN,
        .emulated = ATA_SHT_EMULATED,
        .use_clustering = ATA_SHT_USE_CLUSTERING,
        .proc_name = DRV_NAME,
        .dma_boundary = ATA_DMA_BOUNDARY,
        .slave_configure = ata_scsi_slave_config,
        .slave_destroy = ata_scsi_slave_destroy,
        .bios_param = ata_std_bios_param,
        .suspend = ata_scsi_device_suspend,
        .resume = ata_scsi_device_resume,

static struct scsi_host_template nv_adma_sht = {
        .module = THIS_MODULE,
        .ioctl = ata_scsi_ioctl,
        .queuecommand = ata_scsi_queuecmd,
        .can_queue = NV_ADMA_MAX_CPBS,
        .this_id = ATA_SHT_THIS_ID,
        .sg_tablesize = NV_ADMA_SGTBL_TOTAL_LEN,
        .cmd_per_lun = ATA_SHT_CMD_PER_LUN,
        .emulated = ATA_SHT_EMULATED,
        .use_clustering = ATA_SHT_USE_CLUSTERING,
        .proc_name = DRV_NAME,
        .dma_boundary = NV_ADMA_DMA_BOUNDARY,
        .slave_configure = nv_adma_slave_config,
        .slave_destroy = ata_scsi_slave_destroy,
        .bios_param = ata_std_bios_param,
        .suspend = ata_scsi_device_suspend,
        .resume = ata_scsi_device_resume,
static const struct ata_port_operations nv_generic_ops = {
        .port_disable = ata_port_disable,
        .tf_load = ata_tf_load,
        .tf_read = ata_tf_read,
        .exec_command = ata_exec_command,
        .check_status = ata_check_status,
        .dev_select = ata_std_dev_select,
        .bmdma_setup = ata_bmdma_setup,
        .bmdma_start = ata_bmdma_start,
        .bmdma_stop = ata_bmdma_stop,
        .bmdma_status = ata_bmdma_status,
        .qc_prep = ata_qc_prep,
        .qc_issue = ata_qc_issue_prot,
        .freeze = ata_bmdma_freeze,
        .thaw = ata_bmdma_thaw,
        .error_handler = nv_error_handler,
        .post_internal_cmd = ata_bmdma_post_internal_cmd,
        .data_xfer = ata_data_xfer,
        .irq_handler = nv_generic_interrupt,
        .irq_clear = ata_bmdma_irq_clear,
        .irq_on = ata_irq_on,
        .irq_ack = ata_irq_ack,
        .scr_read = nv_scr_read,
        .scr_write = nv_scr_write,
        .port_start = ata_port_start,

static const struct ata_port_operations nv_nf2_ops = {
        .port_disable = ata_port_disable,
        .tf_load = ata_tf_load,
        .tf_read = ata_tf_read,
        .exec_command = ata_exec_command,
        .check_status = ata_check_status,
        .dev_select = ata_std_dev_select,
        .bmdma_setup = ata_bmdma_setup,
        .bmdma_start = ata_bmdma_start,
        .bmdma_stop = ata_bmdma_stop,
        .bmdma_status = ata_bmdma_status,
        .qc_prep = ata_qc_prep,
        .qc_issue = ata_qc_issue_prot,
        .freeze = nv_nf2_freeze,
        .error_handler = nv_error_handler,
        .post_internal_cmd = ata_bmdma_post_internal_cmd,
        .data_xfer = ata_data_xfer,
        .irq_handler = nv_nf2_interrupt,
        .irq_clear = ata_bmdma_irq_clear,
        .irq_on = ata_irq_on,
        .irq_ack = ata_irq_ack,
        .scr_read = nv_scr_read,
        .scr_write = nv_scr_write,
        .port_start = ata_port_start,

static const struct ata_port_operations nv_ck804_ops = {
        .port_disable = ata_port_disable,
        .tf_load = ata_tf_load,
        .tf_read = ata_tf_read,
        .exec_command = ata_exec_command,
        .check_status = ata_check_status,
        .dev_select = ata_std_dev_select,
        .bmdma_setup = ata_bmdma_setup,
        .bmdma_start = ata_bmdma_start,
        .bmdma_stop = ata_bmdma_stop,
        .bmdma_status = ata_bmdma_status,
        .qc_prep = ata_qc_prep,
        .qc_issue = ata_qc_issue_prot,
        .freeze = nv_ck804_freeze,
        .thaw = nv_ck804_thaw,
        .error_handler = nv_error_handler,
        .post_internal_cmd = ata_bmdma_post_internal_cmd,
        .data_xfer = ata_data_xfer,
        .irq_handler = nv_ck804_interrupt,
        .irq_clear = ata_bmdma_irq_clear,
        .irq_on = ata_irq_on,
        .irq_ack = ata_irq_ack,
        .scr_read = nv_scr_read,
        .scr_write = nv_scr_write,
        .port_start = ata_port_start,
        .host_stop = nv_ck804_host_stop,

static const struct ata_port_operations nv_adma_ops = {
        .port_disable = ata_port_disable,
        .tf_load = ata_tf_load,
        .tf_read = ata_tf_read,
        .check_atapi_dma = nv_adma_check_atapi_dma,
        .exec_command = ata_exec_command,
        .check_status = ata_check_status,
        .dev_select = ata_std_dev_select,
        .bmdma_setup = nv_adma_bmdma_setup,
        .bmdma_start = nv_adma_bmdma_start,
        .bmdma_stop = nv_adma_bmdma_stop,
        .bmdma_status = nv_adma_bmdma_status,
        .qc_prep = nv_adma_qc_prep,
        .qc_issue = nv_adma_qc_issue,
        .freeze = nv_ck804_freeze,
        .thaw = nv_ck804_thaw,
        .error_handler = nv_adma_error_handler,
        .post_internal_cmd = nv_adma_bmdma_stop,
        .data_xfer = ata_data_xfer,
        .irq_handler = nv_adma_interrupt,
        .irq_clear = nv_adma_irq_clear,
        .irq_on = ata_irq_on,
        .irq_ack = ata_irq_ack,
        .scr_read = nv_scr_read,
        .scr_write = nv_scr_write,
        .port_start = nv_adma_port_start,
        .port_stop = nv_adma_port_stop,
        .port_suspend = nv_adma_port_suspend,
        .port_resume = nv_adma_port_resume,
        .host_stop = nv_adma_host_stop,
static struct ata_port_info nv_port_info[] = {
                .flags = ATA_FLAG_SATA | ATA_FLAG_NO_LEGACY |
                         ATA_FLAG_HRST_TO_RESUME,
                .pio_mask = NV_PIO_MASK,
                .mwdma_mask = NV_MWDMA_MASK,
                .udma_mask = NV_UDMA_MASK,
                .port_ops = &nv_generic_ops,

                .flags = ATA_FLAG_SATA | ATA_FLAG_NO_LEGACY |
                         ATA_FLAG_HRST_TO_RESUME,
                .pio_mask = NV_PIO_MASK,
                .mwdma_mask = NV_MWDMA_MASK,
                .udma_mask = NV_UDMA_MASK,
                .port_ops = &nv_nf2_ops,

                .flags = ATA_FLAG_SATA | ATA_FLAG_NO_LEGACY |
                         ATA_FLAG_HRST_TO_RESUME,
                .pio_mask = NV_PIO_MASK,
                .mwdma_mask = NV_MWDMA_MASK,
                .udma_mask = NV_UDMA_MASK,
                .port_ops = &nv_ck804_ops,

                .flags = ATA_FLAG_SATA | ATA_FLAG_NO_LEGACY |
                         ATA_FLAG_HRST_TO_RESUME |
                         ATA_FLAG_MMIO | ATA_FLAG_NCQ,
                .pio_mask = NV_PIO_MASK,
                .mwdma_mask = NV_MWDMA_MASK,
                .udma_mask = NV_UDMA_MASK,
                .port_ops = &nv_adma_ops,

MODULE_AUTHOR("NVIDIA");
MODULE_DESCRIPTION("low-level driver for NVIDIA nForce SATA controller");
MODULE_LICENSE("GPL");
MODULE_DEVICE_TABLE(pci, nv_pci_tbl);
MODULE_VERSION(DRV_VERSION);
static int adma_enabled = 1;
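/* The controller has two command modes: legacy "register" mode, where commands
 * go through the normal taskfile/BMDMA interface, and ADMA mode, where commands
 * are queued as CPBs (which is what makes NCQ possible).  nv_adma_register_mode()
 * and nv_adma_mode() below switch a port between the two by clearing or setting
 * NV_ADMA_CTL_GO. */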
static void nv_adma_register_mode(struct ata_port *ap)
        struct nv_adma_port_priv *pp = ap->private_data;
        void __iomem *mmio = pp->ctl_block;

        if (pp->flags & NV_ADMA_PORT_REGISTER_MODE)

        tmp = readw(mmio + NV_ADMA_CTL);
        writew(tmp & ~NV_ADMA_CTL_GO, mmio + NV_ADMA_CTL);

        pp->flags |= NV_ADMA_PORT_REGISTER_MODE;

static void nv_adma_mode(struct ata_port *ap)
        struct nv_adma_port_priv *pp = ap->private_data;
        void __iomem *mmio = pp->ctl_block;

        if (!(pp->flags & NV_ADMA_PORT_REGISTER_MODE))

        WARN_ON(pp->flags & NV_ADMA_ATAPI_SETUP_COMPLETE);

        tmp = readw(mmio + NV_ADMA_CTL);
        writew(tmp | NV_ADMA_CTL_GO, mmio + NV_ADMA_CTL);

        pp->flags &= ~NV_ADMA_PORT_REGISTER_MODE;

static int nv_adma_slave_config(struct scsi_device *sdev)
        struct ata_port *ap = ata_shost_to_port(sdev->host);
        struct nv_adma_port_priv *pp = ap->private_data;
        struct pci_dev *pdev = to_pci_dev(ap->host->dev);
        unsigned long segment_boundary;
        unsigned short sg_tablesize;
        u32 current_reg, new_reg, config_mask;

        rc = ata_scsi_slave_config(sdev);

        if (sdev->id >= ATA_MAX_DEVICES || sdev->channel || sdev->lun)
                /* Not a proper libata device, ignore */

        if (ap->device[sdev->id].class == ATA_DEV_ATAPI) {
                /*
                 * NVIDIA reports that ADMA mode does not support ATAPI commands.
                 * Therefore ATAPI commands are sent through the legacy interface.
                 * However, the legacy interface only supports 32-bit DMA.
                 * Restrict DMA parameters as required by the legacy interface
                 * when an ATAPI device is connected.
                 */
                bounce_limit = ATA_DMA_MASK;
                segment_boundary = ATA_DMA_BOUNDARY;
                /* Subtract 1 since an extra entry may be needed for padding, see
                   libata-scsi.c */
                sg_tablesize = LIBATA_MAX_PRD - 1;

                /* Since the legacy DMA engine is in use, we need to disable ADMA
                   on the port. */
                nv_adma_register_mode(ap);

                bounce_limit = *ap->dev->dma_mask;
                segment_boundary = NV_ADMA_DMA_BOUNDARY;
                sg_tablesize = NV_ADMA_SGTBL_TOTAL_LEN;

        pci_read_config_dword(pdev, NV_MCP_SATA_CFG_20, &current_reg);

                config_mask = NV_MCP_SATA_CFG_20_PORT1_EN |
                              NV_MCP_SATA_CFG_20_PORT1_PWB_EN;

                config_mask = NV_MCP_SATA_CFG_20_PORT0_EN |
                              NV_MCP_SATA_CFG_20_PORT0_PWB_EN;

                new_reg = current_reg | config_mask;
                pp->flags &= ~NV_ADMA_ATAPI_SETUP_COMPLETE;

                new_reg = current_reg & ~config_mask;
                pp->flags |= NV_ADMA_ATAPI_SETUP_COMPLETE;

        if(current_reg != new_reg)
                pci_write_config_dword(pdev, NV_MCP_SATA_CFG_20, new_reg);

        blk_queue_bounce_limit(sdev->request_queue, bounce_limit);
        blk_queue_segment_boundary(sdev->request_queue, segment_boundary);
        blk_queue_max_hw_segments(sdev->request_queue, sg_tablesize);
        ata_port_printk(ap, KERN_INFO,
                "bounce limit 0x%llX, segment boundary 0x%lX, hw segs %hu\n",
                (unsigned long long)bounce_limit, segment_boundary, sg_tablesize);

static int nv_adma_check_atapi_dma(struct ata_queued_cmd *qc)
        struct nv_adma_port_priv *pp = qc->ap->private_data;
        return !(pp->flags & NV_ADMA_ATAPI_SETUP_COMPLETE);
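/* nv_adma_tf_to_cpb() translates a libata taskfile into the CPB's 16-bit
 * register-write entries: each entry carries the register value in the low
 * byte, the register address (ATA_REG_* << 8, i.e. the DA0-DA2/CS1n bits) in
 * bits 8-12, and control flags (WNB, IGN, CMDEND) in the top bits. */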
static unsigned int nv_adma_tf_to_cpb(struct ata_taskfile *tf, __le16 *cpb)
        unsigned int idx = 0;

        cpb[idx++] = cpu_to_le16((ATA_REG_DEVICE << 8) | tf->device | WNB);

        if ((tf->flags & ATA_TFLAG_LBA48) == 0) {
                cpb[idx++] = cpu_to_le16(IGN);
                cpb[idx++] = cpu_to_le16(IGN);
                cpb[idx++] = cpu_to_le16(IGN);
                cpb[idx++] = cpu_to_le16(IGN);
                cpb[idx++] = cpu_to_le16(IGN);

                cpb[idx++] = cpu_to_le16((ATA_REG_ERR << 8) | tf->hob_feature);
                cpb[idx++] = cpu_to_le16((ATA_REG_NSECT << 8) | tf->hob_nsect);
                cpb[idx++] = cpu_to_le16((ATA_REG_LBAL << 8) | tf->hob_lbal);
                cpb[idx++] = cpu_to_le16((ATA_REG_LBAM << 8) | tf->hob_lbam);
                cpb[idx++] = cpu_to_le16((ATA_REG_LBAH << 8) | tf->hob_lbah);

        cpb[idx++] = cpu_to_le16((ATA_REG_ERR << 8) | tf->feature);
        cpb[idx++] = cpu_to_le16((ATA_REG_NSECT << 8) | tf->nsect);
        cpb[idx++] = cpu_to_le16((ATA_REG_LBAL << 8) | tf->lbal);
        cpb[idx++] = cpu_to_le16((ATA_REG_LBAM << 8) | tf->lbam);
        cpb[idx++] = cpu_to_le16((ATA_REG_LBAH << 8) | tf->lbah);

        cpb[idx++] = cpu_to_le16((ATA_REG_CMD << 8) | tf->command | CMDEND);
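/* nv_adma_check_cpb() examines one CPB's response flags, logs any error bits
 * and, when the command has completed (or a global error forces completion),
 * finishes the matching libata queued command. */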
static void nv_adma_check_cpb(struct ata_port *ap, int cpb_num, int force_err)
        struct nv_adma_port_priv *pp = ap->private_data;
        int complete = 0, have_err = 0;
        u8 flags = pp->cpb[cpb_num].resp_flags;

        VPRINTK("CPB %d, flags=0x%x\n", cpb_num, flags);

        if (flags & NV_CPB_RESP_DONE) {
                VPRINTK("CPB flags done, flags=0x%x\n", flags);

        if (flags & NV_CPB_RESP_ATA_ERR) {
                ata_port_printk(ap, KERN_ERR, "CPB flags ATA err, flags=0x%x\n", flags);

        if (flags & NV_CPB_RESP_CMD_ERR) {
                ata_port_printk(ap, KERN_ERR, "CPB flags CMD err, flags=0x%x\n", flags);

        if (flags & NV_CPB_RESP_CPB_ERR) {
                ata_port_printk(ap, KERN_ERR, "CPB flags CPB err, flags=0x%x\n", flags);

        if(complete || force_err)
                struct ata_queued_cmd *qc = ata_qc_from_tag(ap, cpb_num);

                        /* Only use the ATA port status for non-NCQ commands.
                           For NCQ commands the current status may have nothing to do with
                           the command just completed. */
                        if(qc->tf.protocol != ATA_PROT_NCQ)
                                ata_status = readb(pp->ctl_block + (ATA_REG_STATUS * 4));

                        if(have_err || force_err)
                                ata_status |= ATA_ERR;

                        qc->err_mask |= ac_err_mask(ata_status);
                        DPRINTK("Completing qc from tag %d with err_mask %u\n",cpb_num,

static int nv_host_intr(struct ata_port *ap, u8 irq_stat)
        struct ata_queued_cmd *qc = ata_qc_from_tag(ap, ap->active_tag);

        /* freeze if hotplugged */
        if (unlikely(irq_stat & (NV_INT_ADDED | NV_INT_REMOVED))) {

        /* bail out if not our interrupt */
        if (!(irq_stat & NV_INT_DEV))

        /* DEV interrupt w/ no active qc? */
        if (unlikely(!qc || (qc->tf.flags & ATA_TFLAG_POLLING))) {
                ata_check_status(ap);

        /* handle interrupt */
        return ata_host_intr(ap, qc);
static irqreturn_t nv_adma_interrupt(int irq, void *dev_instance)
        struct ata_host *host = dev_instance;
        u32 notifier_clears[2];

        spin_lock(&host->lock);

        for (i = 0; i < host->n_ports; i++) {
                struct ata_port *ap = host->ports[i];
                notifier_clears[i] = 0;

                if (ap && !(ap->flags & ATA_FLAG_DISABLED)) {
                        struct nv_adma_port_priv *pp = ap->private_data;
                        void __iomem *mmio = pp->ctl_block;
                        int have_global_err = 0;
                        u32 notifier, notifier_error;

                        /* if in ATA register mode, use standard ata interrupt handler */
                        if (pp->flags & NV_ADMA_PORT_REGISTER_MODE) {
                                u8 irq_stat = readb(host->iomap[NV_MMIO_BAR] + NV_INT_STATUS_CK804)
                                        >> (NV_INT_PORT_SHIFT * i);
                                if(ata_tag_valid(ap->active_tag))
                                        /** NV_INT_DEV indication seems unreliable at times
                                            at least in ADMA mode. Force it on always when a
                                            command is active, to prevent losing interrupts. */
                                        irq_stat |= NV_INT_DEV;
                                handled += nv_host_intr(ap, irq_stat);

                        notifier = readl(mmio + NV_ADMA_NOTIFIER);
                        notifier_error = readl(mmio + NV_ADMA_NOTIFIER_ERROR);
                        notifier_clears[i] = notifier | notifier_error;

                        gen_ctl = readl(pp->gen_block + NV_ADMA_GEN_CTL);

                        if( !NV_ADMA_CHECK_INTR(gen_ctl, ap->port_no) && !notifier &&

                        status = readw(mmio + NV_ADMA_STAT);

                        /* Clear status. Ensure the controller sees the clearing before we start
                           looking at any of the CPB statuses, so that any CPB completions after
                           this point in the handler will raise another interrupt. */
                        writew(status, mmio + NV_ADMA_STAT);
                        readw(mmio + NV_ADMA_STAT); /* flush posted write */

                        /* freeze if hotplugged */
                        if (unlikely(status & (NV_ADMA_STAT_HOTPLUG | NV_ADMA_STAT_HOTUNPLUG))) {
                                ata_port_printk(ap, KERN_NOTICE, "Hotplug event, freezing\n");

                        if (status & NV_ADMA_STAT_TIMEOUT) {
                                ata_port_printk(ap, KERN_ERR, "timeout, stat=0x%x\n", status);

                        if (status & NV_ADMA_STAT_CPBERR) {
                                ata_port_printk(ap, KERN_ERR, "CPB error, stat=0x%x\n", status);

                        if ((status & NV_ADMA_STAT_DONE) || have_global_err) {
                                /** Check CPBs for completed commands */

                                if(ata_tag_valid(ap->active_tag))
                                        /* Non-NCQ command */
                                        nv_adma_check_cpb(ap, ap->active_tag, have_global_err ||
                                                (notifier_error & (1 << ap->active_tag)));

                                        u32 active = ap->sactive;
                                        while( (pos = ffs(active)) ) {
                                                nv_adma_check_cpb(ap, pos, have_global_err ||
                                                        (notifier_error & (1 << pos)) );
                                                active &= ~(1 << pos );

                        handled++; /* irq handled if we got here */

        if(notifier_clears[0] || notifier_clears[1]) {
                /* Note: Both notifier clear registers must be written
                   if either is set, even if one is zero, according to NVIDIA. */
                struct nv_adma_port_priv *pp = host->ports[0]->private_data;
                writel(notifier_clears[0], pp->notifier_clear_block);
                pp = host->ports[1]->private_data;
                writel(notifier_clears[1], pp->notifier_clear_block);

        spin_unlock(&host->lock);

        return IRQ_RETVAL(handled);

static void nv_adma_irq_clear(struct ata_port *ap)
        struct nv_adma_port_priv *pp = ap->private_data;
        void __iomem *mmio = pp->ctl_block;
        u16 status = readw(mmio + NV_ADMA_STAT);
        u32 notifier = readl(mmio + NV_ADMA_NOTIFIER);
        u32 notifier_error = readl(mmio + NV_ADMA_NOTIFIER_ERROR);
        void __iomem *dma_stat_addr = ap->ioaddr.bmdma_addr + ATA_DMA_STATUS;

        /* clear ADMA status */
        writew(status, mmio + NV_ADMA_STAT);
        writel(notifier | notifier_error,
                pp->notifier_clear_block);

        /** clear legacy status */
        iowrite8(ioread8(dma_stat_addr), dma_stat_addr);
static void nv_adma_bmdma_setup(struct ata_queued_cmd *qc)
        struct ata_port *ap = qc->ap;
        unsigned int rw = (qc->tf.flags & ATA_TFLAG_WRITE);
        struct nv_adma_port_priv *pp = ap->private_data;

        if(!(pp->flags & NV_ADMA_PORT_REGISTER_MODE)) {

        /* load PRD table addr. */
        iowrite32(ap->prd_dma, ap->ioaddr.bmdma_addr + ATA_DMA_TABLE_OFS);

        /* specify data direction, triple-check start bit is clear */
        dmactl = ioread8(ap->ioaddr.bmdma_addr + ATA_DMA_CMD);
        dmactl &= ~(ATA_DMA_WR | ATA_DMA_START);
                dmactl |= ATA_DMA_WR;

        iowrite8(dmactl, ap->ioaddr.bmdma_addr + ATA_DMA_CMD);

        /* issue r/w command */
        ata_exec_command(ap, &qc->tf);

static void nv_adma_bmdma_start(struct ata_queued_cmd *qc)
        struct ata_port *ap = qc->ap;
        struct nv_adma_port_priv *pp = ap->private_data;

        if(!(pp->flags & NV_ADMA_PORT_REGISTER_MODE)) {

        /* start host DMA transaction */
        dmactl = ioread8(ap->ioaddr.bmdma_addr + ATA_DMA_CMD);
        iowrite8(dmactl | ATA_DMA_START,
                ap->ioaddr.bmdma_addr + ATA_DMA_CMD);

static void nv_adma_bmdma_stop(struct ata_queued_cmd *qc)
        struct ata_port *ap = qc->ap;
        struct nv_adma_port_priv *pp = ap->private_data;

        if(!(pp->flags & NV_ADMA_PORT_REGISTER_MODE))

        /* clear start/stop bit */
        iowrite8(ioread8(ap->ioaddr.bmdma_addr + ATA_DMA_CMD) & ~ATA_DMA_START,
                ap->ioaddr.bmdma_addr + ATA_DMA_CMD);

        /* one-PIO-cycle guaranteed wait, per spec, for HDMA1:0 transition */
        ata_altstatus(ap); /* dummy read */

static u8 nv_adma_bmdma_status(struct ata_port *ap)
        struct nv_adma_port_priv *pp = ap->private_data;

        WARN_ON(!(pp->flags & NV_ADMA_PORT_REGISTER_MODE));

        return ioread8(ap->ioaddr.bmdma_addr + ATA_DMA_STATUS);
static int nv_adma_port_start(struct ata_port *ap)
        struct device *dev = ap->host->dev;
        struct nv_adma_port_priv *pp;

        rc = ata_port_start(ap);

        pp = devm_kzalloc(dev, sizeof(*pp), GFP_KERNEL);

        mmio = ap->host->iomap[NV_MMIO_BAR] + NV_ADMA_PORT +
               ap->port_no * NV_ADMA_PORT_SIZE;
        pp->ctl_block = mmio;
        pp->gen_block = ap->host->iomap[NV_MMIO_BAR] + NV_ADMA_GEN;
        pp->notifier_clear_block = pp->gen_block +
               NV_ADMA_NOTIFIER_CLEAR + (4 * ap->port_no);

        mem = dmam_alloc_coherent(dev, NV_ADMA_PORT_PRIV_DMA_SZ,
                                  &mem_dma, GFP_KERNEL);
        memset(mem, 0, NV_ADMA_PORT_PRIV_DMA_SZ);

        /*
         * First item in chunk of DMA memory:
         * 128-byte command parameter block (CPB)
         * one for each command tag
         */
        pp->cpb_dma = mem_dma;

        writel(mem_dma & 0xFFFFFFFF, mmio + NV_ADMA_CPB_BASE_LOW);
        writel((mem_dma >> 16 ) >> 16, mmio + NV_ADMA_CPB_BASE_HIGH);

        mem += NV_ADMA_MAX_CPBS * NV_ADMA_CPB_SZ;
        mem_dma += NV_ADMA_MAX_CPBS * NV_ADMA_CPB_SZ;

        /*
         * Second item: block of ADMA_SGTBL_LEN s/g entries
         */
        pp->aprd_dma = mem_dma;

        ap->private_data = pp;

        /* clear any outstanding interrupt conditions */
        writew(0xffff, mmio + NV_ADMA_STAT);

        /* initialize port variables */
        pp->flags = NV_ADMA_PORT_REGISTER_MODE;

        /* clear CPB fetch count */
        writew(0, mmio + NV_ADMA_CPB_COUNT);

        /* clear GO for register mode, enable interrupt */
        tmp = readw(mmio + NV_ADMA_CTL);
        writew( (tmp & ~NV_ADMA_CTL_GO) | NV_ADMA_CTL_AIEN, mmio + NV_ADMA_CTL);

        tmp = readw(mmio + NV_ADMA_CTL);
        writew(tmp | NV_ADMA_CTL_CHANNEL_RESET, mmio + NV_ADMA_CTL);
        readl( mmio + NV_ADMA_CTL ); /* flush posted write */
        writew(tmp & ~NV_ADMA_CTL_CHANNEL_RESET, mmio + NV_ADMA_CTL);
        readl( mmio + NV_ADMA_CTL ); /* flush posted write */

static void nv_adma_port_stop(struct ata_port *ap)
        struct nv_adma_port_priv *pp = ap->private_data;
        void __iomem *mmio = pp->ctl_block;

        writew(0, mmio + NV_ADMA_CTL);

static int nv_adma_port_suspend(struct ata_port *ap, pm_message_t mesg)
        struct nv_adma_port_priv *pp = ap->private_data;
        void __iomem *mmio = pp->ctl_block;

        /* Go to register mode - clears GO */
        nv_adma_register_mode(ap);

        /* clear CPB fetch count */
        writew(0, mmio + NV_ADMA_CPB_COUNT);

        /* disable interrupt, shut down port */
        writew(0, mmio + NV_ADMA_CTL);

static int nv_adma_port_resume(struct ata_port *ap)
        struct nv_adma_port_priv *pp = ap->private_data;
        void __iomem *mmio = pp->ctl_block;

        /* set CPB block location */
        writel(pp->cpb_dma & 0xFFFFFFFF, mmio + NV_ADMA_CPB_BASE_LOW);
        writel((pp->cpb_dma >> 16 ) >> 16, mmio + NV_ADMA_CPB_BASE_HIGH);

        /* clear any outstanding interrupt conditions */
        writew(0xffff, mmio + NV_ADMA_STAT);

        /* initialize port variables */
        pp->flags |= NV_ADMA_PORT_REGISTER_MODE;

        /* clear CPB fetch count */
        writew(0, mmio + NV_ADMA_CPB_COUNT);

        /* clear GO for register mode, enable interrupt */
        tmp = readw(mmio + NV_ADMA_CTL);
        writew((tmp & ~NV_ADMA_CTL_GO) | NV_ADMA_CTL_AIEN, mmio + NV_ADMA_CTL);

        tmp = readw(mmio + NV_ADMA_CTL);
        writew(tmp | NV_ADMA_CTL_CHANNEL_RESET, mmio + NV_ADMA_CTL);
        readl( mmio + NV_ADMA_CTL ); /* flush posted write */
        writew(tmp & ~NV_ADMA_CTL_CHANNEL_RESET, mmio + NV_ADMA_CTL);
        readl( mmio + NV_ADMA_CTL ); /* flush posted write */
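/* nv_adma_setup_port() below points the libata taskfile addresses into the
 * ADMA port register block, where the shadow registers appear at 4-byte
 * strides (hence the ATA_REG_* * 4 offsets) and the control/altstatus
 * register at offset 0x20. */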
static void nv_adma_setup_port(struct ata_probe_ent *probe_ent, unsigned int port)
        void __iomem *mmio = probe_ent->iomap[NV_MMIO_BAR];
        struct ata_ioports *ioport = &probe_ent->port[port];

        mmio += NV_ADMA_PORT + port * NV_ADMA_PORT_SIZE;

        ioport->cmd_addr = mmio;
        ioport->data_addr = mmio + (ATA_REG_DATA * 4);
        ioport->error_addr =
        ioport->feature_addr = mmio + (ATA_REG_ERR * 4);
        ioport->nsect_addr = mmio + (ATA_REG_NSECT * 4);
        ioport->lbal_addr = mmio + (ATA_REG_LBAL * 4);
        ioport->lbam_addr = mmio + (ATA_REG_LBAM * 4);
        ioport->lbah_addr = mmio + (ATA_REG_LBAH * 4);
        ioport->device_addr = mmio + (ATA_REG_DEVICE * 4);
        ioport->status_addr =
        ioport->command_addr = mmio + (ATA_REG_STATUS * 4);
        ioport->altstatus_addr =
        ioport->ctl_addr = mmio + 0x20;

static int nv_adma_host_init(struct ata_probe_ent *probe_ent)
        struct pci_dev *pdev = to_pci_dev(probe_ent->dev);

        /* enable ADMA on the ports */
        pci_read_config_dword(pdev, NV_MCP_SATA_CFG_20, &tmp32);
        tmp32 |= NV_MCP_SATA_CFG_20_PORT0_EN |
                 NV_MCP_SATA_CFG_20_PORT0_PWB_EN |
                 NV_MCP_SATA_CFG_20_PORT1_EN |
                 NV_MCP_SATA_CFG_20_PORT1_PWB_EN;

        pci_write_config_dword(pdev, NV_MCP_SATA_CFG_20, tmp32);

        for (i = 0; i < probe_ent->n_ports; i++)
                nv_adma_setup_port(probe_ent, i);
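/* Each scatter/gather element becomes one ADMA Physical Region Descriptor:
 * NV_APRD_WRITE marks the transfer direction, NV_APRD_END flags the final
 * entry, and NV_APRD_CONT links intermediate entries. */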
static void nv_adma_fill_aprd(struct ata_queued_cmd *qc,
                              struct scatterlist *sg,
                              struct nv_adma_prd *aprd)
        memset(aprd, 0, sizeof(struct nv_adma_prd));

        if (qc->tf.flags & ATA_TFLAG_WRITE)
                flags |= NV_APRD_WRITE;
        if (idx == qc->n_elem - 1)
                flags |= NV_APRD_END;
                flags |= NV_APRD_CONT;

        aprd->addr = cpu_to_le64(((u64)sg_dma_address(sg)));
        aprd->len = cpu_to_le32(((u32)sg_dma_len(sg))); /* len in bytes */
        aprd->flags = flags;

static void nv_adma_fill_sg(struct ata_queued_cmd *qc, struct nv_adma_cpb *cpb)
        struct nv_adma_port_priv *pp = qc->ap->private_data;
        struct nv_adma_prd *aprd;
        struct scatterlist *sg;

        ata_for_each_sg(sg, qc) {
                aprd = (idx < 5) ? &cpb->aprd[idx] : &pp->aprd[NV_ADMA_SGTBL_LEN * qc->tag + (idx-5)];
                nv_adma_fill_aprd(qc, sg, idx, aprd);

        cpb->next_aprd = cpu_to_le64(((u64)(pp->aprd_dma + NV_ADMA_SGTBL_SZ * qc->tag)));

static void nv_adma_qc_prep(struct ata_queued_cmd *qc)
        struct nv_adma_port_priv *pp = qc->ap->private_data;
        struct nv_adma_cpb *cpb = &pp->cpb[qc->tag];
        u8 ctl_flags = NV_CPB_CTL_CPB_VALID |
                       NV_CPB_CTL_APRD_VALID |

        if (!(qc->flags & ATA_QCFLAG_DMAMAP) ||
            (pp->flags & NV_ADMA_ATAPI_SETUP_COMPLETE)) {
                nv_adma_register_mode(qc->ap);

        memset(cpb, 0, sizeof(struct nv_adma_cpb));

        cpb->next_cpb_idx = 0;

        /* turn on NCQ flags for NCQ commands */
        if (qc->tf.protocol == ATA_PROT_NCQ)
                ctl_flags |= NV_CPB_CTL_QUEUE | NV_CPB_CTL_FPDMA;

        VPRINTK("qc->flags = 0x%lx\n", qc->flags);

        nv_adma_tf_to_cpb(&qc->tf, cpb->tf);

        nv_adma_fill_sg(qc, cpb);

        /* Be paranoid and don't let the device see NV_CPB_CTL_CPB_VALID until we are
           finished filling in all of the contents */
        cpb->ctl_flags = ctl_flags;

static unsigned int nv_adma_qc_issue(struct ata_queued_cmd *qc)
        struct nv_adma_port_priv *pp = qc->ap->private_data;
        void __iomem *mmio = pp->ctl_block;

        if (!(qc->flags & ATA_QCFLAG_DMAMAP) ||
            (pp->flags & NV_ADMA_ATAPI_SETUP_COMPLETE)) {
                /* use ATA register mode */
                VPRINTK("no dmamap or ATAPI, using ATA register mode: 0x%lx\n", qc->flags);
                nv_adma_register_mode(qc->ap);
                return ata_qc_issue_prot(qc);
                nv_adma_mode(qc->ap);

        /* write append register, command tag in lower 8 bits
           and (number of cpbs to append -1) in top 8 bits */
        writew(qc->tag, mmio + NV_ADMA_APPEND);

        DPRINTK("Issued tag %u\n",qc->tag);
static irqreturn_t nv_generic_interrupt(int irq, void *dev_instance)
        struct ata_host *host = dev_instance;
        unsigned int handled = 0;
        unsigned long flags;

        spin_lock_irqsave(&host->lock, flags);

        for (i = 0; i < host->n_ports; i++) {
                struct ata_port *ap;

                ap = host->ports[i];
                    !(ap->flags & ATA_FLAG_DISABLED)) {
                        struct ata_queued_cmd *qc;

                        qc = ata_qc_from_tag(ap, ap->active_tag);
                        if (qc && (!(qc->tf.flags & ATA_TFLAG_POLLING)))
                                handled += ata_host_intr(ap, qc);
                                // No request pending? Clear interrupt status
                                // anyway, in case there's one pending.
                                ap->ops->check_status(ap);

        spin_unlock_irqrestore(&host->lock, flags);

        return IRQ_RETVAL(handled);

static irqreturn_t nv_do_interrupt(struct ata_host *host, u8 irq_stat)
        for (i = 0; i < host->n_ports; i++) {
                struct ata_port *ap = host->ports[i];

                if (ap && !(ap->flags & ATA_FLAG_DISABLED))
                        handled += nv_host_intr(ap, irq_stat);

                irq_stat >>= NV_INT_PORT_SHIFT;

        return IRQ_RETVAL(handled);

static irqreturn_t nv_nf2_interrupt(int irq, void *dev_instance)
        struct ata_host *host = dev_instance;

        spin_lock(&host->lock);
        irq_stat = ioread8(host->ports[0]->ioaddr.scr_addr + NV_INT_STATUS);
        ret = nv_do_interrupt(host, irq_stat);
        spin_unlock(&host->lock);

static irqreturn_t nv_ck804_interrupt(int irq, void *dev_instance)
        struct ata_host *host = dev_instance;

        spin_lock(&host->lock);
        irq_stat = readb(host->iomap[NV_MMIO_BAR] + NV_INT_STATUS_CK804);
        ret = nv_do_interrupt(host, irq_stat);
        spin_unlock(&host->lock);

static u32 nv_scr_read (struct ata_port *ap, unsigned int sc_reg)
        if (sc_reg > SCR_CONTROL)

        return ioread32(ap->ioaddr.scr_addr + (sc_reg * 4));

static void nv_scr_write (struct ata_port *ap, unsigned int sc_reg, u32 val)
        if (sc_reg > SCR_CONTROL)

        iowrite32(val, ap->ioaddr.scr_addr + (sc_reg * 4));

static void nv_nf2_freeze(struct ata_port *ap)
        void __iomem *scr_addr = ap->host->ports[0]->ioaddr.scr_addr;
        int shift = ap->port_no * NV_INT_PORT_SHIFT;

        mask = ioread8(scr_addr + NV_INT_ENABLE);
        mask &= ~(NV_INT_ALL << shift);
        iowrite8(mask, scr_addr + NV_INT_ENABLE);

static void nv_nf2_thaw(struct ata_port *ap)
        void __iomem *scr_addr = ap->host->ports[0]->ioaddr.scr_addr;
        int shift = ap->port_no * NV_INT_PORT_SHIFT;

        iowrite8(NV_INT_ALL << shift, scr_addr + NV_INT_STATUS);

        mask = ioread8(scr_addr + NV_INT_ENABLE);
        mask |= (NV_INT_MASK << shift);
        iowrite8(mask, scr_addr + NV_INT_ENABLE);

static void nv_ck804_freeze(struct ata_port *ap)
        void __iomem *mmio_base = ap->host->iomap[NV_MMIO_BAR];
        int shift = ap->port_no * NV_INT_PORT_SHIFT;

        mask = readb(mmio_base + NV_INT_ENABLE_CK804);
        mask &= ~(NV_INT_ALL << shift);
        writeb(mask, mmio_base + NV_INT_ENABLE_CK804);

static void nv_ck804_thaw(struct ata_port *ap)
        void __iomem *mmio_base = ap->host->iomap[NV_MMIO_BAR];
        int shift = ap->port_no * NV_INT_PORT_SHIFT;

        writeb(NV_INT_ALL << shift, mmio_base + NV_INT_STATUS_CK804);

        mask = readb(mmio_base + NV_INT_ENABLE_CK804);
        mask |= (NV_INT_MASK << shift);
        writeb(mask, mmio_base + NV_INT_ENABLE_CK804);

static int nv_hardreset(struct ata_port *ap, unsigned int *class)
        /* SATA hardreset fails to retrieve proper device signature on
         * some controllers. Don't classify on hardreset. For more
         * info, see http://bugme.osdl.org/show_bug.cgi?id=3352
         */
        return sata_std_hardreset(ap, &dummy);

static void nv_error_handler(struct ata_port *ap)
        ata_bmdma_drive_eh(ap, ata_std_prereset, ata_std_softreset,
                           nv_hardreset, ata_std_postreset);
static void nv_adma_error_handler(struct ata_port *ap)
        struct nv_adma_port_priv *pp = ap->private_data;
        if(!(pp->flags & NV_ADMA_PORT_REGISTER_MODE)) {
                void __iomem *mmio = pp->ctl_block;

                u32 notifier = readl(mmio + NV_ADMA_NOTIFIER);
                u32 notifier_error = readl(mmio + NV_ADMA_NOTIFIER_ERROR);
                u32 gen_ctl = readl(pp->gen_block + NV_ADMA_GEN_CTL);
                u32 status = readw(mmio + NV_ADMA_STAT);

                ata_port_printk(ap, KERN_ERR, "EH in ADMA mode, notifier 0x%X "
                        "notifier_error 0x%X gen_ctl 0x%X status 0x%X\n",
                        notifier, notifier_error, gen_ctl, status);

                for( i=0;i<NV_ADMA_MAX_CPBS;i++) {
                        struct nv_adma_cpb *cpb = &pp->cpb[i];
                        if( cpb->ctl_flags || cpb->resp_flags )
                                ata_port_printk(ap, KERN_ERR,
                                        "CPB %d: ctl_flags 0x%x, resp_flags 0x%x\n",
                                        i, cpb->ctl_flags, cpb->resp_flags);

                /* Push us back into port register mode for error handling. */
                nv_adma_register_mode(ap);

                ata_port_printk(ap, KERN_ERR, "Resetting port\n");

                /* Mark all of the CPBs as invalid to prevent them from being executed */
                for( i=0;i<NV_ADMA_MAX_CPBS;i++)
                        pp->cpb[i].ctl_flags &= ~NV_CPB_CTL_CPB_VALID;

                /* clear CPB fetch count */
                writew(0, mmio + NV_ADMA_CPB_COUNT);

                tmp = readw(mmio + NV_ADMA_CTL);
                writew(tmp | NV_ADMA_CTL_CHANNEL_RESET, mmio + NV_ADMA_CTL);
                readl( mmio + NV_ADMA_CTL ); /* flush posted write */
                writew(tmp & ~NV_ADMA_CTL_CHANNEL_RESET, mmio + NV_ADMA_CTL);
                readl( mmio + NV_ADMA_CTL ); /* flush posted write */

        ata_bmdma_drive_eh(ap, ata_std_prereset, ata_std_softreset,
                           nv_hardreset, ata_std_postreset);
static int nv_init_one (struct pci_dev *pdev, const struct pci_device_id *ent)
        static int printed_version = 0;
        struct ata_port_info *ppi[2];
        struct ata_probe_ent *probe_ent;
        struct nv_host_priv *hpriv;
        unsigned long type = ent->driver_data;

        // Make sure this is a SATA controller by counting the number of bars
        // (NVIDIA SATA controllers will always have six bars). Otherwise,
        // it's an IDE controller and we ignore it.
        for (bar=0; bar<6; bar++)
                if (pci_resource_start(pdev, bar) == 0)

        if (!printed_version++)
                dev_printk(KERN_DEBUG, &pdev->dev, "version " DRV_VERSION "\n");

        rc = pcim_enable_device(pdev);

        rc = pci_request_regions(pdev, DRV_NAME);
                pcim_pin_device(pdev);

        if(type >= CK804 && adma_enabled) {
                dev_printk(KERN_NOTICE, &pdev->dev, "Using ADMA mode\n");
                if(!pci_set_dma_mask(pdev, DMA_64BIT_MASK) &&
                   !pci_set_consistent_dma_mask(pdev, DMA_64BIT_MASK))

        rc = pci_set_dma_mask(pdev, ATA_DMA_MASK);
        rc = pci_set_consistent_dma_mask(pdev, ATA_DMA_MASK);

        hpriv = devm_kzalloc(&pdev->dev, sizeof(*hpriv), GFP_KERNEL);

        ppi[0] = ppi[1] = &nv_port_info[type];
        probe_ent = ata_pci_init_native_mode(pdev, ppi, ATA_PORT_PRIMARY | ATA_PORT_SECONDARY);

        if (!pcim_iomap(pdev, NV_MMIO_BAR, 0))
        probe_ent->iomap = pcim_iomap_table(pdev);

        probe_ent->private_data = hpriv;

        base = probe_ent->iomap[NV_MMIO_BAR];
        probe_ent->port[0].scr_addr = base + NV_PORT0_SCR_REG_OFFSET;
        probe_ent->port[1].scr_addr = base + NV_PORT1_SCR_REG_OFFSET;

        /* enable SATA space for CK804 */
        if (type >= CK804) {
                pci_read_config_byte(pdev, NV_MCP_SATA_CFG_20, &regval);
                regval |= NV_MCP_SATA_CFG_20_SATA_SPACE_EN;
                pci_write_config_byte(pdev, NV_MCP_SATA_CFG_20, regval);

        pci_set_master(pdev);

                rc = nv_adma_host_init(probe_ent);

        rc = ata_device_add(probe_ent);

        devm_kfree(&pdev->dev, probe_ent);
static void nv_remove_one (struct pci_dev *pdev)
        struct ata_host *host = dev_get_drvdata(&pdev->dev);
        struct nv_host_priv *hpriv = host->private_data;

        ata_pci_remove_one(pdev);

static int nv_pci_device_resume(struct pci_dev *pdev)
        struct ata_host *host = dev_get_drvdata(&pdev->dev);
        struct nv_host_priv *hpriv = host->private_data;

        ata_pci_device_do_resume(pdev);

        if (pdev->dev.power.power_state.event == PM_EVENT_SUSPEND) {
                if(hpriv->type >= CK804) {
                        pci_read_config_byte(pdev, NV_MCP_SATA_CFG_20, &regval);
                        regval |= NV_MCP_SATA_CFG_20_SATA_SPACE_EN;
                        pci_write_config_byte(pdev, NV_MCP_SATA_CFG_20, regval);

                if(hpriv->type == ADMA) {
                        struct nv_adma_port_priv *pp;
                        /* enable/disable ADMA on the ports appropriately */
                        pci_read_config_dword(pdev, NV_MCP_SATA_CFG_20, &tmp32);

                        pp = host->ports[0]->private_data;
                        if(pp->flags & NV_ADMA_ATAPI_SETUP_COMPLETE)
                                tmp32 &= ~(NV_MCP_SATA_CFG_20_PORT0_EN |
                                           NV_MCP_SATA_CFG_20_PORT0_PWB_EN);
                                tmp32 |= (NV_MCP_SATA_CFG_20_PORT0_EN |
                                          NV_MCP_SATA_CFG_20_PORT0_PWB_EN);
                        pp = host->ports[1]->private_data;
                        if(pp->flags & NV_ADMA_ATAPI_SETUP_COMPLETE)
                                tmp32 &= ~(NV_MCP_SATA_CFG_20_PORT1_EN |
                                           NV_MCP_SATA_CFG_20_PORT1_PWB_EN);
                                tmp32 |= (NV_MCP_SATA_CFG_20_PORT1_EN |
                                          NV_MCP_SATA_CFG_20_PORT1_PWB_EN);

                        pci_write_config_dword(pdev, NV_MCP_SATA_CFG_20, tmp32);

        ata_host_resume(host);

static void nv_ck804_host_stop(struct ata_host *host)
        struct pci_dev *pdev = to_pci_dev(host->dev);

        /* disable SATA space for CK804 */
        pci_read_config_byte(pdev, NV_MCP_SATA_CFG_20, &regval);
        regval &= ~NV_MCP_SATA_CFG_20_SATA_SPACE_EN;
        pci_write_config_byte(pdev, NV_MCP_SATA_CFG_20, regval);

static void nv_adma_host_stop(struct ata_host *host)
        struct pci_dev *pdev = to_pci_dev(host->dev);

        /* disable ADMA on the ports */
        pci_read_config_dword(pdev, NV_MCP_SATA_CFG_20, &tmp32);
        tmp32 &= ~(NV_MCP_SATA_CFG_20_PORT0_EN |
                   NV_MCP_SATA_CFG_20_PORT0_PWB_EN |
                   NV_MCP_SATA_CFG_20_PORT1_EN |
                   NV_MCP_SATA_CFG_20_PORT1_PWB_EN);

        pci_write_config_dword(pdev, NV_MCP_SATA_CFG_20, tmp32);

        nv_ck804_host_stop(host);

static int __init nv_init(void)
        return pci_register_driver(&nv_pci_driver);

static void __exit nv_exit(void)
        pci_unregister_driver(&nv_pci_driver);

module_init(nv_init);
module_exit(nv_exit);
module_param_named(adma, adma_enabled, bool, 0444);
MODULE_PARM_DESC(adma, "Enable use of ADMA (Default: true)");