2 * libata-sff.c - helper library for PCI IDE BMDMA
4 * Maintained by: Jeff Garzik <jgarzik@pobox.com>
5 * Please ALWAYS copy linux-ide@vger.kernel.org
8 * Copyright 2003-2006 Red Hat, Inc. All rights reserved.
9 * Copyright 2003-2006 Jeff Garzik
12 * This program is free software; you can redistribute it and/or modify
13 * it under the terms of the GNU General Public License as published by
14 * the Free Software Foundation; either version 2, or (at your option)
17 * This program is distributed in the hope that it will be useful,
18 * but WITHOUT ANY WARRANTY; without even the implied warranty of
19 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
20 * GNU General Public License for more details.
22 * You should have received a copy of the GNU General Public License
23 * along with this program; see the file COPYING. If not, write to
24 * the Free Software Foundation, 675 Mass Ave, Cambridge, MA 02139, USA.
27 * libata documentation is available via 'make {ps|pdf}docs',
28 * as Documentation/DocBook/libata.*
30 * Hardware documentation available from http://www.t13.org/ and
31 * http://www.sata-io.org/
35 #include <linux/kernel.h>
36 #include <linux/pci.h>
37 #include <linux/libata.h>
38 #include <linux/highmem.h>
42 const struct ata_port_operations ata_sff_port_ops = {
43 .inherits = &ata_base_port_ops,
45 .qc_prep = ata_sff_qc_prep,
46 .qc_issue = ata_sff_qc_issue,
48 .freeze = ata_sff_freeze,
50 .prereset = ata_sff_prereset,
51 .softreset = ata_sff_softreset,
52 .error_handler = ata_sff_error_handler,
53 .post_internal_cmd = ata_sff_post_internal_cmd,
55 .sff_dev_select = ata_sff_dev_select,
56 .sff_check_status = ata_sff_check_status,
57 .sff_tf_load = ata_sff_tf_load,
58 .sff_tf_read = ata_sff_tf_read,
59 .sff_exec_command = ata_sff_exec_command,
60 .sff_data_xfer = ata_sff_data_xfer,
61 .sff_irq_on = ata_sff_irq_on,
62 .sff_irq_clear = ata_sff_irq_clear,
64 .port_start = ata_sff_port_start,
67 const struct ata_port_operations ata_bmdma_port_ops = {
68 .inherits = &ata_sff_port_ops,
70 .mode_filter = ata_bmdma_mode_filter,
72 .bmdma_setup = ata_bmdma_setup,
73 .bmdma_start = ata_bmdma_start,
74 .bmdma_stop = ata_bmdma_stop,
75 .bmdma_status = ata_bmdma_status,
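/*
 * Illustrative sketch (not part of the original file): a low-level driver
 * normally builds its own ata_port_operations on top of the tables above,
 * inheriting the SFF/BMDMA defaults and overriding only the hooks its
 * hardware needs.  The names my_set_piomode and my_port_ops below are
 * hypothetical.
 *
 *	static struct ata_port_operations my_port_ops = {
 *		.inherits	= &ata_bmdma_port_ops,
 *		.set_piomode	= my_set_piomode,
 *	};
 */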
79 * ata_fill_sg - Fill PCI IDE PRD table
80 * @qc: Metadata associated with taskfile to be transferred
82 * Fill PCI IDE PRD (scatter-gather) table with segments
83 * associated with the current disk command.
86 * spin_lock_irqsave(host lock)
89 static void ata_fill_sg(struct ata_queued_cmd *qc)
91 struct ata_port *ap = qc->ap;
92 struct scatterlist *sg;
96 for_each_sg(qc->sg, sg, qc->n_elem, si) {
100 /* determine if physical DMA addr spans 64K boundary.
101 * Note h/w doesn't support 64-bit, so we unconditionally
102 * truncate dma_addr_t to u32.
104 addr = (u32) sg_dma_address(sg);
105 sg_len = sg_dma_len(sg);
108 offset = addr & 0xffff;
110 if ((offset + sg_len) > 0x10000)
111 len = 0x10000 - offset;
113 ap->prd[pi].addr = cpu_to_le32(addr);
114 ap->prd[pi].flags_len = cpu_to_le32(len & 0xffff);
115 VPRINTK("PRD[%u] = (0x%X, 0x%X)\n", pi, addr, len);
123 ap->prd[pi - 1].flags_len |= cpu_to_le32(ATA_PRD_EOT);
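/*
 * Worked example (illustrative, with made-up addresses): a segment at bus
 * address 0x1234F000 with length 0x3000 has offset 0xF000, so
 * offset + sg_len (0x12000) crosses the 64K boundary.  The fill loop above
 * would emit two PRD entries, (0x1234F000, 0x1000) and (0x12350000, 0x2000),
 * and the last entry of the table gets ATA_PRD_EOT set.
 */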
127 * ata_fill_sg_dumb - Fill PCI IDE PRD table
128 * @qc: Metadata associated with taskfile to be transferred
130 * Fill PCI IDE PRD (scatter-gather) table with segments
131 * associated with the current disk command. Perform the fill
132 * so that we avoid writing any 64K-length records for
133 * controllers that don't follow the spec.
136 * spin_lock_irqsave(host lock)
139 static void ata_fill_sg_dumb(struct ata_queued_cmd *qc)
141 struct ata_port *ap = qc->ap;
142 struct scatterlist *sg;
146 for_each_sg(qc->sg, sg, qc->n_elem, si) {
148 u32 sg_len, len, blen;
150 /* determine if physical DMA addr spans 64K boundary.
151 * Note h/w doesn't support 64-bit, so we unconditionally
152 * truncate dma_addr_t to u32.
154 addr = (u32) sg_dma_address(sg);
155 sg_len = sg_dma_len(sg);
158 offset = addr & 0xffff;
160 if ((offset + sg_len) > 0x10000)
161 len = 0x10000 - offset;
164 ap->prd[pi].addr = cpu_to_le32(addr);
166 /* Some PATA chipsets like the CS5530 can't
167 cope with 0x0000 meaning 64K as the spec says */
168 ap->prd[pi].flags_len = cpu_to_le32(0x8000);
170 ap->prd[++pi].addr = cpu_to_le32(addr + 0x8000);
172 ap->prd[pi].flags_len = cpu_to_le32(blen);
173 VPRINTK("PRD[%u] = (0x%X, 0x%X)\n", pi, addr, len);
181 ap->prd[pi - 1].flags_len |= cpu_to_le32(ATA_PRD_EOT);
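/*
 * Worked example for the "dumb" variant (illustrative): a 64K-aligned
 * segment of exactly 0x10000 bytes would normally be described by a single
 * PRD whose length field is 0x0000, the spec's encoding for 64K.  Chips
 * such as the CS5530 mishandle that encoding, so the code above splits the
 * segment into two 32K entries instead: (addr, 0x8000) and
 * (addr + 0x8000, 0x8000).
 */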
185 * ata_sff_qc_prep - Prepare taskfile for submission
186 * @qc: Metadata associated with taskfile to be prepared
188 * Prepare ATA taskfile for submission.
191 * spin_lock_irqsave(host lock)
193 void ata_sff_qc_prep(struct ata_queued_cmd *qc)
195 if (!(qc->flags & ATA_QCFLAG_DMAMAP))
202 * ata_sff_dumb_qc_prep - Prepare taskfile for submission
203 * @qc: Metadata associated with taskfile to be prepared
205 * Prepare ATA taskfile for submission.
208 * spin_lock_irqsave(host lock)
210 void ata_sff_dumb_qc_prep(struct ata_queued_cmd *qc)
212 if (!(qc->flags & ATA_QCFLAG_DMAMAP))
215 ata_fill_sg_dumb(qc);
219 * ata_sff_check_status - Read device status reg & clear interrupt
220 * @ap: port where the device is
222 * Reads ATA taskfile status register for currently-selected device
223 * and returns its value. This also clears pending interrupts
227 * Inherited from caller.
229 u8 ata_sff_check_status(struct ata_port *ap)
231 return ioread8(ap->ioaddr.status_addr);
235 * ata_sff_altstatus - Read device alternate status reg
236 * @ap: port where the device is
238 * Reads ATA taskfile alternate status register for
239 * currently-selected device and returns its value.
241 * Note: may NOT be used as the check_altstatus() entry in
242 * ata_port_operations.
245 * Inherited from caller.
247 u8 ata_sff_altstatus(struct ata_port *ap)
249 if (ap->ops->sff_check_altstatus)
250 return ap->ops->sff_check_altstatus(ap);
252 return ioread8(ap->ioaddr.altstatus_addr);
256 * ata_sff_busy_sleep - sleep until BSY clears, or timeout
257 * @ap: port containing status register to be polled
258 * @tmout_pat: impatience timeout
259 * @tmout: overall timeout
261 * Sleep until ATA Status register bit BSY clears,
262 * or a timeout occurs.
265 * Kernel thread context (may sleep).
268 * 0 on success, -errno otherwise.
270 int ata_sff_busy_sleep(struct ata_port *ap,
271 unsigned long tmout_pat, unsigned long tmout)
273 unsigned long timer_start, timeout;
276 status = ata_sff_busy_wait(ap, ATA_BUSY, 300);
277 timer_start = jiffies;
278 timeout = timer_start + tmout_pat;
279 while (status != 0xff && (status & ATA_BUSY) &&
280 time_before(jiffies, timeout)) {
282 status = ata_sff_busy_wait(ap, ATA_BUSY, 3);
285 if (status != 0xff && (status & ATA_BUSY))
286 ata_port_printk(ap, KERN_WARNING,
287 "port is slow to respond, please be patient "
288 "(Status 0x%x)\n", status);
290 timeout = timer_start + tmout;
291 while (status != 0xff && (status & ATA_BUSY) &&
292 time_before(jiffies, timeout)) {
294 status = ap->ops->sff_check_status(ap);
300 if (status & ATA_BUSY) {
301 ata_port_printk(ap, KERN_ERR, "port failed to respond "
302 "(%lu secs, Status 0x%x)\n",
311 * ata_sff_wait_ready - sleep until BSY clears, or timeout
312 * @ap: port containing status register to be polled
313 * @deadline: deadline jiffies for the operation
315 * Sleep until ATA Status register bit BSY clears, or timeout
319 * Kernel thread context (may sleep).
322 * 0 on success, -errno otherwise.
324 int ata_sff_wait_ready(struct ata_port *ap, unsigned long deadline)
326 unsigned long start = jiffies;
330 u8 status = ap->ops->sff_check_status(ap);
331 unsigned long now = jiffies;
333 if (!(status & ATA_BUSY))
335 if (!ata_link_online(&ap->link) && status == 0xff)
337 if (time_after(now, deadline))
340 if (!warned && time_after(now, start + 5 * HZ) &&
341 (deadline - now > 3 * HZ)) {
342 ata_port_printk(ap, KERN_WARNING,
343 "port is slow to respond, please be patient "
344 "(Status 0x%x)\n", status);
353 * ata_sff_dev_select - Select device 0/1 on ATA bus
354 * @ap: ATA channel to manipulate
355 * @device: ATA device (numbered from zero) to select
357 * Use the method defined in the ATA specification to
358 * make either device 0, or device 1, active on the
359 * ATA channel. Works with both PIO and MMIO.
361 * May be used as the dev_select() entry in ata_port_operations.
366 void ata_sff_dev_select(struct ata_port *ap, unsigned int device)
371 tmp = ATA_DEVICE_OBS;
373 tmp = ATA_DEVICE_OBS | ATA_DEV1;
375 iowrite8(tmp, ap->ioaddr.device_addr);
376 ata_sff_pause(ap); /* needed; also flushes, for mmio */
380 * ata_dev_select - Select device 0/1 on ATA bus
381 * @ap: ATA channel to manipulate
382 * @device: ATA device (numbered from zero) to select
383 * @wait: non-zero to wait for Status register BSY bit to clear
384 * @can_sleep: non-zero if context allows sleeping
386 * Use the method defined in the ATA specification to
387 * make either device 0, or device 1, active on the
390 * This is a high-level version of ata_sff_dev_select(), which
391 * additionally provides the services of inserting the proper
392 * pauses and status polling, where needed.
397 void ata_dev_select(struct ata_port *ap, unsigned int device,
398 unsigned int wait, unsigned int can_sleep)
400 if (ata_msg_probe(ap))
401 ata_port_printk(ap, KERN_INFO, "ata_dev_select: ENTER, "
402 "device %u, wait %u\n", device, wait);
407 ap->ops->sff_dev_select(ap, device);
410 if (can_sleep && ap->link.device[device].class == ATA_DEV_ATAPI)
417 * ata_sff_irq_on - Enable interrupts on a port.
418 * @ap: Port on which interrupts are enabled.
420 * Enable interrupts on a legacy IDE device using MMIO or PIO,
421 * wait for idle, clear any pending interrupts.
424 * Inherited from caller.
426 u8 ata_sff_irq_on(struct ata_port *ap)
428 struct ata_ioports *ioaddr = &ap->ioaddr;
431 ap->ctl &= ~ATA_NIEN;
432 ap->last_ctl = ap->ctl;
434 if (ioaddr->ctl_addr)
435 iowrite8(ap->ctl, ioaddr->ctl_addr);
436 tmp = ata_wait_idle(ap);
438 ap->ops->sff_irq_clear(ap);
444 * ata_sff_irq_clear - Clear PCI IDE BMDMA interrupt.
445 * @ap: Port associated with this ATA transaction.
447 * Clear interrupt and error flags in DMA status register.
449 * May be used as the irq_clear() entry in ata_port_operations.
452 * spin_lock_irqsave(host lock)
454 void ata_sff_irq_clear(struct ata_port *ap)
456 void __iomem *mmio = ap->ioaddr.bmdma_addr;
461 iowrite8(ioread8(mmio + ATA_DMA_STATUS), mmio + ATA_DMA_STATUS);
465 * ata_sff_tf_load - send taskfile registers to host controller
466 * @ap: Port to which output is sent
467 * @tf: ATA taskfile register set
469 * Outputs ATA taskfile to standard ATA host controller.
472 * Inherited from caller.
474 void ata_sff_tf_load(struct ata_port *ap, const struct ata_taskfile *tf)
476 struct ata_ioports *ioaddr = &ap->ioaddr;
477 unsigned int is_addr = tf->flags & ATA_TFLAG_ISADDR;
479 if (tf->ctl != ap->last_ctl) {
480 if (ioaddr->ctl_addr)
481 iowrite8(tf->ctl, ioaddr->ctl_addr);
482 ap->last_ctl = tf->ctl;
486 if (is_addr && (tf->flags & ATA_TFLAG_LBA48)) {
487 WARN_ON(!ioaddr->ctl_addr);
488 iowrite8(tf->hob_feature, ioaddr->feature_addr);
489 iowrite8(tf->hob_nsect, ioaddr->nsect_addr);
490 iowrite8(tf->hob_lbal, ioaddr->lbal_addr);
491 iowrite8(tf->hob_lbam, ioaddr->lbam_addr);
492 iowrite8(tf->hob_lbah, ioaddr->lbah_addr);
493 VPRINTK("hob: feat 0x%X nsect 0x%X, lba 0x%X 0x%X 0x%X\n",
502 iowrite8(tf->feature, ioaddr->feature_addr);
503 iowrite8(tf->nsect, ioaddr->nsect_addr);
504 iowrite8(tf->lbal, ioaddr->lbal_addr);
505 iowrite8(tf->lbam, ioaddr->lbam_addr);
506 iowrite8(tf->lbah, ioaddr->lbah_addr);
507 VPRINTK("feat 0x%X nsect 0x%X lba 0x%X 0x%X 0x%X\n",
515 if (tf->flags & ATA_TFLAG_DEVICE) {
516 iowrite8(tf->device, ioaddr->device_addr);
517 VPRINTK("device 0x%X\n", tf->device);
524 * ata_sff_tf_read - input device's ATA taskfile shadow registers
525 * @ap: Port from which input is read
526 * @tf: ATA taskfile register set for storing input
528 * Reads ATA taskfile registers for currently-selected device
529 * into @tf. Assumes the device has a fully SFF compliant task file
530 * layout and behaviour. If your device does not (e.g. has a different
531 * status method) then you will need to provide a replacement tf_read
534 * Inherited from caller.
536 void ata_sff_tf_read(struct ata_port *ap, struct ata_taskfile *tf)
538 struct ata_ioports *ioaddr = &ap->ioaddr;
540 tf->command = ata_sff_check_status(ap);
541 tf->feature = ioread8(ioaddr->error_addr);
542 tf->nsect = ioread8(ioaddr->nsect_addr);
543 tf->lbal = ioread8(ioaddr->lbal_addr);
544 tf->lbam = ioread8(ioaddr->lbam_addr);
545 tf->lbah = ioread8(ioaddr->lbah_addr);
546 tf->device = ioread8(ioaddr->device_addr);
548 if (tf->flags & ATA_TFLAG_LBA48) {
549 if (likely(ioaddr->ctl_addr)) {
550 iowrite8(tf->ctl | ATA_HOB, ioaddr->ctl_addr);
551 tf->hob_feature = ioread8(ioaddr->error_addr);
552 tf->hob_nsect = ioread8(ioaddr->nsect_addr);
553 tf->hob_lbal = ioread8(ioaddr->lbal_addr);
554 tf->hob_lbam = ioread8(ioaddr->lbam_addr);
555 tf->hob_lbah = ioread8(ioaddr->lbah_addr);
556 iowrite8(tf->ctl, ioaddr->ctl_addr);
557 ap->last_ctl = tf->ctl;
564 * ata_sff_exec_command - issue ATA command to host controller
565 * @ap: port to which command is being issued
566 * @tf: ATA taskfile register set
568 * Issues ATA command, with proper synchronization with interrupt
569 * handler / other threads.
572 * spin_lock_irqsave(host lock)
574 void ata_sff_exec_command(struct ata_port *ap, const struct ata_taskfile *tf)
576 DPRINTK("ata%u: cmd 0x%X\n", ap->print_id, tf->command);
578 iowrite8(tf->command, ap->ioaddr.command_addr);
583 * ata_tf_to_host - issue ATA taskfile to host controller
584 * @ap: port to which command is being issued
585 * @tf: ATA taskfile register set
587 * Issues ATA taskfile register set to ATA host controller,
588 * with proper synchronization with interrupt handler and
592 * spin_lock_irqsave(host lock)
594 static inline void ata_tf_to_host(struct ata_port *ap,
595 const struct ata_taskfile *tf)
597 ap->ops->sff_tf_load(ap, tf);
598 ap->ops->sff_exec_command(ap, tf);
602 * ata_sff_data_xfer - Transfer data by PIO
603 * @dev: device to target
605 * @buflen: buffer length
608 * Transfer data from/to the device data register by PIO.
611 * Inherited from caller.
616 unsigned int ata_sff_data_xfer(struct ata_device *dev, unsigned char *buf,
617 unsigned int buflen, int rw)
619 struct ata_port *ap = dev->link->ap;
620 void __iomem *data_addr = ap->ioaddr.data_addr;
621 unsigned int words = buflen >> 1;
623 /* Transfer multiple of 2 bytes */
625 ioread16_rep(data_addr, buf, words);
627 iowrite16_rep(data_addr, buf, words);
629 /* Transfer trailing 1 byte, if any. */
630 if (unlikely(buflen & 0x01)) {
631 __le16 align_buf[1] = { 0 };
632 unsigned char *trailing_buf = buf + buflen - 1;
635 align_buf[0] = cpu_to_le16(ioread16(data_addr));
636 memcpy(trailing_buf, align_buf, 1);
638 memcpy(align_buf, trailing_buf, 1);
639 iowrite16(le16_to_cpu(align_buf[0]), data_addr);
648 * ata_sff_data_xfer_noirq - Transfer data by PIO
649 * @dev: device to target
651 * @buflen: buffer length
654 * Transfer data from/to the device data register by PIO. Do the
655 * transfer with interrupts disabled.
658 * Inherited from caller.
663 unsigned int ata_sff_data_xfer_noirq(struct ata_device *dev, unsigned char *buf,
664 unsigned int buflen, int rw)
667 unsigned int consumed;
669 local_irq_save(flags);
670 consumed = ata_sff_data_xfer(dev, buf, buflen, rw);
671 local_irq_restore(flags);
677 * ata_pio_sector - Transfer a sector of data.
678 * @qc: Command on going
680 * Transfer qc->sect_size bytes of data from/to the ATA device.
683 * Inherited from caller.
685 static void ata_pio_sector(struct ata_queued_cmd *qc)
687 int do_write = (qc->tf.flags & ATA_TFLAG_WRITE);
688 struct ata_port *ap = qc->ap;
693 if (qc->curbytes == qc->nbytes - qc->sect_size)
694 ap->hsm_task_state = HSM_ST_LAST;
696 page = sg_page(qc->cursg);
697 offset = qc->cursg->offset + qc->cursg_ofs;
699 /* get the current page and offset */
700 page = nth_page(page, (offset >> PAGE_SHIFT));
703 DPRINTK("data %s\n", qc->tf.flags & ATA_TFLAG_WRITE ? "write" : "read");
705 if (PageHighMem(page)) {
708 /* FIXME: use a bounce buffer */
709 local_irq_save(flags);
710 buf = kmap_atomic(page, KM_IRQ0);
712 /* do the actual data transfer */
713 ap->ops->sff_data_xfer(qc->dev, buf + offset, qc->sect_size,
716 kunmap_atomic(buf, KM_IRQ0);
717 local_irq_restore(flags);
719 buf = page_address(page);
720 ap->ops->sff_data_xfer(qc->dev, buf + offset, qc->sect_size,
724 qc->curbytes += qc->sect_size;
725 qc->cursg_ofs += qc->sect_size;
727 if (qc->cursg_ofs == qc->cursg->length) {
728 qc->cursg = sg_next(qc->cursg);
734 * ata_pio_sectors - Transfer one or many sectors.
735 * @qc: Command on going
737 * Transfer one or many sectors of data from/to the
738 * ATA device for the DRQ request.
741 * Inherited from caller.
743 static void ata_pio_sectors(struct ata_queued_cmd *qc)
745 if (is_multi_taskfile(&qc->tf)) {
746 /* READ/WRITE MULTIPLE */
749 WARN_ON(qc->dev->multi_count == 0);
751 nsect = min((qc->nbytes - qc->curbytes) / qc->sect_size,
752 qc->dev->multi_count);
758 ata_sff_altstatus(qc->ap); /* flush */
762 * atapi_send_cdb - Write CDB bytes to hardware
763 * @ap: Port to which ATAPI device is attached.
764 * @qc: Taskfile currently active
766 * When device has indicated its readiness to accept
767 * a CDB, this function is called. Send the CDB.
772 static void atapi_send_cdb(struct ata_port *ap, struct ata_queued_cmd *qc)
775 DPRINTK("send cdb\n");
776 WARN_ON(qc->dev->cdb_len < 12);
778 ap->ops->sff_data_xfer(qc->dev, qc->cdb, qc->dev->cdb_len, 1);
779 ata_sff_altstatus(ap); /* flush */
781 switch (qc->tf.protocol) {
783 ap->hsm_task_state = HSM_ST;
785 case ATAPI_PROT_NODATA:
786 ap->hsm_task_state = HSM_ST_LAST;
789 ap->hsm_task_state = HSM_ST_LAST;
791 ap->ops->bmdma_start(qc);
797 * __atapi_pio_bytes - Transfer data from/to the ATAPI device.
798 * @qc: Command on going
799 * @bytes: number of bytes
801 * Transfer data from/to the ATAPI device.
804 * Inherited from caller.
807 static int __atapi_pio_bytes(struct ata_queued_cmd *qc, unsigned int bytes)
809 int rw = (qc->tf.flags & ATA_TFLAG_WRITE) ? WRITE : READ;
810 struct ata_port *ap = qc->ap;
811 struct ata_device *dev = qc->dev;
812 struct ata_eh_info *ehi = &dev->link->eh_info;
813 struct scatterlist *sg;
816 unsigned int offset, count, consumed;
821 ata_ehi_push_desc(ehi, "unexpected or too much trailing data "
822 "buf=%u cur=%u bytes=%u",
823 qc->nbytes, qc->curbytes, bytes);
828 offset = sg->offset + qc->cursg_ofs;
830 /* get the current page and offset */
831 page = nth_page(page, (offset >> PAGE_SHIFT));
834 /* don't overrun current sg */
835 count = min(sg->length - qc->cursg_ofs, bytes);
837 /* don't cross page boundaries */
838 count = min(count, (unsigned int)PAGE_SIZE - offset);
840 DPRINTK("data %s\n", qc->tf.flags & ATA_TFLAG_WRITE ? "write" : "read");
842 if (PageHighMem(page)) {
845 /* FIXME: use bounce buffer */
846 local_irq_save(flags);
847 buf = kmap_atomic(page, KM_IRQ0);
849 /* do the actual data transfer */
850 consumed = ap->ops->sff_data_xfer(dev, buf + offset, count, rw);
852 kunmap_atomic(buf, KM_IRQ0);
853 local_irq_restore(flags);
855 buf = page_address(page);
856 consumed = ap->ops->sff_data_xfer(dev, buf + offset, count, rw);
859 bytes -= min(bytes, consumed);
860 qc->curbytes += count;
861 qc->cursg_ofs += count;
863 if (qc->cursg_ofs == sg->length) {
864 qc->cursg = sg_next(qc->cursg);
868 /* consumed can be larger than count only for the last transfer */
869 WARN_ON(qc->cursg && count != consumed);
877 * atapi_pio_bytes - Transfer data from/to the ATAPI device.
878 * @qc: Command on going
880 * Transfer data from/to the ATAPI device.
883 * Inherited from caller.
885 static void atapi_pio_bytes(struct ata_queued_cmd *qc)
887 struct ata_port *ap = qc->ap;
888 struct ata_device *dev = qc->dev;
889 struct ata_eh_info *ehi = &dev->link->eh_info;
890 unsigned int ireason, bc_lo, bc_hi, bytes;
891 int i_write, do_write = (qc->tf.flags & ATA_TFLAG_WRITE) ? 1 : 0;
893 /* Abuse qc->result_tf for temp storage of intermediate TF
894 * here to save some kernel stack usage.
895 * For normal completion, qc->result_tf is not relevant. For
896 * error, qc->result_tf is later overwritten by ata_qc_complete().
897 * So, the correctness of qc->result_tf is not affected.
899 ap->ops->sff_tf_read(ap, &qc->result_tf);
900 ireason = qc->result_tf.nsect;
901 bc_lo = qc->result_tf.lbam;
902 bc_hi = qc->result_tf.lbah;
903 bytes = (bc_hi << 8) | bc_lo;
905 /* shall be cleared to zero, indicating xfer of data */
906 if (unlikely(ireason & (1 << 0)))
909 /* make sure transfer direction matches expected */
910 i_write = ((ireason & (1 << 1)) == 0) ? 1 : 0;
911 if (unlikely(do_write != i_write))
914 if (unlikely(!bytes))
917 VPRINTK("ata%u: xfering %d bytes\n", ap->print_id, bytes);
919 if (unlikely(__atapi_pio_bytes(qc, bytes)))
921 ata_sff_altstatus(ap); /* flush */
926 ata_ehi_push_desc(ehi, "ATAPI check failed (ireason=0x%x bytes=%u)",
929 qc->err_mask |= AC_ERR_HSM;
930 ap->hsm_task_state = HSM_ST_ERR;
934 * ata_hsm_ok_in_wq - Check if the qc can be handled in the workqueue.
935 * @ap: the target ata_port
939 * 1 if ok in workqueue, 0 otherwise.
941 static inline int ata_hsm_ok_in_wq(struct ata_port *ap, struct ata_queued_cmd *qc)
943 if (qc->tf.flags & ATA_TFLAG_POLLING)
946 if (ap->hsm_task_state == HSM_ST_FIRST) {
947 if (qc->tf.protocol == ATA_PROT_PIO &&
948 (qc->tf.flags & ATA_TFLAG_WRITE))
951 if (ata_is_atapi(qc->tf.protocol) &&
952 !(qc->dev->flags & ATA_DFLAG_CDB_INTR))
960 * ata_hsm_qc_complete - finish a qc running on standard HSM
961 * @qc: Command to complete
962 * @in_wq: 1 if called from workqueue, 0 otherwise
964 * Finish @qc which is running on standard HSM.
967 * If @in_wq is zero, spin_lock_irqsave(host lock).
968 * Otherwise, none on entry and grabs host lock.
970 static void ata_hsm_qc_complete(struct ata_queued_cmd *qc, int in_wq)
972 struct ata_port *ap = qc->ap;
975 if (ap->ops->error_handler) {
977 spin_lock_irqsave(ap->lock, flags);
979 /* EH might have kicked in while host lock is
982 qc = ata_qc_from_tag(ap, qc->tag);
984 if (likely(!(qc->err_mask & AC_ERR_HSM))) {
985 ap->ops->sff_irq_on(ap);
991 spin_unlock_irqrestore(ap->lock, flags);
993 if (likely(!(qc->err_mask & AC_ERR_HSM)))
1000 spin_lock_irqsave(ap->lock, flags);
1001 ap->ops->sff_irq_on(ap);
1002 ata_qc_complete(qc);
1003 spin_unlock_irqrestore(ap->lock, flags);
1005 ata_qc_complete(qc);
1010 * ata_sff_hsm_move - move the HSM to the next state.
1011 * @ap: the target ata_port
1013 * @status: current device status
1014 * @in_wq: 1 if called from workqueue, 0 otherwise
1017 * 1 when poll next status needed, 0 otherwise.
1019 int ata_sff_hsm_move(struct ata_port *ap, struct ata_queued_cmd *qc,
1020 u8 status, int in_wq)
1022 unsigned long flags = 0;
1025 WARN_ON((qc->flags & ATA_QCFLAG_ACTIVE) == 0);
1027 /* Make sure ata_sff_qc_issue() does not throw things
1028 * like DMA polling into the workqueue. Notice that
1029 * in_wq is not equivalent to (qc->tf.flags & ATA_TFLAG_POLLING).
1031 WARN_ON(in_wq != ata_hsm_ok_in_wq(ap, qc));
1034 DPRINTK("ata%u: protocol %d task_state %d (dev_stat 0x%X)\n",
1035 ap->print_id, qc->tf.protocol, ap->hsm_task_state, status);
1037 switch (ap->hsm_task_state) {
1039 /* Send first data block or PACKET CDB */
1041 /* If polling, we will stay in the work queue after
1042 * sending the data. Otherwise, interrupt handler
1043 * takes over after sending the data.
1045 poll_next = (qc->tf.flags & ATA_TFLAG_POLLING);
1047 /* check device status */
1048 if (unlikely((status & ATA_DRQ) == 0)) {
1049 /* handle BSY=0, DRQ=0 as error */
1050 if (likely(status & (ATA_ERR | ATA_DF)))
1051 /* device stops HSM for abort/error */
1052 qc->err_mask |= AC_ERR_DEV;
1054 /* HSM violation. Let EH handle this */
1055 qc->err_mask |= AC_ERR_HSM;
1057 ap->hsm_task_state = HSM_ST_ERR;
1061 /* Device should not ask for data transfer (DRQ=1)
1062 * when it finds something wrong.
1063 * We ignore DRQ here and stop the HSM by
1064 * changing hsm_task_state to HSM_ST_ERR and
1065 * let the EH abort the command or reset the device.
1067 if (unlikely(status & (ATA_ERR | ATA_DF))) {
1068 /* Some ATAPI tape drives forget to clear the ERR bit
1069 * when doing the next command (mostly request sense).
1070 * We ignore ERR here to work around it and proceed with sending
1073 if (!(qc->dev->horkage & ATA_HORKAGE_STUCK_ERR)) {
1074 ata_port_printk(ap, KERN_WARNING,
1075 "DRQ=1 with device error, "
1076 "dev_stat 0x%X\n", status);
1077 qc->err_mask |= AC_ERR_HSM;
1078 ap->hsm_task_state = HSM_ST_ERR;
1083 /* Send the CDB (atapi) or the first data block (ata pio out).
1084 * During the state transition, interrupt handler shouldn't
1085 * be invoked before the data transfer is complete and
1086 * hsm_task_state is changed. Hence, the following locking.
1089 spin_lock_irqsave(ap->lock, flags);
1091 if (qc->tf.protocol == ATA_PROT_PIO) {
1092 /* PIO data out protocol.
1093 * send first data block.
1096 /* ata_pio_sectors() might change the state
1097 * to HSM_ST_LAST. so, the state is changed here
1098 * before ata_pio_sectors().
1100 ap->hsm_task_state = HSM_ST;
1101 ata_pio_sectors(qc);
1104 atapi_send_cdb(ap, qc);
1107 spin_unlock_irqrestore(ap->lock, flags);
1109 /* if polling, ata_pio_task() handles the rest.
1110 * otherwise, interrupt handler takes over from here.
1115 /* complete command or read/write the data register */
1116 if (qc->tf.protocol == ATAPI_PROT_PIO) {
1117 /* ATAPI PIO protocol */
1118 if ((status & ATA_DRQ) == 0) {
1119 /* No more data to transfer or device error.
1120 * Device error will be tagged in HSM_ST_LAST.
1122 ap->hsm_task_state = HSM_ST_LAST;
1126 /* Device should not ask for data transfer (DRQ=1)
1127 * when it finds something wrong.
1128 * We ignore DRQ here and stop the HSM by
1129 * changing hsm_task_state to HSM_ST_ERR and
1130 * let the EH abort the command or reset the device.
1132 if (unlikely(status & (ATA_ERR | ATA_DF))) {
1133 ata_port_printk(ap, KERN_WARNING, "DRQ=1 with "
1134 "device error, dev_stat 0x%X\n",
1136 qc->err_mask |= AC_ERR_HSM;
1137 ap->hsm_task_state = HSM_ST_ERR;
1141 atapi_pio_bytes(qc);
1143 if (unlikely(ap->hsm_task_state == HSM_ST_ERR))
1144 /* bad ireason reported by device */
1148 /* ATA PIO protocol */
1149 if (unlikely((status & ATA_DRQ) == 0)) {
1150 /* handle BSY=0, DRQ=0 as error */
1151 if (likely(status & (ATA_ERR | ATA_DF)))
1152 /* device stops HSM for abort/error */
1153 qc->err_mask |= AC_ERR_DEV;
1155 /* HSM violation. Let EH handle this.
1156 * Phantom devices also trigger this
1157 * condition. Mark hint.
1159 qc->err_mask |= AC_ERR_HSM |
1162 ap->hsm_task_state = HSM_ST_ERR;
1166 /* For PIO reads, some devices may ask for
1167 * data transfer (DRQ=1) along with ERR=1.
1168 * We respect DRQ here and transfer one
1169 * block of junk data before changing the
1170 * hsm_task_state to HSM_ST_ERR.
1172 * For PIO writes, ERR=1 DRQ=1 doesn't make
1173 * sense since the data block has been
1174 * transferred to the device.
1176 if (unlikely(status & (ATA_ERR | ATA_DF))) {
1177 /* data might be corrupted */
1178 qc->err_mask |= AC_ERR_DEV;
1180 if (!(qc->tf.flags & ATA_TFLAG_WRITE)) {
1181 ata_pio_sectors(qc);
1182 status = ata_wait_idle(ap);
1185 if (status & (ATA_BUSY | ATA_DRQ))
1186 qc->err_mask |= AC_ERR_HSM;
1188 /* ata_pio_sectors() might change the
1189 * state to HSM_ST_LAST. so, the state
1190 * is changed after ata_pio_sectors().
1192 ap->hsm_task_state = HSM_ST_ERR;
1196 ata_pio_sectors(qc);
1198 if (ap->hsm_task_state == HSM_ST_LAST &&
1199 (!(qc->tf.flags & ATA_TFLAG_WRITE))) {
1201 status = ata_wait_idle(ap);
1210 if (unlikely(!ata_ok(status))) {
1211 qc->err_mask |= __ac_err_mask(status);
1212 ap->hsm_task_state = HSM_ST_ERR;
1216 /* no more data to transfer */
1217 DPRINTK("ata%u: dev %u command complete, drv_stat 0x%x\n",
1218 ap->print_id, qc->dev->devno, status);
1220 WARN_ON(qc->err_mask);
1222 ap->hsm_task_state = HSM_ST_IDLE;
1224 /* complete taskfile transaction */
1225 ata_hsm_qc_complete(qc, in_wq);
1231 /* make sure qc->err_mask is available to
1232 * know what's wrong and recover
1234 WARN_ON(qc->err_mask == 0);
1236 ap->hsm_task_state = HSM_ST_IDLE;
1238 /* complete taskfile transaction */
1239 ata_hsm_qc_complete(qc, in_wq);
1251 void ata_pio_task(struct work_struct *work)
1253 struct ata_port *ap =
1254 container_of(work, struct ata_port, port_task.work);
1255 struct ata_queued_cmd *qc = ap->port_task_data;
1260 WARN_ON(ap->hsm_task_state == HSM_ST_IDLE);
1263 * This is purely heuristic. This is a fast path.
1264 * Sometimes when we enter, BSY will be cleared in
1265 * a chk-status or two. If not, the drive is probably seeking
1266 * or something. Snooze for a couple msecs, then
1267 * chk-status again. If still busy, queue delayed work.
1269 status = ata_sff_busy_wait(ap, ATA_BUSY, 5);
1270 if (status & ATA_BUSY) {
1272 status = ata_sff_busy_wait(ap, ATA_BUSY, 10);
1273 if (status & ATA_BUSY) {
1274 ata_pio_queue_task(ap, qc, ATA_SHORT_PAUSE);
1280 poll_next = ata_sff_hsm_move(ap, qc, status, 1);
1282 /* another command or interrupt handler
1283 * may be running at this point.
1290 * ata_sff_qc_issue - issue taskfile to device in proto-dependent manner
1291 * @qc: command to issue to device
1293 * Using various libata functions and hooks, this function
1294 * starts an ATA command. ATA commands are grouped into
1295 * classes called "protocols", and issuing each type of protocol
1296 * is slightly different.
1298 * May be used as the qc_issue() entry in ata_port_operations.
1301 * spin_lock_irqsave(host lock)
1304 * Zero on success, AC_ERR_* mask on failure
1306 unsigned int ata_sff_qc_issue(struct ata_queued_cmd *qc)
1308 struct ata_port *ap = qc->ap;
1310 /* Use polling pio if the LLD doesn't handle
1311 * interrupt driven pio and atapi CDB interrupt.
1313 if (ap->flags & ATA_FLAG_PIO_POLLING) {
1314 switch (qc->tf.protocol) {
1316 case ATA_PROT_NODATA:
1317 case ATAPI_PROT_PIO:
1318 case ATAPI_PROT_NODATA:
1319 qc->tf.flags |= ATA_TFLAG_POLLING;
1321 case ATAPI_PROT_DMA:
1322 if (qc->dev->flags & ATA_DFLAG_CDB_INTR)
1323 /* see ata_dma_blacklisted() */
1331 /* select the device */
1332 ata_dev_select(ap, qc->dev->devno, 1, 0);
1334 /* start the command */
1335 switch (qc->tf.protocol) {
1336 case ATA_PROT_NODATA:
1337 if (qc->tf.flags & ATA_TFLAG_POLLING)
1338 ata_qc_set_polling(qc);
1340 ata_tf_to_host(ap, &qc->tf);
1341 ap->hsm_task_state = HSM_ST_LAST;
1343 if (qc->tf.flags & ATA_TFLAG_POLLING)
1344 ata_pio_queue_task(ap, qc, 0);
1349 WARN_ON(qc->tf.flags & ATA_TFLAG_POLLING);
1351 ap->ops->sff_tf_load(ap, &qc->tf); /* load tf registers */
1352 ap->ops->bmdma_setup(qc); /* set up bmdma */
1353 ap->ops->bmdma_start(qc); /* initiate bmdma */
1354 ap->hsm_task_state = HSM_ST_LAST;
1358 if (qc->tf.flags & ATA_TFLAG_POLLING)
1359 ata_qc_set_polling(qc);
1361 ata_tf_to_host(ap, &qc->tf);
1363 if (qc->tf.flags & ATA_TFLAG_WRITE) {
1364 /* PIO data out protocol */
1365 ap->hsm_task_state = HSM_ST_FIRST;
1366 ata_pio_queue_task(ap, qc, 0);
1368 /* always send first data block using
1369 * the ata_pio_task() codepath.
1372 /* PIO data in protocol */
1373 ap->hsm_task_state = HSM_ST;
1375 if (qc->tf.flags & ATA_TFLAG_POLLING)
1376 ata_pio_queue_task(ap, qc, 0);
1378 /* if polling, ata_pio_task() handles the rest.
1379 * otherwise, interrupt handler takes over from here.
1385 case ATAPI_PROT_PIO:
1386 case ATAPI_PROT_NODATA:
1387 if (qc->tf.flags & ATA_TFLAG_POLLING)
1388 ata_qc_set_polling(qc);
1390 ata_tf_to_host(ap, &qc->tf);
1392 ap->hsm_task_state = HSM_ST_FIRST;
1394 /* send cdb by polling if no cdb interrupt */
1395 if ((!(qc->dev->flags & ATA_DFLAG_CDB_INTR)) ||
1396 (qc->tf.flags & ATA_TFLAG_POLLING))
1397 ata_pio_queue_task(ap, qc, 0);
1400 case ATAPI_PROT_DMA:
1401 WARN_ON(qc->tf.flags & ATA_TFLAG_POLLING);
1403 ap->ops->sff_tf_load(ap, &qc->tf); /* load tf registers */
1404 ap->ops->bmdma_setup(qc); /* set up bmdma */
1405 ap->hsm_task_state = HSM_ST_FIRST;
1407 /* send cdb by polling if no cdb interrupt */
1408 if (!(qc->dev->flags & ATA_DFLAG_CDB_INTR))
1409 ata_pio_queue_task(ap, qc, 0);
1414 return AC_ERR_SYSTEM;
1421 * ata_sff_host_intr - Handle host interrupt for given (port, task)
1422 * @ap: Port on which interrupt arrived (possibly...)
1423 * @qc: Taskfile currently active in engine
1425 * Handle host interrupt for given queued command. Currently,
1426 * only DMA interrupts are handled. All other commands are
1427 * handled via polling with interrupts disabled (nIEN bit).
1430 * spin_lock_irqsave(host lock)
1433 * One if interrupt was handled, zero if not (shared irq).
1435 inline unsigned int ata_sff_host_intr(struct ata_port *ap,
1436 struct ata_queued_cmd *qc)
1438 struct ata_eh_info *ehi = &ap->link.eh_info;
1439 u8 status, host_stat = 0;
1441 VPRINTK("ata%u: protocol %d task_state %d\n",
1442 ap->print_id, qc->tf.protocol, ap->hsm_task_state);
1444 /* Check whether we are expecting interrupt in this state */
1445 switch (ap->hsm_task_state) {
1447 /* Some pre-ATAPI-4 devices assert INTRQ
1448 * at this state when ready to receive CDB.
1451 /* Checking the ATA_DFLAG_CDB_INTR flag is enough here.
1452 * The flag was turned on only for atapi devices. No
1453 * need to check ata_is_atapi(qc->tf.protocol) again.
1455 if (!(qc->dev->flags & ATA_DFLAG_CDB_INTR))
1459 if (qc->tf.protocol == ATA_PROT_DMA ||
1460 qc->tf.protocol == ATAPI_PROT_DMA) {
1461 /* check status of DMA engine */
1462 host_stat = ap->ops->bmdma_status(ap);
1463 VPRINTK("ata%u: host_stat 0x%X\n",
1464 ap->print_id, host_stat);
1466 /* if it's not our irq... */
1467 if (!(host_stat & ATA_DMA_INTR))
1470 /* before we do anything else, clear DMA-Start bit */
1471 ap->ops->bmdma_stop(qc);
1473 if (unlikely(host_stat & ATA_DMA_ERR)) {
1474 /* error when transferring data to/from memory */
1475 qc->err_mask |= AC_ERR_HOST_BUS;
1476 ap->hsm_task_state = HSM_ST_ERR;
1486 /* check altstatus */
1487 status = ata_sff_altstatus(ap);
1488 if (status & ATA_BUSY)
1491 /* check main status, clearing INTRQ */
1492 status = ap->ops->sff_check_status(ap);
1493 if (unlikely(status & ATA_BUSY))
1496 /* ack bmdma irq events */
1497 ap->ops->sff_irq_clear(ap);
1499 ata_sff_hsm_move(ap, qc, status, 0);
1501 if (unlikely(qc->err_mask) && (qc->tf.protocol == ATA_PROT_DMA ||
1502 qc->tf.protocol == ATAPI_PROT_DMA))
1503 ata_ehi_push_desc(ehi, "BMDMA stat 0x%x", host_stat);
1505 return 1; /* irq handled */
1508 ap->stats.idle_irq++;
1511 if ((ap->stats.idle_irq % 1000) == 0) {
1512 ap->ops->sff_check_status(ap);
1513 ap->ops->sff_irq_clear(ap);
1514 ata_port_printk(ap, KERN_WARNING, "irq trap\n");
1518 return 0; /* irq not handled */
1522 * ata_sff_interrupt - Default ATA host interrupt handler
1523 * @irq: irq line (unused)
1524 * @dev_instance: pointer to our ata_host information structure
1526 * Default interrupt handler for PCI IDE devices. Calls
1527 * ata_sff_host_intr() for each port that is not disabled.
1530 * Obtains host lock during operation.
1533 * IRQ_NONE or IRQ_HANDLED.
1535 irqreturn_t ata_sff_interrupt(int irq, void *dev_instance)
1537 struct ata_host *host = dev_instance;
1539 unsigned int handled = 0;
1540 unsigned long flags;
1542 /* TODO: make _irqsave conditional on x86 PCI IDE legacy mode */
1543 spin_lock_irqsave(&host->lock, flags);
1545 for (i = 0; i < host->n_ports; i++) {
1546 struct ata_port *ap;
1548 ap = host->ports[i];
1550 !(ap->flags & ATA_FLAG_DISABLED)) {
1551 struct ata_queued_cmd *qc;
1553 qc = ata_qc_from_tag(ap, ap->link.active_tag);
1554 if (qc && (!(qc->tf.flags & ATA_TFLAG_POLLING)) &&
1555 (qc->flags & ATA_QCFLAG_ACTIVE))
1556 handled |= ata_sff_host_intr(ap, qc);
1560 spin_unlock_irqrestore(&host->lock, flags);
1562 return IRQ_RETVAL(handled);
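/*
 * Illustrative sketch (hypothetical driver code, not part of this file):
 * a typical SFF PCI driver hands this handler to the libata core when
 * activating the host, e.g. from its ->probe() routine:
 *
 *	return ata_host_activate(host, pdev->irq, ata_sff_interrupt,
 *				 IRQF_SHARED, &my_sht);
 *
 * where my_sht is the driver's scsi_host_template.
 */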
1566 * ata_sff_freeze - Freeze SFF controller port
1567 * @ap: port to freeze
1569 * Freeze BMDMA controller port.
1572 * Inherited from caller.
1574 void ata_sff_freeze(struct ata_port *ap)
1576 struct ata_ioports *ioaddr = &ap->ioaddr;
1578 ap->ctl |= ATA_NIEN;
1579 ap->last_ctl = ap->ctl;
1581 if (ioaddr->ctl_addr)
1582 iowrite8(ap->ctl, ioaddr->ctl_addr);
1584 /* Under certain circumstances, some controllers raise IRQ on
1585 * ATA_NIEN manipulation. Also, many controllers fail to mask
1586 * previously pending IRQ on ATA_NIEN assertion. Clear it.
1588 ap->ops->sff_check_status(ap);
1590 ap->ops->sff_irq_clear(ap);
1594 * ata_sff_thaw - Thaw SFF controller port
1597 * Thaw SFF controller port.
1600 * Inherited from caller.
1602 void ata_sff_thaw(struct ata_port *ap)
1604 /* clear & re-enable interrupts */
1605 ap->ops->sff_check_status(ap);
1606 ap->ops->sff_irq_clear(ap);
1607 ap->ops->sff_irq_on(ap);
1611 * ata_sff_prereset - prepare SFF link for reset
1612 * @link: SFF link to be reset
1613 * @deadline: deadline jiffies for the operation
1615 * SFF link @link is about to be reset. Initialize it. It first
1616 * calls ata_std_prereset() and waits for !BSY if the port is
1620 * Kernel thread context (may sleep)
1623 * 0 on success, -errno otherwise.
1625 int ata_sff_prereset(struct ata_link *link, unsigned long deadline)
1627 struct ata_port *ap = link->ap;
1628 struct ata_eh_context *ehc = &link->eh_context;
1631 rc = ata_std_prereset(link, deadline);
1635 /* if we're about to do hardreset, nothing more to do */
1636 if (ehc->i.action & ATA_EH_HARDRESET)
1639 /* wait for !BSY if we don't know that no device is attached */
1640 if (!ata_link_offline(link)) {
1641 rc = ata_sff_wait_ready(ap, deadline);
1642 if (rc && rc != -ENODEV) {
1643 ata_link_printk(link, KERN_WARNING, "device not ready "
1644 "(errno=%d), forcing hardreset\n", rc);
1645 ehc->i.action |= ATA_EH_HARDRESET;
1653 * ata_devchk - PATA device presence detection
1654 * @ap: ATA channel to examine
1655 * @device: Device to examine (starting at zero)
1657 * This technique was originally described in
1658 * Hale Landis's ATADRVR (www.ata-atapi.com), and
1659 * later found its way into the ATA/ATAPI spec.
1661 * Write a pattern to the ATA shadow registers,
1662 * and if a device is present, it will respond by
1663 * correctly storing and echoing back the
1664 * ATA shadow register contents.
1669 static unsigned int ata_devchk(struct ata_port *ap, unsigned int device)
1671 struct ata_ioports *ioaddr = &ap->ioaddr;
1674 ap->ops->sff_dev_select(ap, device);
1676 iowrite8(0x55, ioaddr->nsect_addr);
1677 iowrite8(0xaa, ioaddr->lbal_addr);
1679 iowrite8(0xaa, ioaddr->nsect_addr);
1680 iowrite8(0x55, ioaddr->lbal_addr);
1682 iowrite8(0x55, ioaddr->nsect_addr);
1683 iowrite8(0xaa, ioaddr->lbal_addr);
1685 nsect = ioread8(ioaddr->nsect_addr);
1686 lbal = ioread8(ioaddr->lbal_addr);
1688 if ((nsect == 0x55) && (lbal == 0xaa))
1689 return 1; /* we found a device */
1691 return 0; /* nothing found */
1695 * ata_sff_dev_classify - Parse returned ATA device signature
1696 * @dev: ATA device to classify (starting at zero)
1697 * @present: device seems present
1698 * @r_err: Value of error register on completion
1700 * After an event -- SRST, E.D.D., or SATA COMRESET -- occurs,
1701 * an ATA/ATAPI-defined set of values is placed in the ATA
1702 * shadow registers, indicating the results of device detection
1705 * Select the ATA device, and read the values from the ATA shadow
1706 * registers. Then parse according to the Error register value,
1707 * and the spec-defined values examined by ata_dev_classify().
1713 * Device type - %ATA_DEV_ATA, %ATA_DEV_ATAPI or %ATA_DEV_NONE.
1715 unsigned int ata_sff_dev_classify(struct ata_device *dev, int present,
1718 struct ata_port *ap = dev->link->ap;
1719 struct ata_taskfile tf;
1723 ap->ops->sff_dev_select(ap, dev->devno);
1725 memset(&tf, 0, sizeof(tf));
1727 ap->ops->sff_tf_read(ap, &tf);
1732 /* see if device passed diags: continue and warn later */
1734 /* diagnostic fail : do nothing _YET_ */
1735 dev->horkage |= ATA_HORKAGE_DIAGNOSTIC;
1738 else if ((dev->devno == 0) && (err == 0x81))
1741 return ATA_DEV_NONE;
1743 /* determine if device is ATA or ATAPI */
1744 class = ata_dev_classify(&tf);
1746 if (class == ATA_DEV_UNKNOWN) {
1747 /* If the device failed diagnostic, it's likely to
1748 * have reported incorrect device signature too.
1749 * Assume ATA device if the device seems present but
1750 * device signature is invalid with diagnostic
1753 if (present && (dev->horkage & ATA_HORKAGE_DIAGNOSTIC))
1754 class = ATA_DEV_ATA;
1756 class = ATA_DEV_NONE;
1757 } else if ((class == ATA_DEV_ATA) &&
1758 (ap->ops->sff_check_status(ap) == 0))
1759 class = ATA_DEV_NONE;
1764 static int ata_bus_post_reset(struct ata_port *ap, unsigned int devmask,
1765 unsigned long deadline)
1767 struct ata_ioports *ioaddr = &ap->ioaddr;
1768 unsigned int dev0 = devmask & (1 << 0);
1769 unsigned int dev1 = devmask & (1 << 1);
1772 /* if device 0 was found in ata_devchk, wait for its
1776 rc = ata_sff_wait_ready(ap, deadline);
1784 /* if device 1 was found in ata_devchk, wait for register
1785 * access briefly, then wait for BSY to clear.
1790 ap->ops->sff_dev_select(ap, 1);
1792 /* Wait for register access. Some ATAPI devices fail
1793 * to set nsect/lbal after reset, so don't waste too
1794 * much time on it. We're gonna wait for !BSY anyway.
1796 for (i = 0; i < 2; i++) {
1799 nsect = ioread8(ioaddr->nsect_addr);
1800 lbal = ioread8(ioaddr->lbal_addr);
1801 if ((nsect == 1) && (lbal == 1))
1803 msleep(50); /* give drive a breather */
1806 rc = ata_sff_wait_ready(ap, deadline);
1814 /* is all this really necessary? */
1815 ap->ops->sff_dev_select(ap, 0);
1817 ap->ops->sff_dev_select(ap, 1);
1819 ap->ops->sff_dev_select(ap, 0);
1825 * ata_sff_wait_after_reset - wait before checking status after reset
1826 * @ap: port containing status register to be polled
1827 * @deadline: deadline jiffies for the operation
1829 * After reset, we need to pause a while before reading status.
1830 * Also, certain combinations of controller and device report 0xff
1831 * for some duration (e.g. until SATA PHY is up and running)
1832 * which is interpreted as an empty port in the ATA world. This
1833 * function also waits for such devices to get out of 0xff
1837 * Kernel thread context (may sleep).
1839 void ata_sff_wait_after_reset(struct ata_port *ap, unsigned long deadline)
1841 unsigned long until = jiffies + ATA_TMOUT_FF_WAIT;
1843 if (time_before(until, deadline))
1846 /* Spec mandates ">= 2ms" before checking status. We wait
1847 * 150ms, because that was the magic delay used for ATAPI
1848 * devices in Hale Landis's ATADRVR, for the period of time
1849 * between when the ATA command register is written, and then
1850 * status is checked. Because waiting for "a while" before
1851 * checking status is fine, post SRST, we perform this magic
1852 * delay here as well.
1854 * Old drivers/ide uses the 2mS rule and then waits for ready.
1858 /* Wait for 0xff to clear. Some SATA devices take a long time
1859 * to clear 0xff after reset. For example, HHD424020F7SV00
1860 * iVDR needs >= 800ms while Quantum GoVault needs even more
1863 * Note that some PATA controllers (pata_ali) explode if
1864 * status register is read more than once when there's no
1867 if (ap->flags & ATA_FLAG_SATA) {
1869 u8 status = ap->ops->sff_check_status(ap);
1871 if (status != 0xff || time_after(jiffies, deadline))
1879 static int ata_bus_softreset(struct ata_port *ap, unsigned int devmask,
1880 unsigned long deadline)
1882 struct ata_ioports *ioaddr = &ap->ioaddr;
1884 DPRINTK("ata%u: bus reset via SRST\n", ap->print_id);
1886 /* software reset. causes dev0 to be selected */
1887 iowrite8(ap->ctl, ioaddr->ctl_addr);
1888 udelay(20); /* FIXME: flush */
1889 iowrite8(ap->ctl | ATA_SRST, ioaddr->ctl_addr);
1890 udelay(20); /* FIXME: flush */
1891 iowrite8(ap->ctl, ioaddr->ctl_addr);
1893 /* wait a while before checking status */
1894 ata_sff_wait_after_reset(ap, deadline);
1896 /* Before we perform post reset processing we want to see if
1897 * the bus shows 0xFF because the odd clown forgets the D7
1898 * pulldown resistor.
1900 if (ap->ops->sff_check_status(ap) == 0xFF)
1903 return ata_bus_post_reset(ap, devmask, deadline);
1907 * ata_sff_softreset - reset host port via ATA SRST
1908 * @link: ATA link to reset
1909 * @classes: resulting classes of attached devices
1910 * @deadline: deadline jiffies for the operation
1912 * Reset host port using ATA SRST.
1915 * Kernel thread context (may sleep)
1918 * 0 on success, -errno otherwise.
1920 int ata_sff_softreset(struct ata_link *link, unsigned int *classes,
1921 unsigned long deadline)
1923 struct ata_port *ap = link->ap;
1924 unsigned int slave_possible = ap->flags & ATA_FLAG_SLAVE_POSS;
1925 unsigned int devmask = 0;
1931 if (ata_link_offline(link)) {
1932 classes[0] = ATA_DEV_NONE;
1936 /* determine if device 0/1 are present */
1937 if (ata_devchk(ap, 0))
1938 devmask |= (1 << 0);
1939 if (slave_possible && ata_devchk(ap, 1))
1940 devmask |= (1 << 1);
1942 /* select device 0 again */
1943 ap->ops->sff_dev_select(ap, 0);
1945 /* issue bus reset */
1946 DPRINTK("about to softreset, devmask=%x\n", devmask);
1947 rc = ata_bus_softreset(ap, devmask, deadline);
1948 /* if link is occupied, -ENODEV too is an error */
1949 if (rc && (rc != -ENODEV || sata_scr_valid(link))) {
1950 ata_link_printk(link, KERN_ERR, "SRST failed (errno=%d)\n", rc);
1954 /* determine by signature whether we have ATA or ATAPI devices */
1955 classes[0] = ata_sff_dev_classify(&link->device[0],
1956 devmask & (1 << 0), &err);
1957 if (slave_possible && err != 0x81)
1958 classes[1] = ata_sff_dev_classify(&link->device[1],
1959 devmask & (1 << 1), &err);
1962 DPRINTK("EXIT, classes[0]=%u [1]=%u\n", classes[0], classes[1]);
1967 * sata_sff_hardreset - reset host port via SATA phy reset
1968 * @link: link to reset
1969 * @class: resulting class of attached device
1970 * @deadline: deadline jiffies for the operation
1972 * SATA phy-reset host port using DET bits of SControl register,
1973 * wait for !BSY and classify the attached device.
1976 * Kernel thread context (may sleep)
1979 * 0 on success, -errno otherwise.
1981 int sata_sff_hardreset(struct ata_link *link, unsigned int *class,
1982 unsigned long deadline)
1984 struct ata_port *ap = link->ap;
1985 const unsigned long *timing = sata_ehc_deb_timing(&link->eh_context);
1991 rc = sata_link_hardreset(link, timing, deadline);
1993 ata_link_printk(link, KERN_ERR,
1994 "COMRESET failed (errno=%d)\n", rc);
1998 /* TODO: phy layer with polling, timeouts, etc. */
1999 if (ata_link_offline(link)) {
2000 *class = ATA_DEV_NONE;
2001 DPRINTK("EXIT, link offline\n");
2005 /* wait a while before checking status */
2006 ata_sff_wait_after_reset(ap, deadline);
2008 /* If PMP is supported, we have to do follow-up SRST. Note
2009 * that some PMPs don't send D2H Reg FIS after hardreset at
2010 * all if the first port is empty. Wait for it just for a
2011 * second and request follow-up SRST.
2013 if (ap->flags & ATA_FLAG_PMP) {
2014 ata_sff_wait_ready(ap, jiffies + HZ);
2018 rc = ata_sff_wait_ready(ap, deadline);
2019 /* link occupied, -ENODEV too is an error */
2021 ata_link_printk(link, KERN_ERR,
2022 "COMRESET failed (errno=%d)\n", rc);
2026 ap->ops->sff_dev_select(ap, 0); /* probably unnecessary */
2028 *class = ata_sff_dev_classify(link->device, 1, NULL);
2030 DPRINTK("EXIT, class=%u\n", *class);
2035 * ata_sff_error_handler - Stock error handler for BMDMA controller
2036 * @ap: port to handle error for
2038 * Stock error handler for SFF controller. It can handle both
2039 * PATA and SATA controllers. Many controllers should be able to
2040 * use this EH as-is or with some added handling before and
2044 * Kernel thread context (may sleep)
2046 void ata_sff_error_handler(struct ata_port *ap)
2048 ata_reset_fn_t softreset = ap->ops->softreset;
2049 ata_reset_fn_t hardreset = ap->ops->hardreset;
2050 struct ata_queued_cmd *qc;
2051 unsigned long flags;
2054 qc = __ata_qc_from_tag(ap, ap->link.active_tag);
2055 if (qc && !(qc->flags & ATA_QCFLAG_FAILED))
2058 /* reset PIO HSM and stop DMA engine */
2059 spin_lock_irqsave(ap->lock, flags);
2061 ap->hsm_task_state = HSM_ST_IDLE;
2063 if (ap->ioaddr.bmdma_addr &&
2064 qc && (qc->tf.protocol == ATA_PROT_DMA ||
2065 qc->tf.protocol == ATAPI_PROT_DMA)) {
2068 host_stat = ap->ops->bmdma_status(ap);
2070 /* BMDMA controllers indicate host bus error by
2071 * setting DMA_ERR bit and timing out. As it wasn't
2072 * really a timeout event, adjust error mask and
2073 * cancel frozen state.
2075 if (qc->err_mask == AC_ERR_TIMEOUT && (host_stat & ATA_DMA_ERR)) {
2076 qc->err_mask = AC_ERR_HOST_BUS;
2080 ap->ops->bmdma_stop(qc);
2083 ata_sff_altstatus(ap);
2084 ap->ops->sff_check_status(ap);
2085 ap->ops->sff_irq_clear(ap);
2087 spin_unlock_irqrestore(ap->lock, flags);
2090 ata_eh_thaw_port(ap);
2092 /* PIO and DMA engines have been stopped, perform recovery */
2094 /* ata_sff_softreset and sata_sff_hardreset are inherited to
2095 * all SFF drivers from ata_sff_port_ops. Ignore softreset if
2096 * ctl isn't accessible. Ignore hardreset if SCR access isn't
2099 if (softreset == ata_sff_softreset && !ap->ioaddr.ctl_addr)
2101 if (hardreset == sata_sff_hardreset && !sata_scr_valid(&ap->link))
2104 ata_do_eh(ap, ap->ops->prereset, softreset, hardreset,
2105 ap->ops->postreset);
2109 * ata_sff_post_internal_cmd - Stock post_internal_cmd for SFF controller
2110 * @qc: internal command to clean up
2113 * Kernel thread context (may sleep)
2115 void ata_sff_post_internal_cmd(struct ata_queued_cmd *qc)
2117 if (qc->ap->ioaddr.bmdma_addr)
2122 * ata_sff_port_start - Set port up for dma.
2123 * @ap: Port to initialize
2125 * Called just after data structures for each port are
2126 * initialized. Allocates space for PRD table if the device
2127 * is DMA capable SFF.
2129 * May be used as the port_start() entry in ata_port_operations.
2132 * Inherited from caller.
2134 int ata_sff_port_start(struct ata_port *ap)
2136 if (ap->ioaddr.bmdma_addr)
2137 return ata_port_start(ap);
2142 * ata_sff_std_ports - initialize ioaddr with standard port offsets.
2143 * @ioaddr: IO address structure to be initialized
2145 * Utility function which initializes data_addr, error_addr,
2146 * feature_addr, nsect_addr, lbal_addr, lbam_addr, lbah_addr,
2147 * device_addr, status_addr, and command_addr to standard offsets
2148 * relative to cmd_addr.
2150 * Does not set ctl_addr, altstatus_addr, bmdma_addr, or scr_addr.
2152 void ata_sff_std_ports(struct ata_ioports *ioaddr)
2154 ioaddr->data_addr = ioaddr->cmd_addr + ATA_REG_DATA;
2155 ioaddr->error_addr = ioaddr->cmd_addr + ATA_REG_ERR;
2156 ioaddr->feature_addr = ioaddr->cmd_addr + ATA_REG_FEATURE;
2157 ioaddr->nsect_addr = ioaddr->cmd_addr + ATA_REG_NSECT;
2158 ioaddr->lbal_addr = ioaddr->cmd_addr + ATA_REG_LBAL;
2159 ioaddr->lbam_addr = ioaddr->cmd_addr + ATA_REG_LBAM;
2160 ioaddr->lbah_addr = ioaddr->cmd_addr + ATA_REG_LBAH;
2161 ioaddr->device_addr = ioaddr->cmd_addr + ATA_REG_DEVICE;
2162 ioaddr->status_addr = ioaddr->cmd_addr + ATA_REG_STATUS;
2163 ioaddr->command_addr = ioaddr->cmd_addr + ATA_REG_CMD;
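/*
 * Illustrative sketch (hypothetical values): a driver that has iomapped its
 * register blocks sets cmd_addr (and usually ctl_addr) itself and lets this
 * helper derive the remaining taskfile addresses, mirroring what
 * ata_pci_sff_init_host() does below.  cmd_base and ctl_base are assumed,
 * driver-provided __iomem pointers.
 *
 *	ap->ioaddr.cmd_addr = cmd_base;
 *	ap->ioaddr.altstatus_addr =
 *	ap->ioaddr.ctl_addr = ctl_base + ATA_PCI_CTL_OFS;
 *	ata_sff_std_ports(&ap->ioaddr);
 */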
2166 unsigned long ata_bmdma_mode_filter(struct ata_device *adev,
2167 unsigned long xfer_mask)
2169 /* Filter out DMA modes if the device has been configured by
2170 the BIOS as PIO only */
2172 if (adev->link->ap->ioaddr.bmdma_addr == NULL)
2173 xfer_mask &= ~(ATA_MASK_MWDMA | ATA_MASK_UDMA);
2178 * ata_bmdma_setup - Set up PCI IDE BMDMA transaction
2179 * @qc: Info associated with this ATA transaction.
2182 * spin_lock_irqsave(host lock)
2184 void ata_bmdma_setup(struct ata_queued_cmd *qc)
2186 struct ata_port *ap = qc->ap;
2187 unsigned int rw = (qc->tf.flags & ATA_TFLAG_WRITE);
2190 /* load PRD table addr. */
2191 mb(); /* make sure PRD table writes are visible to controller */
2192 iowrite32(ap->prd_dma, ap->ioaddr.bmdma_addr + ATA_DMA_TABLE_OFS);
2194 /* specify data direction, triple-check start bit is clear */
2195 dmactl = ioread8(ap->ioaddr.bmdma_addr + ATA_DMA_CMD);
2196 dmactl &= ~(ATA_DMA_WR | ATA_DMA_START);
2198 dmactl |= ATA_DMA_WR;
2199 iowrite8(dmactl, ap->ioaddr.bmdma_addr + ATA_DMA_CMD);
2201 /* issue r/w command */
2202 ap->ops->sff_exec_command(ap, &qc->tf);
2206 * ata_bmdma_start - Start a PCI IDE BMDMA transaction
2207 * @qc: Info associated with this ATA transaction.
2210 * spin_lock_irqsave(host lock)
2212 void ata_bmdma_start(struct ata_queued_cmd *qc)
2214 struct ata_port *ap = qc->ap;
2217 /* start host DMA transaction */
2218 dmactl = ioread8(ap->ioaddr.bmdma_addr + ATA_DMA_CMD);
2219 iowrite8(dmactl | ATA_DMA_START, ap->ioaddr.bmdma_addr + ATA_DMA_CMD);
2221 /* Strictly, one may wish to issue an ioread8() here, to
2222 * flush the mmio write. However, control also passes
2223 * to the hardware at this point, and it will interrupt
2224 * us when we are to resume control. So, in effect,
2225 * we don't care when the mmio write flushes.
2226 * Further, a read of the DMA status register _immediately_
2227 * following the write may not be what certain flaky hardware
2228 * is expecting, so I think it is best to not add a readb()
2229 * without first testing all the MMIO ATA cards/mobos.
2230 * Or maybe I'm just being paranoid.
2232 * FIXME: The posting of this write means I/O starts are
2233 * unnecessarily delayed for MMIO
2238 * ata_bmdma_stop - Stop PCI IDE BMDMA transfer
2239 * @qc: Command we are ending DMA for
2241 * Clears the ATA_DMA_START flag in the dma control register
2243 * May be used as the bmdma_stop() entry in ata_port_operations.
2246 * spin_lock_irqsave(host lock)
2248 void ata_bmdma_stop(struct ata_queued_cmd *qc)
2250 struct ata_port *ap = qc->ap;
2251 void __iomem *mmio = ap->ioaddr.bmdma_addr;
2253 /* clear start/stop bit */
2254 iowrite8(ioread8(mmio + ATA_DMA_CMD) & ~ATA_DMA_START,
2255 mmio + ATA_DMA_CMD);
2257 /* one-PIO-cycle guaranteed wait, per spec, for HDMA1:0 transition */
2258 ata_sff_altstatus(ap); /* dummy read */
2262 * ata_bmdma_status - Read PCI IDE BMDMA status
2263 * @ap: Port associated with this ATA transaction.
2265 * Read and return BMDMA status register.
2267 * May be used as the bmdma_status() entry in ata_port_operations.
2270 * spin_lock_irqsave(host lock)
2272 u8 ata_bmdma_status(struct ata_port *ap)
2274 return ioread8(ap->ioaddr.bmdma_addr + ATA_DMA_STATUS);
2278 * ata_bus_reset - reset host port and associated ATA channel
2279 * @ap: port to reset
2281 * This is typically the first time we actually start issuing
2282 * commands to the ATA channel. We wait for BSY to clear, then
2283 * issue EXECUTE DEVICE DIAGNOSTIC command, polling for its
2284 * result. Determine what devices, if any, are on the channel
2285 * by looking at the device 0/1 error register. Look at the signature
2286 * stored in each device's taskfile registers, to determine if
2287 * the device is ATA or ATAPI.
2290 * PCI/etc. bus probe sem.
2291 * Obtains host lock.
2294 * Sets ATA_FLAG_DISABLED if bus reset fails.
2297 * This function is only for drivers which still use old EH and
2298 * will be removed soon.
2300 void ata_bus_reset(struct ata_port *ap)
2302 struct ata_device *device = ap->link.device;
2303 struct ata_ioports *ioaddr = &ap->ioaddr;
2304 unsigned int slave_possible = ap->flags & ATA_FLAG_SLAVE_POSS;
2306 unsigned int dev0, dev1 = 0, devmask = 0;
2309 DPRINTK("ENTER, host %u, port %u\n", ap->print_id, ap->port_no);
2311 /* determine if device 0/1 are present */
2312 if (ap->flags & ATA_FLAG_SATA_RESET)
2315 dev0 = ata_devchk(ap, 0);
2317 dev1 = ata_devchk(ap, 1);
2321 devmask |= (1 << 0);
2323 devmask |= (1 << 1);
2325 /* select device 0 again */
2326 ap->ops->sff_dev_select(ap, 0);
2328 /* issue bus reset */
2329 if (ap->flags & ATA_FLAG_SRST) {
2330 rc = ata_bus_softreset(ap, devmask, jiffies + 40 * HZ);
2331 if (rc && rc != -ENODEV)
2336 * determine by signature whether we have ATA or ATAPI devices
2338 device[0].class = ata_sff_dev_classify(&device[0], dev0, &err);
2339 if ((slave_possible) && (err != 0x81))
2340 device[1].class = ata_sff_dev_classify(&device[1], dev1, &err);
2342 /* is double-select really necessary? */
2343 if (device[1].class != ATA_DEV_NONE)
2344 ap->ops->sff_dev_select(ap, 1);
2345 if (device[0].class != ATA_DEV_NONE)
2346 ap->ops->sff_dev_select(ap, 0);
2348 /* if no devices were detected, disable this port */
2349 if ((device[0].class == ATA_DEV_NONE) &&
2350 (device[1].class == ATA_DEV_NONE))
2353 if (ap->flags & (ATA_FLAG_SATA_RESET | ATA_FLAG_SRST)) {
2354 /* set up device control for ATA_FLAG_SATA_RESET */
2355 iowrite8(ap->ctl, ioaddr->ctl_addr);
2362 ata_port_printk(ap, KERN_ERR, "disabling port\n");
2363 ata_port_disable(ap);
2371 * ata_pci_bmdma_clear_simplex - attempt to kick device out of simplex
2374 * Some PCI ATA devices report simplex mode but in fact can be told to
2375 * enter non simplex mode. This implements the necessary logic to
2376 * perform the task on such devices. Calling it on other devices will
2377 * have -undefined- behaviour.
2379 int ata_pci_bmdma_clear_simplex(struct pci_dev *pdev)
2381 unsigned long bmdma = pci_resource_start(pdev, 4);
u8 simplex;
2387 simplex = inb(bmdma + 0x02);
2388 outb(simplex & 0x60, bmdma + 0x02);
2389 simplex = inb(bmdma + 0x02);
if (simplex & 0x80)
return -EOPNOTSUPP;
return 0;
}
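/*
 * Illustrative sketch, not part of the original file: a hypothetical probe
 * helper that tries to kick the controller out of simplex mode and only
 * warns if that fails, since a simplex-limited host still works.
 */
static void foo_try_clear_simplex(struct pci_dev *pdev)
{
	if (ata_pci_bmdma_clear_simplex(pdev))
		dev_printk(KERN_WARNING, &pdev->dev,
			   "simplex mode could not be cleared, DMA is "
			   "limited to one channel at a time\n");
}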
2396 * ata_pci_bmdma_init - acquire PCI BMDMA resources and init ATA host
2397 * @host: target ATA host
2399 * Acquire PCI BMDMA resources and initialize @host accordingly.
2402 * Inherited from calling layer (may sleep).
2405 * 0 on success, -errno otherwise.
2407 int ata_pci_bmdma_init(struct ata_host *host)
2409 struct device *gdev = host->dev;
2410 struct pci_dev *pdev = to_pci_dev(gdev);
int i, rc;
2413 /* No BAR4 allocation: No DMA */
2414 if (pci_resource_start(pdev, 4) == 0)
2417 /* TODO: If we get no DMA mask we should fall back to PIO */
2418 rc = pci_set_dma_mask(pdev, ATA_DMA_MASK);
2421 rc = pci_set_consistent_dma_mask(pdev, ATA_DMA_MASK);
2425 /* request and iomap DMA region */
2426 rc = pcim_iomap_regions(pdev, 1 << 4, dev_driver_string(gdev));
2428 dev_printk(KERN_ERR, gdev, "failed to request/iomap BAR4\n");
2431 host->iomap = pcim_iomap_table(pdev);
2433 for (i = 0; i < 2; i++) {
2434 struct ata_port *ap = host->ports[i];
2435 void __iomem *bmdma = host->iomap[4] + 8 * i;
2437 if (ata_port_is_dummy(ap))
2440 ap->ioaddr.bmdma_addr = bmdma;
2441 if ((!(ap->flags & ATA_FLAG_IGN_SIMPLEX)) &&
2442 (ioread8(bmdma + 2) & 0x80))
2443 host->flags |= ATA_HOST_SIMPLEX;
2445 ata_port_desc(ap, "bmdma 0x%llx",
2446 (unsigned long long)pci_resource_start(pdev, 4) + 8 * i);
2452 static int ata_resources_present(struct pci_dev *pdev, int port)
2456 /* Check the PCI resources for this channel are enabled */
2458 for (i = 0; i < 2; i ++) {
2459 if (pci_resource_start(pdev, port + i) == 0 ||
2460 pci_resource_len(pdev, port + i) == 0)
2467 * ata_pci_sff_init_host - acquire native PCI ATA resources and init host
2468 * @host: target ATA host
2470 * Acquire native PCI ATA resources for @host and initialize the
2471 * first two ports of @host accordingly. Ports marked dummy are
2472 * skipped and allocation failure makes the port dummy.
2474 * Note that native PCI resources are valid even for legacy hosts
2475 * as we fix up pdev resources array early in boot, so this
2476 * function can be used for both native and legacy SFF hosts.
2479 * Inherited from calling layer (may sleep).
2482 * 0 if at least one port is initialized, -ENODEV if no port is available.
2485 int ata_pci_sff_init_host(struct ata_host *host)
2487 struct device *gdev = host->dev;
2488 struct pci_dev *pdev = to_pci_dev(gdev);
2489 unsigned int mask = 0;
int i, rc;
2492 /* request, iomap BARs and init port addresses accordingly */
2493 for (i = 0; i < 2; i++) {
2494 struct ata_port *ap = host->ports[i];
int base = i * 2;
2496 void __iomem * const *iomap;
2498 if (ata_port_is_dummy(ap))
2501 /* Discard disabled ports. Some controllers show
2502 * their unused channels this way. Disabled ports are made dummy.
2505 if (!ata_resources_present(pdev, i)) {
2506 ap->ops = &ata_dummy_port_ops;
2510 rc = pcim_iomap_regions(pdev, 0x3 << base,
2511 dev_driver_string(gdev));
2513 dev_printk(KERN_WARNING, gdev,
2514 "failed to request/iomap BARs for port %d "
2515 "(errno=%d)\n", i, rc);
2517 pcim_pin_device(pdev);
2518 ap->ops = &ata_dummy_port_ops;
2521 host->iomap = iomap = pcim_iomap_table(pdev);
2523 ap->ioaddr.cmd_addr = iomap[base];
2524 ap->ioaddr.altstatus_addr =
2525 ap->ioaddr.ctl_addr = (void __iomem *)
2526 ((unsigned long)iomap[base + 1] | ATA_PCI_CTL_OFS);
2527 ata_sff_std_ports(&ap->ioaddr);
2529 ata_port_desc(ap, "cmd 0x%llx ctl 0x%llx",
2530 (unsigned long long)pci_resource_start(pdev, base),
2531 (unsigned long long)pci_resource_start(pdev, base + 1));
2537 dev_printk(KERN_ERR, gdev, "no available native port\n");
2545 * ata_pci_sff_prepare_host - helper to prepare native PCI ATA host
2546 * @pdev: target PCI device
2547 * @ppi: array of port_info, must be enough for two ports
2548 * @r_host: out argument for the initialized ATA host
2550 * Helper to allocate ATA host for @pdev, acquire all native PCI
2551 * resources and initialize it accordingly in one go.
2554 * Inherited from calling layer (may sleep).
2557 * 0 on success, -errno otherwise.
2559 int ata_pci_sff_prepare_host(struct pci_dev *pdev,
2560 const struct ata_port_info * const * ppi,
2561 struct ata_host **r_host)
2563 struct ata_host *host;
int rc;
2566 if (!devres_open_group(&pdev->dev, NULL, GFP_KERNEL))
2569 host = ata_host_alloc_pinfo(&pdev->dev, ppi, 2);
2571 dev_printk(KERN_ERR, &pdev->dev,
2572 "failed to allocate ATA host\n");
2577 rc = ata_pci_sff_init_host(host);
2581 /* init DMA related stuff */
2582 rc = ata_pci_bmdma_init(host);
2586 devres_remove_group(&pdev->dev, NULL);
2591 /* This is necessary because PCI and iomap resources are
2592 * merged and releasing the top group won't release the
2593 * acquired resources if some of those have been acquired
2594 * before entering this function.
2596 pcim_iounmap_regions(pdev, 0xf);
2598 devres_release_group(&pdev->dev, NULL);
return rc;
}
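/*
 * Illustrative sketch, not part of the original file: the prepare/activate
 * split lets a probe routine touch the half-initialized host before any
 * IRQ is requested.  Everything prefixed foo_ (the SHT, the port_info and
 * the probe function) is hypothetical.
 */
static struct scsi_host_template foo_sht = {
	ATA_BMDMA_SHT("foo_pata"),
};

static int foo_init_one(struct pci_dev *pdev, const struct pci_device_id *id)
{
	static const struct ata_port_info info = {
		.flags		= ATA_FLAG_SLAVE_POSS,
		.pio_mask	= 0x1f,		/* PIO 0-4 */
		.mwdma_mask	= 0x07,		/* MWDMA 0-2 */
		.udma_mask	= ATA_UDMA5,
		.port_ops	= &ata_bmdma_port_ops,
	};
	const struct ata_port_info *ppi[] = { &info, &info };
	struct ata_host *host;
	int rc;

	rc = pcim_enable_device(pdev);
	if (rc)
		return rc;

	rc = ata_pci_sff_prepare_host(pdev, ppi, &host);
	if (rc)
		return rc;

	/* chip-specific setup can go here, e.g. stashing driver data or
	 * fixing up host->ports[]->ioaddr, before interrupts are wired up */
	host->private_data = (void *)id->driver_data;

	pci_set_master(pdev);
	return ata_pci_sff_activate_host(host, ata_sff_interrupt, &foo_sht);
}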
2603 * ata_pci_sff_activate_host - start SFF host, request IRQ and register it
2604 * @host: target SFF ATA host
2605 * @irq_handler: irq_handler used when requesting IRQ(s)
2606 * @sht: scsi_host_template to use when registering the host
2608 * This is the counterpart of ata_host_activate() for SFF ATA
2609 * hosts. This separate helper is necessary because SFF hosts
2610 * use two separate interrupts in legacy mode.
2613 * Inherited from calling layer (may sleep).
2616 * 0 on success, -errno otherwise.
2618 int ata_pci_sff_activate_host(struct ata_host *host,
2619 irq_handler_t irq_handler,
2620 struct scsi_host_template *sht)
2622 struct device *dev = host->dev;
2623 struct pci_dev *pdev = to_pci_dev(dev);
2624 const char *drv_name = dev_driver_string(host->dev);
2625 int legacy_mode = 0, rc;
2627 rc = ata_host_start(host);
2631 if ((pdev->class >> 8) == PCI_CLASS_STORAGE_IDE) {
u8 tmp8, mask;
2634 /* TODO: What if one channel is in native mode ... */
2635 pci_read_config_byte(pdev, PCI_CLASS_PROG, &tmp8);
2636 mask = (1 << 2) | (1 << 0);
2637 if ((tmp8 & mask) != mask)
legacy_mode = 1;
2639 #if defined(CONFIG_NO_ATA_LEGACY)
2640 /* Some platforms with PCI limits cannot address compat
2641 port space. In that case we punt if their firmware has
2642 left a device in compatibility mode */
2644 printk(KERN_ERR "ata: Compatibility mode ATA is not supported on this platform, skipping.\n");
2650 if (!devres_open_group(dev, NULL, GFP_KERNEL))
2653 if (!legacy_mode && pdev->irq) {
2654 rc = devm_request_irq(dev, pdev->irq, irq_handler,
2655 IRQF_SHARED, drv_name, host);
2659 ata_port_desc(host->ports[0], "irq %d", pdev->irq);
2660 ata_port_desc(host->ports[1], "irq %d", pdev->irq);
2661 } else if (legacy_mode) {
2662 if (!ata_port_is_dummy(host->ports[0])) {
2663 rc = devm_request_irq(dev, ATA_PRIMARY_IRQ(pdev),
2664 irq_handler, IRQF_SHARED,
2669 ata_port_desc(host->ports[0], "irq %d",
2670 ATA_PRIMARY_IRQ(pdev));
2673 if (!ata_port_is_dummy(host->ports[1])) {
2674 rc = devm_request_irq(dev, ATA_SECONDARY_IRQ(pdev),
2675 irq_handler, IRQF_SHARED,
2680 ata_port_desc(host->ports[1], "irq %d",
2681 ATA_SECONDARY_IRQ(pdev));
2685 rc = ata_host_register(host, sht);
2688 devres_remove_group(dev, NULL);
2690 devres_release_group(dev, NULL);
2696 * ata_pci_sff_init_one - Initialize/register PCI IDE host controller
2697 * @pdev: Controller to be initialized
2698 * @ppi: array of port_info, must be enough for two ports
2699 * @sht: scsi_host_template to use when registering the host
2700 * @host_priv: host private_data
2702 * This is a helper function which can be called from a driver's
2703 * xxx_init_one() probe function if the hardware uses traditional
2704 * IDE taskfile registers.
2706 * This function calls pci_enable_device(), reserves its register
2707 * regions, sets the dma mask, enables bus master mode, and then prepares
* and activates the host via ata_pci_sff_prepare_host() and
* ata_pci_sff_activate_host().
* ASSUMPTION:
2711 * Nobody makes a single channel controller that appears solely as
2712 * the secondary legacy port on PCI.
2715 * Inherited from PCI layer (may sleep).
2718 * Zero on success, negative errno value on error.
2720 int ata_pci_sff_init_one(struct pci_dev *pdev,
2721 const struct ata_port_info * const * ppi,
2722 struct scsi_host_template *sht, void *host_priv)
2724 struct device *dev = &pdev->dev;
2725 const struct ata_port_info *pi = NULL;
2726 struct ata_host *host = NULL;
int i, rc;
2731 /* look up the first valid port_info */
2732 for (i = 0; i < 2 && ppi[i]; i++) {
2733 if (ppi[i]->port_ops != &ata_dummy_port_ops) {
2740 dev_printk(KERN_ERR, &pdev->dev,
2741 "no valid port_info specified\n");
2745 if (!devres_open_group(dev, NULL, GFP_KERNEL))
2748 rc = pcim_enable_device(pdev);
2752 /* prepare and activate SFF host */
2753 rc = ata_pci_sff_prepare_host(pdev, ppi, &host);
2756 host->private_data = host_priv;
2758 pci_set_master(pdev);
2759 rc = ata_pci_sff_activate_host(host, ata_sff_interrupt, sht);
2762 devres_remove_group(&pdev->dev, NULL);
2764 devres_release_group(&pdev->dev, NULL);
return rc;
}
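/*
 * Illustrative sketch, not part of the original file: the simplest user of
 * ata_pci_sff_init_one() is a driver whose probe just forwards to it.  The
 * foo_simple_* names and the PCI device ID are hypothetical placeholders.
 */
static struct scsi_host_template foo_simple_sht = {
	ATA_BMDMA_SHT("foo_simple"),
};

static const struct ata_port_info foo_simple_port_info = {
	.flags		= ATA_FLAG_SLAVE_POSS,
	.pio_mask	= 0x1f,
	.mwdma_mask	= 0x07,
	.udma_mask	= ATA_UDMA4,
	.port_ops	= &ata_bmdma_port_ops,
};

static int foo_simple_init_one(struct pci_dev *pdev,
			       const struct pci_device_id *id)
{
	const struct ata_port_info *ppi[] = { &foo_simple_port_info,
					      &foo_simple_port_info };

	return ata_pci_sff_init_one(pdev, ppi, &foo_simple_sht, NULL);
}

static const struct pci_device_id foo_simple_ids[] = {
	{ PCI_VDEVICE(INTEL, 0xffff) },		/* hypothetical device ID */
	{ }
};

static struct pci_driver foo_simple_pci_driver = {
	.name		= "foo_simple",
	.id_table	= foo_simple_ids,
	.probe		= foo_simple_init_one,
	.remove		= ata_pci_remove_one,
};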
2769 #endif /* CONFIG_PCI */
2771 EXPORT_SYMBOL_GPL(ata_sff_port_ops);
2772 EXPORT_SYMBOL_GPL(ata_bmdma_port_ops);
2773 EXPORT_SYMBOL_GPL(ata_sff_qc_prep);
2774 EXPORT_SYMBOL_GPL(ata_sff_dumb_qc_prep);
2775 EXPORT_SYMBOL_GPL(ata_sff_dev_select);
2776 EXPORT_SYMBOL_GPL(ata_sff_check_status);
2777 EXPORT_SYMBOL_GPL(ata_sff_altstatus);
2778 EXPORT_SYMBOL_GPL(ata_sff_busy_sleep);
2779 EXPORT_SYMBOL_GPL(ata_sff_wait_ready);
2780 EXPORT_SYMBOL_GPL(ata_sff_tf_load);
2781 EXPORT_SYMBOL_GPL(ata_sff_tf_read);
2782 EXPORT_SYMBOL_GPL(ata_sff_exec_command);
2783 EXPORT_SYMBOL_GPL(ata_sff_data_xfer);
2784 EXPORT_SYMBOL_GPL(ata_sff_data_xfer_noirq);
2785 EXPORT_SYMBOL_GPL(ata_sff_irq_on);
2786 EXPORT_SYMBOL_GPL(ata_sff_irq_clear);
2787 EXPORT_SYMBOL_GPL(ata_sff_hsm_move);
2788 EXPORT_SYMBOL_GPL(ata_sff_qc_issue);
2789 EXPORT_SYMBOL_GPL(ata_sff_host_intr);
2790 EXPORT_SYMBOL_GPL(ata_sff_interrupt);
2791 EXPORT_SYMBOL_GPL(ata_sff_freeze);
2792 EXPORT_SYMBOL_GPL(ata_sff_thaw);
2793 EXPORT_SYMBOL_GPL(ata_sff_prereset);
2794 EXPORT_SYMBOL_GPL(ata_sff_dev_classify);
2795 EXPORT_SYMBOL_GPL(ata_sff_wait_after_reset);
2796 EXPORT_SYMBOL_GPL(ata_sff_softreset);
2797 EXPORT_SYMBOL_GPL(sata_sff_hardreset);
2798 EXPORT_SYMBOL_GPL(ata_sff_postreset);
2799 EXPORT_SYMBOL_GPL(ata_sff_error_handler);
2800 EXPORT_SYMBOL_GPL(ata_sff_post_internal_cmd);
2801 EXPORT_SYMBOL_GPL(ata_sff_port_start);
2802 EXPORT_SYMBOL_GPL(ata_sff_std_ports);
2803 EXPORT_SYMBOL_GPL(ata_bmdma_mode_filter);
2804 EXPORT_SYMBOL_GPL(ata_bmdma_setup);
2805 EXPORT_SYMBOL_GPL(ata_bmdma_start);
2806 EXPORT_SYMBOL_GPL(ata_bmdma_stop);
2807 EXPORT_SYMBOL_GPL(ata_bmdma_status);
2808 EXPORT_SYMBOL_GPL(ata_bus_reset);
#ifdef CONFIG_PCI
2810 EXPORT_SYMBOL_GPL(ata_pci_bmdma_clear_simplex);
2811 EXPORT_SYMBOL_GPL(ata_pci_bmdma_init);
2812 EXPORT_SYMBOL_GPL(ata_pci_sff_init_host);
2813 EXPORT_SYMBOL_GPL(ata_pci_sff_prepare_host);
2814 EXPORT_SYMBOL_GPL(ata_pci_sff_activate_host);
2815 EXPORT_SYMBOL_GPL(ata_pci_sff_init_one);
2816 #endif /* CONFIG_PCI */