2 * libata-core.c - helper library for ATA
4 * Maintained by: Jeff Garzik <jgarzik@pobox.com>
5 * Please ALWAYS copy linux-ide@vger.kernel.org
8 * Copyright 2003-2004 Red Hat, Inc. All rights reserved.
9 * Copyright 2003-2004 Jeff Garzik
12 * This program is free software; you can redistribute it and/or modify
13 * it under the terms of the GNU General Public License as published by
14 * the Free Software Foundation; either version 2, or (at your option)
17 * This program is distributed in the hope that it will be useful,
18 * but WITHOUT ANY WARRANTY; without even the implied warranty of
19 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
20 * GNU General Public License for more details.
22 * You should have received a copy of the GNU General Public License
23 * along with this program; see the file COPYING. If not, write to
24 * the Free Software Foundation, 675 Mass Ave, Cambridge, MA 02139, USA.
27 * libata documentation is available via 'make {ps|pdf}docs',
28 * as Documentation/DocBook/libata.*
30 * Hardware documentation available from http://www.t13.org/ and
31 * http://www.sata-io.org/
35 #include <linux/config.h>
36 #include <linux/kernel.h>
37 #include <linux/module.h>
38 #include <linux/pci.h>
39 #include <linux/init.h>
40 #include <linux/list.h>
42 #include <linux/highmem.h>
43 #include <linux/spinlock.h>
44 #include <linux/blkdev.h>
45 #include <linux/delay.h>
46 #include <linux/timer.h>
47 #include <linux/interrupt.h>
48 #include <linux/completion.h>
49 #include <linux/suspend.h>
50 #include <linux/workqueue.h>
51 #include <linux/jiffies.h>
52 #include <linux/scatterlist.h>
53 #include <scsi/scsi.h>
54 #include "scsi_priv.h"
55 #include <scsi/scsi_cmnd.h>
56 #include <scsi/scsi_host.h>
57 #include <linux/libata.h>
59 #include <asm/semaphore.h>
60 #include <asm/byteorder.h>
64 /* debounce timing parameters in msecs { interval, duration, timeout } */
65 const unsigned long sata_deb_timing_boot[] = { 5, 100, 2000 };
66 const unsigned long sata_deb_timing_eh[] = { 25, 500, 2000 };
67 const unsigned long sata_deb_timing_before_fsrst[] = { 100, 2000, 5000 };
69 static unsigned int ata_dev_init_params(struct ata_device *dev,
70 u16 heads, u16 sectors);
71 static unsigned int ata_dev_set_xfermode(struct ata_device *dev);
72 static void ata_dev_xfermask(struct ata_device *dev);
74 static unsigned int ata_unique_id = 1;
75 static struct workqueue_struct *ata_wq;
77 struct workqueue_struct *ata_aux_wq;
79 int atapi_enabled = 1;
80 module_param(atapi_enabled, int, 0444);
81 MODULE_PARM_DESC(atapi_enabled, "Enable discovery of ATAPI devices (0=off, 1=on)");
84 module_param(atapi_dmadir, int, 0444);
85 MODULE_PARM_DESC(atapi_dmadir, "Enable ATAPI DMADIR bridge support (0=off, 1=on)");
88 module_param_named(fua, libata_fua, int, 0444);
89 MODULE_PARM_DESC(fua, "FUA support (0=off, 1=on)");
91 MODULE_AUTHOR("Jeff Garzik");
92 MODULE_DESCRIPTION("Library module for ATA devices");
93 MODULE_LICENSE("GPL");
94 MODULE_VERSION(DRV_VERSION);
98 * ata_tf_to_fis - Convert ATA taskfile to SATA FIS structure
99 * @tf: Taskfile to convert
100 * @fis: Buffer into which data will be output
101 * @pmp: Port multiplier port
103 * Converts a standard ATA taskfile to a Serial ATA
104 * FIS structure (Register - Host to Device).
107 * Inherited from caller.
110 void ata_tf_to_fis(const struct ata_taskfile *tf, u8 *fis, u8 pmp)
112 fis[0] = 0x27; /* Register - Host to Device FIS */
113 fis[1] = (pmp & 0xf) | (1 << 7); /* Port multiplier number,
114 bit 7 indicates Command FIS */
115 fis[2] = tf->command;
116 fis[3] = tf->feature;
123 fis[8] = tf->hob_lbal;
124 fis[9] = tf->hob_lbam;
125 fis[10] = tf->hob_lbah;
126 fis[11] = tf->hob_feature;
129 fis[13] = tf->hob_nsect;
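/*
 * Illustrative sketch (not part of the original file): a SATA low-level
 * driver typically calls ata_tf_to_fis() to build the 20-byte command
 * FIS directly into its DMA command table.  The names pp and cmd_tbl
 * below are hypothetical and serve only as an example:
 *
 *	u8 *cmd_tbl = pp->cmd_tbl;		// per-port command FIS area
 *	ata_tf_to_fis(&qc->tf, cmd_tbl, 0);	// pmp 0: no port multiplier
 */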
140 * ata_tf_from_fis - Convert SATA FIS to ATA taskfile
141 * @fis: Buffer from which data will be input
142 * @tf: Taskfile to output
144 * Converts a serial ATA FIS structure to a standard ATA taskfile.
147 * Inherited from caller.
150 void ata_tf_from_fis(const u8 *fis, struct ata_taskfile *tf)
152 tf->command = fis[2]; /* status */
153 tf->feature = fis[3]; /* error */
160 tf->hob_lbal = fis[8];
161 tf->hob_lbam = fis[9];
162 tf->hob_lbah = fis[10];
165 tf->hob_nsect = fis[13];
168 static const u8 ata_rw_cmds[] = {
172 ATA_CMD_READ_MULTI_EXT,
173 ATA_CMD_WRITE_MULTI_EXT,
177 ATA_CMD_WRITE_MULTI_FUA_EXT,
181 ATA_CMD_PIO_READ_EXT,
182 ATA_CMD_PIO_WRITE_EXT,
195 ATA_CMD_WRITE_FUA_EXT
199 * ata_rwcmd_protocol - set taskfile r/w commands and protocol
200 * @qc: command to examine and configure
202 * Examine the device configuration and tf->flags to calculate
203 * the proper read/write commands and protocol to use.
208 int ata_rwcmd_protocol(struct ata_queued_cmd *qc)
210 struct ata_taskfile *tf = &qc->tf;
211 struct ata_device *dev = qc->dev;
214 int index, fua, lba48, write;
216 fua = (tf->flags & ATA_TFLAG_FUA) ? 4 : 0;
217 lba48 = (tf->flags & ATA_TFLAG_LBA48) ? 2 : 0;
218 write = (tf->flags & ATA_TFLAG_WRITE) ? 1 : 0;
220 if (dev->flags & ATA_DFLAG_PIO) {
221 tf->protocol = ATA_PROT_PIO;
222 index = dev->multi_count ? 0 : 8;
223 } else if (lba48 && (qc->ap->flags & ATA_FLAG_PIO_LBA48)) {
224 /* Unable to use DMA due to host limitation */
225 tf->protocol = ATA_PROT_PIO;
226 index = dev->multi_count ? 0 : 8;
228 tf->protocol = ATA_PROT_DMA;
232 cmd = ata_rw_cmds[index + fua + lba48 + write];
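/*
 * Worked example (illustrative, based on the table layout above): the
 * table is grouped into blocks of eight entries -- PIO multi, PIO and
 * DMA -- so index is 0, 8 or 16, while fua, lba48 and write add 4, 2
 * and 1 respectively.  A DMA, LBA48, FUA write therefore selects entry
 * 16 + 4 + 2 + 1 = 23, i.e. ATA_CMD_WRITE_FUA_EXT.
 */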
241 * ata_pack_xfermask - Pack pio, mwdma and udma masks into xfer_mask
242 * @pio_mask: pio_mask
243 * @mwdma_mask: mwdma_mask
244 * @udma_mask: udma_mask
246 * Pack @pio_mask, @mwdma_mask and @udma_mask into a single
247 * unsigned int xfer_mask.
255 static unsigned int ata_pack_xfermask(unsigned int pio_mask,
256 unsigned int mwdma_mask,
257 unsigned int udma_mask)
259 return ((pio_mask << ATA_SHIFT_PIO) & ATA_MASK_PIO) |
260 ((mwdma_mask << ATA_SHIFT_MWDMA) & ATA_MASK_MWDMA) |
261 ((udma_mask << ATA_SHIFT_UDMA) & ATA_MASK_UDMA);
265 * ata_unpack_xfermask - Unpack xfer_mask into pio, mwdma and udma masks
266 * @xfer_mask: xfer_mask to unpack
267 * @pio_mask: resulting pio_mask
268 * @mwdma_mask: resulting mwdma_mask
269 * @udma_mask: resulting udma_mask
271 * Unpack @xfer_mask into @pio_mask, @mwdma_mask and @udma_mask.
272 * Any NULL destination masks will be ignored.
274 static void ata_unpack_xfermask(unsigned int xfer_mask,
275 unsigned int *pio_mask,
276 unsigned int *mwdma_mask,
277 unsigned int *udma_mask)
280 *pio_mask = (xfer_mask & ATA_MASK_PIO) >> ATA_SHIFT_PIO;
282 *mwdma_mask = (xfer_mask & ATA_MASK_MWDMA) >> ATA_SHIFT_MWDMA;
284 *udma_mask = (xfer_mask & ATA_MASK_UDMA) >> ATA_SHIFT_UDMA;
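/*
 * Illustrative sketch (not part of the original file): combining the
 * two helpers above to strip everything but the PIO bits from a
 * device's transfer masks.  The name example_limit_to_pio is
 * hypothetical and exists only for demonstration.
 */
static inline void example_limit_to_pio(struct ata_device *dev)
{
	unsigned int xfer_mask;

	xfer_mask = ata_pack_xfermask(dev->pio_mask, dev->mwdma_mask,
				      dev->udma_mask);
	xfer_mask &= ATA_MASK_PIO;	/* drop MWDMA and UDMA bits */
	ata_unpack_xfermask(xfer_mask, &dev->pio_mask, &dev->mwdma_mask,
			    &dev->udma_mask);
}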
287 static const struct ata_xfer_ent {
291 { ATA_SHIFT_PIO, ATA_BITS_PIO, XFER_PIO_0 },
292 { ATA_SHIFT_MWDMA, ATA_BITS_MWDMA, XFER_MW_DMA_0 },
293 { ATA_SHIFT_UDMA, ATA_BITS_UDMA, XFER_UDMA_0 },
298 * ata_xfer_mask2mode - Find matching XFER_* for the given xfer_mask
299 * @xfer_mask: xfer_mask of interest
301 * Return matching XFER_* value for @xfer_mask. Only the highest
302 * bit of @xfer_mask is considered.
308 * Matching XFER_* value, 0 if no match found.
310 static u8 ata_xfer_mask2mode(unsigned int xfer_mask)
312 int highbit = fls(xfer_mask) - 1;
313 const struct ata_xfer_ent *ent;
315 for (ent = ata_xfer_tbl; ent->shift >= 0; ent++)
316 if (highbit >= ent->shift && highbit < ent->shift + ent->bits)
317 return ent->base + highbit - ent->shift;
322 * ata_xfer_mode2mask - Find matching xfer_mask for XFER_*
323 * @xfer_mode: XFER_* of interest
325 * Return matching xfer_mask for @xfer_mode.
331 * Matching xfer_mask, 0 if no match found.
333 static unsigned int ata_xfer_mode2mask(u8 xfer_mode)
335 const struct ata_xfer_ent *ent;
337 for (ent = ata_xfer_tbl; ent->shift >= 0; ent++)
338 if (xfer_mode >= ent->base && xfer_mode < ent->base + ent->bits)
339 return 1 << (ent->shift + xfer_mode - ent->base);
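/*
 * Worked example (illustrative): with ata_xfer_tbl above, XFER_UDMA_5
 * maps to bit (ATA_SHIFT_UDMA + 5) of an xfer_mask, and feeding that
 * single-bit mask back into ata_xfer_mask2mode() returns XFER_UDMA_5
 * again, so the two helpers are inverses for single-bit masks.
 */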
344 * ata_xfer_mode2shift - Find matching xfer_shift for XFER_*
345 * @xfer_mode: XFER_* of interest
347 * Return matching xfer_shift for @xfer_mode.
353 * Matching xfer_shift, -1 if no match found.
355 static int ata_xfer_mode2shift(unsigned int xfer_mode)
357 const struct ata_xfer_ent *ent;
359 for (ent = ata_xfer_tbl; ent->shift >= 0; ent++)
360 if (xfer_mode >= ent->base && xfer_mode < ent->base + ent->bits)
366 * ata_mode_string - convert xfer_mask to string
367 * @xfer_mask: mask of bits supported; only highest bit counts.
369 * Determine the string which represents the highest speed
370 * (highest bit in @xfer_mask).
376 * Constant C string representing highest speed listed in
377 * @xfer_mask, or the constant C string "<n/a>".
379 static const char *ata_mode_string(unsigned int xfer_mask)
381 static const char * const xfer_mode_str[] = {
401 highbit = fls(xfer_mask) - 1;
402 if (highbit >= 0 && highbit < ARRAY_SIZE(xfer_mode_str))
403 return xfer_mode_str[highbit];
407 static const char *sata_spd_string(unsigned int spd)
409 static const char * const spd_str[] = {
414 if (spd == 0 || (spd - 1) >= ARRAY_SIZE(spd_str))
416 return spd_str[spd - 1];
419 void ata_dev_disable(struct ata_device *dev)
421 if (ata_dev_enabled(dev) && ata_msg_drv(dev->ap)) {
422 ata_dev_printk(dev, KERN_WARNING, "disabled\n");
428 * ata_pio_devchk - PATA device presence detection
429 * @ap: ATA channel to examine
430 * @device: Device to examine (starting at zero)
432 * This technique was originally described in
433 * Hale Landis's ATADRVR (www.ata-atapi.com), and
434 * later found its way into the ATA/ATAPI spec.
436 * Write a pattern to the ATA shadow registers,
437 * and if a device is present, it will respond by
438 * correctly storing and echoing back the
439 * ATA shadow register contents.
445 static unsigned int ata_pio_devchk(struct ata_port *ap,
448 struct ata_ioports *ioaddr = &ap->ioaddr;
451 ap->ops->dev_select(ap, device);
453 outb(0x55, ioaddr->nsect_addr);
454 outb(0xaa, ioaddr->lbal_addr);
456 outb(0xaa, ioaddr->nsect_addr);
457 outb(0x55, ioaddr->lbal_addr);
459 outb(0x55, ioaddr->nsect_addr);
460 outb(0xaa, ioaddr->lbal_addr);
462 nsect = inb(ioaddr->nsect_addr);
463 lbal = inb(ioaddr->lbal_addr);
465 if ((nsect == 0x55) && (lbal == 0xaa))
466 return 1; /* we found a device */
468 return 0; /* nothing found */
472 * ata_mmio_devchk - PATA device presence detection
473 * @ap: ATA channel to examine
474 * @device: Device to examine (starting at zero)
476 * This technique was originally described in
477 * Hale Landis's ATADRVR (www.ata-atapi.com), and
478 * later found its way into the ATA/ATAPI spec.
480 * Write a pattern to the ATA shadow registers,
481 * and if a device is present, it will respond by
482 * correctly storing and echoing back the
483 * ATA shadow register contents.
489 static unsigned int ata_mmio_devchk(struct ata_port *ap,
492 struct ata_ioports *ioaddr = &ap->ioaddr;
495 ap->ops->dev_select(ap, device);
497 writeb(0x55, (void __iomem *) ioaddr->nsect_addr);
498 writeb(0xaa, (void __iomem *) ioaddr->lbal_addr);
500 writeb(0xaa, (void __iomem *) ioaddr->nsect_addr);
501 writeb(0x55, (void __iomem *) ioaddr->lbal_addr);
503 writeb(0x55, (void __iomem *) ioaddr->nsect_addr);
504 writeb(0xaa, (void __iomem *) ioaddr->lbal_addr);
506 nsect = readb((void __iomem *) ioaddr->nsect_addr);
507 lbal = readb((void __iomem *) ioaddr->lbal_addr);
509 if ((nsect == 0x55) && (lbal == 0xaa))
510 return 1; /* we found a device */
512 return 0; /* nothing found */
516 * ata_devchk - PATA device presence detection
517 * @ap: ATA channel to examine
518 * @device: Device to examine (starting at zero)
520 * Dispatch ATA device presence detection, depending
521 * on whether we are using PIO or MMIO to talk to the
522 * ATA shadow registers.
528 static unsigned int ata_devchk(struct ata_port *ap,
531 if (ap->flags & ATA_FLAG_MMIO)
532 return ata_mmio_devchk(ap, device);
533 return ata_pio_devchk(ap, device);
537 * ata_dev_classify - determine device type based on ATA-spec signature
538 * @tf: ATA taskfile register set for device to be identified
540 * Determine from taskfile register contents whether a device is
541 * ATA or ATAPI, as per "Signature and persistence" section
542 * of ATA/PI spec (volume 1, sect 5.14).
548 * Device type, %ATA_DEV_ATA, %ATA_DEV_ATAPI, or %ATA_DEV_UNKNOWN
549 * in the event of failure.
552 unsigned int ata_dev_classify(const struct ata_taskfile *tf)
554 /* Apple's open source Darwin code hints that some devices only
555 * put a proper signature into the LBA mid/high registers.
556 * So we only check those; it's sufficient for uniqueness.
559 if (((tf->lbam == 0) && (tf->lbah == 0)) ||
560 ((tf->lbam == 0x3c) && (tf->lbah == 0xc3))) {
561 DPRINTK("found ATA device by sig\n");
565 if (((tf->lbam == 0x14) && (tf->lbah == 0xeb)) ||
566 ((tf->lbam == 0x69) && (tf->lbah == 0x96))) {
567 DPRINTK("found ATAPI device by sig\n");
568 return ATA_DEV_ATAPI;
571 DPRINTK("unknown device\n");
572 return ATA_DEV_UNKNOWN;
576 * ata_dev_try_classify - Parse returned ATA device signature
577 * @ap: ATA channel to examine
578 * @device: Device to examine (starting at zero)
579 * @r_err: Value of error register on completion
581 * After an event -- SRST, E.D.D., or SATA COMRESET -- occurs,
582 * an ATA/ATAPI-defined set of values is placed in the ATA
583 * shadow registers, indicating the results of device detection
586 * Select the ATA device, and read the values from the ATA shadow
587 * registers. Then parse according to the Error register value,
588 * and the spec-defined values examined by ata_dev_classify().
594 * Device type - %ATA_DEV_ATA, %ATA_DEV_ATAPI or %ATA_DEV_NONE.
598 ata_dev_try_classify(struct ata_port *ap, unsigned int device, u8 *r_err)
600 struct ata_taskfile tf;
604 ap->ops->dev_select(ap, device);
606 memset(&tf, 0, sizeof(tf));
608 ap->ops->tf_read(ap, &tf);
613 /* see if device passed diags */
616 else if ((device == 0) && (err == 0x81))
621 /* determine if device is ATA or ATAPI */
622 class = ata_dev_classify(&tf);
624 if (class == ATA_DEV_UNKNOWN)
626 if ((class == ATA_DEV_ATA) && (ata_chk_status(ap) == 0))
632 * ata_id_string - Convert IDENTIFY DEVICE page into string
633 * @id: IDENTIFY DEVICE results we will examine
634 * @s: string into which data is output
635 * @ofs: offset into identify device page
636 * @len: length of string to return. must be an even number.
638 * The strings in the IDENTIFY DEVICE page are broken up into
639 * 16-bit chunks. Run through the string, and output each
640 * 8-bit chunk linearly, regardless of platform.
646 void ata_id_string(const u16 *id, unsigned char *s,
647 unsigned int ofs, unsigned int len)
666 * ata_id_c_string - Convert IDENTIFY DEVICE page into C string
667 * @id: IDENTIFY DEVICE results we will examine
668 * @s: string into which data is output
669 * @ofs: offset into identify device page
670 * @len: length of string to return. must be an odd number.
672 * This function is identical to ata_id_string except that it
673 * trims trailing spaces and terminates the resulting string with
674 * null. @len must be actual maximum length (even number) + 1.
679 void ata_id_c_string(const u16 *id, unsigned char *s,
680 unsigned int ofs, unsigned int len)
686 ata_id_string(id, s, ofs, len - 1);
688 p = s + strnlen(s, len - 1);
689 while (p > s && p[-1] == ' ')
694 static u64 ata_id_n_sectors(const u16 *id)
696 if (ata_id_has_lba(id)) {
697 if (ata_id_has_lba48(id))
698 return ata_id_u64(id, 100);
700 return ata_id_u32(id, 60);
702 if (ata_id_current_chs_valid(id))
703 return ata_id_u32(id, 57);
705 return id[1] * id[3] * id[6];
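/*
 * Worked example (illustrative): an LBA48 drive reports its capacity in
 * IDENTIFY words 100-103, so ata_id_u64(id, 100) might return a few
 * hundred million sectors for a modern disk.  A very old CHS-only drive
 * with id[1] = 16383 cylinders, id[3] = 16 heads and id[6] = 63
 * sectors/track yields 16383 * 16 * 63 = 16,514,064 sectors (~8.4 GB).
 */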
710 * ata_noop_dev_select - Select device 0/1 on ATA bus
711 * @ap: ATA channel to manipulate
712 * @device: ATA device (numbered from zero) to select
714 * This function performs no action; it is a no-op.
716 * May be used as the dev_select() entry in ata_port_operations.
721 void ata_noop_dev_select (struct ata_port *ap, unsigned int device)
727 * ata_std_dev_select - Select device 0/1 on ATA bus
728 * @ap: ATA channel to manipulate
729 * @device: ATA device (numbered from zero) to select
731 * Use the method defined in the ATA specification to
732 * make either device 0, or device 1, active on the
733 * ATA channel. Works with both PIO and MMIO.
735 * May be used as the dev_select() entry in ata_port_operations.
741 void ata_std_dev_select (struct ata_port *ap, unsigned int device)
746 tmp = ATA_DEVICE_OBS;
748 tmp = ATA_DEVICE_OBS | ATA_DEV1;
750 if (ap->flags & ATA_FLAG_MMIO) {
751 writeb(tmp, (void __iomem *) ap->ioaddr.device_addr);
753 outb(tmp, ap->ioaddr.device_addr);
755 ata_pause(ap); /* needed; also flushes, for mmio */
759 * ata_dev_select - Select device 0/1 on ATA bus
760 * @ap: ATA channel to manipulate
761 * @device: ATA device (numbered from zero) to select
762 * @wait: non-zero to wait for Status register BSY bit to clear
763 * @can_sleep: non-zero if context allows sleeping
765 * Use the method defined in the ATA specification to
766 * make either device 0, or device 1, active on the
769 * This is a high-level version of ata_std_dev_select(),
770 * which additionally provides the services of inserting
771 * the proper pauses and status polling, where needed.
777 void ata_dev_select(struct ata_port *ap, unsigned int device,
778 unsigned int wait, unsigned int can_sleep)
780 if (ata_msg_probe(ap)) {
781 ata_port_printk(ap, KERN_INFO, "ata_dev_select: ENTER, ata%u: "
782 "device %u, wait %u\n",
783 ap->id, device, wait);
789 ap->ops->dev_select(ap, device);
792 if (can_sleep && ap->device[device].class == ATA_DEV_ATAPI)
799 * ata_dump_id - IDENTIFY DEVICE info debugging output
800 * @id: IDENTIFY DEVICE page to dump
802 * Dump selected 16-bit words from the given IDENTIFY DEVICE
809 static inline void ata_dump_id(const u16 *id)
811 DPRINTK("49==0x%04x "
821 DPRINTK("80==0x%04x "
831 DPRINTK("88==0x%04x "
838 * ata_id_xfermask - Compute xfermask from the given IDENTIFY data
839 * @id: IDENTIFY data to compute xfer mask from
841 * Compute the xfermask for this device. This is not as trivial
842 * as it seems if we must consider early devices correctly.
844 * FIXME: pre IDE drive timing (do we care ?).
852 static unsigned int ata_id_xfermask(const u16 *id)
854 unsigned int pio_mask, mwdma_mask, udma_mask;
856 /* Usual case. Word 53 indicates word 64 is valid */
857 if (id[ATA_ID_FIELD_VALID] & (1 << 1)) {
858 pio_mask = id[ATA_ID_PIO_MODES] & 0x03;
862 /* If word 64 isn't valid then Word 51 high byte holds
863 * the PIO timing number for the maximum. Turn it into
866 pio_mask = (2 << (id[ATA_ID_OLD_PIO_MODES] & 0xFF)) - 1 ;
868 /* But wait.. there's more. Design your standards by
869 * committee and you too can get a free iordy field to
870 * process. However it's the speeds, not the modes, that
871 * are supported... Note drivers using the timing API
872 * will get this right anyway
876 mwdma_mask = id[ATA_ID_MWDMA_MODES] & 0x07;
879 if (id[ATA_ID_FIELD_VALID] & (1 << 2))
880 udma_mask = id[ATA_ID_UDMA_MODES] & 0xff;
882 return ata_pack_xfermask(pio_mask, mwdma_mask, udma_mask);
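/*
 * Worked example (illustrative): for a typical modern drive, word 53
 * has bit 1 set, word 64 (PIO) reads 0x03, word 63 (MWDMA) reads 0x07
 * and word 88 (UDMA) reads 0x3f, so the packed result is
 *
 *	(0x1f << ATA_SHIFT_PIO) | (0x07 << ATA_SHIFT_MWDMA) |
 *	(0x3f << ATA_SHIFT_UDMA)
 *
 * (the low three PIO bits, modes 0-2, are always set by this helper).
 */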
886 * ata_port_queue_task - Queue port_task
887 * @ap: The ata_port to queue port_task for
888 * @fn: workqueue function to be scheduled
889 * @data: data value to pass to workqueue function
890 * @delay: delay time for workqueue function
892 * Schedule @fn(@data) for execution after @delay jiffies using
893 * port_task. There is one port_task per port and it's the
894 * user (low-level driver)'s responsibility to make sure that only
895 * one task is active at any given time.
897 * libata core layer takes care of synchronization between
898 * port_task and EH. ata_port_queue_task() may be ignored for EH
902 * Inherited from caller.
904 void ata_port_queue_task(struct ata_port *ap, void (*fn)(void *), void *data,
909 if (ap->flags & ATA_FLAG_FLUSH_PORT_TASK)
912 PREPARE_WORK(&ap->port_task, fn, data);
915 rc = queue_work(ata_wq, &ap->port_task);
917 rc = queue_delayed_work(ata_wq, &ap->port_task, delay);
919 /* rc == 0 means that another user is using port task */
924 * ata_port_flush_task - Flush port_task
925 * @ap: The ata_port to flush port_task for
927 * After this function completes, port_task is guaranteed not to
928 * be running or scheduled.
931 * Kernel thread context (may sleep)
933 void ata_port_flush_task(struct ata_port *ap)
939 spin_lock_irqsave(ap->lock, flags);
940 ap->flags |= ATA_FLAG_FLUSH_PORT_TASK;
941 spin_unlock_irqrestore(ap->lock, flags);
943 DPRINTK("flush #1\n");
944 flush_workqueue(ata_wq);
947 * At this point, if a task is running, it's guaranteed to see
948 * the FLUSH flag; thus, it will never queue pio tasks again.
951 if (!cancel_delayed_work(&ap->port_task)) {
953 ata_port_printk(ap, KERN_DEBUG, "%s: flush #2\n", __FUNCTION__);
954 flush_workqueue(ata_wq);
957 spin_lock_irqsave(ap->lock, flags);
958 ap->flags &= ~ATA_FLAG_FLUSH_PORT_TASK;
959 spin_unlock_irqrestore(ap->lock, flags);
962 ata_port_printk(ap, KERN_DEBUG, "%s: EXIT\n", __FUNCTION__);
965 void ata_qc_complete_internal(struct ata_queued_cmd *qc)
967 struct completion *waiting = qc->private_data;
973 * ata_exec_internal - execute libata internal command
974 * @dev: Device to which the command is sent
975 * @tf: Taskfile registers for the command and the result
976 * @cdb: CDB for packet command
977 * @dma_dir: Data transfer direction of the command
978 * @buf: Data buffer of the command
979 * @buflen: Length of data buffer
981 * Executes libata internal command with timeout. @tf contains
982 * command on entry and result on return. Timeout and error
983 * conditions are reported via return value. No recovery action
984 * is taken after a command times out. It is the caller's duty to
985 * clean up after timeout.
988 * None. Should be called with kernel context, might sleep.
991 * Zero on success, AC_ERR_* mask on failure
993 unsigned ata_exec_internal(struct ata_device *dev,
994 struct ata_taskfile *tf, const u8 *cdb,
995 int dma_dir, void *buf, unsigned int buflen)
997 struct ata_port *ap = dev->ap;
998 u8 command = tf->command;
999 struct ata_queued_cmd *qc;
1000 unsigned int tag, preempted_tag;
1001 u32 preempted_sactive, preempted_qc_active;
1002 DECLARE_COMPLETION(wait);
1003 unsigned long flags;
1004 unsigned int err_mask;
1007 spin_lock_irqsave(ap->lock, flags);
1009 /* no internal command while frozen */
1010 if (ap->flags & ATA_FLAG_FROZEN) {
1011 spin_unlock_irqrestore(ap->lock, flags);
1012 return AC_ERR_SYSTEM;
1015 /* initialize internal qc */
1017 /* XXX: Tag 0 is used for drivers with legacy EH as some
1018 * drivers choke if any other tag is given. This breaks
1019 * ata_tag_internal() test for those drivers. Don't use new
1020 * EH stuff without converting to it.
1022 if (ap->ops->error_handler)
1023 tag = ATA_TAG_INTERNAL;
1027 if (test_and_set_bit(tag, &ap->qc_allocated))
1029 qc = __ata_qc_from_tag(ap, tag);
1037 preempted_tag = ap->active_tag;
1038 preempted_sactive = ap->sactive;
1039 preempted_qc_active = ap->qc_active;
1040 ap->active_tag = ATA_TAG_POISON;
1044 /* prepare & issue qc */
1047 memcpy(qc->cdb, cdb, ATAPI_CDB_LEN);
1048 qc->flags |= ATA_QCFLAG_RESULT_TF;
1049 qc->dma_dir = dma_dir;
1050 if (dma_dir != DMA_NONE) {
1051 ata_sg_init_one(qc, buf, buflen);
1052 qc->nsect = buflen / ATA_SECT_SIZE;
1055 qc->private_data = &wait;
1056 qc->complete_fn = ata_qc_complete_internal;
1060 spin_unlock_irqrestore(ap->lock, flags);
1062 rc = wait_for_completion_timeout(&wait, ATA_TMOUT_INTERNAL);
1064 ata_port_flush_task(ap);
1067 spin_lock_irqsave(ap->lock, flags);
1069 /* We're racing with irq here. If we lose, the
1070 * following test prevents us from completing the qc
1071 * twice. If we win, the port is frozen and will be
1072 * cleaned up by ->post_internal_cmd().
1074 if (qc->flags & ATA_QCFLAG_ACTIVE) {
1075 qc->err_mask |= AC_ERR_TIMEOUT;
1077 if (ap->ops->error_handler)
1078 ata_port_freeze(ap);
1080 ata_qc_complete(qc);
1082 if (ata_msg_warn(ap))
1083 ata_dev_printk(dev, KERN_WARNING,
1084 "qc timeout (cmd 0x%x)\n", command);
1087 spin_unlock_irqrestore(ap->lock, flags);
1090 /* do post_internal_cmd */
1091 if (ap->ops->post_internal_cmd)
1092 ap->ops->post_internal_cmd(qc);
1094 if (qc->flags & ATA_QCFLAG_FAILED && !qc->err_mask) {
1095 if (ata_msg_warn(ap))
1096 ata_dev_printk(dev, KERN_WARNING,
1097 "zero err_mask for failed "
1098 "internal command, assuming AC_ERR_OTHER\n");
1099 qc->err_mask |= AC_ERR_OTHER;
1103 spin_lock_irqsave(ap->lock, flags);
1105 *tf = qc->result_tf;
1106 err_mask = qc->err_mask;
1109 ap->active_tag = preempted_tag;
1110 ap->sactive = preempted_sactive;
1111 ap->qc_active = preempted_qc_active;
1113 /* XXX - Some LLDDs (sata_mv) disable port on command failure.
1114 * Until those drivers are fixed, we detect the condition
1115 * here, fail the command with AC_ERR_SYSTEM and reenable the
1118 * Note that this doesn't change any behavior as internal
1119 * command failure results in disabling the device in the
1120 * higher layer for LLDDs without new reset/EH callbacks.
1122 * Kill the following code as soon as those drivers are fixed.
1124 if (ap->flags & ATA_FLAG_DISABLED) {
1125 err_mask |= AC_ERR_SYSTEM;
1129 spin_unlock_irqrestore(ap->lock, flags);
1135 * ata_do_simple_cmd - execute simple internal command
1136 * @dev: Device to which the command is sent
1137 * @cmd: Opcode to execute
1139 * Execute a 'simple' command that consists only of the opcode
1140 * @cmd itself, without filling any other registers.
1143 * Kernel thread context (may sleep).
1146 * Zero on success, AC_ERR_* mask on failure
1148 static unsigned int ata_do_simple_cmd(struct ata_device *dev, u8 cmd)
1150 struct ata_taskfile tf;
1152 ata_tf_init(dev, &tf);
1155 tf.flags |= ATA_TFLAG_DEVICE;
1156 tf.protocol = ATA_PROT_NODATA;
1158 return ata_exec_internal(dev, &tf, NULL, DMA_NONE, NULL, 0);
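/*
 * Illustrative sketch (not part of the original file): a caller might
 * use ata_do_simple_cmd() to flush the drive's write cache.  The name
 * example_flush_cache is hypothetical; real code would prefer
 * ATA_CMD_FLUSH_EXT on LBA48-capable drives.
 */
static inline unsigned int example_flush_cache(struct ata_device *dev)
{
	return ata_do_simple_cmd(dev, ATA_CMD_FLUSH);
}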
1162 * ata_pio_need_iordy - check if iordy needed
1165 * Check if the current speed of the device requires IORDY. Used
1166 * by various controllers for chip configuration.
1169 unsigned int ata_pio_need_iordy(const struct ata_device *adev)
1172 int speed = adev->pio_mode - XFER_PIO_0;
1179 /* If we have no drive specific rule, then PIO 2 is non IORDY */
1181 if (adev->id[ATA_ID_FIELD_VALID] & 2) { /* EIDE */
1182 pio = adev->id[ATA_ID_EIDE_PIO];
1183 /* Is the speed faster than the drive allows non IORDY ? */
1185 /* This is cycle times not frequency - watch the logic! */
1186 if (pio > 240) /* PIO2 is 240nS per cycle */
1195 * ata_dev_read_id - Read ID data from the specified device
1196 * @dev: target device
1197 * @p_class: pointer to class of the target device (may be changed)
1198 * @post_reset: is this read ID post-reset?
1199 * @id: buffer to read IDENTIFY data into
1201 * Read ID data from the specified device. ATA_CMD_ID_ATA is
1202 * performed on ATA devices and ATA_CMD_ID_ATAPI on ATAPI
1203 * devices. This function also issues ATA_CMD_INIT_DEV_PARAMS
1204 * for pre-ATA4 drives.
1207 * Kernel thread context (may sleep)
1210 * 0 on success, -errno otherwise.
1212 int ata_dev_read_id(struct ata_device *dev, unsigned int *p_class,
1213 int post_reset, u16 *id)
1215 struct ata_port *ap = dev->ap;
1216 unsigned int class = *p_class;
1217 struct ata_taskfile tf;
1218 unsigned int err_mask = 0;
1222 if (ata_msg_ctl(ap))
1223 ata_dev_printk(dev, KERN_DEBUG, "%s: ENTER, host %u, dev %u\n",
1224 __FUNCTION__, ap->id, dev->devno);
1226 ata_dev_select(ap, dev->devno, 1, 1); /* select device 0/1 */
1229 ata_tf_init(dev, &tf);
1233 tf.command = ATA_CMD_ID_ATA;
1236 tf.command = ATA_CMD_ID_ATAPI;
1240 reason = "unsupported class";
1244 tf.protocol = ATA_PROT_PIO;
1246 err_mask = ata_exec_internal(dev, &tf, NULL, DMA_FROM_DEVICE,
1247 id, sizeof(id[0]) * ATA_ID_WORDS);
1250 reason = "I/O error";
1254 swap_buf_le16(id, ATA_ID_WORDS);
1257 if ((class == ATA_DEV_ATA) != (ata_id_is_ata(id) | ata_id_is_cfa(id))) {
1259 reason = "device reports illegal type";
1263 if (post_reset && class == ATA_DEV_ATA) {
1265 * The exact sequence expected by certain pre-ATA4 drives is:
1268 * INITIALIZE DEVICE PARAMETERS
1270 * Some drives were very specific about that exact sequence.
1272 if (ata_id_major_version(id) < 4 || !ata_id_has_lba(id)) {
1273 err_mask = ata_dev_init_params(dev, id[3], id[6]);
1276 reason = "INIT_DEV_PARAMS failed";
1280 /* current CHS translation info (id[53-58]) might be
1281 * changed. Reread the identify device info.
1293 if (ata_msg_warn(ap))
1294 ata_dev_printk(dev, KERN_WARNING, "failed to IDENTIFY "
1295 "(%s, err_mask=0x%x)\n", reason, err_mask);
1299 static inline u8 ata_dev_knobble(struct ata_device *dev)
1301 return ((dev->ap->cbl == ATA_CBL_SATA) && (!ata_id_is_sata(dev->id)));
1304 static void ata_dev_config_ncq(struct ata_device *dev,
1305 char *desc, size_t desc_sz)
1307 struct ata_port *ap = dev->ap;
1308 int hdepth = 0, ddepth = ata_id_queue_depth(dev->id);
1310 if (!ata_id_has_ncq(dev->id)) {
1315 if (ap->flags & ATA_FLAG_NCQ) {
1316 hdepth = min(ap->host->can_queue, ATA_MAX_QUEUE - 1);
1317 dev->flags |= ATA_DFLAG_NCQ;
1320 if (hdepth >= ddepth)
1321 snprintf(desc, desc_sz, "NCQ (depth %d)", ddepth);
1323 snprintf(desc, desc_sz, "NCQ (depth %d/%d)", hdepth, ddepth);
1327 * ata_dev_configure - Configure the specified ATA/ATAPI device
1328 * @dev: Target device to configure
1329 * @print_info: Enable device info printout
1331 * Configure @dev according to @dev->id. Generic and low-level
1332 * driver specific fixups are also applied.
1335 * Kernel thread context (may sleep)
1338 * 0 on success, -errno otherwise
1340 int ata_dev_configure(struct ata_device *dev, int print_info)
1342 struct ata_port *ap = dev->ap;
1343 const u16 *id = dev->id;
1344 unsigned int xfer_mask;
1347 if (!ata_dev_enabled(dev) && ata_msg_info(ap)) {
1348 ata_dev_printk(dev, KERN_INFO, "%s: ENTER/EXIT (host %u, dev %u) -- nodev\n",
1349 __FUNCTION__, ap->id, dev->devno);
1353 if (ata_msg_probe(ap))
1354 ata_dev_printk(dev, KERN_DEBUG, "%s: ENTER, host %u, dev %u\n",
1355 __FUNCTION__, ap->id, dev->devno);
1357 /* print device capabilities */
1358 if (ata_msg_probe(ap))
1359 ata_dev_printk(dev, KERN_DEBUG, "%s: cfg 49:%04x 82:%04x 83:%04x "
1360 "84:%04x 85:%04x 86:%04x 87:%04x 88:%04x\n",
1362 id[49], id[82], id[83], id[84],
1363 id[85], id[86], id[87], id[88]);
1365 /* initialize to-be-configured parameters */
1366 dev->flags &= ~ATA_DFLAG_CFG_MASK;
1367 dev->max_sectors = 0;
1375 * common ATA, ATAPI feature tests
1378 /* find max transfer mode; for printk only */
1379 xfer_mask = ata_id_xfermask(id);
1381 if (ata_msg_probe(ap))
1384 /* ATA-specific feature tests */
1385 if (dev->class == ATA_DEV_ATA) {
1386 dev->n_sectors = ata_id_n_sectors(id);
1388 if (ata_id_has_lba(id)) {
1389 const char *lba_desc;
1393 dev->flags |= ATA_DFLAG_LBA;
1394 if (ata_id_has_lba48(id)) {
1395 dev->flags |= ATA_DFLAG_LBA48;
1400 ata_dev_config_ncq(dev, ncq_desc, sizeof(ncq_desc));
1402 /* print device info to dmesg */
1403 if (ata_msg_info(ap))
1404 ata_dev_printk(dev, KERN_INFO, "ATA-%d, "
1405 "max %s, %Lu sectors: %s %s\n",
1406 ata_id_major_version(id),
1407 ata_mode_string(xfer_mask),
1408 (unsigned long long)dev->n_sectors,
1409 lba_desc, ncq_desc);
1413 /* Default translation */
1414 dev->cylinders = id[1];
1416 dev->sectors = id[6];
1418 if (ata_id_current_chs_valid(id)) {
1419 /* Current CHS translation is valid. */
1420 dev->cylinders = id[54];
1421 dev->heads = id[55];
1422 dev->sectors = id[56];
1425 /* print device info to dmesg */
1426 if (ata_msg_info(ap))
1427 ata_dev_printk(dev, KERN_INFO, "ATA-%d, "
1428 "max %s, %Lu sectors: CHS %u/%u/%u\n",
1429 ata_id_major_version(id),
1430 ata_mode_string(xfer_mask),
1431 (unsigned long long)dev->n_sectors,
1432 dev->cylinders, dev->heads, dev->sectors);
1435 if (dev->id[59] & 0x100) {
1436 dev->multi_count = dev->id[59] & 0xff;
1437 if (ata_msg_info(ap))
1438 ata_dev_printk(dev, KERN_INFO, "ata%u: dev %u multi count %u\n",
1439 ap->id, dev->devno, dev->multi_count);
1445 /* ATAPI-specific feature tests */
1446 else if (dev->class == ATA_DEV_ATAPI) {
1447 char *cdb_intr_string = "";
1449 rc = atapi_cdb_len(id);
1450 if ((rc < 12) || (rc > ATAPI_CDB_LEN)) {
1451 if (ata_msg_warn(ap))
1452 ata_dev_printk(dev, KERN_WARNING,
1453 "unsupported CDB len\n");
1457 dev->cdb_len = (unsigned int) rc;
1459 if (ata_id_cdb_intr(dev->id)) {
1460 dev->flags |= ATA_DFLAG_CDB_INTR;
1461 cdb_intr_string = ", CDB intr";
1464 /* print device info to dmesg */
1465 if (ata_msg_info(ap))
1466 ata_dev_printk(dev, KERN_INFO, "ATAPI, max %s%s\n",
1467 ata_mode_string(xfer_mask),
1471 ap->host->max_cmd_len = 0;
1472 for (i = 0; i < ATA_MAX_DEVICES; i++)
1473 ap->host->max_cmd_len = max_t(unsigned int,
1474 ap->host->max_cmd_len,
1475 ap->device[i].cdb_len);
1477 /* limit bridge transfers to udma5, 200 sectors */
1478 if (ata_dev_knobble(dev)) {
1479 if (ata_msg_info(ap))
1480 ata_dev_printk(dev, KERN_INFO,
1481 "applying bridge limits\n");
1482 dev->udma_mask &= ATA_UDMA5;
1483 dev->max_sectors = ATA_MAX_SECTORS;
1486 if (ap->ops->dev_config)
1487 ap->ops->dev_config(ap, dev);
1489 if (ata_msg_probe(ap))
1490 ata_dev_printk(dev, KERN_DEBUG, "%s: EXIT, drv_stat = 0x%x\n",
1491 __FUNCTION__, ata_chk_status(ap));
1495 if (ata_msg_probe(ap))
1496 ata_dev_printk(dev, KERN_DEBUG,
1497 "%s: EXIT, err\n", __FUNCTION__);
1502 * ata_bus_probe - Reset and probe ATA bus
1505 * Master ATA bus probing function. Initiates a hardware-dependent
1506 * bus reset, then attempts to identify any devices found on
1510 * PCI/etc. bus probe sem.
1513 * Zero on success, negative errno otherwise.
1516 static int ata_bus_probe(struct ata_port *ap)
1518 unsigned int classes[ATA_MAX_DEVICES];
1519 int tries[ATA_MAX_DEVICES];
1520 int i, rc, down_xfermask;
1521 struct ata_device *dev;
1525 for (i = 0; i < ATA_MAX_DEVICES; i++)
1526 tries[i] = ATA_PROBE_MAX_TRIES;
1531 /* reset and determine device classes */
1532 ap->ops->phy_reset(ap);
1534 for (i = 0; i < ATA_MAX_DEVICES; i++) {
1535 dev = &ap->device[i];
1537 if (!(ap->flags & ATA_FLAG_DISABLED) &&
1538 dev->class != ATA_DEV_UNKNOWN)
1539 classes[dev->devno] = dev->class;
1541 classes[dev->devno] = ATA_DEV_NONE;
1543 dev->class = ATA_DEV_UNKNOWN;
1548 /* after the reset the device state is PIO 0 and the controller
1549 state is undefined. Record the mode */
1551 for (i = 0; i < ATA_MAX_DEVICES; i++)
1552 ap->device[i].pio_mode = XFER_PIO_0;
1554 /* read IDENTIFY page and configure devices */
1555 for (i = 0; i < ATA_MAX_DEVICES; i++) {
1556 dev = &ap->device[i];
1559 dev->class = classes[i];
1561 if (!ata_dev_enabled(dev))
1564 rc = ata_dev_read_id(dev, &dev->class, 1, dev->id);
1568 rc = ata_dev_configure(dev, 1);
1573 /* configure transfer mode */
1574 rc = ata_set_mode(ap, &dev);
1580 for (i = 0; i < ATA_MAX_DEVICES; i++)
1581 if (ata_dev_enabled(&ap->device[i]))
1584 /* no device present, disable port */
1585 ata_port_disable(ap);
1586 ap->ops->port_disable(ap);
1593 tries[dev->devno] = 0;
1596 sata_down_spd_limit(ap);
1599 tries[dev->devno]--;
1600 if (down_xfermask &&
1601 ata_down_xfermask_limit(dev, tries[dev->devno] == 1))
1602 tries[dev->devno] = 0;
1605 if (!tries[dev->devno]) {
1606 ata_down_xfermask_limit(dev, 1);
1607 ata_dev_disable(dev);
1614 * ata_port_probe - Mark port as enabled
1615 * @ap: Port for which we indicate enablement
1617 * Modify @ap data structure such that the system
1618 * thinks that the entire port is enabled.
1620 * LOCKING: host_set lock, or some other form of
1624 void ata_port_probe(struct ata_port *ap)
1626 ap->flags &= ~ATA_FLAG_DISABLED;
1630 * sata_print_link_status - Print SATA link status
1631 * @ap: SATA port to printk link status about
1633 * This function prints link speed and status of a SATA link.
1638 static void sata_print_link_status(struct ata_port *ap)
1640 u32 sstatus, scontrol, tmp;
1642 if (sata_scr_read(ap, SCR_STATUS, &sstatus))
1644 sata_scr_read(ap, SCR_CONTROL, &scontrol);
1646 if (ata_port_online(ap)) {
1647 tmp = (sstatus >> 4) & 0xf;
1648 ata_port_printk(ap, KERN_INFO,
1649 "SATA link up %s (SStatus %X SControl %X)\n",
1650 sata_spd_string(tmp), sstatus, scontrol);
1652 ata_port_printk(ap, KERN_INFO,
1653 "SATA link down (SStatus %X SControl %X)\n",
1659 * __sata_phy_reset - Wake/reset a low-level SATA PHY
1660 * @ap: SATA port associated with target SATA PHY.
1662 * This function issues commands to standard SATA Sxxx
1663 * PHY registers, to wake up the phy (and device), and
1664 * clear any reset condition.
1667 * PCI/etc. bus probe sem.
1670 void __sata_phy_reset(struct ata_port *ap)
1673 unsigned long timeout = jiffies + (HZ * 5);
1675 if (ap->flags & ATA_FLAG_SATA_RESET) {
1676 /* issue phy wake/reset */
1677 sata_scr_write_flush(ap, SCR_CONTROL, 0x301);
1678 /* Couldn't find anything in SATA I/II specs, but
1679 * AHCI-1.1 10.4.2 says at least 1 ms. */
1682 /* phy wake/clear reset */
1683 sata_scr_write_flush(ap, SCR_CONTROL, 0x300);
1685 /* wait for phy to become ready, if necessary */
1688 sata_scr_read(ap, SCR_STATUS, &sstatus);
1689 if ((sstatus & 0xf) != 1)
1691 } while (time_before(jiffies, timeout));
1693 /* print link status */
1694 sata_print_link_status(ap);
1696 /* TODO: phy layer with polling, timeouts, etc. */
1697 if (!ata_port_offline(ap))
1700 ata_port_disable(ap);
1702 if (ap->flags & ATA_FLAG_DISABLED)
1705 if (ata_busy_sleep(ap, ATA_TMOUT_BOOT_QUICK, ATA_TMOUT_BOOT)) {
1706 ata_port_disable(ap);
1710 ap->cbl = ATA_CBL_SATA;
1714 * sata_phy_reset - Reset SATA bus.
1715 * @ap: SATA port associated with target SATA PHY.
1717 * This function resets the SATA bus, and then probes
1718 * the bus for devices.
1721 * PCI/etc. bus probe sem.
1724 void sata_phy_reset(struct ata_port *ap)
1726 __sata_phy_reset(ap);
1727 if (ap->flags & ATA_FLAG_DISABLED)
1733 * ata_dev_pair - return other device on cable
1736 * Obtain the other device on the same cable, or NULL if none
1737 * is present.
1740 struct ata_device *ata_dev_pair(struct ata_device *adev)
1742 struct ata_port *ap = adev->ap;
1743 struct ata_device *pair = &ap->device[1 - adev->devno];
1744 if (!ata_dev_enabled(pair))
1750 * ata_port_disable - Disable port.
1751 * @ap: Port to be disabled.
1753 * Modify @ap data structure such that the system
1754 * thinks that the entire port is disabled, and should
1755 * never attempt to probe or communicate with devices
1758 * LOCKING: host_set lock, or some other form of
1762 void ata_port_disable(struct ata_port *ap)
1764 ap->device[0].class = ATA_DEV_NONE;
1765 ap->device[1].class = ATA_DEV_NONE;
1766 ap->flags |= ATA_FLAG_DISABLED;
1770 * sata_down_spd_limit - adjust SATA spd limit downward
1771 * @ap: Port to adjust SATA spd limit for
1773 * Adjust SATA spd limit of @ap downward. Note that this
1774 * function only adjusts the limit. The change must be applied
1775 * using sata_set_spd().
1778 * Inherited from caller.
1781 * 0 on success, negative errno on failure
1783 int sata_down_spd_limit(struct ata_port *ap)
1785 u32 sstatus, spd, mask;
1788 rc = sata_scr_read(ap, SCR_STATUS, &sstatus);
1792 mask = ap->sata_spd_limit;
1795 highbit = fls(mask) - 1;
1796 mask &= ~(1 << highbit);
1798 spd = (sstatus >> 4) & 0xf;
1802 mask &= (1 << spd) - 1;
1806 ap->sata_spd_limit = mask;
1808 ata_port_printk(ap, KERN_WARNING, "limiting SATA link speed to %s\n",
1809 sata_spd_string(fls(mask)));
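/*
 * Worked example (illustrative): if ap->sata_spd_limit is 0x3 (1.5 and
 * 3.0 Gbps allowed) and SStatus reports that the link came up at
 * 3.0 Gbps (spd == 2), the highest bit is cleared and the mask is
 * intersected with (1 << spd) - 1, leaving 0x1, i.e. the next reset is
 * limited to 1.5 Gbps.
 */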
1814 static int __sata_set_spd_needed(struct ata_port *ap, u32 *scontrol)
1818 if (ap->sata_spd_limit == UINT_MAX)
1821 limit = fls(ap->sata_spd_limit);
1823 spd = (*scontrol >> 4) & 0xf;
1824 *scontrol = (*scontrol & ~0xf0) | ((limit & 0xf) << 4);
1826 return spd != limit;
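/*
 * Worked example (illustrative): bits 7:4 of SControl hold the allowed
 * speed.  With sata_spd_limit == 0x1 (1.5 Gbps only), limit is
 * fls(0x1) == 1, so SControl is rewritten to (scontrol & ~0xf0) | 0x10;
 * if the register already held that value, spd == limit and no
 * hardreset is needed to apply the limit.
 */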
1830 * sata_set_spd_needed - is SATA spd configuration needed
1831 * @ap: Port in question
1833 * Test whether the spd limit in SControl matches
1834 * @ap->sata_spd_limit. This function is used to determine
1835 * whether hardreset is necessary to apply SATA spd
1839 * Inherited from caller.
1842 * 1 if SATA spd configuration is needed, 0 otherwise.
1844 int sata_set_spd_needed(struct ata_port *ap)
1848 if (sata_scr_read(ap, SCR_CONTROL, &scontrol))
1851 return __sata_set_spd_needed(ap, &scontrol);
1855 * sata_set_spd - set SATA spd according to spd limit
1856 * @ap: Port to set SATA spd for
1858 * Set SATA spd of @ap according to sata_spd_limit.
1861 * Inherited from caller.
1864 * 0 if spd doesn't need to be changed, 1 if spd has been
1865 * changed. Negative errno if SCR registers are inaccessible.
1867 int sata_set_spd(struct ata_port *ap)
1872 if ((rc = sata_scr_read(ap, SCR_CONTROL, &scontrol)))
1875 if (!__sata_set_spd_needed(ap, &scontrol))
1878 if ((rc = sata_scr_write(ap, SCR_CONTROL, scontrol)))
1885 * This mode timing computation functionality is ported over from
1886 * drivers/ide/ide-timing.h and was originally written by Vojtech Pavlik
1889 * PIO 0-5, MWDMA 0-2 and UDMA 0-6 timings (in nanoseconds).
1890 * These were taken from ATA/ATAPI-6 standard, rev 0a, except
1891 * for PIO 5, which is a nonstandard extension and UDMA6, which
1892 * is currently supported only by Maxtor drives.
1895 static const struct ata_timing ata_timing[] = {
1897 { XFER_UDMA_6, 0, 0, 0, 0, 0, 0, 0, 15 },
1898 { XFER_UDMA_5, 0, 0, 0, 0, 0, 0, 0, 20 },
1899 { XFER_UDMA_4, 0, 0, 0, 0, 0, 0, 0, 30 },
1900 { XFER_UDMA_3, 0, 0, 0, 0, 0, 0, 0, 45 },
1902 { XFER_UDMA_2, 0, 0, 0, 0, 0, 0, 0, 60 },
1903 { XFER_UDMA_1, 0, 0, 0, 0, 0, 0, 0, 80 },
1904 { XFER_UDMA_0, 0, 0, 0, 0, 0, 0, 0, 120 },
1906 /* { XFER_UDMA_SLOW, 0, 0, 0, 0, 0, 0, 0, 150 }, */
1908 { XFER_MW_DMA_2, 25, 0, 0, 0, 70, 25, 120, 0 },
1909 { XFER_MW_DMA_1, 45, 0, 0, 0, 80, 50, 150, 0 },
1910 { XFER_MW_DMA_0, 60, 0, 0, 0, 215, 215, 480, 0 },
1912 { XFER_SW_DMA_2, 60, 0, 0, 0, 120, 120, 240, 0 },
1913 { XFER_SW_DMA_1, 90, 0, 0, 0, 240, 240, 480, 0 },
1914 { XFER_SW_DMA_0, 120, 0, 0, 0, 480, 480, 960, 0 },
1916 /* { XFER_PIO_5, 20, 50, 30, 100, 50, 30, 100, 0 }, */
1917 { XFER_PIO_4, 25, 70, 25, 120, 70, 25, 120, 0 },
1918 { XFER_PIO_3, 30, 80, 70, 180, 80, 70, 180, 0 },
1920 { XFER_PIO_2, 30, 290, 40, 330, 100, 90, 240, 0 },
1921 { XFER_PIO_1, 50, 290, 93, 383, 125, 100, 383, 0 },
1922 { XFER_PIO_0, 70, 290, 240, 600, 165, 150, 600, 0 },
1924 /* { XFER_PIO_SLOW, 120, 290, 240, 960, 290, 240, 960, 0 }, */
1929 #define ENOUGH(v,unit) (((v)-1)/(unit)+1)
1930 #define EZ(v,unit) ((v)?ENOUGH(v,unit):0)
1932 static void ata_timing_quantize(const struct ata_timing *t, struct ata_timing *q, int T, int UT)
1934 q->setup = EZ(t->setup * 1000, T);
1935 q->act8b = EZ(t->act8b * 1000, T);
1936 q->rec8b = EZ(t->rec8b * 1000, T);
1937 q->cyc8b = EZ(t->cyc8b * 1000, T);
1938 q->active = EZ(t->active * 1000, T);
1939 q->recover = EZ(t->recover * 1000, T);
1940 q->cycle = EZ(t->cycle * 1000, T);
1941 q->udma = EZ(t->udma * 1000, UT);
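/*
 * Worked example (illustrative): the table entries above are in
 * nanoseconds, hence the "* 1000"; T and UT are then expected in
 * picoseconds (callers typically derive them from the controller
 * clock, e.g. T = 30000 for a 33 MHz bus).  For PIO 4, cycle = 120 ns,
 * so EZ(120 * 1000, 30000) = ENOUGH(120000, 30000)
 *                          = (120000 - 1) / 30000 + 1 = 4 clock periods.
 */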
1944 void ata_timing_merge(const struct ata_timing *a, const struct ata_timing *b,
1945 struct ata_timing *m, unsigned int what)
1947 if (what & ATA_TIMING_SETUP ) m->setup = max(a->setup, b->setup);
1948 if (what & ATA_TIMING_ACT8B ) m->act8b = max(a->act8b, b->act8b);
1949 if (what & ATA_TIMING_REC8B ) m->rec8b = max(a->rec8b, b->rec8b);
1950 if (what & ATA_TIMING_CYC8B ) m->cyc8b = max(a->cyc8b, b->cyc8b);
1951 if (what & ATA_TIMING_ACTIVE ) m->active = max(a->active, b->active);
1952 if (what & ATA_TIMING_RECOVER) m->recover = max(a->recover, b->recover);
1953 if (what & ATA_TIMING_CYCLE ) m->cycle = max(a->cycle, b->cycle);
1954 if (what & ATA_TIMING_UDMA ) m->udma = max(a->udma, b->udma);
1957 static const struct ata_timing* ata_timing_find_mode(unsigned short speed)
1959 const struct ata_timing *t;
1961 for (t = ata_timing; t->mode != speed; t++)
1962 if (t->mode == 0xFF)
1967 int ata_timing_compute(struct ata_device *adev, unsigned short speed,
1968 struct ata_timing *t, int T, int UT)
1970 const struct ata_timing *s;
1971 struct ata_timing p;
1977 if (!(s = ata_timing_find_mode(speed)))
1980 memcpy(t, s, sizeof(*s));
1983 * If the drive is an EIDE drive, it can tell us it needs extended
1984 * PIO/MW_DMA cycle timing.
1987 if (adev->id[ATA_ID_FIELD_VALID] & 2) { /* EIDE drive */
1988 memset(&p, 0, sizeof(p));
1989 if(speed >= XFER_PIO_0 && speed <= XFER_SW_DMA_0) {
1990 if (speed <= XFER_PIO_2) p.cycle = p.cyc8b = adev->id[ATA_ID_EIDE_PIO];
1991 else p.cycle = p.cyc8b = adev->id[ATA_ID_EIDE_PIO_IORDY];
1992 } else if(speed >= XFER_MW_DMA_0 && speed <= XFER_MW_DMA_2) {
1993 p.cycle = adev->id[ATA_ID_EIDE_DMA_MIN];
1995 ata_timing_merge(&p, t, t, ATA_TIMING_CYCLE | ATA_TIMING_CYC8B);
1999 * Convert the timing to bus clock counts.
2002 ata_timing_quantize(t, t, T, UT);
2005 * Even in DMA/UDMA modes we still use PIO access for IDENTIFY,
2006 * S.M.A.R.T. and some other commands. We have to ensure that the
2007 * DMA cycle timing is no faster than the fastest PIO timing.
2010 if (speed > XFER_PIO_4) {
2011 ata_timing_compute(adev, adev->pio_mode, &p, T, UT);
2012 ata_timing_merge(&p, t, t, ATA_TIMING_ALL);
2016 * Lengthen active & recovery time so that cycle time is correct.
2019 if (t->act8b + t->rec8b < t->cyc8b) {
2020 t->act8b += (t->cyc8b - (t->act8b + t->rec8b)) / 2;
2021 t->rec8b = t->cyc8b - t->act8b;
2024 if (t->active + t->recover < t->cycle) {
2025 t->active += (t->cycle - (t->active + t->recover)) / 2;
2026 t->recover = t->cycle - t->active;
2033 * ata_down_xfermask_limit - adjust dev xfer masks downward
2034 * @dev: Device to adjust xfer masks
2035 * @force_pio0: Force PIO0
2037 * Adjust xfer masks of @dev downward. Note that this function
2038 * does not apply the change. Invoking ata_set_mode() afterwards
2039 * will apply the limit.
2042 * Inherited from caller.
2045 * 0 on success, negative errno on failure
2047 int ata_down_xfermask_limit(struct ata_device *dev, int force_pio0)
2049 unsigned long xfer_mask;
2052 xfer_mask = ata_pack_xfermask(dev->pio_mask, dev->mwdma_mask,
2057 /* don't gear down to MWDMA from UDMA, go directly to PIO */
2058 if (xfer_mask & ATA_MASK_UDMA)
2059 xfer_mask &= ~ATA_MASK_MWDMA;
2061 highbit = fls(xfer_mask) - 1;
2062 xfer_mask &= ~(1 << highbit);
2064 xfer_mask &= 1 << ATA_SHIFT_PIO;
2068 ata_unpack_xfermask(xfer_mask, &dev->pio_mask, &dev->mwdma_mask,
2071 ata_dev_printk(dev, KERN_WARNING, "limiting speed to %s\n",
2072 ata_mode_string(xfer_mask));
2080 static int ata_dev_set_mode(struct ata_device *dev)
2082 unsigned int err_mask;
2085 dev->flags &= ~ATA_DFLAG_PIO;
2086 if (dev->xfer_shift == ATA_SHIFT_PIO)
2087 dev->flags |= ATA_DFLAG_PIO;
2089 err_mask = ata_dev_set_xfermode(dev);
2091 ata_dev_printk(dev, KERN_ERR, "failed to set xfermode "
2092 "(err_mask=0x%x)\n", err_mask);
2096 rc = ata_dev_revalidate(dev, 0);
2100 DPRINTK("xfer_shift=%u, xfer_mode=0x%x\n",
2101 dev->xfer_shift, (int)dev->xfer_mode);
2103 ata_dev_printk(dev, KERN_INFO, "configured for %s\n",
2104 ata_mode_string(ata_xfer_mode2mask(dev->xfer_mode)));
2109 * ata_set_mode - Program timings and issue SET FEATURES - XFER
2110 * @ap: port on which timings will be programmed
2111 * @r_failed_dev: out parameter for failed device
2113 * Set ATA device disk transfer mode (PIO3, UDMA6, etc.). If
2114 * ata_set_mode() fails, pointer to the failing device is
2115 * returned in @r_failed_dev.
2118 * PCI/etc. bus probe sem.
2121 * 0 on success, negative errno otherwise
2123 int ata_set_mode(struct ata_port *ap, struct ata_device **r_failed_dev)
2125 struct ata_device *dev;
2126 int i, rc = 0, used_dma = 0, found = 0;
2128 /* has private set_mode? */
2129 if (ap->ops->set_mode) {
2130 /* FIXME: make ->set_mode handle no device case and
2131 * return error code and failing device on failure.
2133 for (i = 0; i < ATA_MAX_DEVICES; i++) {
2134 if (ata_dev_enabled(&ap->device[i])) {
2135 ap->ops->set_mode(ap);
2142 /* step 1: calculate xfer_mask */
2143 for (i = 0; i < ATA_MAX_DEVICES; i++) {
2144 unsigned int pio_mask, dma_mask;
2146 dev = &ap->device[i];
2148 if (!ata_dev_enabled(dev))
2151 ata_dev_xfermask(dev);
2153 pio_mask = ata_pack_xfermask(dev->pio_mask, 0, 0);
2154 dma_mask = ata_pack_xfermask(0, dev->mwdma_mask, dev->udma_mask);
2155 dev->pio_mode = ata_xfer_mask2mode(pio_mask);
2156 dev->dma_mode = ata_xfer_mask2mode(dma_mask);
2165 /* step 2: always set host PIO timings */
2166 for (i = 0; i < ATA_MAX_DEVICES; i++) {
2167 dev = &ap->device[i];
2168 if (!ata_dev_enabled(dev))
2171 if (!dev->pio_mode) {
2172 ata_dev_printk(dev, KERN_WARNING, "no PIO support\n");
2177 dev->xfer_mode = dev->pio_mode;
2178 dev->xfer_shift = ATA_SHIFT_PIO;
2179 if (ap->ops->set_piomode)
2180 ap->ops->set_piomode(ap, dev);
2183 /* step 3: set host DMA timings */
2184 for (i = 0; i < ATA_MAX_DEVICES; i++) {
2185 dev = &ap->device[i];
2187 if (!ata_dev_enabled(dev) || !dev->dma_mode)
2190 dev->xfer_mode = dev->dma_mode;
2191 dev->xfer_shift = ata_xfer_mode2shift(dev->dma_mode);
2192 if (ap->ops->set_dmamode)
2193 ap->ops->set_dmamode(ap, dev);
2196 /* step 4: update devices' xfer mode */
2197 for (i = 0; i < ATA_MAX_DEVICES; i++) {
2198 dev = &ap->device[i];
2200 if (!ata_dev_enabled(dev))
2203 rc = ata_dev_set_mode(dev);
2208 /* Record simplex status. If we selected DMA then the other
2209 * host channels are not permitted to do so.
2211 if (used_dma && (ap->host_set->flags & ATA_HOST_SIMPLEX))
2212 ap->host_set->simplex_claimed = 1;
2214 /* step 5: chip-specific finalisation */
2215 if (ap->ops->post_set_mode)
2216 ap->ops->post_set_mode(ap);
2220 *r_failed_dev = dev;
2225 * ata_tf_to_host - issue ATA taskfile to host controller
2226 * @ap: port to which command is being issued
2227 * @tf: ATA taskfile register set
2229 * Issues ATA taskfile register set to ATA host controller,
2230 * with proper synchronization with interrupt handler and
2234 * spin_lock_irqsave(host_set lock)
2237 static inline void ata_tf_to_host(struct ata_port *ap,
2238 const struct ata_taskfile *tf)
2240 ap->ops->tf_load(ap, tf);
2241 ap->ops->exec_command(ap, tf);
2245 * ata_busy_sleep - sleep until BSY clears, or timeout
2246 * @ap: port containing status register to be polled
2247 * @tmout_pat: impatience timeout
2248 * @tmout: overall timeout
2250 * Sleep until ATA Status register bit BSY clears,
2251 * or a timeout occurs.
2256 unsigned int ata_busy_sleep (struct ata_port *ap,
2257 unsigned long tmout_pat, unsigned long tmout)
2259 unsigned long timer_start, timeout;
2262 status = ata_busy_wait(ap, ATA_BUSY, 300);
2263 timer_start = jiffies;
2264 timeout = timer_start + tmout_pat;
2265 while ((status & ATA_BUSY) && (time_before(jiffies, timeout))) {
2267 status = ata_busy_wait(ap, ATA_BUSY, 3);
2270 if (status & ATA_BUSY)
2271 ata_port_printk(ap, KERN_WARNING,
2272 "port is slow to respond, please be patient\n");
2274 timeout = timer_start + tmout;
2275 while ((status & ATA_BUSY) && (time_before(jiffies, timeout))) {
2277 status = ata_chk_status(ap);
2280 if (status & ATA_BUSY) {
2281 ata_port_printk(ap, KERN_ERR, "port failed to respond "
2282 "(%lu secs)\n", tmout / HZ);
2289 static void ata_bus_post_reset(struct ata_port *ap, unsigned int devmask)
2291 struct ata_ioports *ioaddr = &ap->ioaddr;
2292 unsigned int dev0 = devmask & (1 << 0);
2293 unsigned int dev1 = devmask & (1 << 1);
2294 unsigned long timeout;
2296 /* if device 0 was found in ata_devchk, wait for its
2300 ata_busy_sleep(ap, ATA_TMOUT_BOOT_QUICK, ATA_TMOUT_BOOT);
2302 /* if device 1 was found in ata_devchk, wait for
2303 * register access, then wait for BSY to clear
2305 timeout = jiffies + ATA_TMOUT_BOOT;
2309 ap->ops->dev_select(ap, 1);
2310 if (ap->flags & ATA_FLAG_MMIO) {
2311 nsect = readb((void __iomem *) ioaddr->nsect_addr);
2312 lbal = readb((void __iomem *) ioaddr->lbal_addr);
2314 nsect = inb(ioaddr->nsect_addr);
2315 lbal = inb(ioaddr->lbal_addr);
2317 if ((nsect == 1) && (lbal == 1))
2319 if (time_after(jiffies, timeout)) {
2323 msleep(50); /* give drive a breather */
2326 ata_busy_sleep(ap, ATA_TMOUT_BOOT_QUICK, ATA_TMOUT_BOOT);
2328 /* is all this really necessary? */
2329 ap->ops->dev_select(ap, 0);
2331 ap->ops->dev_select(ap, 1);
2333 ap->ops->dev_select(ap, 0);
2336 static unsigned int ata_bus_softreset(struct ata_port *ap,
2337 unsigned int devmask)
2339 struct ata_ioports *ioaddr = &ap->ioaddr;
2341 DPRINTK("ata%u: bus reset via SRST\n", ap->id);
2343 /* software reset. causes dev0 to be selected */
2344 if (ap->flags & ATA_FLAG_MMIO) {
2345 writeb(ap->ctl, (void __iomem *) ioaddr->ctl_addr);
2346 udelay(20); /* FIXME: flush */
2347 writeb(ap->ctl | ATA_SRST, (void __iomem *) ioaddr->ctl_addr);
2348 udelay(20); /* FIXME: flush */
2349 writeb(ap->ctl, (void __iomem *) ioaddr->ctl_addr);
2351 outb(ap->ctl, ioaddr->ctl_addr);
2353 outb(ap->ctl | ATA_SRST, ioaddr->ctl_addr);
2355 outb(ap->ctl, ioaddr->ctl_addr);
2358 /* spec mandates ">= 2ms" before checking status.
2359 * We wait 150ms, because that was the magic delay used for
2360 * ATAPI devices in Hale Landis's ATADRVR, for the period of time
2361 * between when the ATA command register is written, and then
2362 * status is checked. Because waiting for "a while" before
2363 * checking status is fine, post SRST, we perform this magic
2364 * delay here as well.
2366 * Old drivers/ide uses the 2 ms rule and then waits for ready
2370 /* Before we perform post reset processing we want to see if
2371 * the bus shows 0xFF because the odd clown forgets the D7
2372 * pulldown resistor.
2374 if (ata_check_status(ap) == 0xFF) {
2375 ata_port_printk(ap, KERN_ERR, "SRST failed (status 0xFF)\n");
2376 return AC_ERR_OTHER;
2379 ata_bus_post_reset(ap, devmask);
2385 * ata_bus_reset - reset host port and associated ATA channel
2386 * @ap: port to reset
2388 * This is typically the first time we actually start issuing
2389 * commands to the ATA channel. We wait for BSY to clear, then
2390 * issue EXECUTE DEVICE DIAGNOSTIC command, polling for its
2391 * result. Determine what devices, if any, are on the channel
2392 * by looking at the device 0/1 error register. Look at the signature
2393 * stored in each device's taskfile registers, to determine if
2394 * the device is ATA or ATAPI.
2397 * PCI/etc. bus probe sem.
2398 * Obtains host_set lock.
2401 * Sets ATA_FLAG_DISABLED if bus reset fails.
2404 void ata_bus_reset(struct ata_port *ap)
2406 struct ata_ioports *ioaddr = &ap->ioaddr;
2407 unsigned int slave_possible = ap->flags & ATA_FLAG_SLAVE_POSS;
2409 unsigned int dev0, dev1 = 0, devmask = 0;
2411 DPRINTK("ENTER, host %u, port %u\n", ap->id, ap->port_no);
2413 /* determine if device 0/1 are present */
2414 if (ap->flags & ATA_FLAG_SATA_RESET)
2417 dev0 = ata_devchk(ap, 0);
2419 dev1 = ata_devchk(ap, 1);
2423 devmask |= (1 << 0);
2425 devmask |= (1 << 1);
2427 /* select device 0 again */
2428 ap->ops->dev_select(ap, 0);
2430 /* issue bus reset */
2431 if (ap->flags & ATA_FLAG_SRST)
2432 if (ata_bus_softreset(ap, devmask))
2436 * determine by signature whether we have ATA or ATAPI devices
2438 ap->device[0].class = ata_dev_try_classify(ap, 0, &err);
2439 if ((slave_possible) && (err != 0x81))
2440 ap->device[1].class = ata_dev_try_classify(ap, 1, &err);
2442 /* re-enable interrupts */
2443 if (ap->ioaddr.ctl_addr) /* FIXME: hack. create a hook instead */
2446 /* is double-select really necessary? */
2447 if (ap->device[1].class != ATA_DEV_NONE)
2448 ap->ops->dev_select(ap, 1);
2449 if (ap->device[0].class != ATA_DEV_NONE)
2450 ap->ops->dev_select(ap, 0);
2452 /* if no devices were detected, disable this port */
2453 if ((ap->device[0].class == ATA_DEV_NONE) &&
2454 (ap->device[1].class == ATA_DEV_NONE))
2457 if (ap->flags & (ATA_FLAG_SATA_RESET | ATA_FLAG_SRST)) {
2458 /* set up device control for ATA_FLAG_SATA_RESET */
2459 if (ap->flags & ATA_FLAG_MMIO)
2460 writeb(ap->ctl, (void __iomem *) ioaddr->ctl_addr);
2462 outb(ap->ctl, ioaddr->ctl_addr);
2469 ata_port_printk(ap, KERN_ERR, "disabling port\n");
2470 ap->ops->port_disable(ap);
2476 * sata_phy_debounce - debounce SATA phy status
2477 * @ap: ATA port to debounce SATA phy status for
2478 * @params: timing parameters { interval, duration, timeout } in msec
2480 * Make sure SStatus of @ap reaches stable state, determined by
2481 * holding the same value where DET is not 1 for @duration polled
2482 * every @interval, before @timeout. @timeout constrains the
2483 * beginning of the stable state. Because, after hot unplugging,
2484 * DET gets stuck at 1 on some controllers, this function waits
2485 * until the timeout and then returns 0 if DET is stable at 1.
2488 * Kernel thread context (may sleep)
2491 * 0 on success, -errno on failure.
2493 int sata_phy_debounce(struct ata_port *ap, const unsigned long *params)
2495 unsigned long interval_msec = params[0];
2496 unsigned long duration = params[1] * HZ / 1000;
2497 unsigned long timeout = jiffies + params[2] * HZ / 1000;
2498 unsigned long last_jiffies;
2502 if ((rc = sata_scr_read(ap, SCR_STATUS, &cur)))
2507 last_jiffies = jiffies;
2510 msleep(interval_msec);
2511 if ((rc = sata_scr_read(ap, SCR_STATUS, &cur)))
2517 if (cur == 1 && time_before(jiffies, timeout))
2519 if (time_after(jiffies, last_jiffies + duration))
2524 /* unstable, start over */
2526 last_jiffies = jiffies;
2529 if (time_after(jiffies, timeout))
2535 * sata_phy_resume - resume SATA phy
2536 * @ap: ATA port to resume SATA phy for
2537 * @params: timing parameters { interval, duration, timeout } in msec
2539 * Resume SATA phy of @ap and debounce it.
2542 * Kernel thread context (may sleep)
2545 * 0 on success, -errno on failure.
2547 int sata_phy_resume(struct ata_port *ap, const unsigned long *params)
2552 if ((rc = sata_scr_read(ap, SCR_CONTROL, &scontrol)))
2555 scontrol = (scontrol & 0x0f0) | 0x300;
2557 if ((rc = sata_scr_write(ap, SCR_CONTROL, scontrol)))
2560 /* Some PHYs react badly if SStatus is pounded immediately
2561 * after resuming. Delay 200ms before debouncing.
2565 return sata_phy_debounce(ap, params);
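/*
 * Illustrative sketch (not part of the driver, kept out of the build):
 * how a hotplug path might bring a link back and check for a device,
 * using sata_phy_resume() with one of the debounce timing tables
 * declared near the top of this file.  The function name is
 * hypothetical.
 */
#if 0
static int example_revive_link(struct ata_port *ap)
{
	int rc;

	/* clear DET, then poll SStatus until it is stable */
	rc = sata_phy_resume(ap, sata_deb_timing_eh);
	if (rc && rc != -EOPNOTSUPP)
		return rc;	/* SCR access failed */

	/* DET may have settled at 0 (nothing attached) or 3 (device) */
	return ata_port_online(ap) ? 0 : -ENODEV;
}
#endif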
2568 static void ata_wait_spinup(struct ata_port *ap)
2570 struct ata_eh_context *ehc = &ap->eh_context;
2571 unsigned long end, secs;
2574 /* first, debounce phy if SATA */
2575 if (ap->cbl == ATA_CBL_SATA) {
2576 rc = sata_phy_debounce(ap, sata_deb_timing_eh);
2578 /* if debounced successfully and offline, no need to wait */
2579 if ((rc == 0 || rc == -EOPNOTSUPP) && ata_port_offline(ap))
2583 /* okay, let's give the drive time to spin up */
2584 end = ehc->i.hotplug_timestamp + ATA_SPINUP_WAIT * HZ / 1000;
2585 secs = ((end - jiffies) + HZ - 1) / HZ;
2587 if (time_after(jiffies, end))
2591 ata_port_printk(ap, KERN_INFO, "waiting for device to spin up "
2592 "(%lu secs)\n", secs);
2594 schedule_timeout_uninterruptible(end - jiffies);
2598 * ata_std_prereset - prepare for reset
2599 * @ap: ATA port to be reset
2601 * @ap is about to be reset. Initialize it.
2604 * Kernel thread context (may sleep)
2607 * 0 on success, -errno otherwise.
2609 int ata_std_prereset(struct ata_port *ap)
2611 struct ata_eh_context *ehc = &ap->eh_context;
2612 const unsigned long *timing;
2616 if (ehc->i.flags & ATA_EHI_HOTPLUGGED) {
2617 if (ap->flags & ATA_FLAG_HRST_TO_RESUME)
2618 ehc->i.action |= ATA_EH_HARDRESET;
2619 if (ap->flags & ATA_FLAG_SKIP_D2H_BSY)
2620 ata_wait_spinup(ap);
2623 /* if we're about to do hardreset, nothing more to do */
2624 if (ehc->i.action & ATA_EH_HARDRESET)
2627 /* if SATA, resume phy */
2628 if (ap->cbl == ATA_CBL_SATA) {
2629 if (ap->flags & ATA_FLAG_LOADING)
2630 timing = sata_deb_timing_boot;
2632 timing = sata_deb_timing_eh;
2634 rc = sata_phy_resume(ap, timing);
2635 if (rc && rc != -EOPNOTSUPP) {
2636 /* phy resume failed */
2637 ata_port_printk(ap, KERN_WARNING, "failed to resume "
2638 "link for reset (errno=%d)\n", rc);
2643 /* Wait for !BSY if the controller can wait for the first D2H
2644 * Reg FIS and we don't know that no device is attached.
2646 if (!(ap->flags & ATA_FLAG_SKIP_D2H_BSY) && !ata_port_offline(ap))
2647 ata_busy_sleep(ap, ATA_TMOUT_BOOT_QUICK, ATA_TMOUT_BOOT);
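/*
 * Illustrative sketch (not part of the driver, kept out of the build):
 * the call order of the standard reset helpers below.  In the real
 * driver these are invoked through the EH framework rather than
 * back-to-back like this; the function name is hypothetical.
 */
#if 0
static int example_standard_reset(struct ata_port *ap)
{
	unsigned int classes[ATA_MAX_DEVICES] = {
		ATA_DEV_UNKNOWN, ATA_DEV_UNKNOWN
	};
	int rc;

	rc = ata_std_prereset(ap);		/* spin up / resume phy */
	if (rc)
		return rc;

	rc = ata_std_softreset(ap, classes);	/* SRST + classify */
	if (rc)
		return rc;

	ata_std_postreset(ap, classes);		/* clear SError, etc. */
	return 0;
}
#endif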
2653 * ata_std_softreset - reset host port via ATA SRST
2654 * @ap: port to reset
2655 * @classes: resulting classes of attached devices
2657 * Reset host port using ATA SRST.
2660 * Kernel thread context (may sleep)
2663 * 0 on success, -errno otherwise.
2665 int ata_std_softreset(struct ata_port *ap, unsigned int *classes)
2667 unsigned int slave_possible = ap->flags & ATA_FLAG_SLAVE_POSS;
2668 unsigned int devmask = 0, err_mask;
2673 if (ata_port_offline(ap)) {
2674 classes[0] = ATA_DEV_NONE;
2678 /* determine if device 0/1 are present */
2679 if (ata_devchk(ap, 0))
2680 devmask |= (1 << 0);
2681 if (slave_possible && ata_devchk(ap, 1))
2682 devmask |= (1 << 1);
2684 /* select device 0 again */
2685 ap->ops->dev_select(ap, 0);
2687 /* issue bus reset */
2688 DPRINTK("about to softreset, devmask=%x\n", devmask);
2689 err_mask = ata_bus_softreset(ap, devmask);
2691 ata_port_printk(ap, KERN_ERR, "SRST failed (err_mask=0x%x)\n",
2696 /* determine by signature whether we have ATA or ATAPI devices */
2697 classes[0] = ata_dev_try_classify(ap, 0, &err);
2698 if (slave_possible && err != 0x81)
2699 classes[1] = ata_dev_try_classify(ap, 1, &err);
2702 DPRINTK("EXIT, classes[0]=%u [1]=%u\n", classes[0], classes[1]);
2707 * sata_std_hardreset - reset host port via SATA phy reset
2708 * @ap: port to reset
2709 * @class: resulting class of attached device
2711 * SATA phy-reset host port using DET bits of SControl register.
2714 * Kernel thread context (may sleep)
2717 * 0 on success, -errno otherwise.
2719 int sata_std_hardreset(struct ata_port *ap, unsigned int *class)
2726 if (sata_set_spd_needed(ap)) {
2727 /* SATA spec says nothing about how to reconfigure
2728 * spd. To be on the safe side, turn off phy during
2729 * reconfiguration. This works for at least ICH7 AHCI
2732 if ((rc = sata_scr_read(ap, SCR_CONTROL, &scontrol)))
2735 scontrol = (scontrol & 0x0f0) | 0x302;
2737 if ((rc = sata_scr_write(ap, SCR_CONTROL, scontrol)))
2743 /* issue phy wake/reset */
2744 if ((rc = sata_scr_read(ap, SCR_CONTROL, &scontrol)))
2747 scontrol = (scontrol & 0x0f0) | 0x301;
2749 if ((rc = sata_scr_write_flush(ap, SCR_CONTROL, scontrol)))
2752 /* Couldn't find anything in SATA I/II specs, but AHCI-1.1
2753 * 10.4.2 says at least 1 ms.
2757 /* bring phy back */
2758 sata_phy_resume(ap, sata_deb_timing_eh);
2760 /* TODO: phy layer with polling, timeouts, etc. */
2761 if (ata_port_offline(ap)) {
2762 *class = ATA_DEV_NONE;
2763 DPRINTK("EXIT, link offline\n");
2767 if (ata_busy_sleep(ap, ATA_TMOUT_BOOT_QUICK, ATA_TMOUT_BOOT)) {
2768 ata_port_printk(ap, KERN_ERR,
2769 "COMRESET failed (device not ready)\n");
2773 ap->ops->dev_select(ap, 0); /* probably unnecessary */
2775 *class = ata_dev_try_classify(ap, 0, NULL);
2777 DPRINTK("EXIT, class=%u\n", *class);
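/*
 * Illustrative sketch (not part of the driver, kept out of the build):
 * what the SControl writes in sata_std_hardreset() above mean.  The
 * register layout is DET [3:0], SPD [7:4], IPM [11:8]; the function
 * name is hypothetical.
 */
#if 0
static void example_comreset_by_hand(struct ata_port *ap)
{
	u32 scontrol;

	if (sata_scr_read(ap, SCR_CONTROL, &scontrol))
		return;

	/* keep the SPD limit, DET=1 (emit COMRESET), IPM=3 (no pm) */
	scontrol = (scontrol & 0x0f0) | 0x301;
	sata_scr_write_flush(ap, SCR_CONTROL, scontrol);

	msleep(1);		/* AHCI 1.1 10.4.2: hold for at least 1ms */

	/* clear DET and wait for the link to settle again */
	sata_phy_resume(ap, sata_deb_timing_eh);
}
#endif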
2782 * ata_std_postreset - standard postreset callback
2783 * @ap: the target ata_port
2784 * @classes: classes of attached devices
2786 * This function is invoked after a successful reset. Note that
2787 * the device might have been reset more than once using
2788 * different reset methods before postreset is invoked.
2791 * Kernel thread context (may sleep)
2793 void ata_std_postreset(struct ata_port *ap, unsigned int *classes)
2799 /* print link status */
2800 sata_print_link_status(ap);
2803 if (sata_scr_read(ap, SCR_ERROR, &serror) == 0)
2804 sata_scr_write(ap, SCR_ERROR, serror);
2806 /* re-enable interrupts */
2807 if (!ap->ops->error_handler) {
2808 /* FIXME: hack. create a hook instead */
2809 if (ap->ioaddr.ctl_addr)
2813 /* is double-select really necessary? */
2814 if (classes[0] != ATA_DEV_NONE)
2815 ap->ops->dev_select(ap, 1);
2816 if (classes[1] != ATA_DEV_NONE)
2817 ap->ops->dev_select(ap, 0);
2819 /* bail out if no device is present */
2820 if (classes[0] == ATA_DEV_NONE && classes[1] == ATA_DEV_NONE) {
2821 DPRINTK("EXIT, no device\n");
2825 /* set up device control */
2826 if (ap->ioaddr.ctl_addr) {
2827 if (ap->flags & ATA_FLAG_MMIO)
2828 writeb(ap->ctl, (void __iomem *) ap->ioaddr.ctl_addr);
2830 outb(ap->ctl, ap->ioaddr.ctl_addr);
2837 * ata_dev_same_device - Determine whether new ID matches configured device
2838 * @dev: device to compare against
2839 * @new_class: class of the new device
2840 * @new_id: IDENTIFY page of the new device
2842 * Compare @new_class and @new_id against @dev and determine
2843 * whether @dev is the device indicated by @new_class and @new_id.
2850 * 1 if @dev matches @new_class and @new_id, 0 otherwise.
2852 static int ata_dev_same_device(struct ata_device *dev, unsigned int new_class,
2855 const u16 *old_id = dev->id;
2856 unsigned char model[2][41], serial[2][21];
2859 if (dev->class != new_class) {
2860 ata_dev_printk(dev, KERN_INFO, "class mismatch %d != %d\n",
2861 dev->class, new_class);
2865 ata_id_c_string(old_id, model[0], ATA_ID_PROD_OFS, sizeof(model[0]));
2866 ata_id_c_string(new_id, model[1], ATA_ID_PROD_OFS, sizeof(model[1]));
2867 ata_id_c_string(old_id, serial[0], ATA_ID_SERNO_OFS, sizeof(serial[0]));
2868 ata_id_c_string(new_id, serial[1], ATA_ID_SERNO_OFS, sizeof(serial[1]));
2869 new_n_sectors = ata_id_n_sectors(new_id);
2871 if (strcmp(model[0], model[1])) {
2872 ata_dev_printk(dev, KERN_INFO, "model number mismatch "
2873 "'%s' != '%s'\n", model[0], model[1]);
2877 if (strcmp(serial[0], serial[1])) {
2878 ata_dev_printk(dev, KERN_INFO, "serial number mismatch "
2879 "'%s' != '%s'\n", serial[0], serial[1]);
2883 if (dev->class == ATA_DEV_ATA && dev->n_sectors != new_n_sectors) {
2884 ata_dev_printk(dev, KERN_INFO, "n_sectors mismatch "
2886 (unsigned long long)dev->n_sectors,
2887 (unsigned long long)new_n_sectors);
2895 * ata_dev_revalidate - Revalidate ATA device
2896 * @dev: device to revalidate
2897 * @post_reset: is this revalidation after reset?
2899 * Re-read IDENTIFY page and make sure @dev is still attached to the port.
2903 * Kernel thread context (may sleep)
2906 * 0 on success, negative errno otherwise
2908 int ata_dev_revalidate(struct ata_device *dev, int post_reset)
2910 unsigned int class = dev->class;
2911 u16 *id = (void *)dev->ap->sector_buf;
2914 if (!ata_dev_enabled(dev)) {
2920 rc = ata_dev_read_id(dev, &class, post_reset, id);
2924 /* is the device still there? */
2925 if (!ata_dev_same_device(dev, class, id)) {
2930 memcpy(dev->id, id, sizeof(id[0]) * ATA_ID_WORDS);
2932 /* configure device according to the new ID */
2933 rc = ata_dev_configure(dev, 0);
2938 ata_dev_printk(dev, KERN_ERR, "revalidation failed (errno=%d)\n", rc);
2942 static const char * const ata_dma_blacklist [] = {
2943 "WDC AC11000H", NULL,
2944 "WDC AC22100H", NULL,
2945 "WDC AC32500H", NULL,
2946 "WDC AC33100H", NULL,
2947 "WDC AC31600H", NULL,
2948 "WDC AC32100H", "24.09P07",
2949 "WDC AC23200L", "21.10N21",
2950 "Compaq CRD-8241B", NULL,
2955 "SanDisk SDP3B", NULL,
2956 "SanDisk SDP3B-64", NULL,
2957 "SANYO CD-ROM CRD", NULL,
2958 "HITACHI CDR-8", NULL,
2959 "HITACHI CDR-8335", NULL,
2960 "HITACHI CDR-8435", NULL,
2961 "Toshiba CD-ROM XM-6202B", NULL,
2962 "TOSHIBA CD-ROM XM-1702BC", NULL,
2964 "E-IDE CD-ROM CR-840", NULL,
2965 "CD-ROM Drive/F5A", NULL,
2966 "WPI CDD-820", NULL,
2967 "SAMSUNG CD-ROM SC-148C", NULL,
2968 "SAMSUNG CD-ROM SC", NULL,
2969 "SanDisk SDP3B-64", NULL,
2970 "ATAPI CD-ROM DRIVE 40X MAXIMUM",NULL,
2971 "_NEC DV5800A", NULL,
2972 "SAMSUNG CD-ROM SN-124", "N001"
2975 static int ata_strim(char *s, size_t len)
2977 len = strnlen(s, len);
2979 /* ATAPI specifies that empty space is blank-filled; remove blanks */
2980 while ((len > 0) && (s[len - 1] == ' ')) {
2987 static int ata_dma_blacklisted(const struct ata_device *dev)
2989 unsigned char model_num[40];
2990 unsigned char model_rev[16];
2991 unsigned int nlen, rlen;
2994 /* We don't support polling DMA.
2995 * Blacklist DMA for those ATAPI devices with CDB-intr (and use PIO)
2996 * if the LLDD handles only interrupts in the HSM_ST_LAST state.
2998 if ((dev->ap->flags & ATA_FLAG_PIO_POLLING) &&
2999 (dev->flags & ATA_DFLAG_CDB_INTR))
3002 ata_id_string(dev->id, model_num, ATA_ID_PROD_OFS,
3004 ata_id_string(dev->id, model_rev, ATA_ID_FW_REV_OFS,
3006 nlen = ata_strim(model_num, sizeof(model_num));
3007 rlen = ata_strim(model_rev, sizeof(model_rev));
3009 for (i = 0; i < ARRAY_SIZE(ata_dma_blacklist); i += 2) {
3010 if (!strncmp(ata_dma_blacklist[i], model_num, nlen)) {
3011 if (ata_dma_blacklist[i+1] == NULL)
3013 if (!strncmp(ata_dma_blacklist[i], model_rev, rlen))
3021 * ata_dev_xfermask - Compute supported xfermask of the given device
3022 * @dev: Device to compute xfermask for
3024 * Compute supported xfermask of @dev and store it in
3025 * dev->*_mask. This function is responsible for applying all
3026 * known limits including host controller limits, device blacklist, etc...
3029 * FIXME: The current implementation limits all transfer modes to
3030 * the fastest mode supported by the slowest device on the port.
3031 * This is not required on most controllers.
3036 static void ata_dev_xfermask(struct ata_device *dev)
3038 struct ata_port *ap = dev->ap;
3039 struct ata_host_set *hs = ap->host_set;
3040 unsigned long xfer_mask;
3043 xfer_mask = ata_pack_xfermask(ap->pio_mask,
3044 ap->mwdma_mask, ap->udma_mask);
3046 /* Apply cable rule here. Don't apply it early because when
3047 * we handle hot plug the cable type can itself change.
3049 if (ap->cbl == ATA_CBL_PATA40)
3050 xfer_mask &= ~(0xF8 << ATA_SHIFT_UDMA);
3052 /* FIXME: Use port-wide xfermask for now */
3053 for (i = 0; i < ATA_MAX_DEVICES; i++) {
3054 struct ata_device *d = &ap->device[i];
3056 if (ata_dev_absent(d))
3059 if (ata_dev_disabled(d)) {
3060 /* to avoid violating device selection timing */
3061 xfer_mask &= ata_pack_xfermask(d->pio_mask,
3062 UINT_MAX, UINT_MAX);
3066 xfer_mask &= ata_pack_xfermask(d->pio_mask,
3067 d->mwdma_mask, d->udma_mask);
3068 xfer_mask &= ata_id_xfermask(d->id);
3069 if (ata_dma_blacklisted(d))
3070 xfer_mask &= ~(ATA_MASK_MWDMA | ATA_MASK_UDMA);
3073 if (ata_dma_blacklisted(dev))
3074 ata_dev_printk(dev, KERN_WARNING,
3075 "device is on DMA blacklist, disabling DMA\n");
3077 if (hs->flags & ATA_HOST_SIMPLEX) {
3078 if (hs->simplex_claimed)
3079 xfer_mask &= ~(ATA_MASK_MWDMA | ATA_MASK_UDMA);
3082 if (ap->ops->mode_filter)
3083 xfer_mask = ap->ops->mode_filter(ap, dev, xfer_mask);
3085 ata_unpack_xfermask(xfer_mask, &dev->pio_mask,
3086 &dev->mwdma_mask, &dev->udma_mask);
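/*
 * Illustrative sketch (not part of the driver, kept out of the build):
 * how the packed xfer_mask used above is built and taken apart, and
 * what the 40-wire cable rule strips.  The function name is
 * hypothetical.
 */
#if 0
static void example_xfermask(struct ata_device *dev)
{
	unsigned long xfer_mask;
	unsigned int pio, mwdma, udma;

	/* fold the three per-type masks into one linear mask */
	xfer_mask = ata_pack_xfermask(dev->pio_mask, dev->mwdma_mask,
				      dev->udma_mask);

	/* a 40-wire PATA cable cannot carry UDMA/66 and above */
	xfer_mask &= ~(0xF8 << ATA_SHIFT_UDMA);

	/* split it back into per-type masks */
	ata_unpack_xfermask(xfer_mask, &pio, &mwdma, &udma);
}
#endif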
3090 * ata_dev_set_xfermode - Issue SET FEATURES - XFER MODE command
3091 * @dev: Device to which command will be sent
3093 * Issue SET FEATURES - XFER MODE command to device @dev
3097 * PCI/etc. bus probe sem.
3100 * 0 on success, AC_ERR_* mask otherwise.
3103 static unsigned int ata_dev_set_xfermode(struct ata_device *dev)
3105 struct ata_taskfile tf;
3106 unsigned int err_mask;
3108 /* set up set-features taskfile */
3109 DPRINTK("set features - xfer mode\n");
3111 ata_tf_init(dev, &tf);
3112 tf.command = ATA_CMD_SET_FEATURES;
3113 tf.feature = SETFEATURES_XFER;
3114 tf.flags |= ATA_TFLAG_ISADDR | ATA_TFLAG_DEVICE;
3115 tf.protocol = ATA_PROT_NODATA;
3116 tf.nsect = dev->xfer_mode;
3118 err_mask = ata_exec_internal(dev, &tf, NULL, DMA_NONE, NULL, 0);
3120 DPRINTK("EXIT, err_mask=%x\n", err_mask);
3125 * ata_dev_init_params - Issue INIT DEV PARAMS command
3126 * @dev: Device to which command will be sent
3127 * @heads: Number of heads (taskfile parameter)
3128 * @sectors: Number of sectors (taskfile parameter)
3131 * Kernel thread context (may sleep)
3134 * 0 on success, AC_ERR_* mask otherwise.
3136 static unsigned int ata_dev_init_params(struct ata_device *dev,
3137 u16 heads, u16 sectors)
3139 struct ata_taskfile tf;
3140 unsigned int err_mask;
3142 /* Number of sectors per track 1-255. Number of heads 1-16 */
3143 if (sectors < 1 || sectors > 255 || heads < 1 || heads > 16)
3144 return AC_ERR_INVALID;
3146 /* set up init dev params taskfile */
3147 DPRINTK("init dev params \n");
3149 ata_tf_init(dev, &tf);
3150 tf.command = ATA_CMD_INIT_DEV_PARAMS;
3151 tf.flags |= ATA_TFLAG_ISADDR | ATA_TFLAG_DEVICE;
3152 tf.protocol = ATA_PROT_NODATA;
3154 tf.device |= (heads - 1) & 0x0f; /* max head = num. of heads - 1 */
3156 err_mask = ata_exec_internal(dev, &tf, NULL, DMA_NONE, NULL, 0);
3158 DPRINTK("EXIT, err_mask=%x\n", err_mask);
3163 * ata_sg_clean - Unmap DMA memory associated with command
3164 * @qc: Command containing DMA memory to be released
3166 * Unmap all mapped DMA memory associated with this command.
3169 * spin_lock_irqsave(host_set lock)
3172 static void ata_sg_clean(struct ata_queued_cmd *qc)
3174 struct ata_port *ap = qc->ap;
3175 struct scatterlist *sg = qc->__sg;
3176 int dir = qc->dma_dir;
3177 void *pad_buf = NULL;
3179 WARN_ON(!(qc->flags & ATA_QCFLAG_DMAMAP));
3180 WARN_ON(sg == NULL);
3182 if (qc->flags & ATA_QCFLAG_SINGLE)
3183 WARN_ON(qc->n_elem > 1);
3185 VPRINTK("unmapping %u sg elements\n", qc->n_elem);
3187 /* if we padded the buffer out to 32-bit boundary, and data
3188 * xfer direction is from-device, we must copy from the
3189 * pad buffer back into the supplied buffer
3191 if (qc->pad_len && !(qc->tf.flags & ATA_TFLAG_WRITE))
3192 pad_buf = ap->pad + (qc->tag * ATA_DMA_PAD_SZ);
3194 if (qc->flags & ATA_QCFLAG_SG) {
3196 dma_unmap_sg(ap->dev, sg, qc->n_elem, dir);
3197 /* restore last sg */
3198 sg[qc->orig_n_elem - 1].length += qc->pad_len;
3200 struct scatterlist *psg = &qc->pad_sgent;
3201 void *addr = kmap_atomic(psg->page, KM_IRQ0);
3202 memcpy(addr + psg->offset, pad_buf, qc->pad_len);
3203 kunmap_atomic(addr, KM_IRQ0);
3207 dma_unmap_single(ap->dev,
3208 sg_dma_address(&sg[0]), sg_dma_len(&sg[0]),
3211 sg->length += qc->pad_len;
3213 memcpy(qc->buf_virt + sg->length - qc->pad_len,
3214 pad_buf, qc->pad_len);
3217 qc->flags &= ~ATA_QCFLAG_DMAMAP;
3222 * ata_fill_sg - Fill PCI IDE PRD table
3223 * @qc: Metadata associated with taskfile to be transferred
3225 * Fill PCI IDE PRD (scatter-gather) table with segments
3226 * associated with the current disk command.
3229 * spin_lock_irqsave(host_set lock)
3232 static void ata_fill_sg(struct ata_queued_cmd *qc)
3234 struct ata_port *ap = qc->ap;
3235 struct scatterlist *sg;
3238 WARN_ON(qc->__sg == NULL);
3239 WARN_ON(qc->n_elem == 0 && qc->pad_len == 0);
3242 ata_for_each_sg(sg, qc) {
3246 /* determine if physical DMA addr spans 64K boundary.
3247 * Note h/w doesn't support 64-bit, so we unconditionally
3248 * truncate dma_addr_t to u32.
3250 addr = (u32) sg_dma_address(sg);
3251 sg_len = sg_dma_len(sg);
3254 offset = addr & 0xffff;
3256 if ((offset + sg_len) > 0x10000)
3257 len = 0x10000 - offset;
3259 ap->prd[idx].addr = cpu_to_le32(addr);
3260 ap->prd[idx].flags_len = cpu_to_le32(len & 0xffff);
3261 VPRINTK("PRD[%u] = (0x%X, 0x%X)\n", idx, addr, len);
3270 ap->prd[idx - 1].flags_len |= cpu_to_le32(ATA_PRD_EOT);
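/*
 * Illustrative sketch (not part of the driver, kept out of the build):
 * the 64K-boundary splitting rule used by ata_fill_sg() above, as a
 * standalone calculation of how many PRD entries one DMA segment
 * needs.  The function name is hypothetical.
 */
#if 0
static unsigned int example_prd_entries_needed(dma_addr_t addr, u32 sg_len)
{
	unsigned int nprd = 0;

	while (sg_len) {
		u32 offset = addr & 0xffff;
		u32 len = sg_len;

		/* a PRD entry must not cross a 64K boundary */
		if (offset + sg_len > 0x10000)
			len = 0x10000 - offset;

		addr += len;
		sg_len -= len;
		nprd++;
	}

	return nprd;
}
#endif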
3273 * ata_check_atapi_dma - Check whether ATAPI DMA can be supported
3274 * @qc: Metadata associated with taskfile to check
3276 * Allow low-level driver to filter ATA PACKET commands, returning
3277 * a status indicating whether or not it is OK to use DMA for the
3278 * supplied PACKET command.
3281 * spin_lock_irqsave(host_set lock)
3283 * RETURNS: 0 when ATAPI DMA can be used, nonzero otherwise
3286 int ata_check_atapi_dma(struct ata_queued_cmd *qc)
3288 struct ata_port *ap = qc->ap;
3289 int rc = 0; /* Assume ATAPI DMA is OK by default */
3291 if (ap->ops->check_atapi_dma)
3292 rc = ap->ops->check_atapi_dma(qc);
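/*
 * Illustrative sketch (not part of the driver, kept out of the build):
 * a hypothetical LLDD ->check_atapi_dma() hook, as called by
 * ata_check_atapi_dma() above.  Returning non-zero forces the PACKET
 * command to be transferred by PIO instead of DMA.
 */
#if 0
static int example_check_atapi_dma(struct ata_queued_cmd *qc)
{
	/* e.g. a controller that cannot DMA odd-sized ATAPI transfers */
	if (qc->nbytes & 1)
		return 1;	/* fall back to PIO */

	return 0;		/* DMA is fine */
}
#endif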
3297 * ata_qc_prep - Prepare taskfile for submission
3298 * @qc: Metadata associated with taskfile to be prepared
3300 * Prepare ATA taskfile for submission.
3303 * spin_lock_irqsave(host_set lock)
3305 void ata_qc_prep(struct ata_queued_cmd *qc)
3307 if (!(qc->flags & ATA_QCFLAG_DMAMAP))
3313 void ata_noop_qc_prep(struct ata_queued_cmd *qc) { }
3316 * ata_sg_init_one - Associate command with memory buffer
3317 * @qc: Command to be associated
3318 * @buf: Memory buffer
3319 * @buflen: Length of memory buffer, in bytes.
3321 * Initialize the data-related elements of queued_cmd @qc
3322 * to point to a single memory buffer, @buf of byte length @buflen.
3325 * spin_lock_irqsave(host_set lock)
3328 void ata_sg_init_one(struct ata_queued_cmd *qc, void *buf, unsigned int buflen)
3330 struct scatterlist *sg;
3332 qc->flags |= ATA_QCFLAG_SINGLE;
3334 memset(&qc->sgent, 0, sizeof(qc->sgent));
3335 qc->__sg = &qc->sgent;
3337 qc->orig_n_elem = 1;
3339 qc->nbytes = buflen;
3342 sg_init_one(sg, buf, buflen);
3346 * ata_sg_init - Associate command with scatter-gather table.
3347 * @qc: Command to be associated
3348 * @sg: Scatter-gather table.
3349 * @n_elem: Number of elements in s/g table.
3351 * Initialize the data-related elements of queued_cmd @qc
3352 * to point to a scatter-gather table @sg, containing @n_elem elements.
3356 * spin_lock_irqsave(host_set lock)
3359 void ata_sg_init(struct ata_queued_cmd *qc, struct scatterlist *sg,
3360 unsigned int n_elem)
3362 qc->flags |= ATA_QCFLAG_SG;
3364 qc->n_elem = n_elem;
3365 qc->orig_n_elem = n_elem;
3369 * ata_sg_setup_one - DMA-map the memory buffer associated with a command.
3370 * @qc: Command with memory buffer to be mapped.
3372 * DMA-map the memory buffer associated with queued_cmd @qc.
3375 * spin_lock_irqsave(host_set lock)
3378 * Zero on success, negative on error.
3381 static int ata_sg_setup_one(struct ata_queued_cmd *qc)
3383 struct ata_port *ap = qc->ap;
3384 int dir = qc->dma_dir;
3385 struct scatterlist *sg = qc->__sg;
3386 dma_addr_t dma_address;
3389 /* we must lengthen transfers to end on a 32-bit boundary */
3390 qc->pad_len = sg->length & 3;
3392 void *pad_buf = ap->pad + (qc->tag * ATA_DMA_PAD_SZ);
3393 struct scatterlist *psg = &qc->pad_sgent;
3395 WARN_ON(qc->dev->class != ATA_DEV_ATAPI);
3397 memset(pad_buf, 0, ATA_DMA_PAD_SZ);
3399 if (qc->tf.flags & ATA_TFLAG_WRITE)
3400 memcpy(pad_buf, qc->buf_virt + sg->length - qc->pad_len,
3403 sg_dma_address(psg) = ap->pad_dma + (qc->tag * ATA_DMA_PAD_SZ);
3404 sg_dma_len(psg) = ATA_DMA_PAD_SZ;
3406 sg->length -= qc->pad_len;
3407 if (sg->length == 0)
3410 DPRINTK("padding done, sg->length=%u pad_len=%u\n",
3411 sg->length, qc->pad_len);
3419 dma_address = dma_map_single(ap->dev, qc->buf_virt,
3421 if (dma_mapping_error(dma_address)) {
3423 sg->length += qc->pad_len;
3427 sg_dma_address(sg) = dma_address;
3428 sg_dma_len(sg) = sg->length;
3431 DPRINTK("mapped buffer of %d bytes for %s\n", sg_dma_len(sg),
3432 qc->tf.flags & ATA_TFLAG_WRITE ? "write" : "read");
3438 * ata_sg_setup - DMA-map the scatter-gather table associated with a command.
3439 * @qc: Command with scatter-gather table to be mapped.
3441 * DMA-map the scatter-gather table associated with queued_cmd @qc.
3444 * spin_lock_irqsave(host_set lock)
3447 * Zero on success, negative on error.
3451 static int ata_sg_setup(struct ata_queued_cmd *qc)
3453 struct ata_port *ap = qc->ap;
3454 struct scatterlist *sg = qc->__sg;
3455 struct scatterlist *lsg = &sg[qc->n_elem - 1];
3456 int n_elem, pre_n_elem, dir, trim_sg = 0;
3458 VPRINTK("ENTER, ata%u\n", ap->id);
3459 WARN_ON(!(qc->flags & ATA_QCFLAG_SG));
3461 /* we must lengthen transfers to end on a 32-bit boundary */
3462 qc->pad_len = lsg->length & 3;
3464 void *pad_buf = ap->pad + (qc->tag * ATA_DMA_PAD_SZ);
3465 struct scatterlist *psg = &qc->pad_sgent;
3466 unsigned int offset;
3468 WARN_ON(qc->dev->class != ATA_DEV_ATAPI);
3470 memset(pad_buf, 0, ATA_DMA_PAD_SZ);
3473 * psg->page/offset are used to copy to-be-written
3474 * data in this function or read data in ata_sg_clean.
3476 offset = lsg->offset + lsg->length - qc->pad_len;
3477 psg->page = nth_page(lsg->page, offset >> PAGE_SHIFT);
3478 psg->offset = offset_in_page(offset);
3480 if (qc->tf.flags & ATA_TFLAG_WRITE) {
3481 void *addr = kmap_atomic(psg->page, KM_IRQ0);
3482 memcpy(pad_buf, addr + psg->offset, qc->pad_len);
3483 kunmap_atomic(addr, KM_IRQ0);
3486 sg_dma_address(psg) = ap->pad_dma + (qc->tag * ATA_DMA_PAD_SZ);
3487 sg_dma_len(psg) = ATA_DMA_PAD_SZ;
3489 lsg->length -= qc->pad_len;
3490 if (lsg->length == 0)
3493 DPRINTK("padding done, sg[%d].length=%u pad_len=%u\n",
3494 qc->n_elem - 1, lsg->length, qc->pad_len);
3497 pre_n_elem = qc->n_elem;
3498 if (trim_sg && pre_n_elem)
3507 n_elem = dma_map_sg(ap->dev, sg, pre_n_elem, dir);
3509 /* restore last sg */
3510 lsg->length += qc->pad_len;
3514 DPRINTK("%d sg elements mapped\n", n_elem);
3517 qc->n_elem = n_elem;
3523 * swap_buf_le16 - swap halves of 16-bit words in place
3524 * @buf: Buffer to swap
3525 * @buf_words: Number of 16-bit words in buffer.
3527 * Swap halves of 16-bit words if needed to convert from
3528 * little-endian byte order to native cpu byte order, or vice versa.
3532 * Inherited from caller.
3534 void swap_buf_le16(u16 *buf, unsigned int buf_words)
3539 for (i = 0; i < buf_words; i++)
3540 buf[i] = le16_to_cpu(buf[i]);
3541 #endif /* __BIG_ENDIAN */
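/*
 * Illustrative sketch (not part of the driver, kept out of the build):
 * the typical use of swap_buf_le16() -- IDENTIFY data arrives from the
 * device as 256 little-endian words and must be converted to native
 * byte order before the ata_id_*() helpers can parse it.  The function
 * name is hypothetical.
 */
#if 0
static void example_fixup_identify(u16 *id)
{
	swap_buf_le16(id, ATA_ID_WORDS);	/* no-op on little-endian */
}
#endif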
3545 * ata_mmio_data_xfer - Transfer data by MMIO
3546 * @adev: device for this I/O
3548 * @buflen: buffer length
3549 * @write_data: read/write
3551 * Transfer data from/to the device data register by MMIO.
3554 * Inherited from caller.
3557 void ata_mmio_data_xfer(struct ata_device *adev, unsigned char *buf,
3558 unsigned int buflen, int write_data)
3560 struct ata_port *ap = adev->ap;
3562 unsigned int words = buflen >> 1;
3563 u16 *buf16 = (u16 *) buf;
3564 void __iomem *mmio = (void __iomem *)ap->ioaddr.data_addr;
3566 /* Transfer multiple of 2 bytes */
3568 for (i = 0; i < words; i++)
3569 writew(le16_to_cpu(buf16[i]), mmio);
3571 for (i = 0; i < words; i++)
3572 buf16[i] = cpu_to_le16(readw(mmio));
3575 /* Transfer trailing 1 byte, if any. */
3576 if (unlikely(buflen & 0x01)) {
3577 u16 align_buf[1] = { 0 };
3578 unsigned char *trailing_buf = buf + buflen - 1;
3581 memcpy(align_buf, trailing_buf, 1);
3582 writew(le16_to_cpu(align_buf[0]), mmio);
3584 align_buf[0] = cpu_to_le16(readw(mmio));
3585 memcpy(trailing_buf, align_buf, 1);
3591 * ata_pio_data_xfer - Transfer data by PIO
3592 * @adev: device to target
3594 * @buflen: buffer length
3595 * @write_data: read/write
3597 * Transfer data from/to the device data register by PIO.
3600 * Inherited from caller.
3603 void ata_pio_data_xfer(struct ata_device *adev, unsigned char *buf,
3604 unsigned int buflen, int write_data)
3606 struct ata_port *ap = adev->ap;
3607 unsigned int words = buflen >> 1;
3609 /* Transfer multiple of 2 bytes */
3611 outsw(ap->ioaddr.data_addr, buf, words);
3613 insw(ap->ioaddr.data_addr, buf, words);
3615 /* Transfer trailing 1 byte, if any. */
3616 if (unlikely(buflen & 0x01)) {
3617 u16 align_buf[1] = { 0 };
3618 unsigned char *trailing_buf = buf + buflen - 1;
3621 memcpy(align_buf, trailing_buf, 1);
3622 outw(le16_to_cpu(align_buf[0]), ap->ioaddr.data_addr);
3624 align_buf[0] = cpu_to_le16(inw(ap->ioaddr.data_addr));
3625 memcpy(trailing_buf, align_buf, 1);
3631 * ata_pio_data_xfer_noirq - Transfer data by PIO
3632 * @adev: device to target
3634 * @buflen: buffer length
3635 * @write_data: read/write
3637 * Transfer data from/to the device data register by PIO. Do the
3638 * transfer with interrupts disabled.
3641 * Inherited from caller.
3644 void ata_pio_data_xfer_noirq(struct ata_device *adev, unsigned char *buf,
3645 unsigned int buflen, int write_data)
3647 unsigned long flags;
3648 local_irq_save(flags);
3649 ata_pio_data_xfer(adev, buf, buflen, write_data);
3650 local_irq_restore(flags);
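/*
 * Illustrative sketch (not part of the driver, kept out of the build):
 * an LLDD normally plugs one of the three data transfer helpers above
 * straight into its ata_port_operations.  The structure below is a
 * hypothetical fragment, not a complete ops table.
 */
#if 0
static const struct ata_port_operations example_pio_ops = {
	.data_xfer	= ata_pio_data_xfer,	/* port I/O based LLDD */
};
#endif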
3655 * ata_pio_sector - Transfer ATA_SECT_SIZE (512 bytes) of data.
3656 * @qc: Command on going
3658 * Transfer ATA_SECT_SIZE of data from/to the ATA device.
3661 * Inherited from caller.
3664 static void ata_pio_sector(struct ata_queued_cmd *qc)
3666 int do_write = (qc->tf.flags & ATA_TFLAG_WRITE);
3667 struct scatterlist *sg = qc->__sg;
3668 struct ata_port *ap = qc->ap;
3670 unsigned int offset;
3673 if (qc->cursect == (qc->nsect - 1))
3674 ap->hsm_task_state = HSM_ST_LAST;
3676 page = sg[qc->cursg].page;
3677 offset = sg[qc->cursg].offset + qc->cursg_ofs * ATA_SECT_SIZE;
3679 /* get the current page and offset */
3680 page = nth_page(page, (offset >> PAGE_SHIFT));
3681 offset %= PAGE_SIZE;
3683 DPRINTK("data %s\n", qc->tf.flags & ATA_TFLAG_WRITE ? "write" : "read");
3685 if (PageHighMem(page)) {
3686 unsigned long flags;
3688 /* FIXME: use a bounce buffer */
3689 local_irq_save(flags);
3690 buf = kmap_atomic(page, KM_IRQ0);
3692 /* do the actual data transfer */
3693 ap->ops->data_xfer(qc->dev, buf + offset, ATA_SECT_SIZE, do_write);
3695 kunmap_atomic(buf, KM_IRQ0);
3696 local_irq_restore(flags);
3698 buf = page_address(page);
3699 ap->ops->data_xfer(qc->dev, buf + offset, ATA_SECT_SIZE, do_write);
3705 if ((qc->cursg_ofs * ATA_SECT_SIZE) == (&sg[qc->cursg])->length) {
3712 * ata_pio_sectors - Transfer one or many 512-byte sectors.
3713 * @qc: Command on going
3715 * Transfer one or many ATA_SECT_SIZE of data from/to the
3716 * ATA device for the DRQ request.
3719 * Inherited from caller.
3722 static void ata_pio_sectors(struct ata_queued_cmd *qc)
3724 if (is_multi_taskfile(&qc->tf)) {
3725 /* READ/WRITE MULTIPLE */
3728 WARN_ON(qc->dev->multi_count == 0);
3730 nsect = min(qc->nsect - qc->cursect, qc->dev->multi_count);
3738 * atapi_send_cdb - Write CDB bytes to hardware
3739 * @ap: Port to which ATAPI device is attached.
3740 * @qc: Taskfile currently active
3742 * When device has indicated its readiness to accept
3743 * a CDB, this function is called. Send the CDB.
3749 static void atapi_send_cdb(struct ata_port *ap, struct ata_queued_cmd *qc)
3752 DPRINTK("send cdb\n");
3753 WARN_ON(qc->dev->cdb_len < 12);
3755 ap->ops->data_xfer(qc->dev, qc->cdb, qc->dev->cdb_len, 1);
3756 ata_altstatus(ap); /* flush */
3758 switch (qc->tf.protocol) {
3759 case ATA_PROT_ATAPI:
3760 ap->hsm_task_state = HSM_ST;
3762 case ATA_PROT_ATAPI_NODATA:
3763 ap->hsm_task_state = HSM_ST_LAST;
3765 case ATA_PROT_ATAPI_DMA:
3766 ap->hsm_task_state = HSM_ST_LAST;
3767 /* initiate bmdma */
3768 ap->ops->bmdma_start(qc);
3774 * __atapi_pio_bytes - Transfer data from/to the ATAPI device.
3775 * @qc: Command on going
3776 * @bytes: number of bytes
3778 * Transfer data from/to the ATAPI device.
3781 * Inherited from caller.
3785 static void __atapi_pio_bytes(struct ata_queued_cmd *qc, unsigned int bytes)
3787 int do_write = (qc->tf.flags & ATA_TFLAG_WRITE);
3788 struct scatterlist *sg = qc->__sg;
3789 struct ata_port *ap = qc->ap;
3792 unsigned int offset, count;
3794 if (qc->curbytes + bytes >= qc->nbytes)
3795 ap->hsm_task_state = HSM_ST_LAST;
3798 if (unlikely(qc->cursg >= qc->n_elem)) {
3800 * The end of qc->sg is reached and the device expects
3801 * more data to transfer. In order not to overrun qc->sg
3802 * and fulfill length specified in the byte count register,
3803 * - for read case, discard trailing data from the device
3804 * - for write case, pad with zero data to the device
3806 u16 pad_buf[1] = { 0 };
3807 unsigned int words = bytes >> 1;
3810 if (words) /* warning if bytes > 1 */
3811 ata_dev_printk(qc->dev, KERN_WARNING,
3812 "%u bytes trailing data\n", bytes);
3814 for (i = 0; i < words; i++)
3815 ap->ops->data_xfer(qc->dev, (unsigned char*)pad_buf, 2, do_write);
3817 ap->hsm_task_state = HSM_ST_LAST;
3821 sg = &qc->__sg[qc->cursg];
3824 offset = sg->offset + qc->cursg_ofs;
3826 /* get the current page and offset */
3827 page = nth_page(page, (offset >> PAGE_SHIFT));
3828 offset %= PAGE_SIZE;
3830 /* don't overrun current sg */
3831 count = min(sg->length - qc->cursg_ofs, bytes);
3833 /* don't cross page boundaries */
3834 count = min(count, (unsigned int)PAGE_SIZE - offset);
3836 DPRINTK("data %s\n", qc->tf.flags & ATA_TFLAG_WRITE ? "write" : "read");
3838 if (PageHighMem(page)) {
3839 unsigned long flags;
3841 /* FIXME: use bounce buffer */
3842 local_irq_save(flags);
3843 buf = kmap_atomic(page, KM_IRQ0);
3845 /* do the actual data transfer */
3846 ap->ops->data_xfer(qc->dev, buf + offset, count, do_write);
3848 kunmap_atomic(buf, KM_IRQ0);
3849 local_irq_restore(flags);
3851 buf = page_address(page);
3852 ap->ops->data_xfer(qc->dev, buf + offset, count, do_write);
3856 qc->curbytes += count;
3857 qc->cursg_ofs += count;
3859 if (qc->cursg_ofs == sg->length) {
3869 * atapi_pio_bytes - Transfer data from/to the ATAPI device.
3870 * @qc: Command on going
3872 * Transfer data from/to the ATAPI device.
3875 * Inherited from caller.
3878 static void atapi_pio_bytes(struct ata_queued_cmd *qc)
3880 struct ata_port *ap = qc->ap;
3881 struct ata_device *dev = qc->dev;
3882 unsigned int ireason, bc_lo, bc_hi, bytes;
3883 int i_write, do_write = (qc->tf.flags & ATA_TFLAG_WRITE) ? 1 : 0;
3885 /* Abuse qc->result_tf for temp storage of intermediate TF
3886 * here to save some kernel stack usage.
3887 * For normal completion, qc->result_tf is not relevant. For
3888 * error, qc->result_tf is later overwritten by ata_qc_complete().
3889 * So, the correctness of qc->result_tf is not affected.
3891 ap->ops->tf_read(ap, &qc->result_tf);
3892 ireason = qc->result_tf.nsect;
3893 bc_lo = qc->result_tf.lbam;
3894 bc_hi = qc->result_tf.lbah;
3895 bytes = (bc_hi << 8) | bc_lo;
3897 /* shall be cleared to zero, indicating xfer of data */
3898 if (ireason & (1 << 0))
3901 /* make sure transfer direction matches expected */
3902 i_write = ((ireason & (1 << 1)) == 0) ? 1 : 0;
3903 if (do_write != i_write)
3906 VPRINTK("ata%u: xfering %d bytes\n", ap->id, bytes);
3908 __atapi_pio_bytes(qc, bytes);
3913 ata_dev_printk(dev, KERN_INFO, "ATAPI check failed\n");
3914 qc->err_mask |= AC_ERR_HSM;
3915 ap->hsm_task_state = HSM_ST_ERR;
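/*
 * Illustrative sketch (not part of the driver, kept out of the build):
 * how the ATAPI interrupt reason register checked above is decoded.
 * Bit 0 is CoD (1 = CDB, 0 = data) and bit 1 is IO (1 = to host,
 * 0 = to device).  The function name is hypothetical.
 */
#if 0
static int example_ireason_ok(u8 ireason, int cmd_is_write)
{
	int cod = ireason & (1 << 0);
	int io = ireason & (1 << 1);

	if (cod)			/* must be a data phase */
		return 0;

	/* the direction the device expects must match the command */
	if (cmd_is_write != (io ? 0 : 1))
		return 0;

	return 1;
}
#endif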
3919 * ata_hsm_ok_in_wq - Check if the qc can be handled in the workqueue.
3920 * @ap: the target ata_port
3924 * 1 if ok in workqueue, 0 otherwise.
3927 static inline int ata_hsm_ok_in_wq(struct ata_port *ap, struct ata_queued_cmd *qc)
3929 if (qc->tf.flags & ATA_TFLAG_POLLING)
3932 if (ap->hsm_task_state == HSM_ST_FIRST) {
3933 if (qc->tf.protocol == ATA_PROT_PIO &&
3934 (qc->tf.flags & ATA_TFLAG_WRITE))
3937 if (is_atapi_taskfile(&qc->tf) &&
3938 !(qc->dev->flags & ATA_DFLAG_CDB_INTR))
3946 * ata_hsm_qc_complete - finish a qc running on standard HSM
3947 * @qc: Command to complete
3948 * @in_wq: 1 if called from workqueue, 0 otherwise
3950 * Finish @qc which is running on standard HSM.
3953 * If @in_wq is zero, spin_lock_irqsave(host_set lock).
3954 * Otherwise, none on entry and grabs host lock.
3956 static void ata_hsm_qc_complete(struct ata_queued_cmd *qc, int in_wq)
3958 struct ata_port *ap = qc->ap;
3959 unsigned long flags;
3961 if (ap->ops->error_handler) {
3963 spin_lock_irqsave(ap->lock, flags);
3965 /* EH might have kicked in while host_set lock is released.
3968 qc = ata_qc_from_tag(ap, qc->tag);
3970 if (likely(!(qc->err_mask & AC_ERR_HSM))) {
3972 ata_qc_complete(qc);
3974 ata_port_freeze(ap);
3977 spin_unlock_irqrestore(ap->lock, flags);
3979 if (likely(!(qc->err_mask & AC_ERR_HSM)))
3980 ata_qc_complete(qc);
3982 ata_port_freeze(ap);
3986 spin_lock_irqsave(ap->lock, flags);
3988 ata_qc_complete(qc);
3989 spin_unlock_irqrestore(ap->lock, flags);
3991 ata_qc_complete(qc);
3994 ata_altstatus(ap); /* flush */
3998 * ata_hsm_move - move the HSM to the next state.
3999 * @ap: the target ata_port
4001 * @status: current device status
4002 * @in_wq: 1 if called from workqueue, 0 otherwise
4005 * 1 when poll next status needed, 0 otherwise.
4007 int ata_hsm_move(struct ata_port *ap, struct ata_queued_cmd *qc,
4008 u8 status, int in_wq)
4010 unsigned long flags = 0;
4013 WARN_ON((qc->flags & ATA_QCFLAG_ACTIVE) == 0);
4015 /* Make sure ata_qc_issue_prot() does not throw things
4016 * like DMA polling into the workqueue. Notice that
4017 * in_wq is not equivalent to (qc->tf.flags & ATA_TFLAG_POLLING).
4019 WARN_ON(in_wq != ata_hsm_ok_in_wq(ap, qc));
4022 DPRINTK("ata%u: protocol %d task_state %d (dev_stat 0x%X)\n",
4023 ap->id, qc->tf.protocol, ap->hsm_task_state, status);
4025 switch (ap->hsm_task_state) {
4027 /* Send first data block or PACKET CDB */
4029 /* If polling, we will stay in the work queue after
4030 * sending the data. Otherwise, interrupt handler
4031 * takes over after sending the data.
4033 poll_next = (qc->tf.flags & ATA_TFLAG_POLLING);
4035 /* check device status */
4036 if (unlikely((status & ATA_DRQ) == 0)) {
4037 /* handle BSY=0, DRQ=0 as error */
4038 if (likely(status & (ATA_ERR | ATA_DF)))
4039 /* device stops HSM for abort/error */
4040 qc->err_mask |= AC_ERR_DEV;
4042 /* HSM violation. Let EH handle this */
4043 qc->err_mask |= AC_ERR_HSM;
4045 ap->hsm_task_state = HSM_ST_ERR;
4049 /* Device should not ask for data transfer (DRQ=1)
4050 * when it finds something wrong.
4051 * We ignore DRQ here and stop the HSM by
4052 * changing hsm_task_state to HSM_ST_ERR and
4053 * let the EH abort the command or reset the device.
4055 if (unlikely(status & (ATA_ERR | ATA_DF))) {
4056 printk(KERN_WARNING "ata%d: DRQ=1 with device error, dev_stat 0x%X\n",
4058 qc->err_mask |= AC_ERR_HSM;
4059 ap->hsm_task_state = HSM_ST_ERR;
4063 /* Send the CDB (atapi) or the first data block (ata pio out).
4064 * During the state transition, interrupt handler shouldn't
4065 * be invoked before the data transfer is complete and
4066 * hsm_task_state is changed. Hence, the following locking.
4069 spin_lock_irqsave(ap->lock, flags);
4071 if (qc->tf.protocol == ATA_PROT_PIO) {
4072 /* PIO data out protocol.
4073 * send first data block.
4076 /* ata_pio_sectors() might change the state
4077 * to HSM_ST_LAST. so, the state is changed here
4078 * before ata_pio_sectors().
4080 ap->hsm_task_state = HSM_ST;
4081 ata_pio_sectors(qc);
4082 ata_altstatus(ap); /* flush */
4085 atapi_send_cdb(ap, qc);
4088 spin_unlock_irqrestore(ap->lock, flags);
4090 /* if polling, ata_pio_task() handles the rest.
4091 * otherwise, interrupt handler takes over from here.
4096 /* complete command or read/write the data register */
4097 if (qc->tf.protocol == ATA_PROT_ATAPI) {
4098 /* ATAPI PIO protocol */
4099 if ((status & ATA_DRQ) == 0) {
4100 /* No more data to transfer or device error.
4101 * Device error will be tagged in HSM_ST_LAST.
4103 ap->hsm_task_state = HSM_ST_LAST;
4107 /* Device should not ask for data transfer (DRQ=1)
4108 * when it finds something wrong.
4109 * We ignore DRQ here and stop the HSM by
4110 * changing hsm_task_state to HSM_ST_ERR and
4111 * let the EH abort the command or reset the device.
4113 if (unlikely(status & (ATA_ERR | ATA_DF))) {
4114 printk(KERN_WARNING "ata%d: DRQ=1 with device error, dev_stat 0x%X\n",
4116 qc->err_mask |= AC_ERR_HSM;
4117 ap->hsm_task_state = HSM_ST_ERR;
4121 atapi_pio_bytes(qc);
4123 if (unlikely(ap->hsm_task_state == HSM_ST_ERR))
4124 /* bad ireason reported by device */
4128 /* ATA PIO protocol */
4129 if (unlikely((status & ATA_DRQ) == 0)) {
4130 /* handle BSY=0, DRQ=0 as error */
4131 if (likely(status & (ATA_ERR | ATA_DF)))
4132 /* device stops HSM for abort/error */
4133 qc->err_mask |= AC_ERR_DEV;
4135 /* HSM violation. Let EH handle this */
4136 qc->err_mask |= AC_ERR_HSM;
4138 ap->hsm_task_state = HSM_ST_ERR;
4142 /* For PIO reads, some devices may ask for
4143 * data transfer (DRQ=1) along with ERR=1.
4144 * We respect DRQ here and transfer one
4145 * block of junk data before changing the
4146 * hsm_task_state to HSM_ST_ERR.
4148 * For PIO writes, ERR=1 DRQ=1 doesn't make
4149 * sense since the data block has been
4150 * transferred to the device.
4152 if (unlikely(status & (ATA_ERR | ATA_DF))) {
4153 /* data might be corrupted */
4154 qc->err_mask |= AC_ERR_DEV;
4156 if (!(qc->tf.flags & ATA_TFLAG_WRITE)) {
4157 ata_pio_sectors(qc);
4159 status = ata_wait_idle(ap);
4162 if (status & (ATA_BUSY | ATA_DRQ))
4163 qc->err_mask |= AC_ERR_HSM;
4165 /* ata_pio_sectors() might change the
4166 * state to HSM_ST_LAST. so, the state
4167 * is changed after ata_pio_sectors().
4169 ap->hsm_task_state = HSM_ST_ERR;
4173 ata_pio_sectors(qc);
4175 if (ap->hsm_task_state == HSM_ST_LAST &&
4176 (!(qc->tf.flags & ATA_TFLAG_WRITE))) {
4179 status = ata_wait_idle(ap);
4184 ata_altstatus(ap); /* flush */
4189 if (unlikely(!ata_ok(status))) {
4190 qc->err_mask |= __ac_err_mask(status);
4191 ap->hsm_task_state = HSM_ST_ERR;
4195 /* no more data to transfer */
4196 DPRINTK("ata%u: dev %u command complete, drv_stat 0x%x\n",
4197 ap->id, qc->dev->devno, status);
4199 WARN_ON(qc->err_mask);
4201 ap->hsm_task_state = HSM_ST_IDLE;
4203 /* complete taskfile transaction */
4204 ata_hsm_qc_complete(qc, in_wq);
4210 /* make sure qc->err_mask is available to
4211 * know what's wrong and recover
4213 WARN_ON(qc->err_mask == 0);
4215 ap->hsm_task_state = HSM_ST_IDLE;
4217 /* complete taskfile transaction */
4218 ata_hsm_qc_complete(qc, in_wq);
4230 static void ata_pio_task(void *_data)
4232 struct ata_queued_cmd *qc = _data;
4233 struct ata_port *ap = qc->ap;
4238 WARN_ON(ap->hsm_task_state == HSM_ST_IDLE);
4241 * This is purely heuristic. This is a fast path.
4242 * Sometimes when we enter, BSY will be cleared in
4243 * a chk-status or two. If not, the drive is probably seeking
4244 * or something. Snooze for a couple msecs, then
4245 * chk-status again. If still busy, queue delayed work.
4247 status = ata_busy_wait(ap, ATA_BUSY, 5);
4248 if (status & ATA_BUSY) {
4250 status = ata_busy_wait(ap, ATA_BUSY, 10);
4251 if (status & ATA_BUSY) {
4252 ata_port_queue_task(ap, ata_pio_task, qc, ATA_SHORT_PAUSE);
4258 poll_next = ata_hsm_move(ap, qc, status, 1);
4260 /* another command or interrupt handler
4261 * may be running at this point.
4268 * ata_qc_new - Request an available ATA command, for queueing
4269 * @ap: Port associated with device @dev
4270 * @dev: Device from whom we request an available command structure
4276 static struct ata_queued_cmd *ata_qc_new(struct ata_port *ap)
4278 struct ata_queued_cmd *qc = NULL;
4281 /* no command while frozen */
4282 if (unlikely(ap->flags & ATA_FLAG_FROZEN))
4285 /* the last tag is reserved for internal command. */
4286 for (i = 0; i < ATA_MAX_QUEUE - 1; i++)
4287 if (!test_and_set_bit(i, &ap->qc_allocated)) {
4288 qc = __ata_qc_from_tag(ap, i);
4299 * ata_qc_new_init - Request an available ATA command, and initialize it
4300 * @dev: Device from whom we request an available command structure
4306 struct ata_queued_cmd *ata_qc_new_init(struct ata_device *dev)
4308 struct ata_port *ap = dev->ap;
4309 struct ata_queued_cmd *qc;
4311 qc = ata_qc_new(ap);
4324 * ata_qc_free - free unused ata_queued_cmd
4325 * @qc: Command to complete
4327 * Designed to free unused ata_queued_cmd object
4328 * in case something prevents using it.
4331 * spin_lock_irqsave(host_set lock)
4333 void ata_qc_free(struct ata_queued_cmd *qc)
4335 struct ata_port *ap = qc->ap;
4338 WARN_ON(qc == NULL); /* ata_qc_from_tag _might_ return NULL */
4342 if (likely(ata_tag_valid(tag))) {
4343 qc->tag = ATA_TAG_POISON;
4344 clear_bit(tag, &ap->qc_allocated);
4348 void __ata_qc_complete(struct ata_queued_cmd *qc)
4350 struct ata_port *ap = qc->ap;
4352 WARN_ON(qc == NULL); /* ata_qc_from_tag _might_ return NULL */
4353 WARN_ON(!(qc->flags & ATA_QCFLAG_ACTIVE));
4355 if (likely(qc->flags & ATA_QCFLAG_DMAMAP))
4358 /* command should be marked inactive atomically with qc completion */
4359 if (qc->tf.protocol == ATA_PROT_NCQ)
4360 ap->sactive &= ~(1 << qc->tag);
4362 ap->active_tag = ATA_TAG_POISON;
4364 /* atapi: mark qc as inactive to prevent the interrupt handler
4365 * from completing the command twice later, before the error handler
4366 * is called. (when rc != 0 and atapi request sense is needed)
4368 qc->flags &= ~ATA_QCFLAG_ACTIVE;
4369 ap->qc_active &= ~(1 << qc->tag);
4371 /* call completion callback */
4372 qc->complete_fn(qc);
4376 * ata_qc_complete - Complete an active ATA command
4377 * @qc: Command to complete
4378 * @err_mask: ATA Status register contents
4380 * Indicate to the mid and upper layers that an ATA
4381 * command has completed, with either an ok or not-ok status.
4384 * spin_lock_irqsave(host_set lock)
4386 void ata_qc_complete(struct ata_queued_cmd *qc)
4388 struct ata_port *ap = qc->ap;
4390 /* XXX: New EH and old EH use different mechanisms to
4391 * synchronize EH with regular execution path.
4393 * In new EH, a failed qc is marked with ATA_QCFLAG_FAILED.
4394 * Normal execution path is responsible for not accessing a
4395 * failed qc. libata core enforces the rule by returning NULL
4396 * from ata_qc_from_tag() for failed qcs.
4398 * Old EH depends on ata_qc_complete() nullifying completion
4399 * requests if ATA_QCFLAG_EH_SCHEDULED is set. Old EH does
4400 * not synchronize with interrupt handler. Only PIO task is taken care of.
4403 if (ap->ops->error_handler) {
4404 WARN_ON(ap->flags & ATA_FLAG_FROZEN);
4406 if (unlikely(qc->err_mask))
4407 qc->flags |= ATA_QCFLAG_FAILED;
4409 if (unlikely(qc->flags & ATA_QCFLAG_FAILED)) {
4410 if (!ata_tag_internal(qc->tag)) {
4411 /* always fill result TF for failed qc */
4412 ap->ops->tf_read(ap, &qc->result_tf);
4413 ata_qc_schedule_eh(qc);
4418 /* read result TF if requested */
4419 if (qc->flags & ATA_QCFLAG_RESULT_TF)
4420 ap->ops->tf_read(ap, &qc->result_tf);
4422 __ata_qc_complete(qc);
4424 if (qc->flags & ATA_QCFLAG_EH_SCHEDULED)
4427 /* read result TF if failed or requested */
4428 if (qc->err_mask || qc->flags & ATA_QCFLAG_RESULT_TF)
4429 ap->ops->tf_read(ap, &qc->result_tf);
4431 __ata_qc_complete(qc);
4436 * ata_qc_complete_multiple - Complete multiple qcs successfully
4437 * @ap: port in question
4438 * @qc_active: new qc_active mask
4439 * @finish_qc: LLDD callback invoked before completing a qc
4441 * Complete in-flight commands. This function is meant to be
4442 * called from low-level driver's interrupt routine to complete
4443 * requests normally. ap->qc_active and @qc_active are compared
4444 * and commands are completed accordingly.
4447 * spin_lock_irqsave(host_set lock)
4450 * Number of completed commands on success, -errno otherwise.
4452 int ata_qc_complete_multiple(struct ata_port *ap, u32 qc_active,
4453 void (*finish_qc)(struct ata_queued_cmd *))
4459 done_mask = ap->qc_active ^ qc_active;
4461 if (unlikely(done_mask & qc_active)) {
4462 ata_port_printk(ap, KERN_ERR, "illegal qc_active transition "
4463 "(%08x->%08x)\n", ap->qc_active, qc_active);
4467 for (i = 0; i < ATA_MAX_QUEUE; i++) {
4468 struct ata_queued_cmd *qc;
4470 if (!(done_mask & (1 << i)))
4473 if ((qc = ata_qc_from_tag(ap, i))) {
4476 ata_qc_complete(qc);
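/*
 * Illustrative sketch (not part of the driver, kept out of the build):
 * how an NCQ-capable LLDD interrupt handler might use
 * ata_qc_complete_multiple().  The controller reports which tags are
 * still outstanding; the XOR against ap->qc_active inside the helper
 * yields the tags that completed.  The function name is hypothetical.
 */
#if 0
static void example_ncq_irq(struct ata_port *ap, u32 hw_outstanding)
{
	int nr_done;

	nr_done = ata_qc_complete_multiple(ap, hw_outstanding, NULL);
	if (nr_done < 0)
		ata_port_printk(ap, KERN_ERR,
				"bogus completion mask from hardware\n");
}
#endif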
4484 static inline int ata_should_dma_map(struct ata_queued_cmd *qc)
4486 struct ata_port *ap = qc->ap;
4488 switch (qc->tf.protocol) {
4491 case ATA_PROT_ATAPI_DMA:
4494 case ATA_PROT_ATAPI:
4496 if (ap->flags & ATA_FLAG_PIO_DMA)
4509 * ata_qc_issue - issue taskfile to device
4510 * @qc: command to issue to device
4512 * Prepare an ATA command for submission to the device.
4513 * This includes mapping the data into a DMA-able
4514 * area, filling in the S/G table, and finally
4515 * writing the taskfile to hardware, starting the command.
4518 * spin_lock_irqsave(host_set lock)
4520 void ata_qc_issue(struct ata_queued_cmd *qc)
4522 struct ata_port *ap = qc->ap;
4524 /* Make sure only one non-NCQ command is outstanding. The
4525 * check is skipped for old EH because it reuses active qc to
4526 * request ATAPI sense.
4528 WARN_ON(ap->ops->error_handler && ata_tag_valid(ap->active_tag));
4530 if (qc->tf.protocol == ATA_PROT_NCQ) {
4531 WARN_ON(ap->sactive & (1 << qc->tag));
4532 ap->sactive |= 1 << qc->tag;
4534 WARN_ON(ap->sactive);
4535 ap->active_tag = qc->tag;
4538 qc->flags |= ATA_QCFLAG_ACTIVE;
4539 ap->qc_active |= 1 << qc->tag;
4541 if (ata_should_dma_map(qc)) {
4542 if (qc->flags & ATA_QCFLAG_SG) {
4543 if (ata_sg_setup(qc))
4545 } else if (qc->flags & ATA_QCFLAG_SINGLE) {
4546 if (ata_sg_setup_one(qc))
4550 qc->flags &= ~ATA_QCFLAG_DMAMAP;
4553 ap->ops->qc_prep(qc);
4555 qc->err_mask |= ap->ops->qc_issue(qc);
4556 if (unlikely(qc->err_mask))
4561 qc->flags &= ~ATA_QCFLAG_DMAMAP;
4562 qc->err_mask |= AC_ERR_SYSTEM;
4564 ata_qc_complete(qc);
4568 * ata_qc_issue_prot - issue taskfile to device in proto-dependent manner
4569 * @qc: command to issue to device
4571 * Using various libata functions and hooks, this function
4572 * starts an ATA command. ATA commands are grouped into
4573 * classes called "protocols", and issuing each type of protocol
4574 * is slightly different.
4576 * May be used as the qc_issue() entry in ata_port_operations.
4579 * spin_lock_irqsave(host_set lock)
4582 * Zero on success, AC_ERR_* mask on failure
4585 unsigned int ata_qc_issue_prot(struct ata_queued_cmd *qc)
4587 struct ata_port *ap = qc->ap;
4589 /* Use polling pio if the LLD doesn't handle
4590 * interrupt driven pio and atapi CDB interrupt.
4592 if (ap->flags & ATA_FLAG_PIO_POLLING) {
4593 switch (qc->tf.protocol) {
4595 case ATA_PROT_ATAPI:
4596 case ATA_PROT_ATAPI_NODATA:
4597 qc->tf.flags |= ATA_TFLAG_POLLING;
4599 case ATA_PROT_ATAPI_DMA:
4600 if (qc->dev->flags & ATA_DFLAG_CDB_INTR)
4601 /* see ata_dma_blacklisted() */
4609 /* select the device */
4610 ata_dev_select(ap, qc->dev->devno, 1, 0);
4612 /* start the command */
4613 switch (qc->tf.protocol) {
4614 case ATA_PROT_NODATA:
4615 if (qc->tf.flags & ATA_TFLAG_POLLING)
4616 ata_qc_set_polling(qc);
4618 ata_tf_to_host(ap, &qc->tf);
4619 ap->hsm_task_state = HSM_ST_LAST;
4621 if (qc->tf.flags & ATA_TFLAG_POLLING)
4622 ata_port_queue_task(ap, ata_pio_task, qc, 0);
4627 WARN_ON(qc->tf.flags & ATA_TFLAG_POLLING);
4629 ap->ops->tf_load(ap, &qc->tf); /* load tf registers */
4630 ap->ops->bmdma_setup(qc); /* set up bmdma */
4631 ap->ops->bmdma_start(qc); /* initiate bmdma */
4632 ap->hsm_task_state = HSM_ST_LAST;
4636 if (qc->tf.flags & ATA_TFLAG_POLLING)
4637 ata_qc_set_polling(qc);
4639 ata_tf_to_host(ap, &qc->tf);
4641 if (qc->tf.flags & ATA_TFLAG_WRITE) {
4642 /* PIO data out protocol */
4643 ap->hsm_task_state = HSM_ST_FIRST;
4644 ata_port_queue_task(ap, ata_pio_task, qc, 0);
4646 /* always send first data block using
4647 * the ata_pio_task() codepath.
4650 /* PIO data in protocol */
4651 ap->hsm_task_state = HSM_ST;
4653 if (qc->tf.flags & ATA_TFLAG_POLLING)
4654 ata_port_queue_task(ap, ata_pio_task, qc, 0);
4656 /* if polling, ata_pio_task() handles the rest.
4657 * otherwise, interrupt handler takes over from here.
4663 case ATA_PROT_ATAPI:
4664 case ATA_PROT_ATAPI_NODATA:
4665 if (qc->tf.flags & ATA_TFLAG_POLLING)
4666 ata_qc_set_polling(qc);
4668 ata_tf_to_host(ap, &qc->tf);
4670 ap->hsm_task_state = HSM_ST_FIRST;
4672 /* send cdb by polling if no cdb interrupt */
4673 if ((!(qc->dev->flags & ATA_DFLAG_CDB_INTR)) ||
4674 (qc->tf.flags & ATA_TFLAG_POLLING))
4675 ata_port_queue_task(ap, ata_pio_task, qc, 0);
4678 case ATA_PROT_ATAPI_DMA:
4679 WARN_ON(qc->tf.flags & ATA_TFLAG_POLLING);
4681 ap->ops->tf_load(ap, &qc->tf); /* load tf registers */
4682 ap->ops->bmdma_setup(qc); /* set up bmdma */
4683 ap->hsm_task_state = HSM_ST_FIRST;
4685 /* send cdb by polling if no cdb interrupt */
4686 if (!(qc->dev->flags & ATA_DFLAG_CDB_INTR))
4687 ata_port_queue_task(ap, ata_pio_task, qc, 0);
4692 return AC_ERR_SYSTEM;
4699 * ata_host_intr - Handle host interrupt for given (port, task)
4700 * @ap: Port on which interrupt arrived (possibly...)
4701 * @qc: Taskfile currently active in engine
4703 * Handle host interrupt for given queued command. Currently,
4704 * only DMA interrupts are handled. All other commands are
4705 * handled via polling with interrupts disabled (nIEN bit).
4708 * spin_lock_irqsave(host_set lock)
4711 * One if interrupt was handled, zero if not (shared irq).
4714 inline unsigned int ata_host_intr (struct ata_port *ap,
4715 struct ata_queued_cmd *qc)
4717 u8 status, host_stat = 0;
4719 VPRINTK("ata%u: protocol %d task_state %d\n",
4720 ap->id, qc->tf.protocol, ap->hsm_task_state);
4722 /* Check whether we are expecting interrupt in this state */
4723 switch (ap->hsm_task_state) {
4725 /* Some pre-ATAPI-4 devices assert INTRQ
4726 * at this state when ready to receive CDB.
4729 /* Checking the ATA_DFLAG_CDB_INTR flag is enough here.
4730 * The flag was turned on only for atapi devices.
4731 * No need to check is_atapi_taskfile(&qc->tf) again.
4733 if (!(qc->dev->flags & ATA_DFLAG_CDB_INTR))
4737 if (qc->tf.protocol == ATA_PROT_DMA ||
4738 qc->tf.protocol == ATA_PROT_ATAPI_DMA) {
4739 /* check status of DMA engine */
4740 host_stat = ap->ops->bmdma_status(ap);
4741 VPRINTK("ata%u: host_stat 0x%X\n", ap->id, host_stat);
4743 /* if it's not our irq... */
4744 if (!(host_stat & ATA_DMA_INTR))
4747 /* before we do anything else, clear DMA-Start bit */
4748 ap->ops->bmdma_stop(qc);
4750 if (unlikely(host_stat & ATA_DMA_ERR)) {
4751 /* error when transferring data to/from memory */
4752 qc->err_mask |= AC_ERR_HOST_BUS;
4753 ap->hsm_task_state = HSM_ST_ERR;
4763 /* check altstatus */
4764 status = ata_altstatus(ap);
4765 if (status & ATA_BUSY)
4768 /* check main status, clearing INTRQ */
4769 status = ata_chk_status(ap);
4770 if (unlikely(status & ATA_BUSY))
4773 /* ack bmdma irq events */
4774 ap->ops->irq_clear(ap);
4776 ata_hsm_move(ap, qc, status, 0);
4777 return 1; /* irq handled */
4780 ap->stats.idle_irq++;
4783 if ((ap->stats.idle_irq % 1000) == 0) {
4784 ata_irq_ack(ap, 0); /* debug trap */
4785 ata_port_printk(ap, KERN_WARNING, "irq trap\n");
4789 return 0; /* irq not handled */
4793 * ata_interrupt - Default ATA host interrupt handler
4794 * @irq: irq line (unused)
4795 * @dev_instance: pointer to our ata_host_set information structure
4798 * Default interrupt handler for PCI IDE devices. Calls
4799 * ata_host_intr() for each port that is not disabled.
4802 * Obtains host_set lock during operation.
4805 * IRQ_NONE or IRQ_HANDLED.
4808 irqreturn_t ata_interrupt (int irq, void *dev_instance, struct pt_regs *regs)
4810 struct ata_host_set *host_set = dev_instance;
4812 unsigned int handled = 0;
4813 unsigned long flags;
4815 /* TODO: make _irqsave conditional on x86 PCI IDE legacy mode */
4816 spin_lock_irqsave(&host_set->lock, flags);
4818 for (i = 0; i < host_set->n_ports; i++) {
4819 struct ata_port *ap;
4821 ap = host_set->ports[i];
4823 !(ap->flags & ATA_FLAG_DISABLED)) {
4824 struct ata_queued_cmd *qc;
4826 qc = ata_qc_from_tag(ap, ap->active_tag);
4827 if (qc && (!(qc->tf.flags & ATA_TFLAG_POLLING)) &&
4828 (qc->flags & ATA_QCFLAG_ACTIVE))
4829 handled |= ata_host_intr(ap, qc);
4833 spin_unlock_irqrestore(&host_set->lock, flags);
4835 return IRQ_RETVAL(handled);
4839 * sata_scr_valid - test whether SCRs are accessible
4840 * @ap: ATA port to test SCR accessibility for
4842 * Test whether SCRs are accessible for @ap.
4848 * 1 if SCRs are accessible, 0 otherwise.
4850 int sata_scr_valid(struct ata_port *ap)
4852 return ap->cbl == ATA_CBL_SATA && ap->ops->scr_read;
4856 * sata_scr_read - read SCR register of the specified port
4857 * @ap: ATA port to read SCR for
4859 * @val: Place to store read value
4861 * Read SCR register @reg of @ap into *@val. This function is
4862 * guaranteed to succeed if the cable type of the port is SATA
4863 * and the port implements ->scr_read.
4869 * 0 on success, negative errno on failure.
4871 int sata_scr_read(struct ata_port *ap, int reg, u32 *val)
4873 if (sata_scr_valid(ap)) {
4874 *val = ap->ops->scr_read(ap, reg);
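/*
 * Illustrative sketch (not part of the driver, kept out of the build):
 * reading and decoding SStatus through sata_scr_read().  The register
 * layout is DET [3:0], SPD [7:4], IPM [11:8]; DET == 3 means a device
 * is present and phy communication is established.  The function name
 * is hypothetical.
 */
#if 0
static void example_report_link(struct ata_port *ap)
{
	u32 sstatus;

	if (sata_scr_read(ap, SCR_STATUS, &sstatus))
		return;		/* not SATA or no ->scr_read */

	if ((sstatus & 0xf) == 0x3)
		ata_port_printk(ap, KERN_INFO, "link up, gen%u\n",
				(sstatus >> 4) & 0xf);
	else
		ata_port_printk(ap, KERN_INFO, "link down\n");
}
#endif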
4881 * sata_scr_write - write SCR register of the specified port
4882 * @ap: ATA port to write SCR for
4883 * @reg: SCR to write
4884 * @val: value to write
4886 * Write @val to SCR register @reg of @ap. This function is
4887 * guaranteed to succeed if the cable type of the port is SATA
4888 * and the port implements ->scr_read.
4894 * 0 on success, negative errno on failure.
4896 int sata_scr_write(struct ata_port *ap, int reg, u32 val)
4898 if (sata_scr_valid(ap)) {
4899 ap->ops->scr_write(ap, reg, val);
4906 * sata_scr_write_flush - write SCR register of the specified port and flush
4907 * @ap: ATA port to write SCR for
4908 * @reg: SCR to write
4909 * @val: value to write
4911 * This function is identical to sata_scr_write() except that this
4912 * function performs flush after writing to the register.
4918 * 0 on success, negative errno on failure.
4920 int sata_scr_write_flush(struct ata_port *ap, int reg, u32 val)
4922 if (sata_scr_valid(ap)) {
4923 ap->ops->scr_write(ap, reg, val);
4924 ap->ops->scr_read(ap, reg);
4931 * ata_port_online - test whether the given port is online
4932 * @ap: ATA port to test
4934 * Test whether @ap is online. Note that this function returns 0
4935 * if online status of @ap cannot be obtained, so
4936 * ata_port_online(ap) != !ata_port_offline(ap).
4942 * 1 if the port online status is available and online.
4944 int ata_port_online(struct ata_port *ap)
4948 if (!sata_scr_read(ap, SCR_STATUS, &sstatus) && (sstatus & 0xf) == 0x3)
4954 * ata_port_offline - test whether the given port is offline
4955 * @ap: ATA port to test
4957 * Test whether @ap is offline. Note that this function returns
4958 * 0 if offline status of @ap cannot be obtained, so
4959 * ata_port_online(ap) != !ata_port_offline(ap).
4965 * 1 if the port offline status is available and offline.
4967 int ata_port_offline(struct ata_port *ap)
4971 if (!sata_scr_read(ap, SCR_STATUS, &sstatus) && (sstatus & 0xf) != 0x3)
4976 static int ata_flush_cache(struct ata_device *dev)
4978 unsigned int err_mask;
4981 if (!ata_try_flush_cache(dev))
4984 if (ata_id_has_flush_ext(dev->id))
4985 cmd = ATA_CMD_FLUSH_EXT;
4987 cmd = ATA_CMD_FLUSH;
4989 err_mask = ata_do_simple_cmd(dev, cmd);
4991 ata_dev_printk(dev, KERN_ERR, "failed to flush cache\n");
4998 static int ata_standby_drive(struct ata_device *dev)
5000 unsigned int err_mask;
5002 err_mask = ata_do_simple_cmd(dev, ATA_CMD_STANDBYNOW1);
5004 ata_dev_printk(dev, KERN_ERR, "failed to standby drive "
5005 "(err_mask=0x%x)\n", err_mask);
5012 static int ata_start_drive(struct ata_device *dev)
5014 unsigned int err_mask;
5016 err_mask = ata_do_simple_cmd(dev, ATA_CMD_IDLEIMMEDIATE);
5018 ata_dev_printk(dev, KERN_ERR, "failed to start drive "
5019 "(err_mask=0x%x)\n", err_mask);
5027 * ata_device_resume - wake up a previously suspended device
5028 * @dev: the device to resume
5030 * Kick the drive back into action, by sending it an idle immediate
5031 * command and making sure its transfer mode matches between drive
5035 int ata_device_resume(struct ata_device *dev)
5037 struct ata_port *ap = dev->ap;
5039 if (ap->flags & ATA_FLAG_SUSPENDED) {
5040 struct ata_device *failed_dev;
5042 ata_busy_sleep(ap, ATA_TMOUT_BOOT_QUICK, ATA_TMOUT_BOOT);
5043 ata_busy_wait(ap, ATA_BUSY | ATA_DRQ, 200000);
5045 ap->flags &= ~ATA_FLAG_SUSPENDED;
5046 while (ata_set_mode(ap, &failed_dev))
5047 ata_dev_disable(failed_dev);
5049 if (!ata_dev_enabled(dev))
5051 if (dev->class == ATA_DEV_ATA)
5052 ata_start_drive(dev);
5058 * ata_device_suspend - prepare a device for suspend
5059 * @dev: the device to suspend
5060 * @state: target power management state
5062 * Flush the cache on the drive, if appropriate, then issue a
5063 * standbynow command.
5065 int ata_device_suspend(struct ata_device *dev, pm_message_t state)
5067 struct ata_port *ap = dev->ap;
5069 if (!ata_dev_enabled(dev))
5071 if (dev->class == ATA_DEV_ATA)
5072 ata_flush_cache(dev);
5074 if (state.event != PM_EVENT_FREEZE)
5075 ata_standby_drive(dev);
5076 ap->flags |= ATA_FLAG_SUSPENDED;
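/*
 * Illustrative sketch (not part of the original file): a caller-side view
 * of the suspend entry point above.  The wrapper name and the "to_disk"
 * flag are hypothetical; the point is the PM_EVENT_FREEZE distinction,
 * which flushes the cache but skips the STANDBY IMMEDIATE spin-down.
 */
static int example_pm_suspend(struct ata_device *dev, int to_disk)
{
	pm_message_t msg;

	msg.event = to_disk ? PM_EVENT_SUSPEND : PM_EVENT_FREEZE;
	return ata_device_suspend(dev, msg);
}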
5081 * ata_port_start - Set port up for dma.
5082 * @ap: Port to initialize
5084 * Called just after data structures for each port are
5085 * initialized. Allocates space for PRD table.
5087 * May be used as the port_start() entry in ata_port_operations.
5090 * Inherited from caller.
5093 int ata_port_start (struct ata_port *ap)
5095 struct device *dev = ap->dev;
5098 ap->prd = dma_alloc_coherent(dev, ATA_PRD_TBL_SZ, &ap->prd_dma, GFP_KERNEL);
5102 rc = ata_pad_alloc(ap, dev);
5104 dma_free_coherent(dev, ATA_PRD_TBL_SZ, ap->prd, ap->prd_dma);
5108 DPRINTK("prd alloc, virt %p, dma %llx\n", ap->prd, (unsigned long long) ap->prd_dma);
5115 * ata_port_stop - Undo ata_port_start()
5116 * @ap: Port to shut down
5118 * Frees the PRD table.
5120 * May be used as the port_stop() entry in ata_port_operations.
5123 * Inherited from caller.
5126 void ata_port_stop (struct ata_port *ap)
5128 struct device *dev = ap->dev;
5130 dma_free_coherent(dev, ATA_PRD_TBL_SZ, ap->prd, ap->prd_dma);
5131 ata_pad_free(ap, dev);
5134 void ata_host_stop (struct ata_host_set *host_set)
5136 if (host_set->mmio_base)
5137 iounmap(host_set->mmio_base);
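/*
 * Illustrative sketch (not part of the original file): a low-level driver
 * with no special per-port DMA needs can plug the stock helpers straight
 * into its ata_port_operations.  The structure name is hypothetical and
 * the taskfile/bmdma/interrupt hooks are omitted for brevity.
 */
static const struct ata_port_operations example_port_ops = {
	.port_start	= ata_port_start,	/* allocate PRD table */
	.port_stop	= ata_port_stop,	/* free PRD table */
	.host_stop	= ata_host_stop,	/* iounmap mmio_base, if any */
	/* ... remaining operations omitted ... */
};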
5142 * ata_host_remove - Unregister SCSI host structure with upper layers
5143 * @ap: Port to unregister
5144 * @do_unregister: 1 if we fully unregister, 0 to just stop the port
5147 * Inherited from caller.
5150 static void ata_host_remove(struct ata_port *ap, unsigned int do_unregister)
5152 struct Scsi_Host *sh = ap->host;
5157 scsi_remove_host(sh);
5159 ap->ops->port_stop(ap);
5163 * ata_dev_init - Initialize an ata_device structure
5164 * @dev: Device structure to initialize
5166 * Initialize @dev in preparation for probing.
5169 * Inherited from caller.
5171 void ata_dev_init(struct ata_device *dev)
5173 struct ata_port *ap = dev->ap;
5174 unsigned long flags;
5176 /* SATA spd limit is bound to the first device */
5177 ap->sata_spd_limit = ap->hw_sata_spd_limit;
5179 /* High bits of dev->flags are used to record warm plug
5180 * requests which occur asynchronously. Synchronize using the port lock.
5183 spin_lock_irqsave(ap->lock, flags);
5184 dev->flags &= ~ATA_DFLAG_INIT_MASK;
5185 spin_unlock_irqrestore(ap->lock, flags);
5187 memset((void *)dev + ATA_DEVICE_CLEAR_OFFSET, 0,
5188 sizeof(*dev) - ATA_DEVICE_CLEAR_OFFSET);
5189 dev->pio_mask = UINT_MAX;
5190 dev->mwdma_mask = UINT_MAX;
5191 dev->udma_mask = UINT_MAX;
5195 * ata_host_init - Initialize an ata_port structure
5196 * @ap: Structure to initialize
5197 * @host: associated SCSI mid-layer structure
5198 * @host_set: Collection of hosts to which @ap belongs
5199 * @ent: Probe information provided by low-level driver
5200 * @port_no: Port number associated with this ata_port
5202 * Initialize a new ata_port structure, and its associated
5206 * Inherited from caller.
5208 static void ata_host_init(struct ata_port *ap, struct Scsi_Host *host,
5209 struct ata_host_set *host_set,
5210 const struct ata_probe_ent *ent, unsigned int port_no)
5216 host->max_channel = 1;
5217 host->unique_id = ata_unique_id++;
5218 host->max_cmd_len = 12;
5220 ap->lock = &host_set->lock;
5221 ap->flags = ATA_FLAG_DISABLED;
5222 ap->id = host->unique_id;
5224 ap->ctl = ATA_DEVCTL_OBS;
5225 ap->host_set = host_set;
5227 ap->port_no = port_no;
5229 ent->legacy_mode ? ent->hard_port_no : port_no;
5230 ap->pio_mask = ent->pio_mask;
5231 ap->mwdma_mask = ent->mwdma_mask;
5232 ap->udma_mask = ent->udma_mask;
5233 ap->flags |= ent->host_flags;
5234 ap->ops = ent->port_ops;
5235 ap->hw_sata_spd_limit = UINT_MAX;
5236 ap->active_tag = ATA_TAG_POISON;
5237 ap->last_ctl = 0xFF;
5239 #if defined(ATA_VERBOSE_DEBUG)
5240 /* turn on all debugging levels */
5241 ap->msg_enable = 0x00FF;
5242 #elif defined(ATA_DEBUG)
5243 ap->msg_enable = ATA_MSG_DRV | ATA_MSG_INFO | ATA_MSG_CTL | ATA_MSG_WARN | ATA_MSG_ERR;
5245 ap->msg_enable = ATA_MSG_DRV | ATA_MSG_ERR | ATA_MSG_WARN;
5248 INIT_WORK(&ap->port_task, NULL, NULL);
5249 INIT_WORK(&ap->hotplug_task, ata_scsi_hotplug, ap);
5250 INIT_WORK(&ap->scsi_rescan_task, ata_scsi_dev_rescan, ap);
5251 INIT_LIST_HEAD(&ap->eh_done_q);
5252 init_waitqueue_head(&ap->eh_wait_q);
5254 /* set cable type */
5255 ap->cbl = ATA_CBL_NONE;
5256 if (ap->flags & ATA_FLAG_SATA)
5257 ap->cbl = ATA_CBL_SATA;
5259 for (i = 0; i < ATA_MAX_DEVICES; i++) {
5260 struct ata_device *dev = &ap->device[i];
5267 ap->stats.unhandled_irq = 1;
5268 ap->stats.idle_irq = 1;
5271 memcpy(&ap->ioaddr, &ent->port[port_no], sizeof(struct ata_ioports));
5275 * ata_host_add - Attach low-level ATA driver to system
5276 * @ent: Information provided by low-level driver
5277 * @host_set: Collection of ports to which we add this port
5278 * @port_no: Port number associated with this host
5280 * Attach low-level ATA driver to system.
5283 * PCI/etc. bus probe sem.
5286 * New ata_port on success, or NULL on error.
5289 static struct ata_port * ata_host_add(const struct ata_probe_ent *ent,
5290 struct ata_host_set *host_set,
5291 unsigned int port_no)
5293 struct Scsi_Host *host;
5294 struct ata_port *ap;
5299 if (!ent->port_ops->error_handler &&
5300 !(ent->host_flags & (ATA_FLAG_SATA_RESET | ATA_FLAG_SRST))) {
5301 printk(KERN_ERR "ata%u: no reset mechanism available\n",
5306 host = scsi_host_alloc(ent->sht, sizeof(struct ata_port));
5310 host->transportt = &ata_scsi_transport_template;
5312 ap = ata_shost_to_port(host);
5314 ata_host_init(ap, host, host_set, ent, port_no);
5316 rc = ap->ops->port_start(ap);
5323 scsi_host_put(host);
5328 * ata_device_add - Register hardware device with ATA and SCSI layers
5329 * @ent: Probe information describing hardware device to be registered
5331 * This function processes the information provided in the probe
5332 * information struct @ent, allocates the necessary ATA and SCSI
5333 * host information structures, initializes them, and registers
5334 * everything with requisite kernel subsystems.
5336 * This function requests irqs, probes the ATA bus, and probes
5340 * PCI/etc. bus probe sem.
5343 * Number of ports registered. Zero on error (no ports registered).
5345 int ata_device_add(const struct ata_probe_ent *ent)
5347 unsigned int count = 0, i;
5348 struct device *dev = ent->dev;
5349 struct ata_host_set *host_set;
5353 /* alloc a container for our list of ATA ports (buses) */
5354 host_set = kzalloc(sizeof(struct ata_host_set) +
5355 (ent->n_ports * sizeof(void *)), GFP_KERNEL);
5358 spin_lock_init(&host_set->lock);
5360 host_set->dev = dev;
5361 host_set->n_ports = ent->n_ports;
5362 host_set->irq = ent->irq;
5363 host_set->mmio_base = ent->mmio_base;
5364 host_set->private_data = ent->private_data;
5365 host_set->ops = ent->port_ops;
5366 host_set->flags = ent->host_set_flags;
5368 /* register each port bound to this device */
5369 for (i = 0; i < ent->n_ports; i++) {
5370 struct ata_port *ap;
5371 unsigned long xfer_mode_mask;
5373 ap = ata_host_add(ent, host_set, i);
5377 host_set->ports[i] = ap;
5378 xfer_mode_mask = (ap->udma_mask << ATA_SHIFT_UDMA) |
5379 (ap->mwdma_mask << ATA_SHIFT_MWDMA) |
5380 (ap->pio_mask << ATA_SHIFT_PIO);
5382 /* print per-port info to dmesg */
5383 ata_port_printk(ap, KERN_INFO, "%cATA max %s cmd 0x%lX "
5384 "ctl 0x%lX bmdma 0x%lX irq %lu\n",
5385 ap->flags & ATA_FLAG_SATA ? 'S' : 'P',
5386 ata_mode_string(xfer_mode_mask),
5387 ap->ioaddr.cmd_addr,
5388 ap->ioaddr.ctl_addr,
5389 ap->ioaddr.bmdma_addr,
5393 host_set->ops->irq_clear(ap);
5394 ata_eh_freeze_port(ap); /* freeze port before requesting IRQ */
5401 /* obtain irq, that is shared between channels */
5402 rc = request_irq(ent->irq, ent->port_ops->irq_handler, ent->irq_flags,
5403 DRV_NAME, host_set);
5405 dev_printk(KERN_ERR, dev, "irq %lu request failed: %d\n",
5410 /* perform each probe synchronously */
5411 DPRINTK("probe begin\n");
5412 for (i = 0; i < count; i++) {
5413 struct ata_port *ap;
5417 ap = host_set->ports[i];
5419 /* init sata_spd_limit to the current value */
5420 if (sata_scr_read(ap, SCR_CONTROL, &scontrol) == 0) {
5421 int spd = (scontrol >> 4) & 0xf;
5422 ap->hw_sata_spd_limit &= (1 << spd) - 1;
5424 ap->sata_spd_limit = ap->hw_sata_spd_limit;
5426 rc = scsi_add_host(ap->host, dev);
5428 ata_port_printk(ap, KERN_ERR, "scsi_add_host failed\n");
5429 /* FIXME: do something useful here */
5430 /* FIXME: handle unconditional calls to
5431 * scsi_scan_host and ata_host_remove, below,
5436 if (ap->ops->error_handler) {
5437 unsigned long flags;
5441 /* kick EH for boot probing */
5442 spin_lock_irqsave(ap->lock, flags);
5444 ap->eh_info.probe_mask = (1 << ATA_MAX_DEVICES) - 1;
5445 ap->eh_info.action |= ATA_EH_SOFTRESET;
5447 ap->flags |= ATA_FLAG_LOADING;
5448 ata_port_schedule_eh(ap);
5450 spin_unlock_irqrestore(ap->lock, flags);
5452 /* wait for EH to finish */
5453 ata_port_wait_eh(ap);
5455 DPRINTK("ata%u: bus probe begin\n", ap->id);
5456 rc = ata_bus_probe(ap);
5457 DPRINTK("ata%u: bus probe end\n", ap->id);
5460 /* FIXME: do something useful here?
5461 * Current libata behavior will
5462 * tear down everything when
5463 * the module is removed
5464 * or the h/w is unplugged.
5470 /* probes are done, now scan each port's disk(s) */
5471 DPRINTK("host probe begin\n");
5472 for (i = 0; i < count; i++) {
5473 struct ata_port *ap = host_set->ports[i];
5475 ata_scsi_scan_host(ap);
5478 dev_set_drvdata(dev, host_set);
5480 VPRINTK("EXIT, returning %u\n", ent->n_ports);
5481 return ent->n_ports; /* success */
5484 for (i = 0; i < count; i++) {
5485 ata_host_remove(host_set->ports[i], 1);
5486 scsi_host_put(host_set->ports[i]->host);
5490 VPRINTK("EXIT, returning 0\n");
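/*
 * Illustrative sketch (not part of the original file): the rough shape of
 * a probe routine in a low-level driver that registers its ports by hand
 * rather than through ata_pci_init_one().  The function name is
 * hypothetical, the masks are example values, and only the ata_probe_ent
 * fields consumed by ata_device_add() above are filled in.
 */
static int example_init_one(struct pci_dev *pdev,
			    struct scsi_host_template *sht,
			    const struct ata_port_operations *ops)
{
	struct ata_probe_ent *probe_ent;
	int rc = 0;

	probe_ent = kzalloc(sizeof(*probe_ent), GFP_KERNEL);
	if (!probe_ent)
		return -ENOMEM;

	probe_ent->dev = pci_dev_to_dev(pdev);
	probe_ent->sht = sht;
	probe_ent->port_ops = ops;
	probe_ent->n_ports = 2;
	probe_ent->irq = pdev->irq;
	probe_ent->irq_flags = SA_SHIRQ;	/* shared interrupt line */
	probe_ent->host_flags = ATA_FLAG_SATA | ATA_FLAG_NO_LEGACY;
	probe_ent->pio_mask = 0x1f;		/* PIO 0-4 */
	probe_ent->mwdma_mask = 0x07;		/* MWDMA 0-2 */
	probe_ent->udma_mask = 0x7f;		/* UDMA 0-6 */
	/* probe_ent->port[0..1] would be filled in here, typically with
	 * the help of ata_std_ports()
	 */

	/* ata_device_add() returns the number of ports registered;
	 * zero means complete failure
	 */
	if (!ata_device_add(probe_ent))
		rc = -ENODEV;

	kfree(probe_ent);	/* not needed once the ports are registered */
	return rc;
}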
5495 * ata_port_detach - Detach ATA port in preparation for device removal
5496 * @ap: ATA port to be detached
5498 * Detach all ATA devices and the associated SCSI devices of @ap;
5499 * then, remove the associated SCSI host. @ap is guaranteed to
5500 * be quiescent on return from this function.
5503 * Kernel thread context (may sleep).
5505 void ata_port_detach(struct ata_port *ap)
5507 unsigned long flags;
5510 if (!ap->ops->error_handler)
5513 /* tell EH we're leaving & flush EH */
5514 spin_lock_irqsave(ap->lock, flags);
5515 ap->flags |= ATA_FLAG_UNLOADING;
5516 spin_unlock_irqrestore(ap->lock, flags);
5518 ata_port_wait_eh(ap);
5520 /* EH is now guaranteed to see UNLOADING, so no new device
5521 * will be attached. Disable all existing devices.
5523 spin_lock_irqsave(ap->lock, flags);
5525 for (i = 0; i < ATA_MAX_DEVICES; i++)
5526 ata_dev_disable(&ap->device[i]);
5528 spin_unlock_irqrestore(ap->lock, flags);
5530 /* Final freeze & EH. All in-flight commands are aborted. EH
5531 * will be skipped and retrials will be terminated with bad
5534 spin_lock_irqsave(ap->lock, flags);
5535 ata_port_freeze(ap); /* won't be thawed */
5536 spin_unlock_irqrestore(ap->lock, flags);
5538 ata_port_wait_eh(ap);
5540 /* Flush hotplug task. The sequence is similar to
5541 * ata_port_flush_task().
5543 flush_workqueue(ata_aux_wq);
5544 cancel_delayed_work(&ap->hotplug_task);
5545 flush_workqueue(ata_aux_wq);
5547 /* remove the associated SCSI host */
5548 scsi_remove_host(ap->host);
5552 * ata_host_set_remove - PCI layer callback for device removal
5553 * @host_set: ATA host set that was removed
5555 * Unregister all objects associated with this host set. Free those objects.
5559 * Inherited from calling layer (may sleep).
5562 void ata_host_set_remove(struct ata_host_set *host_set)
5566 for (i = 0; i < host_set->n_ports; i++)
5567 ata_port_detach(host_set->ports[i]);
5569 free_irq(host_set->irq, host_set);
5571 for (i = 0; i < host_set->n_ports; i++) {
5572 struct ata_port *ap = host_set->ports[i];
5574 ata_scsi_release(ap->host);
5576 if ((ap->flags & ATA_FLAG_NO_LEGACY) == 0) {
5577 struct ata_ioports *ioaddr = &ap->ioaddr;
5579 if (ioaddr->cmd_addr == 0x1f0)
5580 release_region(0x1f0, 8);
5581 else if (ioaddr->cmd_addr == 0x170)
5582 release_region(0x170, 8);
5585 scsi_host_put(ap->host);
5588 if (host_set->ops->host_stop)
5589 host_set->ops->host_stop(host_set);
5595 * ata_scsi_release - SCSI layer callback hook for host unload
5596 * @host: libata host to be unloaded
5598 * Performs all duties necessary to shut down a libata port...
5599 * Kill port kthread, disable port, and release resources.
5602 * Inherited from SCSI layer.
5608 int ata_scsi_release(struct Scsi_Host *host)
5610 struct ata_port *ap = ata_shost_to_port(host);
5614 ap->ops->port_disable(ap);
5615 ata_host_remove(ap, 0);
5622 * ata_std_ports - initialize ioaddr with standard port offsets.
5623 * @ioaddr: IO address structure to be initialized
5625 * Utility function which initializes data_addr, error_addr,
5626 * feature_addr, nsect_addr, lbal_addr, lbam_addr, lbah_addr,
5627 * device_addr, status_addr, and command_addr to standard offsets
5628 * relative to cmd_addr.
5630 * Does not set ctl_addr, altstatus_addr, bmdma_addr, or scr_addr.
5633 void ata_std_ports(struct ata_ioports *ioaddr)
5635 ioaddr->data_addr = ioaddr->cmd_addr + ATA_REG_DATA;
5636 ioaddr->error_addr = ioaddr->cmd_addr + ATA_REG_ERR;
5637 ioaddr->feature_addr = ioaddr->cmd_addr + ATA_REG_FEATURE;
5638 ioaddr->nsect_addr = ioaddr->cmd_addr + ATA_REG_NSECT;
5639 ioaddr->lbal_addr = ioaddr->cmd_addr + ATA_REG_LBAL;
5640 ioaddr->lbam_addr = ioaddr->cmd_addr + ATA_REG_LBAM;
5641 ioaddr->lbah_addr = ioaddr->cmd_addr + ATA_REG_LBAH;
5642 ioaddr->device_addr = ioaddr->cmd_addr + ATA_REG_DEVICE;
5643 ioaddr->status_addr = ioaddr->cmd_addr + ATA_REG_STATUS;
5644 ioaddr->command_addr = ioaddr->cmd_addr + ATA_REG_CMD;
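/*
 * Illustrative sketch (not part of the original file): a driver with a
 * standard taskfile register layout only needs to provide cmd_addr (and
 * normally ctl_addr/bmdma_addr) before letting ata_std_ports() derive
 * the rest.  The legacy base addresses below are just example values.
 */
static void example_fill_ioaddr(struct ata_ioports *ioaddr)
{
	ioaddr->cmd_addr = 0x1f0;		/* command block base */
	ioaddr->ctl_addr = 0x3f6;		/* device control register */
	ioaddr->altstatus_addr = ioaddr->ctl_addr;
	ata_std_ports(ioaddr);			/* data_addr .. command_addr */
}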
5650 void ata_pci_host_stop (struct ata_host_set *host_set)
5652 struct pci_dev *pdev = to_pci_dev(host_set->dev);
5654 pci_iounmap(pdev, host_set->mmio_base);
5658 * ata_pci_remove_one - PCI layer callback for device removal
5659 * @pdev: PCI device that was removed
5661 * PCI layer indicates to libata via this hook that
5662 * hot-unplug or module unload event has occurred.
5663 * Handle this by unregistering all objects associated
5664 * with this PCI device. Free those objects. Then finally
5665 * release PCI resources and disable device.
5668 * Inherited from PCI layer (may sleep).
5671 void ata_pci_remove_one (struct pci_dev *pdev)
5673 struct device *dev = pci_dev_to_dev(pdev);
5674 struct ata_host_set *host_set = dev_get_drvdata(dev);
5675 struct ata_host_set *host_set2 = host_set->next;
5677 ata_host_set_remove(host_set);
5679 ata_host_set_remove(host_set2);
5681 pci_release_regions(pdev);
5682 pci_disable_device(pdev);
5683 dev_set_drvdata(dev, NULL);
5686 /* move to PCI subsystem */
5687 int pci_test_config_bits(struct pci_dev *pdev, const struct pci_bits *bits)
5689 unsigned long tmp = 0;
5691 switch (bits->width) {
5694 pci_read_config_byte(pdev, bits->reg, &tmp8);
5700 pci_read_config_word(pdev, bits->reg, &tmp16);
5706 pci_read_config_dword(pdev, bits->reg, &tmp32);
5717 return (tmp == bits->val) ? 1 : 0;
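/*
 * Illustrative sketch (not part of the original file): testing an
 * "IDE decode enable" bit the way PIIX-style drivers do.  The config
 * register offset and bit chosen here are hypothetical; the mask field
 * is assumed to follow the usual struct pci_bits definition.
 */
static int example_port_enabled(struct pci_dev *pdev)
{
	static const struct pci_bits enable_bit = {
		.reg	= 0x41,		/* hypothetical config byte */
		.width	= 1,		/* 8-bit read */
		.mask	= 0x80,
		.val	= 0x80,		/* bit 7 must be set */
	};

	/* returns 1 if (config value & mask) == val, else 0 */
	return pci_test_config_bits(pdev, &enable_bit);
}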
5720 int ata_pci_device_suspend(struct pci_dev *pdev, pm_message_t state)
5722 pci_save_state(pdev);
5723 pci_disable_device(pdev);
5724 pci_set_power_state(pdev, PCI_D3hot);
5728 int ata_pci_device_resume(struct pci_dev *pdev)
5730 pci_set_power_state(pdev, PCI_D0);
5731 pci_restore_state(pdev);
5732 pci_enable_device(pdev);
5733 pci_set_master(pdev);
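/*
 * Illustrative sketch (not part of the original file): wiring the generic
 * removal and power-management hooks into a PCI low-level driver.  The
 * driver name and device IDs are made up; .probe would point at the
 * driver's own init_one() routine.
 */
static const struct pci_device_id example_pci_tbl[] = {
	{ PCI_DEVICE(0x1234, 0x5678) },		/* hypothetical IDs */
	{ }					/* terminating entry */
};

static struct pci_driver example_pci_driver = {
	.name		= "example_ata",
	.id_table	= example_pci_tbl,
	.remove		= ata_pci_remove_one,
	.suspend	= ata_pci_device_suspend,
	.resume		= ata_pci_device_resume,
};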
5736 #endif /* CONFIG_PCI */
5739 static int __init ata_init(void)
5741 ata_wq = create_workqueue("ata");
5745 ata_aux_wq = create_singlethread_workqueue("ata_aux");
5747 destroy_workqueue(ata_wq);
5751 printk(KERN_DEBUG "libata version " DRV_VERSION " loaded.\n");
5755 static void __exit ata_exit(void)
5757 destroy_workqueue(ata_wq);
5758 destroy_workqueue(ata_aux_wq);
5761 module_init(ata_init);
5762 module_exit(ata_exit);
5764 static unsigned long ratelimit_time;
5765 static spinlock_t ata_ratelimit_lock = SPIN_LOCK_UNLOCKED;
5767 int ata_ratelimit(void)
5770 unsigned long flags;
5772 spin_lock_irqsave(&ata_ratelimit_lock, flags);
5774 if (time_after(jiffies, ratelimit_time)) {
5776 ratelimit_time = jiffies + (HZ/5);
5780 spin_unlock_irqrestore(&ata_ratelimit_lock, flags);
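/*
 * Illustrative sketch (not part of the original file): ata_ratelimit() is
 * typically used to throttle messages that may fire from interrupt
 * context, allowing roughly one message per HZ/5 as configured above.
 * The function name is hypothetical.
 */
static void example_noisy_irq_warning(struct ata_port *ap, u32 serror)
{
	if (ata_ratelimit())
		ata_port_printk(ap, KERN_WARNING,
				"SError 0x%08x (further messages suppressed)\n",
				serror);
}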
5786 * ata_wait_register - wait until register value changes
5787 * @reg: IO-mapped register
5788 * @mask: Mask to apply to read register value
5789 * @val: Wait condition
5790 * @interval_msec: polling interval in milliseconds
5791 * @timeout_msec: timeout in milliseconds
5793 * Waiting for some bits of register to change is a common
5794 * operation for ATA controllers. This function reads 32bit LE
5795 * IO-mapped register @reg and tests for the following condition.
5797 * (*@reg & mask) != val
5799 * If the condition is met, it returns; otherwise, the process is
5800 * repeated after @interval_msec until timeout.
5803 * Kernel thread context (may sleep)
5806 * The final register value.
5808 u32 ata_wait_register(void __iomem *reg, u32 mask, u32 val,
5809 unsigned long interval_msec,
5810 unsigned long timeout_msec)
5812 unsigned long timeout;
5815 tmp = ioread32(reg);
5817 /* Calculate timeout _after_ the first read to make sure
5818 * preceding writes reach the controller before starting to
5819 * eat away the timeout.
5821 timeout = jiffies + (timeout_msec * HZ) / 1000;
5823 while ((tmp & mask) == val && time_before(jiffies, timeout)) {
5824 msleep(interval_msec);
5825 tmp = ioread32(reg);
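/*
 * Illustrative sketch (not part of the original file): polling a
 * hypothetical mmio status register until its busy bit clears, with a
 * 50 ms poll interval and a 1 s timeout.  The wait condition is
 * "(value & mask) != val", i.e. the loop runs while the masked value
 * still equals @val.
 */
static int example_wait_not_busy(void __iomem *status_reg, u32 busy_bit)
{
	u32 tmp;

	tmp = ata_wait_register(status_reg, busy_bit, busy_bit, 50, 1000);
	return (tmp & busy_bit) ? -EBUSY : 0;
}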
5832 * libata is essentially a library of internal helper functions for
5833 * low-level ATA host controller drivers. As such, the API/ABI is
5834 * likely to change as new drivers are added and updated.
5835 * Do not depend on ABI/API stability.
5838 EXPORT_SYMBOL_GPL(sata_deb_timing_boot);
5839 EXPORT_SYMBOL_GPL(sata_deb_timing_eh);
5840 EXPORT_SYMBOL_GPL(sata_deb_timing_before_fsrst);
5841 EXPORT_SYMBOL_GPL(ata_std_bios_param);
5842 EXPORT_SYMBOL_GPL(ata_std_ports);
5843 EXPORT_SYMBOL_GPL(ata_device_add);
5844 EXPORT_SYMBOL_GPL(ata_port_detach);
5845 EXPORT_SYMBOL_GPL(ata_host_set_remove);
5846 EXPORT_SYMBOL_GPL(ata_sg_init);
5847 EXPORT_SYMBOL_GPL(ata_sg_init_one);
5848 EXPORT_SYMBOL_GPL(ata_hsm_move);
5849 EXPORT_SYMBOL_GPL(ata_qc_complete);
5850 EXPORT_SYMBOL_GPL(ata_qc_complete_multiple);
5851 EXPORT_SYMBOL_GPL(ata_qc_issue_prot);
5852 EXPORT_SYMBOL_GPL(ata_tf_load);
5853 EXPORT_SYMBOL_GPL(ata_tf_read);
5854 EXPORT_SYMBOL_GPL(ata_noop_dev_select);
5855 EXPORT_SYMBOL_GPL(ata_std_dev_select);
5856 EXPORT_SYMBOL_GPL(ata_tf_to_fis);
5857 EXPORT_SYMBOL_GPL(ata_tf_from_fis);
5858 EXPORT_SYMBOL_GPL(ata_check_status);
5859 EXPORT_SYMBOL_GPL(ata_altstatus);
5860 EXPORT_SYMBOL_GPL(ata_exec_command);
5861 EXPORT_SYMBOL_GPL(ata_port_start);
5862 EXPORT_SYMBOL_GPL(ata_port_stop);
5863 EXPORT_SYMBOL_GPL(ata_host_stop);
5864 EXPORT_SYMBOL_GPL(ata_interrupt);
5865 EXPORT_SYMBOL_GPL(ata_mmio_data_xfer);
5866 EXPORT_SYMBOL_GPL(ata_pio_data_xfer);
5867 EXPORT_SYMBOL_GPL(ata_pio_data_xfer_noirq);
5868 EXPORT_SYMBOL_GPL(ata_qc_prep);
5869 EXPORT_SYMBOL_GPL(ata_noop_qc_prep);
5870 EXPORT_SYMBOL_GPL(ata_bmdma_setup);
5871 EXPORT_SYMBOL_GPL(ata_bmdma_start);
5872 EXPORT_SYMBOL_GPL(ata_bmdma_irq_clear);
5873 EXPORT_SYMBOL_GPL(ata_bmdma_status);
5874 EXPORT_SYMBOL_GPL(ata_bmdma_stop);
5875 EXPORT_SYMBOL_GPL(ata_bmdma_freeze);
5876 EXPORT_SYMBOL_GPL(ata_bmdma_thaw);
5877 EXPORT_SYMBOL_GPL(ata_bmdma_drive_eh);
5878 EXPORT_SYMBOL_GPL(ata_bmdma_error_handler);
5879 EXPORT_SYMBOL_GPL(ata_bmdma_post_internal_cmd);
5880 EXPORT_SYMBOL_GPL(ata_port_probe);
5881 EXPORT_SYMBOL_GPL(sata_set_spd);
5882 EXPORT_SYMBOL_GPL(sata_phy_debounce);
5883 EXPORT_SYMBOL_GPL(sata_phy_resume);
5884 EXPORT_SYMBOL_GPL(sata_phy_reset);
5885 EXPORT_SYMBOL_GPL(__sata_phy_reset);
5886 EXPORT_SYMBOL_GPL(ata_bus_reset);
5887 EXPORT_SYMBOL_GPL(ata_std_prereset);
5888 EXPORT_SYMBOL_GPL(ata_std_softreset);
5889 EXPORT_SYMBOL_GPL(sata_std_hardreset);
5890 EXPORT_SYMBOL_GPL(ata_std_postreset);
5891 EXPORT_SYMBOL_GPL(ata_dev_revalidate);
5892 EXPORT_SYMBOL_GPL(ata_dev_classify);
5893 EXPORT_SYMBOL_GPL(ata_dev_pair);
5894 EXPORT_SYMBOL_GPL(ata_port_disable);
5895 EXPORT_SYMBOL_GPL(ata_ratelimit);
5896 EXPORT_SYMBOL_GPL(ata_wait_register);
5897 EXPORT_SYMBOL_GPL(ata_busy_sleep);
5898 EXPORT_SYMBOL_GPL(ata_port_queue_task);
5899 EXPORT_SYMBOL_GPL(ata_scsi_ioctl);
5900 EXPORT_SYMBOL_GPL(ata_scsi_queuecmd);
5901 EXPORT_SYMBOL_GPL(ata_scsi_slave_config);
5902 EXPORT_SYMBOL_GPL(ata_scsi_slave_destroy);
5903 EXPORT_SYMBOL_GPL(ata_scsi_change_queue_depth);
5904 EXPORT_SYMBOL_GPL(ata_scsi_release);
5905 EXPORT_SYMBOL_GPL(ata_host_intr);
5906 EXPORT_SYMBOL_GPL(sata_scr_valid);
5907 EXPORT_SYMBOL_GPL(sata_scr_read);
5908 EXPORT_SYMBOL_GPL(sata_scr_write);
5909 EXPORT_SYMBOL_GPL(sata_scr_write_flush);
5910 EXPORT_SYMBOL_GPL(ata_port_online);
5911 EXPORT_SYMBOL_GPL(ata_port_offline);
5912 EXPORT_SYMBOL_GPL(ata_id_string);
5913 EXPORT_SYMBOL_GPL(ata_id_c_string);
5914 EXPORT_SYMBOL_GPL(ata_scsi_simulate);
5916 EXPORT_SYMBOL_GPL(ata_pio_need_iordy);
5917 EXPORT_SYMBOL_GPL(ata_timing_compute);
5918 EXPORT_SYMBOL_GPL(ata_timing_merge);
5921 EXPORT_SYMBOL_GPL(pci_test_config_bits);
5922 EXPORT_SYMBOL_GPL(ata_pci_host_stop);
5923 EXPORT_SYMBOL_GPL(ata_pci_init_native_mode);
5924 EXPORT_SYMBOL_GPL(ata_pci_init_one);
5925 EXPORT_SYMBOL_GPL(ata_pci_remove_one);
5926 EXPORT_SYMBOL_GPL(ata_pci_device_suspend);
5927 EXPORT_SYMBOL_GPL(ata_pci_device_resume);
5928 EXPORT_SYMBOL_GPL(ata_pci_default_filter);
5929 EXPORT_SYMBOL_GPL(ata_pci_clear_simplex);
5930 #endif /* CONFIG_PCI */
5932 EXPORT_SYMBOL_GPL(ata_device_suspend);
5933 EXPORT_SYMBOL_GPL(ata_device_resume);
5934 EXPORT_SYMBOL_GPL(ata_scsi_device_suspend);
5935 EXPORT_SYMBOL_GPL(ata_scsi_device_resume);
5937 EXPORT_SYMBOL_GPL(ata_eng_timeout);
5938 EXPORT_SYMBOL_GPL(ata_port_schedule_eh);
5939 EXPORT_SYMBOL_GPL(ata_port_abort);
5940 EXPORT_SYMBOL_GPL(ata_port_freeze);
5941 EXPORT_SYMBOL_GPL(ata_eh_freeze_port);
5942 EXPORT_SYMBOL_GPL(ata_eh_thaw_port);
5943 EXPORT_SYMBOL_GPL(ata_eh_qc_complete);
5944 EXPORT_SYMBOL_GPL(ata_eh_qc_retry);
5945 EXPORT_SYMBOL_GPL(ata_do_eh);