/*
 *  libata-core.c - helper library for ATA
 *
 *  Maintained by:  Jeff Garzik <jgarzik@pobox.com>
 *		    Please ALWAYS copy linux-ide@vger.kernel.org
 *		    on emails.
 *
 *  Copyright 2003-2004 Red Hat, Inc.  All rights reserved.
 *  Copyright 2003-2004 Jeff Garzik
 *
 *  This program is free software; you can redistribute it and/or modify
 *  it under the terms of the GNU General Public License as published by
 *  the Free Software Foundation; either version 2, or (at your option)
 *  any later version.
 *
 *  This program is distributed in the hope that it will be useful,
 *  but WITHOUT ANY WARRANTY; without even the implied warranty of
 *  MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 *  GNU General Public License for more details.
 *
 *  You should have received a copy of the GNU General Public License
 *  along with this program; see the file COPYING.  If not, write to
 *  the Free Software Foundation, 675 Mass Ave, Cambridge, MA 02139, USA.
 *
 *  libata documentation is available via 'make {ps|pdf}docs',
 *  as Documentation/DocBook/libata.*
 *
 *  Hardware documentation available from http://www.t13.org/ and
 *  http://www.sata-io.org/
 */
#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/pci.h>
#include <linux/init.h>
#include <linux/list.h>
#include <linux/mm.h>
#include <linux/highmem.h>
#include <linux/spinlock.h>
#include <linux/blkdev.h>
#include <linux/delay.h>
#include <linux/timer.h>
#include <linux/interrupt.h>
#include <linux/completion.h>
#include <linux/suspend.h>
#include <linux/workqueue.h>
#include <linux/jiffies.h>
#include <linux/scatterlist.h>
#include <scsi/scsi.h>
#include <scsi/scsi_cmnd.h>
#include <scsi/scsi_host.h>
#include <linux/libata.h>
#include <asm/semaphore.h>
#include <asm/byteorder.h>

#include "libata.h"
/* debounce timing parameters in msecs { interval, duration, timeout } */
const unsigned long sata_deb_timing_normal[]  = {   5,  100, 2000 };
const unsigned long sata_deb_timing_hotplug[] = {  25,  500, 2000 };
const unsigned long sata_deb_timing_long[]    = { 100, 2000, 5000 };
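
/*
 * Illustrative sketch (not part of the original file): a reset path
 * typically picks one of the tables above and hands it to
 * sata_phy_resume()/sata_phy_debounce() further below, e.g.
 *
 *	const unsigned long *timing = sata_deb_timing_hotplug;
 *	int rc = sata_phy_resume(ap, timing);
 *
 * The triplet is consumed as: poll SStatus every "interval" msecs,
 * require a stable reading for "duration" msecs, and give up once
 * "timeout" msecs have elapsed.
 */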
static unsigned int ata_dev_init_params(struct ata_device *dev,
					u16 heads, u16 sectors);
static unsigned int ata_dev_set_xfermode(struct ata_device *dev);
static void ata_dev_xfermask(struct ata_device *dev);
static unsigned int ata_unique_id = 1;
static struct workqueue_struct *ata_wq;

struct workqueue_struct *ata_aux_wq;
int atapi_enabled = 1;
module_param(atapi_enabled, int, 0444);
MODULE_PARM_DESC(atapi_enabled, "Enable discovery of ATAPI devices (0=off, 1=on)");

int atapi_dmadir = 0;
module_param(atapi_dmadir, int, 0444);
MODULE_PARM_DESC(atapi_dmadir, "Enable ATAPI DMADIR bridge support (0=off, 1=on)");

static int libata_fua = 0;
module_param_named(fua, libata_fua, int, 0444);
MODULE_PARM_DESC(fua, "FUA support (0=off, 1=on)");

static int ata_probe_timeout = ATA_TMOUT_INTERNAL / HZ;
module_param(ata_probe_timeout, int, 0444);
MODULE_PARM_DESC(ata_probe_timeout, "Set ATA probing timeout (seconds)");
MODULE_AUTHOR("Jeff Garzik");
MODULE_DESCRIPTION("Library module for ATA devices");
MODULE_LICENSE("GPL");
MODULE_VERSION(DRV_VERSION);
/**
 *	ata_tf_to_fis - Convert ATA taskfile to SATA FIS structure
 *	@tf: Taskfile to convert
 *	@fis: Buffer into which data will be output
 *	@pmp: Port multiplier port
 *
 *	Converts a standard ATA taskfile to a Serial ATA
 *	FIS structure (Register - Host to Device).
 *
 *	LOCKING:
 *	Inherited from caller.
 */
void ata_tf_to_fis(const struct ata_taskfile *tf, u8 *fis, u8 pmp)
{
	fis[0] = 0x27;			/* Register - Host to Device FIS */
	fis[1] = (pmp & 0xf) | (1 << 7);	/* Port multiplier number,
						   bit 7 indicates Command FIS */
	fis[2] = tf->command;
	fis[3] = tf->feature;

	fis[4] = tf->lbal;
	fis[5] = tf->lbam;
	fis[6] = tf->lbah;
	fis[7] = tf->device;

	fis[8] = tf->hob_lbal;
	fis[9] = tf->hob_lbam;
	fis[10] = tf->hob_lbah;
	fis[11] = tf->hob_feature;

	fis[12] = tf->nsect;
	fis[13] = tf->hob_nsect;
	fis[14] = 0;
	fis[15] = tf->ctl;

	fis[16] = 0;
	fis[17] = 0;
	fis[18] = 0;
	fis[19] = 0;
}
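
/*
 * Worked example (illustrative, not in the original file): for a
 * READ DMA EXT taskfile (command 0x25) addressed at pmp 0, the first
 * FIS bytes come out as:
 *
 *	fis[0] = 0x27  (H2D Register FIS type)
 *	fis[1] = 0x80  (C bit set: this carries a command; pmp = 0)
 *	fis[2] = 0x25  (tf->command)
 *	fis[3] = 0x00  (tf->feature)
 *
 * i.e. the first little-endian dword on the wire is 0x00258027.
 */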
/**
 *	ata_tf_from_fis - Convert SATA FIS to ATA taskfile
 *	@fis: Buffer from which data will be input
 *	@tf: Taskfile to output
 *
 *	Converts a serial ATA FIS structure to a standard ATA taskfile.
 *
 *	LOCKING:
 *	Inherited from caller.
 */
void ata_tf_from_fis(const u8 *fis, struct ata_taskfile *tf)
{
	tf->command	= fis[2];	/* status */
	tf->feature	= fis[3];	/* error */

	tf->lbal	= fis[4];
	tf->lbam	= fis[5];
	tf->lbah	= fis[6];
	tf->device	= fis[7];

	tf->hob_lbal	= fis[8];
	tf->hob_lbam	= fis[9];
	tf->hob_lbah	= fis[10];

	tf->nsect	= fis[12];
	tf->hob_nsect	= fis[13];
}
static const u8 ata_rw_cmds[] = {
	/* pio multi */
	ATA_CMD_READ_MULTI,
	ATA_CMD_WRITE_MULTI,
	ATA_CMD_READ_MULTI_EXT,
	ATA_CMD_WRITE_MULTI_EXT,
	0,
	0,
	0,
	ATA_CMD_WRITE_MULTI_FUA_EXT,
	/* pio */
	ATA_CMD_PIO_READ,
	ATA_CMD_PIO_WRITE,
	ATA_CMD_PIO_READ_EXT,
	ATA_CMD_PIO_WRITE_EXT,
	0,
	0,
	0,
	0,
	/* dma */
	ATA_CMD_READ,
	ATA_CMD_WRITE,
	ATA_CMD_READ_EXT,
	ATA_CMD_WRITE_EXT,
	0,
	0,
	0,
	ATA_CMD_WRITE_FUA_EXT
};
/**
 *	ata_rwcmd_protocol - set taskfile r/w commands and protocol
 *	@qc: command to examine and configure
 *
 *	Examine the device configuration and tf->flags to calculate
 *	the proper read/write commands and protocol to use.
 *
 *	LOCKING:
 *	Inherited from caller.
 */
int ata_rwcmd_protocol(struct ata_queued_cmd *qc)
{
	struct ata_taskfile *tf = &qc->tf;
	struct ata_device *dev = qc->dev;
	u8 cmd;

	int index, fua, lba48, write;

	fua = (tf->flags & ATA_TFLAG_FUA) ? 4 : 0;
	lba48 = (tf->flags & ATA_TFLAG_LBA48) ? 2 : 0;
	write = (tf->flags & ATA_TFLAG_WRITE) ? 1 : 0;

	if (dev->flags & ATA_DFLAG_PIO) {
		tf->protocol = ATA_PROT_PIO;
		index = dev->multi_count ? 0 : 8;
	} else if (lba48 && (qc->ap->flags & ATA_FLAG_PIO_LBA48)) {
		/* Unable to use DMA due to host limitation */
		tf->protocol = ATA_PROT_PIO;
		index = dev->multi_count ? 0 : 8;
	} else {
		tf->protocol = ATA_PROT_DMA;
		index = 16;
	}

	cmd = ata_rw_cmds[index + fua + lba48 + write];
	if (cmd) {
		tf->command = cmd;
		return 0;
	}
	return -1;
}
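
/*
 * Worked example (illustrative): an LBA48 FUA write on a DMA-capable
 * device selects index 16 (dma) + 4 (fua) + 2 (lba48) + 1 (write) = 23,
 * i.e. ATA_CMD_WRITE_FUA_EXT in the table above.  Zero entries mark
 * combinations with no defined command, which make the function
 * return -1.
 */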
/**
 *	ata_pack_xfermask - Pack pio, mwdma and udma masks into xfer_mask
 *	@pio_mask: pio_mask
 *	@mwdma_mask: mwdma_mask
 *	@udma_mask: udma_mask
 *
 *	Pack @pio_mask, @mwdma_mask and @udma_mask into a single
 *	unsigned int xfer_mask.
 *
 *	LOCKING:
 *	None.
 *
 *	RETURNS:
 *	Packed xfer_mask.
 */
static unsigned int ata_pack_xfermask(unsigned int pio_mask,
				      unsigned int mwdma_mask,
				      unsigned int udma_mask)
{
	return ((pio_mask << ATA_SHIFT_PIO) & ATA_MASK_PIO) |
		((mwdma_mask << ATA_SHIFT_MWDMA) & ATA_MASK_MWDMA) |
		((udma_mask << ATA_SHIFT_UDMA) & ATA_MASK_UDMA);
}
/**
 *	ata_unpack_xfermask - Unpack xfer_mask into pio, mwdma and udma masks
 *	@xfer_mask: xfer_mask to unpack
 *	@pio_mask: resulting pio_mask
 *	@mwdma_mask: resulting mwdma_mask
 *	@udma_mask: resulting udma_mask
 *
 *	Unpack @xfer_mask into @pio_mask, @mwdma_mask and @udma_mask.
 *	Any NULL destination masks will be ignored.
 */
static void ata_unpack_xfermask(unsigned int xfer_mask,
				unsigned int *pio_mask,
				unsigned int *mwdma_mask,
				unsigned int *udma_mask)
{
	if (pio_mask)
		*pio_mask = (xfer_mask & ATA_MASK_PIO) >> ATA_SHIFT_PIO;
	if (mwdma_mask)
		*mwdma_mask = (xfer_mask & ATA_MASK_MWDMA) >> ATA_SHIFT_MWDMA;
	if (udma_mask)
		*udma_mask = (xfer_mask & ATA_MASK_UDMA) >> ATA_SHIFT_UDMA;
}
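
/*
 * Illustrative sketch (not in the original file): pack/unpack round trip.
 *
 *	unsigned int pio, mwdma, udma;
 *	unsigned int xfer_mask = ata_pack_xfermask(0x1f, 0x07, 0x3f);
 *
 *	ata_unpack_xfermask(xfer_mask, &pio, &mwdma, &udma);
 *
 * afterwards pio == 0x1f (PIO0-4), mwdma == 0x07 (MWDMA0-2) and
 * udma == 0x3f (UDMA0-5); the three per-class masks live in disjoint
 * bit ranges of the single packed word.
 */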
static const struct ata_xfer_ent {
	int shift, bits;
	u8 base;
} ata_xfer_tbl[] = {
	{ ATA_SHIFT_PIO, ATA_BITS_PIO, XFER_PIO_0 },
	{ ATA_SHIFT_MWDMA, ATA_BITS_MWDMA, XFER_MW_DMA_0 },
	{ ATA_SHIFT_UDMA, ATA_BITS_UDMA, XFER_UDMA_0 },
	{ -1, },
};
/**
 *	ata_xfer_mask2mode - Find matching XFER_* for the given xfer_mask
 *	@xfer_mask: xfer_mask of interest
 *
 *	Return matching XFER_* value for @xfer_mask.  Only the highest
 *	bit of @xfer_mask is considered.
 *
 *	LOCKING:
 *	None.
 *
 *	RETURNS:
 *	Matching XFER_* value, 0 if no match found.
 */
static u8 ata_xfer_mask2mode(unsigned int xfer_mask)
{
	int highbit = fls(xfer_mask) - 1;
	const struct ata_xfer_ent *ent;

	for (ent = ata_xfer_tbl; ent->shift >= 0; ent++)
		if (highbit >= ent->shift && highbit < ent->shift + ent->bits)
			return ent->base + highbit - ent->shift;
	return 0;
}
/**
 *	ata_xfer_mode2mask - Find matching xfer_mask for XFER_*
 *	@xfer_mode: XFER_* of interest
 *
 *	Return matching xfer_mask for @xfer_mode.
 *
 *	LOCKING:
 *	None.
 *
 *	RETURNS:
 *	Matching xfer_mask, 0 if no match found.
 */
static unsigned int ata_xfer_mode2mask(u8 xfer_mode)
{
	const struct ata_xfer_ent *ent;

	for (ent = ata_xfer_tbl; ent->shift >= 0; ent++)
		if (xfer_mode >= ent->base && xfer_mode < ent->base + ent->bits)
			return 1 << (ent->shift + xfer_mode - ent->base);
	return 0;
}
/**
 *	ata_xfer_mode2shift - Find matching xfer_shift for XFER_*
 *	@xfer_mode: XFER_* of interest
 *
 *	Return matching xfer_shift for @xfer_mode.
 *
 *	LOCKING:
 *	None.
 *
 *	RETURNS:
 *	Matching xfer_shift, -1 if no match found.
 */
static int ata_xfer_mode2shift(unsigned int xfer_mode)
{
	const struct ata_xfer_ent *ent;

	for (ent = ata_xfer_tbl; ent->shift >= 0; ent++)
		if (xfer_mode >= ent->base && xfer_mode < ent->base + ent->bits)
			return ent->shift;
	return -1;
}
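
/*
 * Worked example (illustrative): with ata_xfer_tbl above, XFER_UDMA_5
 * maps to mask bit (1 << (ATA_SHIFT_UDMA + 5)) in ata_xfer_mode2mask(),
 * and that mask maps back to XFER_UDMA_5 in ata_xfer_mask2mode(),
 * since only the highest set bit is considered.
 */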
/**
 *	ata_mode_string - convert xfer_mask to string
 *	@xfer_mask: mask of bits supported; only highest bit counts.
 *
 *	Determine string which represents the highest speed
 *	(highest bit in @xfer_mask).
 *
 *	LOCKING:
 *	None.
 *
 *	RETURNS:
 *	Constant C string representing highest speed listed in
 *	@xfer_mask, or the constant C string "<n/a>".
 */
static const char *ata_mode_string(unsigned int xfer_mask)
{
	static const char * const xfer_mode_str[] = {
		"PIO0", "PIO1", "PIO2", "PIO3", "PIO4", "PIO5", "PIO6",
		"MWDMA0", "MWDMA1", "MWDMA2", "MWDMA3", "MWDMA4",
		"UDMA/16", "UDMA/25", "UDMA/33", "UDMA/44", "UDMA/66",
		"UDMA/100", "UDMA/133", "UDMA7",
	};
	int highbit;

	highbit = fls(xfer_mask) - 1;
	if (highbit >= 0 && highbit < ARRAY_SIZE(xfer_mode_str))
		return xfer_mode_str[highbit];
	return "<n/a>";
}
static const char *sata_spd_string(unsigned int spd)
{
	static const char * const spd_str[] = {
		"1.5 Gbps",
		"3.0 Gbps",
	};

	if (spd == 0 || (spd - 1) >= ARRAY_SIZE(spd_str))
		return "<unknown>";
	return spd_str[spd - 1];
}
void ata_dev_disable(struct ata_device *dev)
{
	if (ata_dev_enabled(dev) && ata_msg_drv(dev->ap)) {
		ata_dev_printk(dev, KERN_WARNING, "disabled\n");
		dev->class++;
	}
}
/**
 *	ata_pio_devchk - PATA device presence detection
 *	@ap: ATA channel to examine
 *	@device: Device to examine (starting at zero)
 *
 *	This technique was originally described in
 *	Hale Landis's ATADRVR (www.ata-atapi.com), and
 *	later found its way into the ATA/ATAPI spec.
 *
 *	Write a pattern to the ATA shadow registers,
 *	and if a device is present, it will respond by
 *	correctly storing and echoing back the
 *	ATA shadow register contents.
 *
 *	LOCKING:
 *	caller.
 */
static unsigned int ata_pio_devchk(struct ata_port *ap,
				   unsigned int device)
{
	struct ata_ioports *ioaddr = &ap->ioaddr;
	u8 nsect, lbal;

	ap->ops->dev_select(ap, device);

	outb(0x55, ioaddr->nsect_addr);
	outb(0xaa, ioaddr->lbal_addr);

	outb(0xaa, ioaddr->nsect_addr);
	outb(0x55, ioaddr->lbal_addr);

	outb(0x55, ioaddr->nsect_addr);
	outb(0xaa, ioaddr->lbal_addr);

	nsect = inb(ioaddr->nsect_addr);
	lbal = inb(ioaddr->lbal_addr);

	if ((nsect == 0x55) && (lbal == 0xaa))
		return 1;	/* we found a device */

	return 0;		/* nothing found */
}
/**
 *	ata_mmio_devchk - PATA device presence detection
 *	@ap: ATA channel to examine
 *	@device: Device to examine (starting at zero)
 *
 *	This technique was originally described in
 *	Hale Landis's ATADRVR (www.ata-atapi.com), and
 *	later found its way into the ATA/ATAPI spec.
 *
 *	Write a pattern to the ATA shadow registers,
 *	and if a device is present, it will respond by
 *	correctly storing and echoing back the
 *	ATA shadow register contents.
 *
 *	LOCKING:
 *	caller.
 */
static unsigned int ata_mmio_devchk(struct ata_port *ap,
				    unsigned int device)
{
	struct ata_ioports *ioaddr = &ap->ioaddr;
	u8 nsect, lbal;

	ap->ops->dev_select(ap, device);

	writeb(0x55, (void __iomem *) ioaddr->nsect_addr);
	writeb(0xaa, (void __iomem *) ioaddr->lbal_addr);

	writeb(0xaa, (void __iomem *) ioaddr->nsect_addr);
	writeb(0x55, (void __iomem *) ioaddr->lbal_addr);

	writeb(0x55, (void __iomem *) ioaddr->nsect_addr);
	writeb(0xaa, (void __iomem *) ioaddr->lbal_addr);

	nsect = readb((void __iomem *) ioaddr->nsect_addr);
	lbal = readb((void __iomem *) ioaddr->lbal_addr);

	if ((nsect == 0x55) && (lbal == 0xaa))
		return 1;	/* we found a device */

	return 0;		/* nothing found */
}
/**
 *	ata_devchk - PATA device presence detection
 *	@ap: ATA channel to examine
 *	@device: Device to examine (starting at zero)
 *
 *	Dispatch ATA device presence detection, depending
 *	on whether we are using PIO or MMIO to talk to the
 *	ATA shadow registers.
 *
 *	LOCKING:
 *	caller.
 */
static unsigned int ata_devchk(struct ata_port *ap,
			       unsigned int device)
{
	if (ap->flags & ATA_FLAG_MMIO)
		return ata_mmio_devchk(ap, device);
	return ata_pio_devchk(ap, device);
}
/**
 *	ata_dev_classify - determine device type based on ATA-spec signature
 *	@tf: ATA taskfile register set for device to be identified
 *
 *	Determine from taskfile register contents whether a device is
 *	ATA or ATAPI, as per "Signature and persistence" section
 *	of ATA/PI spec (volume 1, sect 5.14).
 *
 *	LOCKING:
 *	None.
 *
 *	RETURNS:
 *	Device type, %ATA_DEV_ATA, %ATA_DEV_ATAPI, or %ATA_DEV_UNKNOWN
 *	in the event of failure.
 */
unsigned int ata_dev_classify(const struct ata_taskfile *tf)
{
	/* Apple's open source Darwin code hints that some devices only
	 * put a proper signature into the LBA mid/high registers,
	 * so we only check those.  It's sufficient for uniqueness.
	 */

	if (((tf->lbam == 0) && (tf->lbah == 0)) ||
	    ((tf->lbam == 0x3c) && (tf->lbah == 0xc3))) {
		DPRINTK("found ATA device by sig\n");
		return ATA_DEV_ATA;
	}

	if (((tf->lbam == 0x14) && (tf->lbah == 0xeb)) ||
	    ((tf->lbam == 0x69) && (tf->lbah == 0x96))) {
		DPRINTK("found ATAPI device by sig\n");
		return ATA_DEV_ATAPI;
	}

	DPRINTK("unknown device\n");
	return ATA_DEV_UNKNOWN;
}
/**
 *	ata_dev_try_classify - Parse returned ATA device signature
 *	@ap: ATA channel to examine
 *	@device: Device to examine (starting at zero)
 *	@r_err: Value of error register on completion
 *
 *	After an event -- SRST, E.D.D., or SATA COMRESET -- occurs,
 *	an ATA/ATAPI-defined set of values is placed in the ATA
 *	shadow registers, indicating the results of device detection
 *	and diagnostics.
 *
 *	Select the ATA device, and read the values from the ATA shadow
 *	registers.  Then parse according to the Error register value,
 *	and the spec-defined values examined by ata_dev_classify().
 *
 *	LOCKING:
 *	caller.
 *
 *	RETURNS:
 *	Device type - %ATA_DEV_ATA, %ATA_DEV_ATAPI or %ATA_DEV_NONE.
 */
static unsigned int
ata_dev_try_classify(struct ata_port *ap, unsigned int device, u8 *r_err)
{
	struct ata_taskfile tf;
	unsigned int class;
	u8 err;

	ap->ops->dev_select(ap, device);

	memset(&tf, 0, sizeof(tf));

	ap->ops->tf_read(ap, &tf);
	err = tf.feature;
	if (r_err)
		*r_err = err;

	/* see if device passed diags */
	if (err == 1)
		/* do nothing */ ;
	else if ((device == 0) && (err == 0x81))
		/* do nothing */ ;
	else
		return ATA_DEV_NONE;

	/* determine if device is ATA or ATAPI */
	class = ata_dev_classify(&tf);

	if (class == ATA_DEV_UNKNOWN)
		return ATA_DEV_NONE;
	if ((class == ATA_DEV_ATA) && (ata_chk_status(ap) == 0))
		return ATA_DEV_NONE;
	return class;
}
/**
 *	ata_id_string - Convert IDENTIFY DEVICE page into string
 *	@id: IDENTIFY DEVICE results we will examine
 *	@s: string into which data is output
 *	@ofs: offset into identify device page
 *	@len: length of string to return. must be an even number.
 *
 *	The strings in the IDENTIFY DEVICE page are broken up into
 *	16-bit chunks.  Run through the string, and output each
 *	8-bit chunk linearly, regardless of platform.
 *
 *	LOCKING:
 *	caller.
 */
void ata_id_string(const u16 *id, unsigned char *s,
		   unsigned int ofs, unsigned int len)
{
	unsigned int c;

	while (len > 0) {
		c = id[ofs] >> 8;	/* high byte first */
		*s = c;
		s++;

		c = id[ofs] & 0xff;	/* then low byte */
		*s = c;
		s++;

		ofs++;
		len -= 2;
	}
}
/**
 *	ata_id_c_string - Convert IDENTIFY DEVICE page into C string
 *	@id: IDENTIFY DEVICE results we will examine
 *	@s: string into which data is output
 *	@ofs: offset into identify device page
 *	@len: length of string to return. must be an odd number.
 *
 *	This function is identical to ata_id_string except that it
 *	trims trailing spaces and terminates the resulting string with
 *	null.  @len must be actual maximum length (even number) + 1.
 *
 *	LOCKING:
 *	caller.
 */
void ata_id_c_string(const u16 *id, unsigned char *s,
		     unsigned int ofs, unsigned int len)
{
	unsigned char *p;

	WARN_ON(!(len & 1));

	ata_id_string(id, s, ofs, len - 1);

	p = s + strnlen(s, len - 1);
	while (p > s && p[-1] == ' ')
		p--;
	*p = '\0';
}
static u64 ata_id_n_sectors(const u16 *id)
{
	if (ata_id_has_lba(id)) {
		if (ata_id_has_lba48(id))
			return ata_id_u64(id, 100);
		else
			return ata_id_u32(id, 60);
	} else {
		if (ata_id_current_chs_valid(id))
			return ata_id_u32(id, 57);
		else
			return id[1] * id[3] * id[6];
	}
}
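
/*
 * Worked example (illustrative): words 1/3/6 hold the default CHS
 * geometry, so an old drive reporting 1048 cylinders, 16 heads and
 * 63 sectors yields 1048 * 16 * 63 = 1,056,384 sectors (~540 MB at
 * 512 bytes each).  LBA28 devices report a 32-bit count in words
 * 60-61, LBA48 devices a 64-bit count in words 100-103.
 */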
/**
 *	ata_noop_dev_select - Select device 0/1 on ATA bus
 *	@ap: ATA channel to manipulate
 *	@device: ATA device (numbered from zero) to select
 *
 *	This function performs no action (it is a no-op).
 *
 *	May be used as the dev_select() entry in ata_port_operations.
 *
 *	LOCKING:
 *	caller.
 */
void ata_noop_dev_select (struct ata_port *ap, unsigned int device)
{
}
/**
 *	ata_std_dev_select - Select device 0/1 on ATA bus
 *	@ap: ATA channel to manipulate
 *	@device: ATA device (numbered from zero) to select
 *
 *	Use the method defined in the ATA specification to
 *	make either device 0, or device 1, active on the
 *	ATA channel.  Works with both PIO and MMIO.
 *
 *	May be used as the dev_select() entry in ata_port_operations.
 *
 *	LOCKING:
 *	caller.
 */
void ata_std_dev_select (struct ata_port *ap, unsigned int device)
{
	u8 tmp;

	if (device == 0)
		tmp = ATA_DEVICE_OBS;
	else
		tmp = ATA_DEVICE_OBS | ATA_DEV1;

	if (ap->flags & ATA_FLAG_MMIO) {
		writeb(tmp, (void __iomem *) ap->ioaddr.device_addr);
	} else {
		outb(tmp, ap->ioaddr.device_addr);
	}
	ata_pause(ap);	/* needed; also flushes, for mmio */
}
/**
 *	ata_dev_select - Select device 0/1 on ATA bus
 *	@ap: ATA channel to manipulate
 *	@device: ATA device (numbered from zero) to select
 *	@wait: non-zero to wait for Status register BSY bit to clear
 *	@can_sleep: non-zero if context allows sleeping
 *
 *	Use the method defined in the ATA specification to
 *	make either device 0, or device 1, active on the
 *	ATA channel.
 *
 *	This is a high-level version of ata_std_dev_select(),
 *	which additionally provides the services of inserting
 *	the proper pauses and status polling, where needed.
 *
 *	LOCKING:
 *	caller.
 */
void ata_dev_select(struct ata_port *ap, unsigned int device,
		    unsigned int wait, unsigned int can_sleep)
{
	if (ata_msg_probe(ap))
		ata_port_printk(ap, KERN_INFO, "ata_dev_select: ENTER, ata%u: "
				"device %u, wait %u\n", ap->id, device, wait);

	if (wait)
		ata_wait_idle(ap);

	ap->ops->dev_select(ap, device);

	if (wait) {
		if (can_sleep && ap->device[device].class == ATA_DEV_ATAPI)
			msleep(150);
		ata_wait_idle(ap);
	}
}
/**
 *	ata_dump_id - IDENTIFY DEVICE info debugging output
 *	@id: IDENTIFY DEVICE page to dump
 *
 *	Dump selected 16-bit words from the given IDENTIFY DEVICE
 *	page.
 *
 *	LOCKING:
 *	caller.
 */
static inline void ata_dump_id(const u16 *id)
{
	DPRINTK("49==0x%04x  "
		"53==0x%04x  "
		"63==0x%04x  "
		"64==0x%04x  "
		"75==0x%04x  \n",
		id[49], id[53], id[63], id[64], id[75]);
	DPRINTK("80==0x%04x  "
		"81==0x%04x  "
		"82==0x%04x  "
		"83==0x%04x  "
		"84==0x%04x  \n",
		id[80], id[81], id[82], id[83], id[84]);
	DPRINTK("88==0x%04x  "
		"93==0x%04x\n",
		id[88], id[93]);
}
/**
 *	ata_id_xfermask - Compute xfermask from the given IDENTIFY data
 *	@id: IDENTIFY data to compute xfer mask from
 *
 *	Compute the xfermask for this device.  This is not as trivial
 *	as it seems if we must consider early devices correctly.
 *
 *	FIXME: pre IDE drive timing (do we care ?).
 *
 *	LOCKING:
 *	None.
 *
 *	RETURNS:
 *	Computed xfermask
 */
static unsigned int ata_id_xfermask(const u16 *id)
{
	unsigned int pio_mask, mwdma_mask, udma_mask;

	/* Usual case.  Word 53 indicates word 64 is valid */
	if (id[ATA_ID_FIELD_VALID] & (1 << 1)) {
		pio_mask = id[ATA_ID_PIO_MODES] & 0x03;
		pio_mask <<= 3;
		pio_mask |= 0x7;
	} else {
		/* If word 64 isn't valid then Word 51 high byte holds
		 * the PIO timing number for the maximum.  Turn it into
		 * a mask.
		 */
		pio_mask = (2 << (id[ATA_ID_OLD_PIO_MODES] & 0xFF)) - 1 ;

		/* But wait.. there's more.  Design your standards by
		 * committee and you too can get a free iordy field to
		 * process.  However it's the speeds, not the modes, that
		 * are supported...  Note drivers using the timing API
		 * will get this right anyway.
		 */
	}

	mwdma_mask = id[ATA_ID_MWDMA_MODES] & 0x07;

	if (ata_id_is_cfa(id)) {
		/*
		 *	Process compact flash extended modes
		 */
		int pio = id[163] & 0x7;
		int dma = (id[163] >> 3) & 7;

		if (pio)
			pio_mask |= (1 << 5);
		if (pio > 1)
			pio_mask |= (1 << 6);
		if (dma)
			mwdma_mask |= (1 << 3);
		if (dma > 1)
			mwdma_mask |= (1 << 4);
	}

	udma_mask = 0;
	if (id[ATA_ID_FIELD_VALID] & (1 << 2))
		udma_mask = id[ATA_ID_UDMA_MODES] & 0xff;

	return ata_pack_xfermask(pio_mask, mwdma_mask, udma_mask);
}
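
/*
 * Worked example (illustrative): a drive with word 53 bit 1 set,
 * word 64 = 0x03 (advanced PIO3-4), word 63 = 0x07 (MWDMA0-2) and
 * word 88 = 0x3f (UDMA0-5) computes pio_mask = (0x03 << 3) | 0x7 =
 * 0x1f, so the result is ata_pack_xfermask(0x1f, 0x07, 0x3f).
 */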
/**
 *	ata_port_queue_task - Queue port_task
 *	@ap: The ata_port to queue port_task for
 *	@fn: workqueue function to be scheduled
 *	@data: data value to pass to workqueue function
 *	@delay: delay time for workqueue function
 *
 *	Schedule @fn(@data) for execution after @delay jiffies using
 *	port_task.  There is one port_task per port and it's the
 *	user's (low level driver's) responsibility to make sure that
 *	only one task is active at any given time.
 *
 *	libata core layer takes care of synchronization between
 *	port_task and EH.  ata_port_queue_task() may be ignored for EH
 *	and SCSI paths.
 *
 *	LOCKING:
 *	Inherited from caller.
 */
void ata_port_queue_task(struct ata_port *ap, void (*fn)(void *), void *data,
			 unsigned long delay)
{
	int rc;

	if (ap->pflags & ATA_PFLAG_FLUSH_PORT_TASK)
		return;

	PREPARE_WORK(&ap->port_task, fn, data);

	if (!delay)
		rc = queue_work(ata_wq, &ap->port_task);
	else
		rc = queue_delayed_work(ata_wq, &ap->port_task, delay);

	/* rc == 0 means that another user is using port task */
	WARN_ON(rc == 0);
}
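
/*
 * Illustrative sketch (not in the original file): a PIO-polling LLD
 * could defer its state machine like this, where my_pio_task() is a
 * hypothetical driver-private handler:
 *
 *	ata_port_queue_task(ap, my_pio_task, ap, msecs_to_jiffies(10));
 *
 * Only one such task may be outstanding per port; EH drains it with
 * ata_port_flush_task() below.
 */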
/**
 *	ata_port_flush_task - Flush port_task
 *	@ap: The ata_port to flush port_task for
 *
 *	After this function completes, port_task is guaranteed not to
 *	be running or scheduled.
 *
 *	LOCKING:
 *	Kernel thread context (may sleep)
 */
void ata_port_flush_task(struct ata_port *ap)
{
	unsigned long flags;

	DPRINTK("ENTER\n");

	spin_lock_irqsave(ap->lock, flags);
	ap->pflags |= ATA_PFLAG_FLUSH_PORT_TASK;
	spin_unlock_irqrestore(ap->lock, flags);

	DPRINTK("flush #1\n");
	flush_workqueue(ata_wq);

	/*
	 * At this point, if a task is running, it's guaranteed to see
	 * the FLUSH flag; thus, it will never queue pio tasks again.
	 * Cancel and flush.
	 */
	if (!cancel_delayed_work(&ap->port_task)) {
		if (ata_msg_ctl(ap))
			ata_port_printk(ap, KERN_DEBUG, "%s: flush #2\n",
					__FUNCTION__);
		flush_workqueue(ata_wq);
	}

	spin_lock_irqsave(ap->lock, flags);
	ap->pflags &= ~ATA_PFLAG_FLUSH_PORT_TASK;
	spin_unlock_irqrestore(ap->lock, flags);

	if (ata_msg_ctl(ap))
		ata_port_printk(ap, KERN_DEBUG, "%s: EXIT\n", __FUNCTION__);
}
void ata_qc_complete_internal(struct ata_queued_cmd *qc)
{
	struct completion *waiting = qc->private_data;

	complete(waiting);
}
/**
 *	ata_exec_internal - execute libata internal command
 *	@dev: Device to which the command is sent
 *	@tf: Taskfile registers for the command and the result
 *	@cdb: CDB for packet command
 *	@dma_dir: Data transfer direction of the command
 *	@buf: Data buffer of the command
 *	@buflen: Length of data buffer
 *
 *	Executes libata internal command with timeout.  @tf contains
 *	command on entry and result on return.  Timeout and error
 *	conditions are reported via return value.  No recovery action
 *	is taken after a command times out.  It's the caller's duty to
 *	clean up after timeout.
 *
 *	LOCKING:
 *	None.  Should be called with kernel context, might sleep.
 *
 *	RETURNS:
 *	Zero on success, AC_ERR_* mask on failure
 */
unsigned ata_exec_internal(struct ata_device *dev,
			   struct ata_taskfile *tf, const u8 *cdb,
			   int dma_dir, void *buf, unsigned int buflen)
{
	struct ata_port *ap = dev->ap;
	u8 command = tf->command;
	struct ata_queued_cmd *qc;
	unsigned int tag, preempted_tag;
	u32 preempted_sactive, preempted_qc_active;
	DECLARE_COMPLETION_ONSTACK(wait);
	unsigned long flags;
	unsigned int err_mask;
	int rc;

	spin_lock_irqsave(ap->lock, flags);

	/* no internal command while frozen */
	if (ap->pflags & ATA_PFLAG_FROZEN) {
		spin_unlock_irqrestore(ap->lock, flags);
		return AC_ERR_SYSTEM;
	}

	/* initialize internal qc */

	/* XXX: Tag 0 is used for drivers with legacy EH as some
	 * drivers choke if any other tag is given.  This breaks
	 * ata_tag_internal() test for those drivers.  Don't use new
	 * EH stuff without converting to it.
	 */
	if (ap->ops->error_handler)
		tag = ATA_TAG_INTERNAL;
	else
		tag = 0;

	if (test_and_set_bit(tag, &ap->qc_allocated))
		BUG();
	qc = __ata_qc_from_tag(ap, tag);

	qc->tag = tag;
	qc->scsicmd = NULL;
	qc->ap = ap;
	qc->dev = dev;
	ata_qc_reinit(qc);

	preempted_tag = ap->active_tag;
	preempted_sactive = ap->sactive;
	preempted_qc_active = ap->qc_active;
	ap->active_tag = ATA_TAG_POISON;
	ap->sactive = 0;
	ap->qc_active = 0;

	/* prepare & issue qc */
	qc->tf = *tf;
	if (cdb)
		memcpy(qc->cdb, cdb, ATAPI_CDB_LEN);
	qc->flags |= ATA_QCFLAG_RESULT_TF;
	qc->dma_dir = dma_dir;
	if (dma_dir != DMA_NONE) {
		ata_sg_init_one(qc, buf, buflen);
		qc->nsect = buflen / ATA_SECT_SIZE;
	}

	qc->private_data = &wait;
	qc->complete_fn = ata_qc_complete_internal;

	ata_qc_issue(qc);

	spin_unlock_irqrestore(ap->lock, flags);

	rc = wait_for_completion_timeout(&wait, ata_probe_timeout);

	ata_port_flush_task(ap);

	if (!rc) {
		spin_lock_irqsave(ap->lock, flags);

		/* We're racing with irq here.  If we lose, the
		 * following test prevents us from completing the qc
		 * twice.  If we win, the port is frozen and will be
		 * cleaned up by ->post_internal_cmd().
		 */
		if (qc->flags & ATA_QCFLAG_ACTIVE) {
			qc->err_mask |= AC_ERR_TIMEOUT;

			if (ap->ops->error_handler)
				ata_port_freeze(ap);
			else
				ata_qc_complete(qc);

			if (ata_msg_warn(ap))
				ata_dev_printk(dev, KERN_WARNING,
					       "qc timeout (cmd 0x%x)\n",
					       command);
		}

		spin_unlock_irqrestore(ap->lock, flags);
	}

	/* do post_internal_cmd */
	if (ap->ops->post_internal_cmd)
		ap->ops->post_internal_cmd(qc);

	if (qc->flags & ATA_QCFLAG_FAILED && !qc->err_mask) {
		if (ata_msg_warn(ap))
			ata_dev_printk(dev, KERN_WARNING,
				       "zero err_mask for failed "
				       "internal command, assuming AC_ERR_OTHER\n");
		qc->err_mask |= AC_ERR_OTHER;
	}

	/* finish up */
	spin_lock_irqsave(ap->lock, flags);

	*tf = qc->result_tf;
	err_mask = qc->err_mask;

	ata_qc_free(qc);
	ap->active_tag = preempted_tag;
	ap->sactive = preempted_sactive;
	ap->qc_active = preempted_qc_active;

	/* XXX - Some LLDDs (sata_mv) disable port on command failure.
	 * Until those drivers are fixed, we detect the condition
	 * here, fail the command with AC_ERR_SYSTEM and reenable the
	 * port.
	 *
	 * Note that this doesn't change any behavior as internal
	 * command failure results in disabling the device in the
	 * higher layer for LLDDs without new reset/EH callbacks.
	 *
	 * Kill the following code as soon as those drivers are fixed.
	 */
	if (ap->flags & ATA_FLAG_DISABLED) {
		err_mask |= AC_ERR_SYSTEM;
		ata_port_probe(ap);
	}

	spin_unlock_irqrestore(ap->lock, flags);

	return err_mask;
}
/**
 *	ata_do_simple_cmd - execute simple internal command
 *	@dev: Device to which the command is sent
 *	@cmd: Opcode to execute
 *
 *	Execute a 'simple' command, that only consists of the opcode
 *	'cmd' itself, without filling any other registers.
 *
 *	LOCKING:
 *	Kernel thread context (may sleep).
 *
 *	RETURNS:
 *	Zero on success, AC_ERR_* mask on failure
 */
unsigned int ata_do_simple_cmd(struct ata_device *dev, u8 cmd)
{
	struct ata_taskfile tf;

	ata_tf_init(dev, &tf);

	tf.command = cmd;
	tf.flags |= ATA_TFLAG_DEVICE;
	tf.protocol = ATA_PROT_NODATA;

	return ata_exec_internal(dev, &tf, NULL, DMA_NONE, NULL, 0);
}
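
/*
 * Illustrative sketch (not in the original file): power-management
 * style paths can issue register-only commands through this helper,
 * e.g.
 *
 *	err_mask = ata_do_simple_cmd(dev, ATA_CMD_STANDBYNOW1);
 *
 * A non-zero return carries AC_ERR_* bits.  Any command that needs a
 * data transfer or extra taskfile registers must build a full
 * taskfile and call ata_exec_internal() instead.
 */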
/**
 *	ata_pio_need_iordy - check if iordy needed
 *	@adev: ATA device
 *
 *	Check if the current speed of the device requires IORDY.  Used
 *	by various controllers for chip configuration.
 */
unsigned int ata_pio_need_iordy(const struct ata_device *adev)
{
	int pio;
	int speed = adev->pio_mode - XFER_PIO_0;

	if (speed < 2)
		return 0;
	if (speed > 2)
		return 1;

	/* If we have no drive specific rule, then PIO 2 is non IORDY */

	if (adev->id[ATA_ID_FIELD_VALID] & 2) {	/* EIDE */
		pio = adev->id[ATA_ID_EIDE_PIO];
		/* Is the speed faster than the drive allows non IORDY ? */
		if (pio) {
			/* This is cycle times not frequency - watch the logic! */
			if (pio > 240)	/* PIO2 is 240nS per cycle */
				return 1;
			return 0;
		}
	}
	return 0;
}
/**
 *	ata_dev_read_id - Read ID data from the specified device
 *	@dev: target device
 *	@p_class: pointer to class of the target device (may be changed)
 *	@post_reset: is this read ID post-reset?
 *	@id: buffer to read IDENTIFY data into
 *
 *	Read ID data from the specified device.  ATA_CMD_ID_ATA is
 *	performed on ATA devices and ATA_CMD_ID_ATAPI on ATAPI
 *	devices.  This function also issues ATA_CMD_INIT_DEV_PARAMS
 *	for pre-ATA4 drives.
 *
 *	LOCKING:
 *	Kernel thread context (may sleep)
 *
 *	RETURNS:
 *	0 on success, -errno otherwise.
 */
int ata_dev_read_id(struct ata_device *dev, unsigned int *p_class,
		    int post_reset, u16 *id)
{
	struct ata_port *ap = dev->ap;
	unsigned int class = *p_class;
	struct ata_taskfile tf;
	unsigned int err_mask = 0;
	const char *reason;
	int rc;

	if (ata_msg_ctl(ap))
		ata_dev_printk(dev, KERN_DEBUG, "%s: ENTER, host %u, dev %u\n",
			       __FUNCTION__, ap->id, dev->devno);

	ata_dev_select(ap, dev->devno, 1, 1); /* select device 0/1 */

 retry:
	ata_tf_init(dev, &tf);

	switch (class) {
	case ATA_DEV_ATA:
		tf.command = ATA_CMD_ID_ATA;
		break;
	case ATA_DEV_ATAPI:
		tf.command = ATA_CMD_ID_ATAPI;
		break;
	default:
		rc = -ENODEV;
		reason = "unsupported class";
		goto err_out;
	}

	tf.protocol = ATA_PROT_PIO;

	err_mask = ata_exec_internal(dev, &tf, NULL, DMA_FROM_DEVICE,
				     id, sizeof(id[0]) * ATA_ID_WORDS);
	if (err_mask) {
		rc = -EIO;
		reason = "I/O error";
		goto err_out;
	}

	swap_buf_le16(id, ATA_ID_WORDS);

	/* sanity check */
	rc = -EINVAL;
	reason = "device reports illegal type";

	if (class == ATA_DEV_ATA) {
		if (!ata_id_is_ata(id) && !ata_id_is_cfa(id))
			goto err_out;
	} else {
		if (ata_id_is_ata(id))
			goto err_out;
	}

	if (post_reset && class == ATA_DEV_ATA) {
		/*
		 * The exact sequence expected by certain pre-ATA4 drives is:
		 * SRST RESET
		 * IDENTIFY
		 * INITIALIZE DEVICE PARAMETERS
		 * anything else..
		 * Some drives were very specific about that exact sequence.
		 */
		if (ata_id_major_version(id) < 4 || !ata_id_has_lba(id)) {
			err_mask = ata_dev_init_params(dev, id[3], id[6]);
			if (err_mask) {
				rc = -EIO;
				reason = "INIT_DEV_PARAMS failed";
				goto err_out;
			}

			/* current CHS translation info (id[53-58]) might be
			 * changed. reread the identify device info.
			 */
			post_reset = 0;
			goto retry;
		}
	}

	*p_class = class;

	return 0;

 err_out:
	if (ata_msg_warn(ap))
		ata_dev_printk(dev, KERN_WARNING, "failed to IDENTIFY "
			       "(%s, err_mask=0x%x)\n", reason, err_mask);
	return rc;
}
static inline u8 ata_dev_knobble(struct ata_device *dev)
{
	return ((dev->ap->cbl == ATA_CBL_SATA) && (!ata_id_is_sata(dev->id)));
}
static void ata_dev_config_ncq(struct ata_device *dev,
			       char *desc, size_t desc_sz)
{
	struct ata_port *ap = dev->ap;
	int hdepth = 0, ddepth = ata_id_queue_depth(dev->id);

	if (!ata_id_has_ncq(dev->id)) {
		desc[0] = '\0';
		return;
	}

	if (ap->flags & ATA_FLAG_NCQ) {
		hdepth = min(ap->scsi_host->can_queue, ATA_MAX_QUEUE - 1);
		dev->flags |= ATA_DFLAG_NCQ;
	}

	if (hdepth >= ddepth)
		snprintf(desc, desc_sz, "NCQ (depth %d)", ddepth);
	else
		snprintf(desc, desc_sz, "NCQ (depth %d/%d)", hdepth, ddepth);
}
static void ata_set_port_max_cmd_len(struct ata_port *ap)
{
	int i;

	if (ap->scsi_host) {
		unsigned int len = 0;

		for (i = 0; i < ATA_MAX_DEVICES; i++)
			len = max(len, ap->device[i].cdb_len);

		ap->scsi_host->max_cmd_len = len;
	}
}
/**
 *	ata_dev_configure - Configure the specified ATA/ATAPI device
 *	@dev: Target device to configure
 *	@print_info: Enable device info printout
 *
 *	Configure @dev according to @dev->id.  Generic and low-level
 *	driver specific fixups are also applied.
 *
 *	LOCKING:
 *	Kernel thread context (may sleep)
 *
 *	RETURNS:
 *	0 on success, -errno otherwise
 */
int ata_dev_configure(struct ata_device *dev, int print_info)
{
	struct ata_port *ap = dev->ap;
	const u16 *id = dev->id;
	unsigned int xfer_mask;
	char revbuf[7];		/* XYZ-99\0 */
	int rc;

	if (!ata_dev_enabled(dev) && ata_msg_info(ap)) {
		ata_dev_printk(dev, KERN_INFO,
			       "%s: ENTER/EXIT (host %u, dev %u) -- nodev\n",
			       __FUNCTION__, ap->id, dev->devno);
		return 0;
	}

	if (ata_msg_probe(ap))
		ata_dev_printk(dev, KERN_DEBUG, "%s: ENTER, host %u, dev %u\n",
			       __FUNCTION__, ap->id, dev->devno);

	/* print device capabilities */
	if (ata_msg_probe(ap))
		ata_dev_printk(dev, KERN_DEBUG,
			       "%s: cfg 49:%04x 82:%04x 83:%04x 84:%04x "
			       "85:%04x 86:%04x 87:%04x 88:%04x\n",
			       __FUNCTION__,
			       id[49], id[82], id[83], id[84],
			       id[85], id[86], id[87], id[88]);

	/* initialize to-be-configured parameters */
	dev->flags &= ~ATA_DFLAG_CFG_MASK;
	dev->max_sectors = 0;
	dev->cdb_len = 0;
	dev->n_sectors = 0;
	dev->cylinders = 0;
	dev->heads = 0;
	dev->sectors = 0;

	/*
	 * common ATA, ATAPI feature tests
	 */

	/* find max transfer mode; for printk only */
	xfer_mask = ata_id_xfermask(id);

	if (ata_msg_probe(ap))
		ata_dump_id(id);

	/* ATA-specific feature tests */
	if (dev->class == ATA_DEV_ATA) {
		if (ata_id_is_cfa(id)) {
			if (id[162] & 1) /* CPRM may make this media unusable */
				ata_dev_printk(dev, KERN_WARNING,
					       "ata%u: device %u supports DRM "
					       "functions and may not be fully "
					       "accessible.\n",
					       ap->id, dev->devno);
			snprintf(revbuf, 7, "CFA");
		}
		else
			snprintf(revbuf, 7, "ATA-%d", ata_id_major_version(id));

		dev->n_sectors = ata_id_n_sectors(id);

		if (ata_id_has_lba(id)) {
			const char *lba_desc;
			char ncq_desc[20];

			lba_desc = "LBA";
			dev->flags |= ATA_DFLAG_LBA;
			if (ata_id_has_lba48(id)) {
				dev->flags |= ATA_DFLAG_LBA48;
				lba_desc = "LBA48";
			}

			/* config NCQ */
			ata_dev_config_ncq(dev, ncq_desc, sizeof(ncq_desc));

			/* print device info to dmesg */
			if (ata_msg_drv(ap) && print_info)
				ata_dev_printk(dev, KERN_INFO, "%s, "
					"max %s, %Lu sectors: %s %s\n",
					revbuf,
					ata_mode_string(xfer_mask),
					(unsigned long long)dev->n_sectors,
					lba_desc, ncq_desc);
		} else {
			/* CHS */

			/* Default translation */
			dev->cylinders	= id[1];
			dev->heads	= id[3];
			dev->sectors	= id[6];

			if (ata_id_current_chs_valid(id)) {
				/* Current CHS translation is valid. */
				dev->cylinders = id[54];
				dev->heads     = id[55];
				dev->sectors   = id[56];
			}

			/* print device info to dmesg */
			if (ata_msg_drv(ap) && print_info)
				ata_dev_printk(dev, KERN_INFO, "%s, "
					"max %s, %Lu sectors: CHS %u/%u/%u\n",
					revbuf,
					ata_mode_string(xfer_mask),
					(unsigned long long)dev->n_sectors,
					dev->cylinders, dev->heads,
					dev->sectors);
		}

		if (dev->id[59] & 0x100) {
			/* Number of sectors per multi-sector transfer */
			dev->multi_count = dev->id[59] & 0xff;
			if (ata_msg_drv(ap) && print_info)
				ata_dev_printk(dev, KERN_INFO,
					"ata%u: dev %u multi count %u\n",
					ap->id, dev->devno, dev->multi_count);
		}

		dev->cdb_len = 16;
	}

	/* ATAPI-specific feature tests */
	else if (dev->class == ATA_DEV_ATAPI) {
		char *cdb_intr_string = "";

		rc = atapi_cdb_len(id);
		if ((rc < 12) || (rc > ATAPI_CDB_LEN)) {
			if (ata_msg_warn(ap))
				ata_dev_printk(dev, KERN_WARNING,
					       "unsupported CDB len\n");
			rc = -EINVAL;
			goto err_out_nosup;
		}
		dev->cdb_len = (unsigned int) rc;

		if (ata_id_cdb_intr(dev->id)) {
			dev->flags |= ATA_DFLAG_CDB_INTR;
			cdb_intr_string = ", CDB intr";
		}

		/* print device info to dmesg */
		if (ata_msg_drv(ap) && print_info)
			ata_dev_printk(dev, KERN_INFO, "ATAPI, max %s%s\n",
				       ata_mode_string(xfer_mask),
				       cdb_intr_string);
	}

	ata_set_port_max_cmd_len(ap);

	/* limit bridge transfers to udma5, 200 sectors */
	if (ata_dev_knobble(dev)) {
		if (ata_msg_drv(ap) && print_info)
			ata_dev_printk(dev, KERN_INFO,
				       "applying bridge limits\n");
		dev->udma_mask &= ATA_UDMA5;
		dev->max_sectors = ATA_MAX_SECTORS;
	}

	if (ap->ops->dev_config)
		ap->ops->dev_config(ap, dev);

	if (ata_msg_probe(ap))
		ata_dev_printk(dev, KERN_DEBUG, "%s: EXIT, drv_stat = 0x%x\n",
			       __FUNCTION__, ata_chk_status(ap));
	return 0;

err_out_nosup:
	if (ata_msg_probe(ap))
		ata_dev_printk(dev, KERN_DEBUG,
			       "%s: EXIT, err\n", __FUNCTION__);
	return rc;
}
/**
 *	ata_bus_probe - Reset and probe ATA bus
 *	@ap: Bus to probe
 *
 *	Master ATA bus probing function.  Initiates a hardware-dependent
 *	bus reset, then attempts to identify any devices found on
 *	the bus.
 *
 *	LOCKING:
 *	PCI/etc. bus probe sem.
 *
 *	RETURNS:
 *	Zero on success, negative errno otherwise.
 */
int ata_bus_probe(struct ata_port *ap)
{
	unsigned int classes[ATA_MAX_DEVICES];
	int tries[ATA_MAX_DEVICES];
	int i, rc, down_xfermask;
	struct ata_device *dev;

	ata_port_probe(ap);

	for (i = 0; i < ATA_MAX_DEVICES; i++)
		tries[i] = ATA_PROBE_MAX_TRIES;

 retry:
	down_xfermask = 0;

	/* reset and determine device classes */
	ap->ops->phy_reset(ap);

	for (i = 0; i < ATA_MAX_DEVICES; i++) {
		dev = &ap->device[i];

		if (!(ap->flags & ATA_FLAG_DISABLED) &&
		    dev->class != ATA_DEV_UNKNOWN)
			classes[dev->devno] = dev->class;
		else
			classes[dev->devno] = ATA_DEV_NONE;

		dev->class = ATA_DEV_UNKNOWN;
	}

	ata_port_probe(ap);

	/* after the reset the device state is PIO 0 and the controller
	   state is undefined. Record the mode */

	for (i = 0; i < ATA_MAX_DEVICES; i++)
		ap->device[i].pio_mode = XFER_PIO_0;

	/* read IDENTIFY page and configure devices */
	for (i = 0; i < ATA_MAX_DEVICES; i++) {
		dev = &ap->device[i];

		if (tries[i])
			dev->class = classes[i];

		if (!ata_dev_enabled(dev))
			continue;

		rc = ata_dev_read_id(dev, &dev->class, 1, dev->id);
		if (rc)
			goto fail;

		rc = ata_dev_configure(dev, 1);
		if (rc)
			goto fail;
	}

	/* configure transfer mode */
	rc = ata_set_mode(ap, &dev);
	if (rc) {
		down_xfermask = 1;
		goto fail;
	}

	for (i = 0; i < ATA_MAX_DEVICES; i++)
		if (ata_dev_enabled(&ap->device[i]))
			return 0;

	/* no device present, disable port */
	ata_port_disable(ap);
	ap->ops->port_disable(ap);
	return -ENODEV;

 fail:
	switch (rc) {
	case -EINVAL:
	case -ENODEV:
		tries[dev->devno] = 0;
		break;
	case -EIO:
		sata_down_spd_limit(ap);
		/* fall through */
	default:
		tries[dev->devno]--;
		if (down_xfermask &&
		    ata_down_xfermask_limit(dev, tries[dev->devno] == 1))
			tries[dev->devno] = 0;
	}

	if (!tries[dev->devno]) {
		ata_down_xfermask_limit(dev, 1);
		ata_dev_disable(dev);
	}

	goto retry;
}
/**
 *	ata_port_probe - Mark port as enabled
 *	@ap: Port for which we indicate enablement
 *
 *	Modify @ap data structure such that the system
 *	thinks that the entire port is enabled.
 *
 *	LOCKING: host lock, or some other form of
 *	serialization.
 */
void ata_port_probe(struct ata_port *ap)
{
	ap->flags &= ~ATA_FLAG_DISABLED;
}
/**
 *	sata_print_link_status - Print SATA link status
 *	@ap: SATA port to printk link status about
 *
 *	This function prints link speed and status of a SATA link.
 *
 *	LOCKING:
 *	None.
 */
static void sata_print_link_status(struct ata_port *ap)
{
	u32 sstatus, scontrol, tmp;

	if (sata_scr_read(ap, SCR_STATUS, &sstatus))
		return;
	sata_scr_read(ap, SCR_CONTROL, &scontrol);

	if (ata_port_online(ap)) {
		tmp = (sstatus >> 4) & 0xf;
		ata_port_printk(ap, KERN_INFO,
				"SATA link up %s (SStatus %X SControl %X)\n",
				sata_spd_string(tmp), sstatus, scontrol);
	} else {
		ata_port_printk(ap, KERN_INFO,
				"SATA link down (SStatus %X SControl %X)\n",
				sstatus, scontrol);
	}
}
/**
 *	__sata_phy_reset - Wake/reset a low-level SATA PHY
 *	@ap: SATA port associated with target SATA PHY.
 *
 *	This function issues commands to standard SATA Sxxx
 *	PHY registers, to wake up the phy (and device), and
 *	clear any reset condition.
 *
 *	LOCKING:
 *	PCI/etc. bus probe sem.
 */
void __sata_phy_reset(struct ata_port *ap)
{
	u32 sstatus;
	unsigned long timeout = jiffies + (HZ * 5);

	if (ap->flags & ATA_FLAG_SATA_RESET) {
		/* issue phy wake/reset */
		sata_scr_write_flush(ap, SCR_CONTROL, 0x301);
		/* Couldn't find anything in SATA I/II specs, but
		 * AHCI-1.1 10.4.2 says at least 1 ms. */
		mdelay(1);
	}
	/* phy wake/clear reset */
	sata_scr_write_flush(ap, SCR_CONTROL, 0x300);

	/* wait for phy to become ready, if necessary */
	do {
		msleep(200);
		sata_scr_read(ap, SCR_STATUS, &sstatus);
		if ((sstatus & 0xf) != 1)
			break;
	} while (time_before(jiffies, timeout));

	/* print link status */
	sata_print_link_status(ap);

	/* TODO: phy layer with polling, timeouts, etc. */
	if (!ata_port_offline(ap))
		ata_port_probe(ap);
	else
		ata_port_disable(ap);

	if (ap->flags & ATA_FLAG_DISABLED)
		return;

	if (ata_busy_sleep(ap, ATA_TMOUT_BOOT_QUICK, ATA_TMOUT_BOOT)) {
		ata_port_disable(ap);
		return;
	}

	ap->cbl = ATA_CBL_SATA;
}
/**
 *	sata_phy_reset - Reset SATA bus.
 *	@ap: SATA port associated with target SATA PHY.
 *
 *	This function resets the SATA bus, and then probes
 *	the bus for devices.
 *
 *	LOCKING:
 *	PCI/etc. bus probe sem.
 */
void sata_phy_reset(struct ata_port *ap)
{
	__sata_phy_reset(ap);
	if (ap->flags & ATA_FLAG_DISABLED)
		return;
	ata_bus_reset(ap);
}
/**
 *	ata_dev_pair - return other device on cable
 *	@adev: device
 *
 *	Obtain the other device on the same cable, or if none is
 *	present, NULL is returned.
 */
struct ata_device *ata_dev_pair(struct ata_device *adev)
{
	struct ata_port *ap = adev->ap;
	struct ata_device *pair = &ap->device[1 - adev->devno];
	if (!ata_dev_enabled(pair))
		return NULL;
	return pair;
}
/**
 *	ata_port_disable - Disable port.
 *	@ap: Port to be disabled.
 *
 *	Modify @ap data structure such that the system
 *	thinks that the entire port is disabled, and should
 *	never attempt to probe or communicate with devices
 *	on this port.
 *
 *	LOCKING: host lock, or some other form of
 *	serialization.
 */
void ata_port_disable(struct ata_port *ap)
{
	ap->device[0].class = ATA_DEV_NONE;
	ap->device[1].class = ATA_DEV_NONE;
	ap->flags |= ATA_FLAG_DISABLED;
}
/**
 *	sata_down_spd_limit - adjust SATA spd limit downward
 *	@ap: Port to adjust SATA spd limit for
 *
 *	Adjust SATA spd limit of @ap downward.  Note that this
 *	function only adjusts the limit.  The change must be applied
 *	using sata_set_spd().
 *
 *	LOCKING:
 *	Inherited from caller.
 *
 *	RETURNS:
 *	0 on success, negative errno on failure
 */
int sata_down_spd_limit(struct ata_port *ap)
{
	u32 sstatus, spd, mask;
	int rc, highbit;

	rc = sata_scr_read(ap, SCR_STATUS, &sstatus);
	if (rc)
		return rc;

	mask = ap->sata_spd_limit;
	if (mask <= 1)
		return -EINVAL;
	highbit = fls(mask) - 1;
	mask &= ~(1 << highbit);

	spd = (sstatus >> 4) & 0xf;
	if (spd <= 1)
		return -EINVAL;
	spd--;
	mask &= (1 << spd) - 1;
	if (!mask)
		return -EINVAL;

	ap->sata_spd_limit = mask;

	ata_port_printk(ap, KERN_WARNING, "limiting SATA link speed to %s\n",
			sata_spd_string(fls(mask)));

	return 0;
}
static int __sata_set_spd_needed(struct ata_port *ap, u32 *scontrol)
{
	u32 spd, limit;

	if (ap->sata_spd_limit == UINT_MAX)
		limit = 0;
	else
		limit = fls(ap->sata_spd_limit);

	spd = (*scontrol >> 4) & 0xf;
	*scontrol = (*scontrol & ~0xf0) | ((limit & 0xf) << 4);

	return spd != limit;
}
/**
 *	sata_set_spd_needed - is SATA spd configuration needed
 *	@ap: Port in question
 *
 *	Test whether the spd limit in SControl matches
 *	@ap->sata_spd_limit.  This function is used to determine
 *	whether hardreset is necessary to apply SATA spd
 *	configuration.
 *
 *	LOCKING:
 *	Inherited from caller.
 *
 *	RETURNS:
 *	1 if SATA spd configuration is needed, 0 otherwise.
 */
int sata_set_spd_needed(struct ata_port *ap)
{
	u32 scontrol;

	if (sata_scr_read(ap, SCR_CONTROL, &scontrol))
		return 0;

	return __sata_set_spd_needed(ap, &scontrol);
}
/**
 *	sata_set_spd - set SATA spd according to spd limit
 *	@ap: Port to set SATA spd for
 *
 *	Set SATA spd of @ap according to sata_spd_limit.
 *
 *	LOCKING:
 *	Inherited from caller.
 *
 *	RETURNS:
 *	0 if spd doesn't need to be changed, 1 if spd has been
 *	changed.  Negative errno if SCR registers are inaccessible.
 */
int sata_set_spd(struct ata_port *ap)
{
	u32 scontrol;
	int rc;

	if ((rc = sata_scr_read(ap, SCR_CONTROL, &scontrol)))
		return rc;

	if (!__sata_set_spd_needed(ap, &scontrol))
		return 0;

	if ((rc = sata_scr_write(ap, SCR_CONTROL, scontrol)))
		return rc;

	return 1;
}
/*
 * This mode timing computation functionality is ported over from
 * drivers/ide/ide-timing.h and was originally written by Vojtech Pavlik
 */
/*
 * PIO 0-4, MWDMA 0-2 and UDMA 0-6 timings (in nanoseconds).
 * These were taken from ATA/ATAPI-6 standard, rev 0a, except
 * for UDMA6, which is currently supported only by Maxtor drives.
 *
 * For PIO 5/6 MWDMA 3/4 see the CFA specification 3.0.
 */
static const struct ata_timing ata_timing[] = {

	{ XFER_UDMA_6,     0,   0,   0,   0,   0,   0,   0,  15 },
	{ XFER_UDMA_5,     0,   0,   0,   0,   0,   0,   0,  20 },
	{ XFER_UDMA_4,     0,   0,   0,   0,   0,   0,   0,  30 },
	{ XFER_UDMA_3,     0,   0,   0,   0,   0,   0,   0,  45 },

	{ XFER_MW_DMA_4,  25,   0,   0,   0,  55,  20,  80,   0 },
	{ XFER_MW_DMA_3,  25,   0,   0,   0,  65,  25, 100,   0 },
	{ XFER_UDMA_2,     0,   0,   0,   0,   0,   0,   0,  60 },
	{ XFER_UDMA_1,     0,   0,   0,   0,   0,   0,   0,  80 },
	{ XFER_UDMA_0,     0,   0,   0,   0,   0,   0,   0, 120 },

/*	{ XFER_UDMA_SLOW,  0,   0,   0,   0,   0,   0,   0, 150 }, */

	{ XFER_MW_DMA_2,  25,   0,   0,   0,  70,  25, 120,   0 },
	{ XFER_MW_DMA_1,  45,   0,   0,   0,  80,  50, 150,   0 },
	{ XFER_MW_DMA_0,  60,   0,   0,   0, 215, 215, 480,   0 },

	{ XFER_SW_DMA_2,  60,   0,   0,   0, 120, 120, 240,   0 },
	{ XFER_SW_DMA_1,  90,   0,   0,   0, 240, 240, 480,   0 },
	{ XFER_SW_DMA_0, 120,   0,   0,   0, 480, 480, 960,   0 },

	{ XFER_PIO_6,     10,  55,  20,  80,  55,  20,  80,   0 },
	{ XFER_PIO_5,     15,  65,  25, 100,  65,  25, 100,   0 },
	{ XFER_PIO_4,     25,  70,  25, 120,  70,  25, 120,   0 },
	{ XFER_PIO_3,     30,  80,  70, 180,  80,  70, 180,   0 },

	{ XFER_PIO_2,     30, 290,  40, 330, 100,  90, 240,   0 },
	{ XFER_PIO_1,     50, 290,  93, 383, 125, 100, 383,   0 },
	{ XFER_PIO_0,     70, 290, 240, 600, 165, 150, 600,   0 },

/*	{ XFER_PIO_SLOW, 120, 290, 240, 960, 290, 240, 960,   0 }, */

	{ 0xFF }
};
#define ENOUGH(v,unit)		(((v)-1)/(unit)+1)
#define EZ(v,unit)		((v)?ENOUGH(v,unit):0)
static void ata_timing_quantize(const struct ata_timing *t, struct ata_timing *q, int T, int UT)
{
	q->setup   = EZ(t->setup   * 1000,  T);
	q->act8b   = EZ(t->act8b   * 1000,  T);
	q->rec8b   = EZ(t->rec8b   * 1000,  T);
	q->cyc8b   = EZ(t->cyc8b   * 1000,  T);
	q->active  = EZ(t->active  * 1000,  T);
	q->recover = EZ(t->recover * 1000,  T);
	q->cycle   = EZ(t->cycle   * 1000,  T);
	q->udma    = EZ(t->udma    * 1000, UT);
}
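
/*
 * Worked example (illustrative): for a 33 MHz bus, a caller would pass
 * T = 1000000000 / 33333 = 30000, the clock period in the same
 * 1000x-scaled nanosecond units used above.  A PIO4 cycle of 120 ns
 * then quantizes to EZ(120 * 1000, 30000) = (120000 - 1) / 30000 + 1 =
 * 4 clocks.  ENOUGH() always rounds up, so the quantized timing never
 * undershoots the spec minimum.
 */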
void ata_timing_merge(const struct ata_timing *a, const struct ata_timing *b,
		      struct ata_timing *m, unsigned int what)
{
	if (what & ATA_TIMING_SETUP  ) m->setup   = max(a->setup,   b->setup);
	if (what & ATA_TIMING_ACT8B  ) m->act8b   = max(a->act8b,   b->act8b);
	if (what & ATA_TIMING_REC8B  ) m->rec8b   = max(a->rec8b,   b->rec8b);
	if (what & ATA_TIMING_CYC8B  ) m->cyc8b   = max(a->cyc8b,   b->cyc8b);
	if (what & ATA_TIMING_ACTIVE ) m->active  = max(a->active,  b->active);
	if (what & ATA_TIMING_RECOVER) m->recover = max(a->recover, b->recover);
	if (what & ATA_TIMING_CYCLE  ) m->cycle   = max(a->cycle,   b->cycle);
	if (what & ATA_TIMING_UDMA   ) m->udma    = max(a->udma,    b->udma);
}
static const struct ata_timing* ata_timing_find_mode(unsigned short speed)
{
	const struct ata_timing *t;

	for (t = ata_timing; t->mode != speed; t++)
		if (t->mode == 0xFF)
			return NULL;
	return t;
}
int ata_timing_compute(struct ata_device *adev, unsigned short speed,
		       struct ata_timing *t, int T, int UT)
{
	const struct ata_timing *s;
	struct ata_timing p;

	/*
	 * Find the mode.
	 */

	if (!(s = ata_timing_find_mode(speed)))
		return -EINVAL;

	memcpy(t, s, sizeof(*s));

	/*
	 * If the drive is an EIDE drive, it can tell us it needs extended
	 * PIO/MW_DMA cycle timing.
	 */

	if (adev->id[ATA_ID_FIELD_VALID] & 2) {	/* EIDE drive */
		memset(&p, 0, sizeof(p));
		if (speed >= XFER_PIO_0 && speed <= XFER_SW_DMA_0) {
			if (speed <= XFER_PIO_2)
				p.cycle = p.cyc8b = adev->id[ATA_ID_EIDE_PIO];
			else
				p.cycle = p.cyc8b = adev->id[ATA_ID_EIDE_PIO_IORDY];
		} else if (speed >= XFER_MW_DMA_0 && speed <= XFER_MW_DMA_2) {
			p.cycle = adev->id[ATA_ID_EIDE_DMA_MIN];
		}
		ata_timing_merge(&p, t, t, ATA_TIMING_CYCLE | ATA_TIMING_CYC8B);
	}

	/*
	 * Convert the timing to bus clock counts.
	 */

	ata_timing_quantize(t, t, T, UT);

	/*
	 * Even in DMA/UDMA modes we still use PIO access for IDENTIFY,
	 * S.M.A.R.T and some other commands. We have to ensure that the
	 * DMA cycle timing is slower/equal than the fastest PIO timing.
	 */

	if (speed > XFER_PIO_4) {
		ata_timing_compute(adev, adev->pio_mode, &p, T, UT);
		ata_timing_merge(&p, t, t, ATA_TIMING_ALL);
	}

	/*
	 * Lengthen active & recovery time so that cycle time is correct.
	 */

	if (t->act8b + t->rec8b < t->cyc8b) {
		t->act8b += (t->cyc8b - (t->act8b + t->rec8b)) / 2;
		t->rec8b = t->cyc8b - t->act8b;
	}

	if (t->active + t->recover < t->cycle) {
		t->active += (t->cycle - (t->active + t->recover)) / 2;
		t->recover = t->cycle - t->active;
	}

	return 0;
}
/**
 *	ata_down_xfermask_limit - adjust dev xfer masks downward
 *	@dev: Device to adjust xfer masks
 *	@force_pio0: Force PIO0
 *
 *	Adjust xfer masks of @dev downward.  Note that this function
 *	does not apply the change.  Invoking ata_set_mode() afterwards
 *	will apply the limit.
 *
 *	LOCKING:
 *	Inherited from caller.
 *
 *	RETURNS:
 *	0 on success, negative errno on failure
 */
int ata_down_xfermask_limit(struct ata_device *dev, int force_pio0)
{
	unsigned long xfer_mask;
	int highbit;

	xfer_mask = ata_pack_xfermask(dev->pio_mask, dev->mwdma_mask,
				      dev->udma_mask);

	if (!xfer_mask)
		goto fail;
	/* don't gear down to MWDMA from UDMA, go directly to PIO */
	if (xfer_mask & ATA_MASK_UDMA)
		xfer_mask &= ~ATA_MASK_MWDMA;

	highbit = fls(xfer_mask) - 1;
	xfer_mask &= ~(1 << highbit);
	if (force_pio0)
		xfer_mask &= 1 << ATA_SHIFT_PIO;
	if (!xfer_mask)
		goto fail;

	ata_unpack_xfermask(xfer_mask, &dev->pio_mask, &dev->mwdma_mask,
			    &dev->udma_mask);

	ata_dev_printk(dev, KERN_WARNING, "limiting speed to %s\n",
		       ata_mode_string(xfer_mask));

	return 0;

 fail:
	return -EINVAL;
}
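
/*
 * Worked example (illustrative): a device at UDMA5 has its highest
 * xfer bit cleared, leaving UDMA4 as the new ceiling.  Because the
 * MWDMA bits are stripped while any UDMA bit remains (and the
 * stripped mask is then written back into the device), repeated
 * calls step UDMA4 -> UDMA3 -> ... -> UDMA0 and then fall straight
 * through to the PIO modes, exactly as the comment above describes.
 */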
static int ata_dev_set_mode(struct ata_device *dev)
{
	unsigned int err_mask;
	int rc;

	dev->flags &= ~ATA_DFLAG_PIO;
	if (dev->xfer_shift == ATA_SHIFT_PIO)
		dev->flags |= ATA_DFLAG_PIO;

	err_mask = ata_dev_set_xfermode(dev);
	if (err_mask) {
		ata_dev_printk(dev, KERN_ERR, "failed to set xfermode "
			       "(err_mask=0x%x)\n", err_mask);
		return -EIO;
	}

	rc = ata_dev_revalidate(dev, 0);
	if (rc)
		return rc;

	DPRINTK("xfer_shift=%u, xfer_mode=0x%x\n",
		dev->xfer_shift, (int)dev->xfer_mode);

	ata_dev_printk(dev, KERN_INFO, "configured for %s\n",
		       ata_mode_string(ata_xfer_mode2mask(dev->xfer_mode)));
	return 0;
}
/**
 *	ata_set_mode - Program timings and issue SET FEATURES - XFER
 *	@ap: port on which timings will be programmed
 *	@r_failed_dev: out parameter for failed device
 *
 *	Set ATA device disk transfer mode (PIO3, UDMA6, etc.).  If
 *	ata_set_mode() fails, pointer to the failing device is
 *	returned in @r_failed_dev.
 *
 *	LOCKING:
 *	PCI/etc. bus probe sem.
 *
 *	RETURNS:
 *	0 on success, negative errno otherwise
 */
int ata_set_mode(struct ata_port *ap, struct ata_device **r_failed_dev)
{
	struct ata_device *dev;
	int i, rc = 0, used_dma = 0, found = 0;

	/* has private set_mode? */
	if (ap->ops->set_mode) {
		/* FIXME: make ->set_mode handle no device case and
		 * return error code and failing device on failure.
		 */
		for (i = 0; i < ATA_MAX_DEVICES; i++) {
			if (ata_dev_ready(&ap->device[i])) {
				ap->ops->set_mode(ap);
				break;
			}
		}
		return 0;
	}

	/* step 1: calculate xfer_mask */
	for (i = 0; i < ATA_MAX_DEVICES; i++) {
		unsigned int pio_mask, dma_mask;

		dev = &ap->device[i];

		if (!ata_dev_enabled(dev))
			continue;

		ata_dev_xfermask(dev);

		pio_mask = ata_pack_xfermask(dev->pio_mask, 0, 0);
		dma_mask = ata_pack_xfermask(0, dev->mwdma_mask, dev->udma_mask);
		dev->pio_mode = ata_xfer_mask2mode(pio_mask);
		dev->dma_mode = ata_xfer_mask2mode(dma_mask);

		found = 1;
		if (dev->dma_mode)
			used_dma = 1;
	}
	if (!found)
		goto out;

	/* step 2: always set host PIO timings */
	for (i = 0; i < ATA_MAX_DEVICES; i++) {
		dev = &ap->device[i];
		if (!ata_dev_enabled(dev))
			continue;

		if (!dev->pio_mode) {
			ata_dev_printk(dev, KERN_WARNING, "no PIO support\n");
			rc = -EINVAL;
			goto out;
		}

		dev->xfer_mode = dev->pio_mode;
		dev->xfer_shift = ATA_SHIFT_PIO;
		if (ap->ops->set_piomode)
			ap->ops->set_piomode(ap, dev);
	}

	/* step 3: set host DMA timings */
	for (i = 0; i < ATA_MAX_DEVICES; i++) {
		dev = &ap->device[i];

		if (!ata_dev_enabled(dev) || !dev->dma_mode)
			continue;

		dev->xfer_mode = dev->dma_mode;
		dev->xfer_shift = ata_xfer_mode2shift(dev->dma_mode);
		if (ap->ops->set_dmamode)
			ap->ops->set_dmamode(ap, dev);
	}

	/* step 4: update devices' xfer mode */
	for (i = 0; i < ATA_MAX_DEVICES; i++) {
		dev = &ap->device[i];

		/* don't update suspended devices' xfer mode */
		if (!ata_dev_ready(dev))
			continue;

		rc = ata_dev_set_mode(dev);
		if (rc)
			goto out;
	}

	/* Record simplex status. If we selected DMA then the other
	 * host channels are not permitted to do so.
	 */
	if (used_dma && (ap->host->flags & ATA_HOST_SIMPLEX))
		ap->host->simplex_claimed = 1;

	/* step 5: chip specific finalisation */
	if (ap->ops->post_set_mode)
		ap->ops->post_set_mode(ap);

 out:
	if (rc)
		*r_failed_dev = dev;
	return rc;
}
/**
 *	ata_tf_to_host - issue ATA taskfile to host controller
 *	@ap: port to which command is being issued
 *	@tf: ATA taskfile register set
 *
 *	Issues ATA taskfile register set to ATA host controller,
 *	with proper synchronization with interrupt handler and
 *	other threads.
 *
 *	LOCKING:
 *	spin_lock_irqsave(host lock)
 */
static inline void ata_tf_to_host(struct ata_port *ap,
				  const struct ata_taskfile *tf)
{
	ap->ops->tf_load(ap, tf);
	ap->ops->exec_command(ap, tf);
}
/**
 *	ata_busy_sleep - sleep until BSY clears, or timeout
 *	@ap: port containing status register to be polled
 *	@tmout_pat: impatience timeout
 *	@tmout: overall timeout
 *
 *	Sleep until ATA Status register bit BSY clears,
 *	or a timeout occurs.
 *
 *	LOCKING:
 *	None.
 */
unsigned int ata_busy_sleep (struct ata_port *ap,
			     unsigned long tmout_pat, unsigned long tmout)
{
	unsigned long timer_start, timeout;
	u8 status;

	status = ata_busy_wait(ap, ATA_BUSY, 300);
	timer_start = jiffies;
	timeout = timer_start + tmout_pat;
	while ((status & ATA_BUSY) && (time_before(jiffies, timeout))) {
		msleep(50);
		status = ata_busy_wait(ap, ATA_BUSY, 3);
	}

	if (status & ATA_BUSY)
		ata_port_printk(ap, KERN_WARNING,
				"port is slow to respond, please be patient\n");

	timeout = timer_start + tmout;
	while ((status & ATA_BUSY) && (time_before(jiffies, timeout))) {
		msleep(50);
		status = ata_chk_status(ap);
	}

	if (status & ATA_BUSY) {
		ata_port_printk(ap, KERN_ERR, "port failed to respond "
				"(%lu secs)\n", tmout / HZ);
		return 1;
	}

	return 0;
}
static void ata_bus_post_reset(struct ata_port *ap, unsigned int devmask)
{
	struct ata_ioports *ioaddr = &ap->ioaddr;
	unsigned int dev0 = devmask & (1 << 0);
	unsigned int dev1 = devmask & (1 << 1);
	unsigned long timeout;

	/* if device 0 was found in ata_devchk, wait for its
	 * BSY bit to clear
	 */
	if (dev0)
		ata_busy_sleep(ap, ATA_TMOUT_BOOT_QUICK, ATA_TMOUT_BOOT);

	/* if device 1 was found in ata_devchk, wait for
	 * register access, then wait for BSY to clear
	 */
	timeout = jiffies + ATA_TMOUT_BOOT;
	while (dev1) {
		u8 nsect, lbal;

		ap->ops->dev_select(ap, 1);
		if (ap->flags & ATA_FLAG_MMIO) {
			nsect = readb((void __iomem *) ioaddr->nsect_addr);
			lbal = readb((void __iomem *) ioaddr->lbal_addr);
		} else {
			nsect = inb(ioaddr->nsect_addr);
			lbal = inb(ioaddr->lbal_addr);
		}
		if ((nsect == 1) && (lbal == 1))
			break;
		if (time_after(jiffies, timeout)) {
			dev1 = 0;
			break;
		}
		msleep(50);	/* give drive a breather */
	}
	if (dev1)
		ata_busy_sleep(ap, ATA_TMOUT_BOOT_QUICK, ATA_TMOUT_BOOT);

	/* is all this really necessary? */
	ap->ops->dev_select(ap, 0);
	if (dev1)
		ap->ops->dev_select(ap, 1);
	if (dev0)
		ap->ops->dev_select(ap, 0);
}
static unsigned int ata_bus_softreset(struct ata_port *ap,
				      unsigned int devmask)
{
	struct ata_ioports *ioaddr = &ap->ioaddr;

	DPRINTK("ata%u: bus reset via SRST\n", ap->id);

	/* software reset.  causes dev0 to be selected */
	if (ap->flags & ATA_FLAG_MMIO) {
		writeb(ap->ctl, (void __iomem *) ioaddr->ctl_addr);
		udelay(20);	/* FIXME: flush */
		writeb(ap->ctl | ATA_SRST, (void __iomem *) ioaddr->ctl_addr);
		udelay(20);	/* FIXME: flush */
		writeb(ap->ctl, (void __iomem *) ioaddr->ctl_addr);
	} else {
		outb(ap->ctl, ioaddr->ctl_addr);
		udelay(10);
		outb(ap->ctl | ATA_SRST, ioaddr->ctl_addr);
		udelay(10);
		outb(ap->ctl, ioaddr->ctl_addr);
	}

	/* spec mandates ">= 2ms" before checking status.
	 * We wait 150ms, because that was the magic delay used for
	 * ATAPI devices in Hale Landis's ATADRVR, for the period of time
	 * between when the ATA command register is written, and then
	 * status is checked.  Because waiting for "a while" before
	 * checking status is fine, post SRST, we perform this magic
	 * delay here as well.
	 *
	 * Old drivers/ide uses the 2 ms rule and then waits for ready.
	 */
	msleep(150);

	/* Before we perform post reset processing we want to see if
	 * the bus shows 0xFF because the odd clown forgets the D7
	 * pulldown resistor.
	 */
	if (ata_check_status(ap) == 0xFF) {
		ata_port_printk(ap, KERN_ERR, "SRST failed (status 0xFF)\n");
		return AC_ERR_OTHER;
	}

	ata_bus_post_reset(ap, devmask);

	return 0;
}
/**
 *	ata_bus_reset - reset host port and associated ATA channel
 *	@ap: port to reset
 *
 *	This is typically the first time we actually start issuing
 *	commands to the ATA channel.  We wait for BSY to clear, then
 *	issue EXECUTE DEVICE DIAGNOSTIC command, polling for its
 *	result.  Determine what devices, if any, are on the channel
 *	by looking at the device 0/1 error register.  Look at the signature
 *	stored in each device's taskfile registers, to determine if
 *	the device is ATA or ATAPI.
 *
 *	LOCKING:
 *	PCI/etc. bus probe sem.
 *	Obtains host lock.
 *
 *	SIDE EFFECTS:
 *	Sets ATA_FLAG_DISABLED if bus reset fails.
 */
void ata_bus_reset(struct ata_port *ap)
{
	struct ata_ioports *ioaddr = &ap->ioaddr;
	unsigned int slave_possible = ap->flags & ATA_FLAG_SLAVE_POSS;
	u8 err;
	unsigned int dev0, dev1 = 0, devmask = 0;

	DPRINTK("ENTER, host %u, port %u\n", ap->id, ap->port_no);

	/* determine if device 0/1 are present */
	if (ap->flags & ATA_FLAG_SATA_RESET)
		dev0 = 1;
	else {
		dev0 = ata_devchk(ap, 0);
		if (slave_possible)
			dev1 = ata_devchk(ap, 1);
	}

	if (dev0)
		devmask |= (1 << 0);
	if (dev1)
		devmask |= (1 << 1);

	/* select device 0 again */
	ap->ops->dev_select(ap, 0);

	/* issue bus reset */
	if (ap->flags & ATA_FLAG_SRST)
		if (ata_bus_softreset(ap, devmask))
			goto err_out;

	/*
	 * determine by signature whether we have ATA or ATAPI devices
	 */
	ap->device[0].class = ata_dev_try_classify(ap, 0, &err);
	if ((slave_possible) && (err != 0x81))
		ap->device[1].class = ata_dev_try_classify(ap, 1, &err);

	/* re-enable interrupts */
	if (ap->ioaddr.ctl_addr)	/* FIXME: hack. create a hook instead */
		ata_irq_on(ap);

	/* is double-select really necessary? */
	if (ap->device[1].class != ATA_DEV_NONE)
		ap->ops->dev_select(ap, 1);
	if (ap->device[0].class != ATA_DEV_NONE)
		ap->ops->dev_select(ap, 0);

	/* if no devices were detected, disable this port */
	if ((ap->device[0].class == ATA_DEV_NONE) &&
	    (ap->device[1].class == ATA_DEV_NONE))
		goto err_out;

	if (ap->flags & (ATA_FLAG_SATA_RESET | ATA_FLAG_SRST)) {
		/* set up device control for ATA_FLAG_SATA_RESET */
		if (ap->flags & ATA_FLAG_MMIO)
			writeb(ap->ctl, (void __iomem *) ioaddr->ctl_addr);
		else
			outb(ap->ctl, ioaddr->ctl_addr);
	}

	DPRINTK("EXIT\n");
	return;

err_out:
	ata_port_printk(ap, KERN_ERR, "disabling port\n");
	ap->ops->port_disable(ap);

	DPRINTK("EXIT\n");
}
/**
 *	sata_phy_debounce - debounce SATA phy status
 *	@ap: ATA port to debounce SATA phy status for
 *	@params: timing parameters { interval, duration, timeout } in msec
 *
 *	Make sure SStatus of @ap reaches stable state, determined by
 *	holding the same value where DET is not 1 for @duration polled
 *	every @interval, before @timeout.  Timeout constrains the
 *	beginning of the stable state.  Because, after hot unplugging,
 *	DET gets stuck at 1 on some controllers, this function waits
 *	until timeout then returns 0 if DET is stable at 1.
 *
 *	LOCKING:
 *	Kernel thread context (may sleep)
 *
 *	RETURNS:
 *	0 on success, -errno on failure.
 */
2549 int sata_phy_debounce(struct ata_port *ap, const unsigned long *params)
2551 unsigned long interval_msec = params[0];
2552 unsigned long duration = params[1] * HZ / 1000;
2553 unsigned long timeout = jiffies + params[2] * HZ / 1000;
2554 unsigned long last_jiffies;
2558 if ((rc = sata_scr_read(ap, SCR_STATUS, &cur)))
2563 last_jiffies = jiffies;
2566 msleep(interval_msec);
2567 if ((rc = sata_scr_read(ap, SCR_STATUS, &cur)))
2573 if (cur == 1 && time_before(jiffies, timeout))
2575 if (time_after(jiffies, last_jiffies + duration))
2580 /* unstable, start over */
2582 last_jiffies = jiffies;
2585 if (time_after(jiffies, timeout))
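/* Illustrative sketch (not part of the original file): a caller that
 * debounces the phy after a hotplug event using the exported timing
 * tables above, then reads back the stable SStatus value.
 *
 *	u32 sstatus;
 *
 *	if (sata_phy_debounce(ap, sata_deb_timing_hotplug) == 0 &&
 *	    sata_scr_read(ap, SCR_STATUS, &sstatus) == 0)
 *		DPRINTK("link stable, SStatus 0x%x\n", sstatus);
 */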
2591 * sata_phy_resume - resume SATA phy
2592 * @ap: ATA port to resume SATA phy for
2593 * @params: timing parameters { interval, duration, timeout } in msec
2595 * Resume SATA phy of @ap and debounce it.
2598 * Kernel thread context (may sleep)
2601 * 0 on success, -errno on failure.
2603 int sata_phy_resume(struct ata_port *ap, const unsigned long *params)
2608 if ((rc = sata_scr_read(ap, SCR_CONTROL, &scontrol)))
2611 scontrol = (scontrol & 0x0f0) | 0x300;
2613 if ((rc = sata_scr_write(ap, SCR_CONTROL, scontrol)))
2616 /* Some PHYs react badly if SStatus is pounded immediately
2617 * after resuming. Delay 200ms before debouncing.
2621 return sata_phy_debounce(ap, params);
2624 static void ata_wait_spinup(struct ata_port *ap)
2626 struct ata_eh_context *ehc = &ap->eh_context;
2627 unsigned long end, secs;
2630 /* first, debounce phy if SATA */
2631 if (ap->cbl == ATA_CBL_SATA) {
2632 rc = sata_phy_debounce(ap, sata_deb_timing_hotplug);
2634 /* if debounced successfully and offline, no need to wait */
2635 if ((rc == 0 || rc == -EOPNOTSUPP) && ata_port_offline(ap))
2639 /* okay, let's give the drive time to spin up */
2640 end = ehc->i.hotplug_timestamp + ATA_SPINUP_WAIT * HZ / 1000;
2641 secs = ((end - jiffies) + HZ - 1) / HZ;
2643 if (time_after(jiffies, end))
2647 ata_port_printk(ap, KERN_INFO, "waiting for device to spin up "
2648 "(%lu secs)\n", secs);
2650 schedule_timeout_uninterruptible(end - jiffies);
2654 * ata_std_prereset - prepare for reset
2655 * @ap: ATA port to be reset
2657 * @ap is about to be reset. Initialize it.
2660 * Kernel thread context (may sleep)
2663 * 0 on success, -errno otherwise.
2665 int ata_std_prereset(struct ata_port *ap)
2667 struct ata_eh_context *ehc = &ap->eh_context;
2668 const unsigned long *timing = sata_ehc_deb_timing(ehc);
2671 /* handle link resume & hotplug spinup */
2672 if ((ehc->i.flags & ATA_EHI_RESUME_LINK) &&
2673 (ap->flags & ATA_FLAG_HRST_TO_RESUME))
2674 ehc->i.action |= ATA_EH_HARDRESET;
2676 if ((ehc->i.flags & ATA_EHI_HOTPLUGGED) &&
2677 (ap->flags & ATA_FLAG_SKIP_D2H_BSY))
2678 ata_wait_spinup(ap);
2680 /* if we're about to do hardreset, nothing more to do */
2681 if (ehc->i.action & ATA_EH_HARDRESET)
2684 /* if SATA, resume phy */
2685 if (ap->cbl == ATA_CBL_SATA) {
2686 rc = sata_phy_resume(ap, timing);
2687 if (rc && rc != -EOPNOTSUPP) {
2688 /* phy resume failed */
2689 ata_port_printk(ap, KERN_WARNING, "failed to resume "
2690 "link for reset (errno=%d)\n", rc);
2695 /* Wait for !BSY if the controller can wait for the first D2H
2696 * Reg FIS and we don't know that no device is attached.
2698 if (!(ap->flags & ATA_FLAG_SKIP_D2H_BSY) && !ata_port_offline(ap))
2699 ata_busy_sleep(ap, ATA_TMOUT_BOOT_QUICK, ATA_TMOUT_BOOT);
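/* Illustrative sketch (not part of the original file): an LLDD that
 * needs no special handling typically feeds the four standard reset
 * callbacks straight to the EH core; ata_do_eh() from libata-eh is
 * assumed here.
 *
 *	static void my_error_handler(struct ata_port *ap)
 *	{
 *		ata_do_eh(ap, ata_std_prereset, ata_std_softreset,
 *			  sata_std_hardreset, ata_std_postreset);
 *	}
 */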
2705 * ata_std_softreset - reset host port via ATA SRST
2706 * @ap: port to reset
2707 * @classes: resulting classes of attached devices
2709 * Reset host port using ATA SRST.
2712 * Kernel thread context (may sleep)
2715 * 0 on success, -errno otherwise.
2717 int ata_std_softreset(struct ata_port *ap, unsigned int *classes)
2719 unsigned int slave_possible = ap->flags & ATA_FLAG_SLAVE_POSS;
2720 unsigned int devmask = 0, err_mask;
2725 if (ata_port_offline(ap)) {
2726 classes[0] = ATA_DEV_NONE;
2730 /* determine if device 0/1 are present */
2731 if (ata_devchk(ap, 0))
2732 devmask |= (1 << 0);
2733 if (slave_possible && ata_devchk(ap, 1))
2734 devmask |= (1 << 1);
2736 /* select device 0 again */
2737 ap->ops->dev_select(ap, 0);
2739 /* issue bus reset */
2740 DPRINTK("about to softreset, devmask=%x\n", devmask);
2741 err_mask = ata_bus_softreset(ap, devmask);
2743 ata_port_printk(ap, KERN_ERR, "SRST failed (err_mask=0x%x)\n",
2748 /* determine by signature whether we have ATA or ATAPI devices */
2749 classes[0] = ata_dev_try_classify(ap, 0, &err);
2750 if (slave_possible && err != 0x81)
2751 classes[1] = ata_dev_try_classify(ap, 1, &err);
2754 DPRINTK("EXIT, classes[0]=%u [1]=%u\n", classes[0], classes[1]);
2759 * sata_std_hardreset - reset host port via SATA phy reset
2760 * @ap: port to reset
2761 * @class: resulting class of attached device
2763 * SATA phy-reset host port using DET bits of SControl register.
2766 * Kernel thread context (may sleep)
2769 * 0 on success, -errno otherwise.
2771 int sata_std_hardreset(struct ata_port *ap, unsigned int *class)
2773 struct ata_eh_context *ehc = &ap->eh_context;
2774 const unsigned long *timing = sata_ehc_deb_timing(ehc);
2780 if (sata_set_spd_needed(ap)) {
2781 /* SATA spec says nothing about how to reconfigure
2782 * spd. To be on the safe side, turn off phy during
2783 * reconfiguration. This works for at least ICH7 AHCI
2786 if ((rc = sata_scr_read(ap, SCR_CONTROL, &scontrol)))
2789 scontrol = (scontrol & 0x0f0) | 0x304;
2791 if ((rc = sata_scr_write(ap, SCR_CONTROL, scontrol)))
2797 /* issue phy wake/reset */
2798 if ((rc = sata_scr_read(ap, SCR_CONTROL, &scontrol)))
2801 scontrol = (scontrol & 0x0f0) | 0x301;
2803 if ((rc = sata_scr_write_flush(ap, SCR_CONTROL, scontrol)))
2806 /* Couldn't find anything in SATA I/II specs, but AHCI-1.1
2807 * 10.4.2 says at least 1 ms.
2811 /* bring phy back */
2812 sata_phy_resume(ap, timing);
2814 /* TODO: phy layer with polling, timeouts, etc. */
2815 if (ata_port_offline(ap)) {
2816 *class = ATA_DEV_NONE;
2817 DPRINTK("EXIT, link offline\n");
2821 if (ata_busy_sleep(ap, ATA_TMOUT_BOOT_QUICK, ATA_TMOUT_BOOT)) {
2822 ata_port_printk(ap, KERN_ERR,
2823 "COMRESET failed (device not ready)\n");
2827 ap->ops->dev_select(ap, 0); /* probably unnecessary */
2829 *class = ata_dev_try_classify(ap, 0, NULL);
2831 DPRINTK("EXIT, class=%u\n", *class);
2836 * ata_std_postreset - standard postreset callback
2837 * @ap: the target ata_port
2838 * @classes: classes of attached devices
2840 * This function is invoked after a successful reset. Note that
2841 * the device might have been reset more than once using
2842 * different reset methods before postreset is invoked.
2845 * Kernel thread context (may sleep)
2847 void ata_std_postreset(struct ata_port *ap, unsigned int *classes)
2853 /* print link status */
2854 sata_print_link_status(ap);
2857 if (sata_scr_read(ap, SCR_ERROR, &serror) == 0)
2858 sata_scr_write(ap, SCR_ERROR, serror);
2860 /* re-enable interrupts */
2861 if (!ap->ops->error_handler) {
2862 /* FIXME: hack. create a hook instead */
2863 if (ap->ioaddr.ctl_addr)
2867 /* is double-select really necessary? */
2868 if (classes[0] != ATA_DEV_NONE)
2869 ap->ops->dev_select(ap, 1);
2870 if (classes[1] != ATA_DEV_NONE)
2871 ap->ops->dev_select(ap, 0);
2873 /* bail out if no device is present */
2874 if (classes[0] == ATA_DEV_NONE && classes[1] == ATA_DEV_NONE) {
2875 DPRINTK("EXIT, no device\n");
2879 /* set up device control */
2880 if (ap->ioaddr.ctl_addr) {
2881 if (ap->flags & ATA_FLAG_MMIO)
2882 writeb(ap->ctl, (void __iomem *) ap->ioaddr.ctl_addr);
2884 outb(ap->ctl, ap->ioaddr.ctl_addr);
2891 * ata_dev_same_device - Determine whether new ID matches configured device
2892 * @dev: device to compare against
2893 * @new_class: class of the new device
2894 * @new_id: IDENTIFY page of the new device
2896 * Compare @new_class and @new_id against @dev and determine
2897 * whether @dev is the device indicated by @new_class and @new_id.
2904 * 1 if @dev matches @new_class and @new_id, 0 otherwise.
2906 static int ata_dev_same_device(struct ata_device *dev, unsigned int new_class,
2909 const u16 *old_id = dev->id;
2910 unsigned char model[2][41], serial[2][21];
2913 if (dev->class != new_class) {
2914 ata_dev_printk(dev, KERN_INFO, "class mismatch %d != %d\n",
2915 dev->class, new_class);
2919 ata_id_c_string(old_id, model[0], ATA_ID_PROD_OFS, sizeof(model[0]));
2920 ata_id_c_string(new_id, model[1], ATA_ID_PROD_OFS, sizeof(model[1]));
2921 ata_id_c_string(old_id, serial[0], ATA_ID_SERNO_OFS, sizeof(serial[0]));
2922 ata_id_c_string(new_id, serial[1], ATA_ID_SERNO_OFS, sizeof(serial[1]));
2923 new_n_sectors = ata_id_n_sectors(new_id);
2925 if (strcmp(model[0], model[1])) {
2926 ata_dev_printk(dev, KERN_INFO, "model number mismatch "
2927 "'%s' != '%s'\n", model[0], model[1]);
2931 if (strcmp(serial[0], serial[1])) {
2932 ata_dev_printk(dev, KERN_INFO, "serial number mismatch "
2933 "'%s' != '%s'\n", serial[0], serial[1]);
2937 if (dev->class == ATA_DEV_ATA && dev->n_sectors != new_n_sectors) {
2938 ata_dev_printk(dev, KERN_INFO, "n_sectors mismatch "
2940 (unsigned long long)dev->n_sectors,
2941 (unsigned long long)new_n_sectors);
2949 * ata_dev_revalidate - Revalidate ATA device
2950 * @dev: device to revalidate
2951 * @post_reset: is this revalidation after reset?
2953 * Re-read IDENTIFY page and make sure @dev is still attached to
2957 * Kernel thread context (may sleep)
2960 * 0 on success, negative errno otherwise
2962 int ata_dev_revalidate(struct ata_device *dev, int post_reset)
2964 unsigned int class = dev->class;
2965 u16 *id = (void *)dev->ap->sector_buf;
2968 if (!ata_dev_enabled(dev)) {
2974 rc = ata_dev_read_id(dev, &class, post_reset, id);
2978 /* is the device still there? */
2979 if (!ata_dev_same_device(dev, class, id)) {
2984 memcpy(dev->id, id, sizeof(id[0]) * ATA_ID_WORDS);
2986 /* configure device according to the new ID */
2987 rc = ata_dev_configure(dev, 0);
2992 ata_dev_printk(dev, KERN_ERR, "revalidation failed (errno=%d)\n", rc);
2996 static const char * const ata_dma_blacklist [] = {
2997 "WDC AC11000H", NULL,
2998 "WDC AC22100H", NULL,
2999 "WDC AC32500H", NULL,
3000 "WDC AC33100H", NULL,
3001 "WDC AC31600H", NULL,
3002 "WDC AC32100H", "24.09P07",
3003 "WDC AC23200L", "21.10N21",
3004 "Compaq CRD-8241B", NULL,
3009 "SanDisk SDP3B", NULL,
3010 "SanDisk SDP3B-64", NULL,
3011 "SANYO CD-ROM CRD", NULL,
3012 "HITACHI CDR-8", NULL,
3013 "HITACHI CDR-8335", NULL,
3014 "HITACHI CDR-8435", NULL,
3015 "Toshiba CD-ROM XM-6202B", NULL,
3016 "TOSHIBA CD-ROM XM-1702BC", NULL,
3018 "E-IDE CD-ROM CR-840", NULL,
3019 "CD-ROM Drive/F5A", NULL,
3020 "WPI CDD-820", NULL,
3021 "SAMSUNG CD-ROM SC-148C", NULL,
3022 "SAMSUNG CD-ROM SC", NULL,
3023 "SanDisk SDP3B-64", NULL,
3024 "ATAPI CD-ROM DRIVE 40X MAXIMUM",NULL,
3025 "_NEC DV5800A", NULL,
3026 "SAMSUNG CD-ROM SN-124", "N001"
3029 static int ata_strim(char *s, size_t len)
3031 len = strnlen(s, len);
3033 /* ATAPI specifies that empty space is blank-filled; remove blanks */
3034 while ((len > 0) && (s[len - 1] == ' ')) {
3041 static int ata_dma_blacklisted(const struct ata_device *dev)
3043 unsigned char model_num[40];
3044 unsigned char model_rev[16];
3045 unsigned int nlen, rlen;
3048 /* We don't support polling DMA.
3049 * Blacklist DMA (and use PIO instead) for those ATAPI devices with
3050 * CDB-intr if the LLDD handles interrupts only in the HSM_ST_LAST state.
3052 if ((dev->ap->flags & ATA_FLAG_PIO_POLLING) &&
3053 (dev->flags & ATA_DFLAG_CDB_INTR))
3056 ata_id_string(dev->id, model_num, ATA_ID_PROD_OFS,
3058 ata_id_string(dev->id, model_rev, ATA_ID_FW_REV_OFS,
3060 nlen = ata_strim(model_num, sizeof(model_num));
3061 rlen = ata_strim(model_rev, sizeof(model_rev));
3063 for (i = 0; i < ARRAY_SIZE(ata_dma_blacklist); i += 2) {
3064 if (!strncmp(ata_dma_blacklist[i], model_num, nlen)) {
3065 if (ata_dma_blacklist[i+1] == NULL)
3067 if (!strncmp(ata_dma_blacklist[i+1], model_rev, rlen))
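/* Illustrative note (not part of the original file): the blacklist is
 * a flat array of { model, firmware-revision } pairs, which is why the
 * loop above steps by two.  A NULL revision blacklists every firmware
 * of that model; hypothetical additions would look like:
 *
 *	"MYDISK 1234",		"1.00",		rev 1.00 only
 *	"MYCDROM XYZ",		NULL,		all revisions
 */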
3075 * ata_dev_xfermask - Compute supported xfermask of the given device
3076 * @dev: Device to compute xfermask for
3078 * Compute supported xfermask of @dev and store it in
3079 * dev->*_mask. This function is responsible for applying all
3080 * known limits including host controller limits, device
3086 static void ata_dev_xfermask(struct ata_device *dev)
3088 struct ata_port *ap = dev->ap;
3089 struct ata_host *host = ap->host;
3090 unsigned long xfer_mask;
3092 /* controller modes available */
3093 xfer_mask = ata_pack_xfermask(ap->pio_mask,
3094 ap->mwdma_mask, ap->udma_mask);
3096 /* Apply cable rule here. Don't apply it early because when
3097 * we handle hot plug the cable type can itself change.
3099 if (ap->cbl == ATA_CBL_PATA40)
3100 xfer_mask &= ~(0xF8 << ATA_SHIFT_UDMA);
3102 xfer_mask &= ata_pack_xfermask(dev->pio_mask,
3103 dev->mwdma_mask, dev->udma_mask);
3104 xfer_mask &= ata_id_xfermask(dev->id);
3107 * CFA Advanced TrueIDE timings are not allowed on a shared cable.
3110 if (ata_dev_pair(dev)) {
3111 /* No PIO5 or PIO6 */
3112 xfer_mask &= ~(0x03 << (ATA_SHIFT_PIO + 5));
3113 /* No MWDMA3 or MWDMA4 */
3114 xfer_mask &= ~(0x03 << (ATA_SHIFT_MWDMA + 3));
3117 if (ata_dma_blacklisted(dev)) {
3118 xfer_mask &= ~(ATA_MASK_MWDMA | ATA_MASK_UDMA);
3119 ata_dev_printk(dev, KERN_WARNING,
3120 "device is on DMA blacklist, disabling DMA\n");
3123 if ((host->flags & ATA_HOST_SIMPLEX) && host->simplex_claimed) {
3124 xfer_mask &= ~(ATA_MASK_MWDMA | ATA_MASK_UDMA);
3125 ata_dev_printk(dev, KERN_WARNING, "simplex DMA is claimed by "
3126 "other device, disabling DMA\n");
3129 if (ap->ops->mode_filter)
3130 xfer_mask = ap->ops->mode_filter(ap, dev, xfer_mask);
3132 ata_unpack_xfermask(xfer_mask, &dev->pio_mask,
3133 &dev->mwdma_mask, &dev->udma_mask);
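/* Illustrative sketch (not part of the original file): the pack/unpack
 * helpers used above fold the three per-type masks into a single
 * xfer_mask so that limits become plain bitwise operations, e.g.
 * stripping the UDMA modes that need an 80-wire cable:
 *
 *	unsigned long mask = ata_pack_xfermask(dev->pio_mask,
 *					       dev->mwdma_mask,
 *					       dev->udma_mask);
 *	mask &= ~(0xF8 << ATA_SHIFT_UDMA);
 *	ata_unpack_xfermask(mask, &dev->pio_mask,
 *			    &dev->mwdma_mask, &dev->udma_mask);
 */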
3137 * ata_dev_set_xfermode - Issue SET FEATURES - XFER MODE command
3138 * @dev: Device to which command will be sent
3140 * Issue SET FEATURES - XFER MODE command to device @dev
3144 * PCI/etc. bus probe sem.
3147 * 0 on success, AC_ERR_* mask otherwise.
3150 static unsigned int ata_dev_set_xfermode(struct ata_device *dev)
3152 struct ata_taskfile tf;
3153 unsigned int err_mask;
3155 /* set up set-features taskfile */
3156 DPRINTK("set features - xfer mode\n");
3158 ata_tf_init(dev, &tf);
3159 tf.command = ATA_CMD_SET_FEATURES;
3160 tf.feature = SETFEATURES_XFER;
3161 tf.flags |= ATA_TFLAG_ISADDR | ATA_TFLAG_DEVICE;
3162 tf.protocol = ATA_PROT_NODATA;
3163 tf.nsect = dev->xfer_mode;
3165 err_mask = ata_exec_internal(dev, &tf, NULL, DMA_NONE, NULL, 0);
3167 DPRINTK("EXIT, err_mask=%x\n", err_mask);
3172 * ata_dev_init_params - Issue INIT DEV PARAMS command
3173 * @dev: Device to which command will be sent
3174 * @heads: Number of heads (taskfile parameter)
3175 * @sectors: Number of sectors (taskfile parameter)
3178 * Kernel thread context (may sleep)
3181 * 0 on success, AC_ERR_* mask otherwise.
3183 static unsigned int ata_dev_init_params(struct ata_device *dev,
3184 u16 heads, u16 sectors)
3186 struct ata_taskfile tf;
3187 unsigned int err_mask;
3189 /* Number of sectors per track 1-255. Number of heads 1-16 */
3190 if (sectors < 1 || sectors > 255 || heads < 1 || heads > 16)
3191 return AC_ERR_INVALID;
3193 /* set up init dev params taskfile */
3194 DPRINTK("init dev params \n");
3196 ata_tf_init(dev, &tf);
3197 tf.command = ATA_CMD_INIT_DEV_PARAMS;
3198 tf.flags |= ATA_TFLAG_ISADDR | ATA_TFLAG_DEVICE;
3199 tf.protocol = ATA_PROT_NODATA;
3201 tf.device |= (heads - 1) & 0x0f; /* max head = num. of heads - 1 */
3203 err_mask = ata_exec_internal(dev, &tf, NULL, DMA_NONE, NULL, 0);
3205 DPRINTK("EXIT, err_mask=%x\n", err_mask);
3210 * ata_sg_clean - Unmap DMA memory associated with command
3211 * @qc: Command containing DMA memory to be released
3213 * Unmap all mapped DMA memory associated with this command.
3216 * spin_lock_irqsave(host lock)
3219 static void ata_sg_clean(struct ata_queued_cmd *qc)
3221 struct ata_port *ap = qc->ap;
3222 struct scatterlist *sg = qc->__sg;
3223 int dir = qc->dma_dir;
3224 void *pad_buf = NULL;
3226 WARN_ON(!(qc->flags & ATA_QCFLAG_DMAMAP));
3227 WARN_ON(sg == NULL);
3229 if (qc->flags & ATA_QCFLAG_SINGLE)
3230 WARN_ON(qc->n_elem > 1);
3232 VPRINTK("unmapping %u sg elements\n", qc->n_elem);
3234 /* if we padded the buffer out to a 32-bit boundary, and data
3235 * xfer direction is from-device, we must copy from the
3236 * pad buffer back into the supplied buffer
3238 if (qc->pad_len && !(qc->tf.flags & ATA_TFLAG_WRITE))
3239 pad_buf = ap->pad + (qc->tag * ATA_DMA_PAD_SZ);
3241 if (qc->flags & ATA_QCFLAG_SG) {
3243 dma_unmap_sg(ap->dev, sg, qc->n_elem, dir);
3244 /* restore last sg */
3245 sg[qc->orig_n_elem - 1].length += qc->pad_len;
3247 struct scatterlist *psg = &qc->pad_sgent;
3248 void *addr = kmap_atomic(psg->page, KM_IRQ0);
3249 memcpy(addr + psg->offset, pad_buf, qc->pad_len);
3250 kunmap_atomic(addr, KM_IRQ0);
3254 dma_unmap_single(ap->dev,
3255 sg_dma_address(&sg[0]), sg_dma_len(&sg[0]),
3258 sg->length += qc->pad_len;
3260 memcpy(qc->buf_virt + sg->length - qc->pad_len,
3261 pad_buf, qc->pad_len);
3264 qc->flags &= ~ATA_QCFLAG_DMAMAP;
3269 * ata_fill_sg - Fill PCI IDE PRD table
3270 * @qc: Metadata associated with taskfile to be transferred
3272 * Fill PCI IDE PRD (scatter-gather) table with segments
3273 * associated with the current disk command.
3276 * spin_lock_irqsave(host lock)
3279 static void ata_fill_sg(struct ata_queued_cmd *qc)
3281 struct ata_port *ap = qc->ap;
3282 struct scatterlist *sg;
3285 WARN_ON(qc->__sg == NULL);
3286 WARN_ON(qc->n_elem == 0 && qc->pad_len == 0);
3289 ata_for_each_sg(sg, qc) {
3293 /* determine if physical DMA addr spans 64K boundary.
3294 * Note h/w doesn't support 64-bit, so we unconditionally
3295 * truncate dma_addr_t to u32.
3297 addr = (u32) sg_dma_address(sg);
3298 sg_len = sg_dma_len(sg);
3301 offset = addr & 0xffff;
3303 if ((offset + sg_len) > 0x10000)
3304 len = 0x10000 - offset;
3306 ap->prd[idx].addr = cpu_to_le32(addr);
3307 ap->prd[idx].flags_len = cpu_to_le32(len & 0xffff);
3308 VPRINTK("PRD[%u] = (0x%X, 0x%X)\n", idx, addr, len);
3317 ap->prd[idx - 1].flags_len |= cpu_to_le32(ATA_PRD_EOT);
3320 * ata_check_atapi_dma - Check whether ATAPI DMA can be supported
3321 * @qc: Metadata associated with taskfile to check
3323 * Allow low-level driver to filter ATA PACKET commands, returning
3324 * a status indicating whether or not it is OK to use DMA for the
3325 * supplied PACKET command.
3328 * spin_lock_irqsave(host lock)
3330 * RETURNS: 0 when ATAPI DMA can be used
3333 int ata_check_atapi_dma(struct ata_queued_cmd *qc)
3335 struct ata_port *ap = qc->ap;
3336 int rc = 0; /* Assume ATAPI DMA is OK by default */
3338 if (ap->ops->check_atapi_dma)
3339 rc = ap->ops->check_atapi_dma(qc);
3344 * ata_qc_prep - Prepare taskfile for submission
3345 * @qc: Metadata associated with taskfile to be prepared
3347 * Prepare ATA taskfile for submission.
3350 * spin_lock_irqsave(host lock)
3352 void ata_qc_prep(struct ata_queued_cmd *qc)
3354 if (!(qc->flags & ATA_QCFLAG_DMAMAP))
3360 void ata_noop_qc_prep(struct ata_queued_cmd *qc) { }
3363 * ata_sg_init_one - Associate command with memory buffer
3364 * @qc: Command to be associated
3365 * @buf: Memory buffer
3366 * @buflen: Length of memory buffer, in bytes.
3368 * Initialize the data-related elements of queued_cmd @qc
3369 * to point to a single memory buffer, @buf of byte length @buflen.
3372 * spin_lock_irqsave(host lock)
3375 void ata_sg_init_one(struct ata_queued_cmd *qc, void *buf, unsigned int buflen)
3377 struct scatterlist *sg;
3379 qc->flags |= ATA_QCFLAG_SINGLE;
3381 memset(&qc->sgent, 0, sizeof(qc->sgent));
3382 qc->__sg = &qc->sgent;
3384 qc->orig_n_elem = 1;
3386 qc->nbytes = buflen;
3389 sg_init_one(sg, buf, buflen);
3393 * ata_sg_init - Associate command with scatter-gather table.
3394 * @qc: Command to be associated
3395 * @sg: Scatter-gather table.
3396 * @n_elem: Number of elements in s/g table.
3398 * Initialize the data-related elements of queued_cmd @qc
3399 * to point to a scatter-gather table @sg, containing @n_elem
3403 * spin_lock_irqsave(host lock)
3406 void ata_sg_init(struct ata_queued_cmd *qc, struct scatterlist *sg,
3407 unsigned int n_elem)
3409 qc->flags |= ATA_QCFLAG_SG;
3411 qc->n_elem = n_elem;
3412 qc->orig_n_elem = n_elem;
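/* Illustrative sketch (not part of the original file): callers pick
 * one of the two init helpers above depending on how the data arrives.
 * For a single linear buffer (e.g. an internal command):
 *
 *	ata_sg_init_one(qc, buf, buflen);
 *	qc->dma_dir = DMA_FROM_DEVICE;
 *
 * For a scatterlist handed down by the midlayer, assuming a qc built
 * from a SCSI command in this kernel generation:
 *
 *	ata_sg_init(qc, cmd->request_buffer, cmd->use_sg);
 */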
3416 * ata_sg_setup_one - DMA-map the memory buffer associated with a command.
3417 * @qc: Command with memory buffer to be mapped.
3419 * DMA-map the memory buffer associated with queued_cmd @qc.
3422 * spin_lock_irqsave(host lock)
3425 * Zero on success, negative on error.
3428 static int ata_sg_setup_one(struct ata_queued_cmd *qc)
3430 struct ata_port *ap = qc->ap;
3431 int dir = qc->dma_dir;
3432 struct scatterlist *sg = qc->__sg;
3433 dma_addr_t dma_address;
3436 /* we must lengthen transfers to end on a 32-bit boundary */
3437 qc->pad_len = sg->length & 3;
3439 void *pad_buf = ap->pad + (qc->tag * ATA_DMA_PAD_SZ);
3440 struct scatterlist *psg = &qc->pad_sgent;
3442 WARN_ON(qc->dev->class != ATA_DEV_ATAPI);
3444 memset(pad_buf, 0, ATA_DMA_PAD_SZ);
3446 if (qc->tf.flags & ATA_TFLAG_WRITE)
3447 memcpy(pad_buf, qc->buf_virt + sg->length - qc->pad_len,
3450 sg_dma_address(psg) = ap->pad_dma + (qc->tag * ATA_DMA_PAD_SZ);
3451 sg_dma_len(psg) = ATA_DMA_PAD_SZ;
3453 sg->length -= qc->pad_len;
3454 if (sg->length == 0)
3457 DPRINTK("padding done, sg->length=%u pad_len=%u\n",
3458 sg->length, qc->pad_len);
3466 dma_address = dma_map_single(ap->dev, qc->buf_virt,
3468 if (dma_mapping_error(dma_address)) {
3470 sg->length += qc->pad_len;
3474 sg_dma_address(sg) = dma_address;
3475 sg_dma_len(sg) = sg->length;
3478 DPRINTK("mapped buffer of %d bytes for %s\n", sg_dma_len(sg),
3479 qc->tf.flags & ATA_TFLAG_WRITE ? "write" : "read");
3485 * ata_sg_setup - DMA-map the scatter-gather table associated with a command.
3486 * @qc: Command with scatter-gather table to be mapped.
3488 * DMA-map the scatter-gather table associated with queued_cmd @qc.
3491 * spin_lock_irqsave(host lock)
3494 * Zero on success, negative on error.
3498 static int ata_sg_setup(struct ata_queued_cmd *qc)
3500 struct ata_port *ap = qc->ap;
3501 struct scatterlist *sg = qc->__sg;
3502 struct scatterlist *lsg = &sg[qc->n_elem - 1];
3503 int n_elem, pre_n_elem, dir, trim_sg = 0;
3505 VPRINTK("ENTER, ata%u\n", ap->id);
3506 WARN_ON(!(qc->flags & ATA_QCFLAG_SG));
3508 /* we must lengthen transfers to end on a 32-bit boundary */
3509 qc->pad_len = lsg->length & 3;
3511 void *pad_buf = ap->pad + (qc->tag * ATA_DMA_PAD_SZ);
3512 struct scatterlist *psg = &qc->pad_sgent;
3513 unsigned int offset;
3515 WARN_ON(qc->dev->class != ATA_DEV_ATAPI);
3517 memset(pad_buf, 0, ATA_DMA_PAD_SZ);
3520 * psg->page/offset are used to copy to-be-written
3521 * data in this function or read data in ata_sg_clean.
3523 offset = lsg->offset + lsg->length - qc->pad_len;
3524 psg->page = nth_page(lsg->page, offset >> PAGE_SHIFT);
3525 psg->offset = offset_in_page(offset);
3527 if (qc->tf.flags & ATA_TFLAG_WRITE) {
3528 void *addr = kmap_atomic(psg->page, KM_IRQ0);
3529 memcpy(pad_buf, addr + psg->offset, qc->pad_len);
3530 kunmap_atomic(addr, KM_IRQ0);
3533 sg_dma_address(psg) = ap->pad_dma + (qc->tag * ATA_DMA_PAD_SZ);
3534 sg_dma_len(psg) = ATA_DMA_PAD_SZ;
3536 lsg->length -= qc->pad_len;
3537 if (lsg->length == 0)
3540 DPRINTK("padding done, sg[%d].length=%u pad_len=%u\n",
3541 qc->n_elem - 1, lsg->length, qc->pad_len);
3544 pre_n_elem = qc->n_elem;
3545 if (trim_sg && pre_n_elem)
3554 n_elem = dma_map_sg(ap->dev, sg, pre_n_elem, dir);
3556 /* restore last sg */
3557 lsg->length += qc->pad_len;
3561 DPRINTK("%d sg elements mapped\n", n_elem);
3564 qc->n_elem = n_elem;
3570 * swap_buf_le16 - swap halves of 16-bit words in place
3571 * @buf: Buffer to swap
3572 * @buf_words: Number of 16-bit words in buffer.
3574 * Swap halves of 16-bit words if needed to convert from
3575 * little-endian byte order to native cpu byte order, or
3579 * Inherited from caller.
3581 void swap_buf_le16(u16 *buf, unsigned int buf_words)
3586 for (i = 0; i < buf_words; i++)
3587 buf[i] = le16_to_cpu(buf[i]);
3588 #endif /* __BIG_ENDIAN */
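/* Illustrative sketch (not part of the original file): IDENTIFY data
 * is 256 little-endian 16-bit words; callers convert it to host order
 * in place before parsing:
 *
 *	u16 id[ATA_ID_WORDS];
 *
 *	... fill id[] with raw IDENTIFY data ...
 *	swap_buf_le16(id, ATA_ID_WORDS);	(no-op on little-endian)
 */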
3592 * ata_mmio_data_xfer - Transfer data by MMIO
3593 * @adev: device for this I/O
3595 * @buflen: buffer length
3596 * @write_data: read/write
3598 * Transfer data from/to the device data register by MMIO.
3601 * Inherited from caller.
3604 void ata_mmio_data_xfer(struct ata_device *adev, unsigned char *buf,
3605 unsigned int buflen, int write_data)
3607 struct ata_port *ap = adev->ap;
3609 unsigned int words = buflen >> 1;
3610 u16 *buf16 = (u16 *) buf;
3611 void __iomem *mmio = (void __iomem *)ap->ioaddr.data_addr;
3613 /* Transfer multiple of 2 bytes */
3615 for (i = 0; i < words; i++)
3616 writew(le16_to_cpu(buf16[i]), mmio);
3618 for (i = 0; i < words; i++)
3619 buf16[i] = cpu_to_le16(readw(mmio));
3622 /* Transfer trailing 1 byte, if any. */
3623 if (unlikely(buflen & 0x01)) {
3624 u16 align_buf[1] = { 0 };
3625 unsigned char *trailing_buf = buf + buflen - 1;
3628 memcpy(align_buf, trailing_buf, 1);
3629 writew(le16_to_cpu(align_buf[0]), mmio);
3631 align_buf[0] = cpu_to_le16(readw(mmio));
3632 memcpy(trailing_buf, align_buf, 1);
3638 * ata_pio_data_xfer - Transfer data by PIO
3639 * @adev: device to target
3641 * @buflen: buffer length
3642 * @write_data: read/write
3644 * Transfer data from/to the device data register by PIO.
3647 * Inherited from caller.
3650 void ata_pio_data_xfer(struct ata_device *adev, unsigned char *buf,
3651 unsigned int buflen, int write_data)
3653 struct ata_port *ap = adev->ap;
3654 unsigned int words = buflen >> 1;
3656 /* Transfer multiple of 2 bytes */
3658 outsw(ap->ioaddr.data_addr, buf, words);
3660 insw(ap->ioaddr.data_addr, buf, words);
3662 /* Transfer trailing 1 byte, if any. */
3663 if (unlikely(buflen & 0x01)) {
3664 u16 align_buf[1] = { 0 };
3665 unsigned char *trailing_buf = buf + buflen - 1;
3668 memcpy(align_buf, trailing_buf, 1);
3669 outw(le16_to_cpu(align_buf[0]), ap->ioaddr.data_addr);
3671 align_buf[0] = cpu_to_le16(inw(ap->ioaddr.data_addr));
3672 memcpy(trailing_buf, align_buf, 1);
3678 * ata_pio_data_xfer_noirq - Transfer data by PIO
3679 * @adev: device to target
3681 * @buflen: buffer length
3682 * @write_data: read/write
3684 * Transfer data from/to the device data register by PIO. Do the
3685 * transfer with interrupts disabled.
3688 * Inherited from caller.
3691 void ata_pio_data_xfer_noirq(struct ata_device *adev, unsigned char *buf,
3692 unsigned int buflen, int write_data)
3694 unsigned long flags;
3695 local_irq_save(flags);
3696 ata_pio_data_xfer(adev, buf, buflen, write_data);
3697 local_irq_restore(flags);
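/* Illustrative sketch (not part of the original file): an LLDD selects
 * one of the three transfer helpers above via its port operations,
 * matching how the taskfile registers are mapped:
 *
 *	static const struct ata_port_operations my_ops = {
 *		...
 *		.data_xfer	= ata_mmio_data_xfer,	(MMIO-mapped)
 *		...
 *	};
 *
 * Port-I/O controllers use ata_pio_data_xfer instead, and the _noirq
 * variant when the transfer must not race with interrupt handlers.
 */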
3702 * ata_pio_sector - Transfer ATA_SECT_SIZE (512 bytes) of data.
3703 * @qc: Command on going
3705 * Transfer ATA_SECT_SIZE of data from/to the ATA device.
3708 * Inherited from caller.
3711 static void ata_pio_sector(struct ata_queued_cmd *qc)
3713 int do_write = (qc->tf.flags & ATA_TFLAG_WRITE);
3714 struct scatterlist *sg = qc->__sg;
3715 struct ata_port *ap = qc->ap;
3717 unsigned int offset;
3720 if (qc->cursect == (qc->nsect - 1))
3721 ap->hsm_task_state = HSM_ST_LAST;
3723 page = sg[qc->cursg].page;
3724 offset = sg[qc->cursg].offset + qc->cursg_ofs * ATA_SECT_SIZE;
3726 /* get the current page and offset */
3727 page = nth_page(page, (offset >> PAGE_SHIFT));
3728 offset %= PAGE_SIZE;
3730 DPRINTK("data %s\n", qc->tf.flags & ATA_TFLAG_WRITE ? "write" : "read");
3732 if (PageHighMem(page)) {
3733 unsigned long flags;
3735 /* FIXME: use a bounce buffer */
3736 local_irq_save(flags);
3737 buf = kmap_atomic(page, KM_IRQ0);
3739 /* do the actual data transfer */
3740 ap->ops->data_xfer(qc->dev, buf + offset, ATA_SECT_SIZE, do_write);
3742 kunmap_atomic(buf, KM_IRQ0);
3743 local_irq_restore(flags);
3745 buf = page_address(page);
3746 ap->ops->data_xfer(qc->dev, buf + offset, ATA_SECT_SIZE, do_write);
3752 if ((qc->cursg_ofs * ATA_SECT_SIZE) == (&sg[qc->cursg])->length) {
3759 * ata_pio_sectors - Transfer one or many 512-byte sectors.
3760 * @qc: Command on going
3762 * Transfer one or many ATA_SECT_SIZE of data from/to the
3763 * ATA device for the DRQ request.
3766 * Inherited from caller.
3769 static void ata_pio_sectors(struct ata_queued_cmd *qc)
3771 if (is_multi_taskfile(&qc->tf)) {
3772 /* READ/WRITE MULTIPLE */
3775 WARN_ON(qc->dev->multi_count == 0);
3777 nsect = min(qc->nsect - qc->cursect, qc->dev->multi_count);
3785 * atapi_send_cdb - Write CDB bytes to hardware
3786 * @ap: Port to which ATAPI device is attached.
3787 * @qc: Taskfile currently active
3789 * When the device has indicated its readiness to accept
3790 * a CDB, this function is called.  Send the CDB.
3796 static void atapi_send_cdb(struct ata_port *ap, struct ata_queued_cmd *qc)
3799 DPRINTK("send cdb\n");
3800 WARN_ON(qc->dev->cdb_len < 12);
3802 ap->ops->data_xfer(qc->dev, qc->cdb, qc->dev->cdb_len, 1);
3803 ata_altstatus(ap); /* flush */
3805 switch (qc->tf.protocol) {
3806 case ATA_PROT_ATAPI:
3807 ap->hsm_task_state = HSM_ST;
3809 case ATA_PROT_ATAPI_NODATA:
3810 ap->hsm_task_state = HSM_ST_LAST;
3812 case ATA_PROT_ATAPI_DMA:
3813 ap->hsm_task_state = HSM_ST_LAST;
3814 /* initiate bmdma */
3815 ap->ops->bmdma_start(qc);
3821 * __atapi_pio_bytes - Transfer data from/to the ATAPI device.
3822 * @qc: Command on going
3823 * @bytes: number of bytes
3825 * Transfer data from/to the ATAPI device.
3828 * Inherited from caller.
3832 static void __atapi_pio_bytes(struct ata_queued_cmd *qc, unsigned int bytes)
3834 int do_write = (qc->tf.flags & ATA_TFLAG_WRITE);
3835 struct scatterlist *sg = qc->__sg;
3836 struct ata_port *ap = qc->ap;
3839 unsigned int offset, count;
3841 if (qc->curbytes + bytes >= qc->nbytes)
3842 ap->hsm_task_state = HSM_ST_LAST;
3845 if (unlikely(qc->cursg >= qc->n_elem)) {
3847 * The end of qc->sg is reached and the device expects
3848 * more data to transfer.  In order not to overrun qc->sg
3849 * and to fulfill the length specified in the byte count register,
3850 * - for the read case, discard trailing data from the device
3851 * - for the write case, pad the transfer with zero data
3853 u16 pad_buf[1] = { 0 };
3854 unsigned int words = bytes >> 1;
3857 if (words) /* warning if bytes > 1 */
3858 ata_dev_printk(qc->dev, KERN_WARNING,
3859 "%u bytes trailing data\n", bytes);
3861 for (i = 0; i < words; i++)
3862 ap->ops->data_xfer(qc->dev, (unsigned char*)pad_buf, 2, do_write);
3864 ap->hsm_task_state = HSM_ST_LAST;
3868 sg = &qc->__sg[qc->cursg];
3871 offset = sg->offset + qc->cursg_ofs;
3873 /* get the current page and offset */
3874 page = nth_page(page, (offset >> PAGE_SHIFT));
3875 offset %= PAGE_SIZE;
3877 /* don't overrun current sg */
3878 count = min(sg->length - qc->cursg_ofs, bytes);
3880 /* don't cross page boundaries */
3881 count = min(count, (unsigned int)PAGE_SIZE - offset);
3883 DPRINTK("data %s\n", qc->tf.flags & ATA_TFLAG_WRITE ? "write" : "read");
3885 if (PageHighMem(page)) {
3886 unsigned long flags;
3888 /* FIXME: use bounce buffer */
3889 local_irq_save(flags);
3890 buf = kmap_atomic(page, KM_IRQ0);
3892 /* do the actual data transfer */
3893 ap->ops->data_xfer(qc->dev, buf + offset, count, do_write);
3895 kunmap_atomic(buf, KM_IRQ0);
3896 local_irq_restore(flags);
3898 buf = page_address(page);
3899 ap->ops->data_xfer(qc->dev, buf + offset, count, do_write);
3903 qc->curbytes += count;
3904 qc->cursg_ofs += count;
3906 if (qc->cursg_ofs == sg->length) {
3916 * atapi_pio_bytes - Transfer data from/to the ATAPI device.
3917 * @qc: Command on going
3919 * Transfer data from/to the ATAPI device.
3922 * Inherited from caller.
3925 static void atapi_pio_bytes(struct ata_queued_cmd *qc)
3927 struct ata_port *ap = qc->ap;
3928 struct ata_device *dev = qc->dev;
3929 unsigned int ireason, bc_lo, bc_hi, bytes;
3930 int i_write, do_write = (qc->tf.flags & ATA_TFLAG_WRITE) ? 1 : 0;
3932 /* Abuse qc->result_tf for temp storage of intermediate TF
3933 * here to save some kernel stack usage.
3934 * For normal completion, qc->result_tf is not relevant. For
3935 * error, qc->result_tf is later overwritten by ata_qc_complete().
3936 * So, the correctness of qc->result_tf is not affected.
3938 ap->ops->tf_read(ap, &qc->result_tf);
3939 ireason = qc->result_tf.nsect;
3940 bc_lo = qc->result_tf.lbam;
3941 bc_hi = qc->result_tf.lbah;
3942 bytes = (bc_hi << 8) | bc_lo;
3944 /* shall be cleared to zero, indicating xfer of data */
3945 if (ireason & (1 << 0))
3948 /* make sure transfer direction matches expected */
3949 i_write = ((ireason & (1 << 1)) == 0) ? 1 : 0;
3950 if (do_write != i_write)
3953 VPRINTK("ata%u: xfering %d bytes\n", ap->id, bytes);
3955 __atapi_pio_bytes(qc, bytes);
3960 ata_dev_printk(dev, KERN_INFO, "ATAPI check failed\n");
3961 qc->err_mask |= AC_ERR_HSM;
3962 ap->hsm_task_state = HSM_ST_ERR;
3966 * ata_hsm_ok_in_wq - Check if the qc can be handled in the workqueue.
3967 * @ap: the target ata_port
3971 * 1 if ok in workqueue, 0 otherwise.
3974 static inline int ata_hsm_ok_in_wq(struct ata_port *ap, struct ata_queued_cmd *qc)
3976 if (qc->tf.flags & ATA_TFLAG_POLLING)
3979 if (ap->hsm_task_state == HSM_ST_FIRST) {
3980 if (qc->tf.protocol == ATA_PROT_PIO &&
3981 (qc->tf.flags & ATA_TFLAG_WRITE))
3984 if (is_atapi_taskfile(&qc->tf) &&
3985 !(qc->dev->flags & ATA_DFLAG_CDB_INTR))
3993 * ata_hsm_qc_complete - finish a qc running on standard HSM
3994 * @qc: Command to complete
3995 * @in_wq: 1 if called from workqueue, 0 otherwise
3997 * Finish @qc which is running on standard HSM.
4000 * If @in_wq is zero, spin_lock_irqsave(host lock).
4001 * Otherwise, none on entry and grabs host lock.
4003 static void ata_hsm_qc_complete(struct ata_queued_cmd *qc, int in_wq)
4005 struct ata_port *ap = qc->ap;
4006 unsigned long flags;
4008 if (ap->ops->error_handler) {
4010 spin_lock_irqsave(ap->lock, flags);
4012 /* EH might have kicked in while the host lock is
 * released.
 */
4015 qc = ata_qc_from_tag(ap, qc->tag);
4017 if (likely(!(qc->err_mask & AC_ERR_HSM))) {
4019 ata_qc_complete(qc);
4021 ata_port_freeze(ap);
4024 spin_unlock_irqrestore(ap->lock, flags);
4026 if (likely(!(qc->err_mask & AC_ERR_HSM)))
4027 ata_qc_complete(qc);
4029 ata_port_freeze(ap);
4033 spin_lock_irqsave(ap->lock, flags);
4035 ata_qc_complete(qc);
4036 spin_unlock_irqrestore(ap->lock, flags);
4038 ata_qc_complete(qc);
4041 ata_altstatus(ap); /* flush */
4045 * ata_hsm_move - move the HSM to the next state.
4046 * @ap: the target ata_port
4048 * @status: current device status
4049 * @in_wq: 1 if called from workqueue, 0 otherwise
4052 * 1 when poll next status needed, 0 otherwise.
4054 int ata_hsm_move(struct ata_port *ap, struct ata_queued_cmd *qc,
4055 u8 status, int in_wq)
4057 unsigned long flags = 0;
4060 WARN_ON((qc->flags & ATA_QCFLAG_ACTIVE) == 0);
4062 /* Make sure ata_qc_issue_prot() does not throw things
4063 * like DMA polling into the workqueue. Notice that
4064 * in_wq is not equivalent to (qc->tf.flags & ATA_TFLAG_POLLING).
4066 WARN_ON(in_wq != ata_hsm_ok_in_wq(ap, qc));
4069 DPRINTK("ata%u: protocol %d task_state %d (dev_stat 0x%X)\n",
4070 ap->id, qc->tf.protocol, ap->hsm_task_state, status);
4072 switch (ap->hsm_task_state) {
4074 /* Send first data block or PACKET CDB */
4076 /* If polling, we will stay in the work queue after
4077 * sending the data. Otherwise, interrupt handler
4078 * takes over after sending the data.
4080 poll_next = (qc->tf.flags & ATA_TFLAG_POLLING);
4082 /* check device status */
4083 if (unlikely((status & ATA_DRQ) == 0)) {
4084 /* handle BSY=0, DRQ=0 as error */
4085 if (likely(status & (ATA_ERR | ATA_DF)))
4086 /* device stops HSM for abort/error */
4087 qc->err_mask |= AC_ERR_DEV;
4089 /* HSM violation. Let EH handle this */
4090 qc->err_mask |= AC_ERR_HSM;
4092 ap->hsm_task_state = HSM_ST_ERR;
4096 /* Device should not ask for data transfer (DRQ=1)
4097 * when it finds something wrong.
4098 * We ignore DRQ here and stop the HSM by
4099 * changing hsm_task_state to HSM_ST_ERR and
4100 * let the EH abort the command or reset the device.
4102 if (unlikely(status & (ATA_ERR | ATA_DF))) {
4103 printk(KERN_WARNING "ata%d: DRQ=1 with device error, dev_stat 0x%X\n",
4105 qc->err_mask |= AC_ERR_HSM;
4106 ap->hsm_task_state = HSM_ST_ERR;
4110 /* Send the CDB (atapi) or the first data block (ata pio out).
4111 * During the state transition, interrupt handler shouldn't
4112 * be invoked before the data transfer is complete and
4113 * hsm_task_state is changed. Hence, the following locking.
4116 spin_lock_irqsave(ap->lock, flags);
4118 if (qc->tf.protocol == ATA_PROT_PIO) {
4119 /* PIO data out protocol.
4120 * send first data block.
4123 /* ata_pio_sectors() might change the state
4124 * to HSM_ST_LAST. so, the state is changed here
4125 * before ata_pio_sectors().
4127 ap->hsm_task_state = HSM_ST;
4128 ata_pio_sectors(qc);
4129 ata_altstatus(ap); /* flush */
4132 atapi_send_cdb(ap, qc);
4135 spin_unlock_irqrestore(ap->lock, flags);
4137 /* if polling, ata_pio_task() handles the rest.
4138 * otherwise, interrupt handler takes over from here.
4143 /* complete command or read/write the data register */
4144 if (qc->tf.protocol == ATA_PROT_ATAPI) {
4145 /* ATAPI PIO protocol */
4146 if ((status & ATA_DRQ) == 0) {
4147 /* No more data to transfer or device error.
4148 * Device error will be tagged in HSM_ST_LAST.
4150 ap->hsm_task_state = HSM_ST_LAST;
4154 /* Device should not ask for data transfer (DRQ=1)
4155 * when it finds something wrong.
4156 * We ignore DRQ here and stop the HSM by
4157 * changing hsm_task_state to HSM_ST_ERR and
4158 * let the EH abort the command or reset the device.
4160 if (unlikely(status & (ATA_ERR | ATA_DF))) {
4161 printk(KERN_WARNING "ata%d: DRQ=1 with device error, dev_stat 0x%X\n",
4163 qc->err_mask |= AC_ERR_HSM;
4164 ap->hsm_task_state = HSM_ST_ERR;
4168 atapi_pio_bytes(qc);
4170 if (unlikely(ap->hsm_task_state == HSM_ST_ERR))
4171 /* bad ireason reported by device */
4175 /* ATA PIO protocol */
4176 if (unlikely((status & ATA_DRQ) == 0)) {
4177 /* handle BSY=0, DRQ=0 as error */
4178 if (likely(status & (ATA_ERR | ATA_DF)))
4179 /* device stops HSM for abort/error */
4180 qc->err_mask |= AC_ERR_DEV;
4182 /* HSM violation. Let EH handle this */
4183 qc->err_mask |= AC_ERR_HSM;
4185 ap->hsm_task_state = HSM_ST_ERR;
4189 /* For PIO reads, some devices may ask for
4190 * data transfer (DRQ=1) along with ERR=1.
4191 * We respect DRQ here and transfer one
4192 * block of junk data before changing the
4193 * hsm_task_state to HSM_ST_ERR.
4195 * For PIO writes, ERR=1 DRQ=1 doesn't make
4196 * sense since the data block has been
4197 * transferred to the device.
4199 if (unlikely(status & (ATA_ERR | ATA_DF))) {
4200 /* data might be corrupted */
4201 qc->err_mask |= AC_ERR_DEV;
4203 if (!(qc->tf.flags & ATA_TFLAG_WRITE)) {
4204 ata_pio_sectors(qc);
4206 status = ata_wait_idle(ap);
4209 if (status & (ATA_BUSY | ATA_DRQ))
4210 qc->err_mask |= AC_ERR_HSM;
4212 /* ata_pio_sectors() might change the
4213 * state to HSM_ST_LAST. so, the state
4214 * is changed after ata_pio_sectors().
4216 ap->hsm_task_state = HSM_ST_ERR;
4220 ata_pio_sectors(qc);
4222 if (ap->hsm_task_state == HSM_ST_LAST &&
4223 (!(qc->tf.flags & ATA_TFLAG_WRITE))) {
4226 status = ata_wait_idle(ap);
4231 ata_altstatus(ap); /* flush */
4236 if (unlikely(!ata_ok(status))) {
4237 qc->err_mask |= __ac_err_mask(status);
4238 ap->hsm_task_state = HSM_ST_ERR;
4242 /* no more data to transfer */
4243 DPRINTK("ata%u: dev %u command complete, drv_stat 0x%x\n",
4244 ap->id, qc->dev->devno, status);
4246 WARN_ON(qc->err_mask);
4248 ap->hsm_task_state = HSM_ST_IDLE;
4250 /* complete taskfile transaction */
4251 ata_hsm_qc_complete(qc, in_wq);
4257 /* make sure qc->err_mask is available to
4258 * know what's wrong and recover
4260 WARN_ON(qc->err_mask == 0);
4262 ap->hsm_task_state = HSM_ST_IDLE;
4264 /* complete taskfile transaction */
4265 ata_hsm_qc_complete(qc, in_wq);
4277 static void ata_pio_task(void *_data)
4279 struct ata_queued_cmd *qc = _data;
4280 struct ata_port *ap = qc->ap;
4285 WARN_ON(ap->hsm_task_state == HSM_ST_IDLE);
4288 * This is purely heuristic. This is a fast path.
4289 * Sometimes when we enter, BSY will be cleared in
4290 * a chk-status or two. If not, the drive is probably seeking
4291 * or something. Snooze for a couple msecs, then
4292 * chk-status again. If still busy, queue delayed work.
4294 status = ata_busy_wait(ap, ATA_BUSY, 5);
4295 if (status & ATA_BUSY) {
4297 status = ata_busy_wait(ap, ATA_BUSY, 10);
4298 if (status & ATA_BUSY) {
4299 ata_port_queue_task(ap, ata_pio_task, qc, ATA_SHORT_PAUSE);
4305 poll_next = ata_hsm_move(ap, qc, status, 1);
4307 /* another command or interrupt handler
4308 * may be running at this point.
4315 * ata_qc_new - Request an available ATA command, for queueing
4316 * @ap: Port associated with device @dev
4317 * @dev: Device from whom we request an available command structure
4323 static struct ata_queued_cmd *ata_qc_new(struct ata_port *ap)
4325 struct ata_queued_cmd *qc = NULL;
4328 /* no command while frozen */
4329 if (unlikely(ap->pflags & ATA_PFLAG_FROZEN))
4332 /* the last tag is reserved for internal command. */
4333 for (i = 0; i < ATA_MAX_QUEUE - 1; i++)
4334 if (!test_and_set_bit(i, &ap->qc_allocated)) {
4335 qc = __ata_qc_from_tag(ap, i);
4346 * ata_qc_new_init - Request an available ATA command, and initialize it
4347 * @dev: Device from whom we request an available command structure
4353 struct ata_queued_cmd *ata_qc_new_init(struct ata_device *dev)
4355 struct ata_port *ap = dev->ap;
4356 struct ata_queued_cmd *qc;
4358 qc = ata_qc_new(ap);
4371 * ata_qc_free - free unused ata_queued_cmd
4372 * @qc: Command to complete
4374 * Designed to free unused ata_queued_cmd object
4375 * in case something prevents using it.
4378 * spin_lock_irqsave(host lock)
4380 void ata_qc_free(struct ata_queued_cmd *qc)
4382 struct ata_port *ap = qc->ap;
4385 WARN_ON(qc == NULL); /* ata_qc_from_tag _might_ return NULL */
4389 if (likely(ata_tag_valid(tag))) {
4390 qc->tag = ATA_TAG_POISON;
4391 clear_bit(tag, &ap->qc_allocated);
4395 void __ata_qc_complete(struct ata_queued_cmd *qc)
4397 struct ata_port *ap = qc->ap;
4399 WARN_ON(qc == NULL); /* ata_qc_from_tag _might_ return NULL */
4400 WARN_ON(!(qc->flags & ATA_QCFLAG_ACTIVE));
4402 if (likely(qc->flags & ATA_QCFLAG_DMAMAP))
4405 /* command should be marked inactive atomically with qc completion */
4406 if (qc->tf.protocol == ATA_PROT_NCQ)
4407 ap->sactive &= ~(1 << qc->tag);
4409 ap->active_tag = ATA_TAG_POISON;
4411 /* atapi: mark qc as inactive to prevent the interrupt handler
4412 * from completing the command twice later, before the error handler
4413 * is called. (when rc != 0 and atapi request sense is needed)
4415 qc->flags &= ~ATA_QCFLAG_ACTIVE;
4416 ap->qc_active &= ~(1 << qc->tag);
4418 /* call completion callback */
4419 qc->complete_fn(qc);
4423 * ata_qc_complete - Complete an active ATA command
4424 * @qc: Command to complete
4427 * Indicate to the mid and upper layers that an ATA
4428 * command has completed, with either an ok or not-ok status.
4431 * spin_lock_irqsave(host lock)
4433 void ata_qc_complete(struct ata_queued_cmd *qc)
4435 struct ata_port *ap = qc->ap;
4437 /* XXX: New EH and old EH use different mechanisms to
4438 * synchronize EH with regular execution path.
4440 * In new EH, a failed qc is marked with ATA_QCFLAG_FAILED.
4441 * Normal execution path is responsible for not accessing a
4442 * failed qc. libata core enforces the rule by returning NULL
4443 * from ata_qc_from_tag() for failed qcs.
4445 * Old EH depends on ata_qc_complete() nullifying completion
4446 * requests if ATA_QCFLAG_EH_SCHEDULED is set. Old EH does
4447 * not synchronize with interrupt handler. Only PIO task is
4450 if (ap->ops->error_handler) {
4451 WARN_ON(ap->pflags & ATA_PFLAG_FROZEN);
4453 if (unlikely(qc->err_mask))
4454 qc->flags |= ATA_QCFLAG_FAILED;
4456 if (unlikely(qc->flags & ATA_QCFLAG_FAILED)) {
4457 if (!ata_tag_internal(qc->tag)) {
4458 /* always fill result TF for failed qc */
4459 ap->ops->tf_read(ap, &qc->result_tf);
4460 ata_qc_schedule_eh(qc);
4465 /* read result TF if requested */
4466 if (qc->flags & ATA_QCFLAG_RESULT_TF)
4467 ap->ops->tf_read(ap, &qc->result_tf);
4469 __ata_qc_complete(qc);
4471 if (qc->flags & ATA_QCFLAG_EH_SCHEDULED)
4474 /* read result TF if failed or requested */
4475 if (qc->err_mask || qc->flags & ATA_QCFLAG_RESULT_TF)
4476 ap->ops->tf_read(ap, &qc->result_tf);
4478 __ata_qc_complete(qc);
4483 * ata_qc_complete_multiple - Complete multiple qcs successfully
4484 * @ap: port in question
4485 * @qc_active: new qc_active mask
4486 * @finish_qc: LLDD callback invoked before completing a qc
4488 * Complete in-flight commands.  This function is meant to be
4489 * called from the low-level driver's interrupt routine to complete
4490 * requests normally.  ap->qc_active and @qc_active are compared
4491 * and commands are completed accordingly.
4494 * spin_lock_irqsave(host lock)
4497 * Number of completed commands on success, -errno otherwise.
4499 int ata_qc_complete_multiple(struct ata_port *ap, u32 qc_active,
4500 void (*finish_qc)(struct ata_queued_cmd *))
4506 done_mask = ap->qc_active ^ qc_active;
4508 if (unlikely(done_mask & qc_active)) {
4509 ata_port_printk(ap, KERN_ERR, "illegal qc_active transition "
4510 "(%08x->%08x)\n", ap->qc_active, qc_active);
4514 for (i = 0; i < ATA_MAX_QUEUE; i++) {
4515 struct ata_queued_cmd *qc;
4517 if (!(done_mask & (1 << i)))
4520 if ((qc = ata_qc_from_tag(ap, i))) {
4523 ata_qc_complete(qc);
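/* Illustrative sketch (not part of the original file): an NCQ-capable
 * LLDD's interrupt handler reads its hardware's active-command mask
 * and lets the helper above complete whatever finished.  MY_ACTIVE_REG
 * is a made-up register name for the example.
 *
 *	u32 hw_active = readl(port_mmio + MY_ACTIVE_REG);
 *
 *	if (ata_qc_complete_multiple(ap, hw_active, NULL) < 0)
 *		ata_port_freeze(ap);	(illegal transition, punt to EH)
 */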
4531 static inline int ata_should_dma_map(struct ata_queued_cmd *qc)
4533 struct ata_port *ap = qc->ap;
4535 switch (qc->tf.protocol) {
4538 case ATA_PROT_ATAPI_DMA:
4541 case ATA_PROT_ATAPI:
4543 if (ap->flags & ATA_FLAG_PIO_DMA)
4556 * ata_qc_issue - issue taskfile to device
4557 * @qc: command to issue to device
4559 * Prepare an ATA command for submission to the device.
4560 * This includes mapping the data into a DMA-able
4561 * area, filling in the S/G table, and finally
4562 * writing the taskfile to hardware, starting the command.
4565 * spin_lock_irqsave(host lock)
4567 void ata_qc_issue(struct ata_queued_cmd *qc)
4569 struct ata_port *ap = qc->ap;
4571 /* Make sure only one non-NCQ command is outstanding. The
4572 * check is skipped for old EH because it reuses active qc to
4573 * request ATAPI sense.
4575 WARN_ON(ap->ops->error_handler && ata_tag_valid(ap->active_tag));
4577 if (qc->tf.protocol == ATA_PROT_NCQ) {
4578 WARN_ON(ap->sactive & (1 << qc->tag));
4579 ap->sactive |= 1 << qc->tag;
4581 WARN_ON(ap->sactive);
4582 ap->active_tag = qc->tag;
4585 qc->flags |= ATA_QCFLAG_ACTIVE;
4586 ap->qc_active |= 1 << qc->tag;
4588 if (ata_should_dma_map(qc)) {
4589 if (qc->flags & ATA_QCFLAG_SG) {
4590 if (ata_sg_setup(qc))
4592 } else if (qc->flags & ATA_QCFLAG_SINGLE) {
4593 if (ata_sg_setup_one(qc))
4597 qc->flags &= ~ATA_QCFLAG_DMAMAP;
4600 ap->ops->qc_prep(qc);
4602 qc->err_mask |= ap->ops->qc_issue(qc);
4603 if (unlikely(qc->err_mask))
4608 qc->flags &= ~ATA_QCFLAG_DMAMAP;
4609 qc->err_mask |= AC_ERR_SYSTEM;
4611 ata_qc_complete(qc);
4615 * ata_qc_issue_prot - issue taskfile to device in proto-dependent manner
4616 * @qc: command to issue to device
4618 * Using various libata functions and hooks, this function
4619 * starts an ATA command. ATA commands are grouped into
4620 * classes called "protocols", and issuing each type of protocol
4621 * is slightly different.
4623 * May be used as the qc_issue() entry in ata_port_operations.
4626 * spin_lock_irqsave(host lock)
4629 * Zero on success, AC_ERR_* mask on failure
4632 unsigned int ata_qc_issue_prot(struct ata_queued_cmd *qc)
4634 struct ata_port *ap = qc->ap;
4636 /* Use polling pio if the LLD doesn't handle
4637 * interrupt driven pio and atapi CDB interrupt.
4639 if (ap->flags & ATA_FLAG_PIO_POLLING) {
4640 switch (qc->tf.protocol) {
4642 case ATA_PROT_ATAPI:
4643 case ATA_PROT_ATAPI_NODATA:
4644 qc->tf.flags |= ATA_TFLAG_POLLING;
4646 case ATA_PROT_ATAPI_DMA:
4647 if (qc->dev->flags & ATA_DFLAG_CDB_INTR)
4648 /* see ata_dma_blacklisted() */
4656 /* select the device */
4657 ata_dev_select(ap, qc->dev->devno, 1, 0);
4659 /* start the command */
4660 switch (qc->tf.protocol) {
4661 case ATA_PROT_NODATA:
4662 if (qc->tf.flags & ATA_TFLAG_POLLING)
4663 ata_qc_set_polling(qc);
4665 ata_tf_to_host(ap, &qc->tf);
4666 ap->hsm_task_state = HSM_ST_LAST;
4668 if (qc->tf.flags & ATA_TFLAG_POLLING)
4669 ata_port_queue_task(ap, ata_pio_task, qc, 0);
4674 WARN_ON(qc->tf.flags & ATA_TFLAG_POLLING);
4676 ap->ops->tf_load(ap, &qc->tf); /* load tf registers */
4677 ap->ops->bmdma_setup(qc); /* set up bmdma */
4678 ap->ops->bmdma_start(qc); /* initiate bmdma */
4679 ap->hsm_task_state = HSM_ST_LAST;
4683 if (qc->tf.flags & ATA_TFLAG_POLLING)
4684 ata_qc_set_polling(qc);
4686 ata_tf_to_host(ap, &qc->tf);
4688 if (qc->tf.flags & ATA_TFLAG_WRITE) {
4689 /* PIO data out protocol */
4690 ap->hsm_task_state = HSM_ST_FIRST;
4691 ata_port_queue_task(ap, ata_pio_task, qc, 0);
4693 /* always send first data block using
4694 * the ata_pio_task() codepath.
4697 /* PIO data in protocol */
4698 ap->hsm_task_state = HSM_ST;
4700 if (qc->tf.flags & ATA_TFLAG_POLLING)
4701 ata_port_queue_task(ap, ata_pio_task, qc, 0);
4703 /* if polling, ata_pio_task() handles the rest.
4704 * otherwise, interrupt handler takes over from here.
4710 case ATA_PROT_ATAPI:
4711 case ATA_PROT_ATAPI_NODATA:
4712 if (qc->tf.flags & ATA_TFLAG_POLLING)
4713 ata_qc_set_polling(qc);
4715 ata_tf_to_host(ap, &qc->tf);
4717 ap->hsm_task_state = HSM_ST_FIRST;
4719 /* send cdb by polling if no cdb interrupt */
4720 if ((!(qc->dev->flags & ATA_DFLAG_CDB_INTR)) ||
4721 (qc->tf.flags & ATA_TFLAG_POLLING))
4722 ata_port_queue_task(ap, ata_pio_task, qc, 0);
4725 case ATA_PROT_ATAPI_DMA:
4726 WARN_ON(qc->tf.flags & ATA_TFLAG_POLLING);
4728 ap->ops->tf_load(ap, &qc->tf); /* load tf registers */
4729 ap->ops->bmdma_setup(qc); /* set up bmdma */
4730 ap->hsm_task_state = HSM_ST_FIRST;
4732 /* send cdb by polling if no cdb interrupt */
4733 if (!(qc->dev->flags & ATA_DFLAG_CDB_INTR))
4734 ata_port_queue_task(ap, ata_pio_task, qc, 0);
4739 return AC_ERR_SYSTEM;
4746 * ata_host_intr - Handle host interrupt for given (port, task)
4747 * @ap: Port on which interrupt arrived (possibly...)
4748 * @qc: Taskfile currently active in engine
4750 * Handle host interrupt for given queued command. Currently,
4751 * only DMA interrupts are handled. All other commands are
4752 * handled via polling with interrupts disabled (nIEN bit).
4755 * spin_lock_irqsave(host lock)
4758 * One if interrupt was handled, zero if not (shared irq).
4761 inline unsigned int ata_host_intr (struct ata_port *ap,
4762 struct ata_queued_cmd *qc)
4764 u8 status, host_stat = 0;
4766 VPRINTK("ata%u: protocol %d task_state %d\n",
4767 ap->id, qc->tf.protocol, ap->hsm_task_state);
4769 /* Check whether we are expecting interrupt in this state */
4770 switch (ap->hsm_task_state) {
4772 /* Some pre-ATAPI-4 devices assert INTRQ
4773 * in this state when ready to receive the CDB.
4776 /* Checking the ATA_DFLAG_CDB_INTR flag is enough here.
4777 * The flag was turned on only for atapi devices.
4778 * No need to check is_atapi_taskfile(&qc->tf) again.
4780 if (!(qc->dev->flags & ATA_DFLAG_CDB_INTR))
4784 if (qc->tf.protocol == ATA_PROT_DMA ||
4785 qc->tf.protocol == ATA_PROT_ATAPI_DMA) {
4786 /* check status of DMA engine */
4787 host_stat = ap->ops->bmdma_status(ap);
4788 VPRINTK("ata%u: host_stat 0x%X\n", ap->id, host_stat);
4790 /* if it's not our irq... */
4791 if (!(host_stat & ATA_DMA_INTR))
4794 /* before we do anything else, clear DMA-Start bit */
4795 ap->ops->bmdma_stop(qc);
4797 if (unlikely(host_stat & ATA_DMA_ERR)) {
4798 /* error when transferring data to/from memory */
4799 qc->err_mask |= AC_ERR_HOST_BUS;
4800 ap->hsm_task_state = HSM_ST_ERR;
4810 /* check altstatus */
4811 status = ata_altstatus(ap);
4812 if (status & ATA_BUSY)
4815 /* check main status, clearing INTRQ */
4816 status = ata_chk_status(ap);
4817 if (unlikely(status & ATA_BUSY))
4820 /* ack bmdma irq events */
4821 ap->ops->irq_clear(ap);
4823 ata_hsm_move(ap, qc, status, 0);
4824 return 1; /* irq handled */
4827 ap->stats.idle_irq++;
4830 if ((ap->stats.idle_irq % 1000) == 0) {
4831 ata_irq_ack(ap, 0); /* debug trap */
4832 ata_port_printk(ap, KERN_WARNING, "irq trap\n");
4836 return 0; /* irq not handled */
4840 * ata_interrupt - Default ATA host interrupt handler
4841 * @irq: irq line (unused)
4842 * @dev_instance: pointer to our ata_host information structure
4845 * Default interrupt handler for PCI IDE devices. Calls
4846 * ata_host_intr() for each port that is not disabled.
4849 * Obtains host lock during operation.
4852 * IRQ_NONE or IRQ_HANDLED.
4855 irqreturn_t ata_interrupt (int irq, void *dev_instance, struct pt_regs *regs)
4857 struct ata_host *host = dev_instance;
4859 unsigned int handled = 0;
4860 unsigned long flags;
4862 /* TODO: make _irqsave conditional on x86 PCI IDE legacy mode */
4863 spin_lock_irqsave(&host->lock, flags);
4865 for (i = 0; i < host->n_ports; i++) {
4866 struct ata_port *ap;
4868 ap = host->ports[i];
4870 !(ap->flags & ATA_FLAG_DISABLED)) {
4871 struct ata_queued_cmd *qc;
4873 qc = ata_qc_from_tag(ap, ap->active_tag);
4874 if (qc && (!(qc->tf.flags & ATA_TFLAG_POLLING)) &&
4875 (qc->flags & ATA_QCFLAG_ACTIVE))
4876 handled |= ata_host_intr(ap, qc);
4880 spin_unlock_irqrestore(&host->lock, flags);
4882 return IRQ_RETVAL(handled);
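/* Illustrative sketch (not part of the original file): drivers without
 * special interrupt requirements register this handler directly.  The
 * shared-IRQ flag spelling (SA_SHIRQ) is assumed for this kernel
 * generation.
 *
 *	rc = request_irq(pdev->irq, ata_interrupt, SA_SHIRQ,
 *			 DRV_NAME, host);
 */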
4886 * sata_scr_valid - test whether SCRs are accessible
4887 * @ap: ATA port to test SCR accessibility for
4889 * Test whether SCRs are accessible for @ap.
4895 * 1 if SCRs are accessible, 0 otherwise.
4897 int sata_scr_valid(struct ata_port *ap)
4899 return ap->cbl == ATA_CBL_SATA && ap->ops->scr_read;
4903 * sata_scr_read - read SCR register of the specified port
4904 * @ap: ATA port to read SCR for
4906 * @val: Place to store read value
4908 * Read SCR register @reg of @ap into *@val. This function is
4909 * guaranteed to succeed if the cable type of the port is SATA
4910 * and the port implements ->scr_read.
4916 * 0 on success, negative errno on failure.
4918 int sata_scr_read(struct ata_port *ap, int reg, u32 *val)
4920 if (sata_scr_valid(ap)) {
4921 *val = ap->ops->scr_read(ap, reg);
4928 * sata_scr_write - write SCR register of the specified port
4929 * @ap: ATA port to write SCR for
4930 * @reg: SCR to write
4931 * @val: value to write
4933 * Write @val to SCR register @reg of @ap. This function is
4934 * guaranteed to succeed if the cable type of the port is SATA
4935 * and the port implements ->scr_write.
4941 * 0 on success, negative errno on failure.
4943 int sata_scr_write(struct ata_port *ap, int reg, u32 val)
4945 if (sata_scr_valid(ap)) {
4946 ap->ops->scr_write(ap, reg, val);
4953 * sata_scr_write_flush - write SCR register of the specified port and flush
4954 * @ap: ATA port to write SCR for
4955 * @reg: SCR to write
4956 * @val: value to write
4958 * This function is identical to sata_scr_write() except that this
4959 * function performs a flush after writing to the register.
4965 * 0 on success, negative errno on failure.
4967 int sata_scr_write_flush(struct ata_port *ap, int reg, u32 val)
4969 if (sata_scr_valid(ap)) {
4970 ap->ops->scr_write(ap, reg, val);
4971 ap->ops->scr_read(ap, reg);
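/* Illustrative sketch (not part of the original file): the usual
 * SControl read-modify-write pattern built from the accessors above,
 * here issuing COMRESET by setting DET to 1 as sata_std_hardreset does:
 *
 *	u32 scontrol;
 *	int rc;
 *
 *	if ((rc = sata_scr_read(ap, SCR_CONTROL, &scontrol)))
 *		return rc;
 *	scontrol = (scontrol & 0x0f0) | 0x301;
 *	if ((rc = sata_scr_write_flush(ap, SCR_CONTROL, scontrol)))
 *		return rc;
 */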
4978 * ata_port_online - test whether the given port is online
4979 * @ap: ATA port to test
4981 * Test whether @ap is online. Note that this function returns 0
4982 * if online status of @ap cannot be obtained, so
4983 * ata_port_online(ap) != !ata_port_offline(ap).
4989 * 1 if the port online status is available and online.
4991 int ata_port_online(struct ata_port *ap)
4995 if (!sata_scr_read(ap, SCR_STATUS, &sstatus) && (sstatus & 0xf) == 0x3)
5001 * ata_port_offline - test whether the given port is offline
5002 * @ap: ATA port to test
5004 * Test whether @ap is offline. Note that this function returns
5005 * 0 if offline status of @ap cannot be obtained, so
5006 * ata_port_online(ap) != !ata_port_offline(ap).
5012 * 1 if the port offline status is available and offline.
5014 int ata_port_offline(struct ata_port *ap)
5018 if (!sata_scr_read(ap, SCR_STATUS, &sstatus) && (sstatus & 0xf) != 0x3)
5023 int ata_flush_cache(struct ata_device *dev)
5025 unsigned int err_mask;
5028 if (!ata_try_flush_cache(dev))
5031 if (ata_id_has_flush_ext(dev->id))
5032 cmd = ATA_CMD_FLUSH_EXT;
5034 cmd = ATA_CMD_FLUSH;
5036 err_mask = ata_do_simple_cmd(dev, cmd);
5038 ata_dev_printk(dev, KERN_ERR, "failed to flush cache\n");
static int ata_host_request_pm(struct ata_host *host, pm_message_t mesg,
			       unsigned int action, unsigned int ehi_flags,
			       int wait)
{
	unsigned long flags;
	int i, rc;

	for (i = 0; i < host->n_ports; i++) {
		struct ata_port *ap = host->ports[i];

		/* Previous resume operation might still be in
		 * progress.  Wait for PM_PENDING to clear.
		 */
		if (ap->pflags & ATA_PFLAG_PM_PENDING) {
			ata_port_wait_eh(ap);
			WARN_ON(ap->pflags & ATA_PFLAG_PM_PENDING);
		}

		/* request PM ops to EH */
		spin_lock_irqsave(ap->lock, flags);

		ap->pm_mesg = mesg;
		if (wait) {
			rc = 0;
			ap->pm_result = &rc;
		}

		ap->pflags |= ATA_PFLAG_PM_PENDING;
		ap->eh_info.action |= action;
		ap->eh_info.flags |= ehi_flags;

		ata_port_schedule_eh(ap);

		spin_unlock_irqrestore(ap->lock, flags);

		/* wait and check result */
		if (wait) {
			ata_port_wait_eh(ap);
			WARN_ON(ap->pflags & ATA_PFLAG_PM_PENDING);
			if (rc)
				return rc;
		}
	}

	return 0;
}

/**
 * ata_host_suspend - suspend host
 * @host: host to suspend
 * @mesg: PM message
 *
 * Suspend @host.  Actual operation is performed by EH.  This
 * function requests EH to perform PM operations and waits for EH
 * to finish.
 *
 * LOCKING:
 * Kernel thread context (may sleep).
 *
 * RETURNS:
 * 0 on success, -errno on failure.
 */
int ata_host_suspend(struct ata_host *host, pm_message_t mesg)
{
	int i, j, rc;

	rc = ata_host_request_pm(host, mesg, 0, ATA_EHI_QUIET, 1);
	if (rc)
		goto fail;

	/* EH is quiescent now.  Fail if we have any ready device.
	 * This happens if hotplug occurs between completion of device
	 * suspension and here.
	 */
	for (i = 0; i < host->n_ports; i++) {
		struct ata_port *ap = host->ports[i];

		for (j = 0; j < ATA_MAX_DEVICES; j++) {
			struct ata_device *dev = &ap->device[j];

			if (ata_dev_ready(dev)) {
				ata_port_printk(ap, KERN_WARNING,
						"suspend failed, device %d "
						"still active\n", dev->devno);
				rc = -EBUSY;
				goto fail;
			}
		}
	}

	host->dev->power.power_state = mesg;
	return 0;

 fail:
	ata_host_resume(host);
	return rc;
}

/**
 * ata_host_resume - resume host
 * @host: host to resume
 *
 * Resume @host.  Actual operation is performed by EH.  This
 * function requests EH to perform PM operations and returns.
 * Note that all resume operations are performed in parallel.
 *
 * LOCKING:
 * Kernel thread context (may sleep).
 */
void ata_host_resume(struct ata_host *host)
{
	ata_host_request_pm(host, PMSG_ON, ATA_EH_SOFTRESET,
			    ATA_EHI_NO_AUTOPSY | ATA_EHI_QUIET, 0);
	host->dev->power.power_state = PMSG_ON;
}

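/*
 * Example (sketch of a hypothetical PCI driver's suspend hook; the
 * stock ata_pci_device_suspend() later in this file implements the
 * same pattern): quiesce the ports via EH first, then power down the
 * bus-level device.  Resume reverses the order.
 *
 *	static int example_lld_suspend(struct pci_dev *pdev, pm_message_t mesg)
 *	{
 *		struct ata_host *host = dev_get_drvdata(&pdev->dev);
 *		int rc;
 *
 *		rc = ata_host_suspend(host, mesg);	// waits for EH
 *		if (rc)
 *			return rc;
 *		// ... bus-specific power-down here ...
 *		return 0;
 *	}
 */
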
/**
 * ata_port_start - Set port up for dma.
 * @ap: Port to initialize
 *
 * Called just after data structures for each port are
 * initialized.  Allocates space for PRD table.
 *
 * May be used as the port_start() entry in ata_port_operations.
 *
 * LOCKING:
 * Inherited from caller.
 */
int ata_port_start (struct ata_port *ap)
{
	struct device *dev = ap->dev;
	int rc;

	ap->prd = dma_alloc_coherent(dev, ATA_PRD_TBL_SZ, &ap->prd_dma, GFP_KERNEL);
	if (!ap->prd)
		return -ENOMEM;

	rc = ata_pad_alloc(ap, dev);
	if (rc) {
		dma_free_coherent(dev, ATA_PRD_TBL_SZ, ap->prd, ap->prd_dma);
		return rc;
	}

	DPRINTK("prd alloc, virt %p, dma %llx\n", ap->prd, (unsigned long long) ap->prd_dma);

	return 0;
}

/**
 * ata_port_stop - Undo ata_port_start()
 * @ap: Port to shut down
 *
 * Frees the PRD table.
 *
 * May be used as the port_stop() entry in ata_port_operations.
 *
 * LOCKING:
 * Inherited from caller.
 */
void ata_port_stop (struct ata_port *ap)
{
	struct device *dev = ap->dev;

	dma_free_coherent(dev, ATA_PRD_TBL_SZ, ap->prd, ap->prd_dma);
	ata_pad_free(ap, dev);
}

void ata_host_stop (struct ata_host *host)
{
	if (host->mmio_base)
		iounmap(host->mmio_base);
}

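/*
 * Illustrative note: the three helpers above are designed to be
 * plugged directly into a LLD's ata_port_operations, e.g.
 *
 *	.port_start	= ata_port_start,
 *	.port_stop	= ata_port_stop,
 *	.host_stop	= ata_host_stop,
 *
 * Drivers that need extra per-port resources typically wrap these
 * helpers rather than replace them.
 */
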
/**
 * ata_dev_init - Initialize an ata_device structure
 * @dev: Device structure to initialize
 *
 * Initialize @dev in preparation for probing.
 *
 * LOCKING:
 * Inherited from caller.
 */
void ata_dev_init(struct ata_device *dev)
{
	struct ata_port *ap = dev->ap;
	unsigned long flags;

	/* SATA spd limit is bound to the first device */
	ap->sata_spd_limit = ap->hw_sata_spd_limit;

	/* High bits of dev->flags are used to record warm plug
	 * requests which occur asynchronously.  Synchronize using
	 * host lock.
	 */
	spin_lock_irqsave(ap->lock, flags);
	dev->flags &= ~ATA_DFLAG_INIT_MASK;
	spin_unlock_irqrestore(ap->lock, flags);

	memset((void *)dev + ATA_DEVICE_CLEAR_OFFSET, 0,
	       sizeof(*dev) - ATA_DEVICE_CLEAR_OFFSET);
	dev->pio_mask = UINT_MAX;
	dev->mwdma_mask = UINT_MAX;
	dev->udma_mask = UINT_MAX;
}

/**
 * ata_port_init - Initialize an ata_port structure
 * @ap: Structure to initialize
 * @host: Collection of hosts to which @ap belongs
 * @ent: Probe information provided by low-level driver
 * @port_no: Port number associated with this ata_port
 *
 * Initialize a new ata_port structure.
 *
 * LOCKING:
 * Inherited from caller.
 */
void ata_port_init(struct ata_port *ap, struct ata_host *host,
		   const struct ata_probe_ent *ent, unsigned int port_no)
{
	unsigned int i;

	ap->lock = &host->lock;
	ap->flags = ATA_FLAG_DISABLED;
	ap->id = ata_unique_id++;
	ap->ctl = ATA_DEVCTL_OBS;
	ap->host = host;
	ap->dev = ent->dev;
	ap->port_no = port_no;
	if (port_no == 1 && ent->pinfo2) {
		ap->pio_mask = ent->pinfo2->pio_mask;
		ap->mwdma_mask = ent->pinfo2->mwdma_mask;
		ap->udma_mask = ent->pinfo2->udma_mask;
		ap->flags |= ent->pinfo2->flags;
		ap->ops = ent->pinfo2->port_ops;
	} else {
		ap->pio_mask = ent->pio_mask;
		ap->mwdma_mask = ent->mwdma_mask;
		ap->udma_mask = ent->udma_mask;
		ap->flags |= ent->port_flags;
		ap->ops = ent->port_ops;
	}
	ap->hw_sata_spd_limit = UINT_MAX;
	ap->active_tag = ATA_TAG_POISON;
	ap->last_ctl = 0xFF;

#if defined(ATA_VERBOSE_DEBUG)
	/* turn on all debugging levels */
	ap->msg_enable = 0x00FF;
#elif defined(ATA_DEBUG)
	ap->msg_enable = ATA_MSG_DRV | ATA_MSG_INFO | ATA_MSG_CTL | ATA_MSG_WARN | ATA_MSG_ERR;
#else
	ap->msg_enable = ATA_MSG_DRV | ATA_MSG_ERR | ATA_MSG_WARN;
#endif

	INIT_WORK(&ap->port_task, NULL, NULL);
	INIT_WORK(&ap->hotplug_task, ata_scsi_hotplug, ap);
	INIT_WORK(&ap->scsi_rescan_task, ata_scsi_dev_rescan, ap);
	INIT_LIST_HEAD(&ap->eh_done_q);
	init_waitqueue_head(&ap->eh_wait_q);

	/* set cable type */
	ap->cbl = ATA_CBL_NONE;
	if (ap->flags & ATA_FLAG_SATA)
		ap->cbl = ATA_CBL_SATA;

	for (i = 0; i < ATA_MAX_DEVICES; i++) {
		struct ata_device *dev = &ap->device[i];
		dev->ap = ap;
		dev->devno = i;
		ata_dev_init(dev);
	}

#ifdef ATA_IRQ_TRAP
	ap->stats.unhandled_irq = 1;
	ap->stats.idle_irq = 1;
#endif

	memcpy(&ap->ioaddr, &ent->port[port_no], sizeof(struct ata_ioports));
}

/**
 * ata_port_init_shost - Initialize SCSI host associated with ATA port
 * @ap: ATA port to initialize SCSI host for
 * @shost: SCSI host associated with @ap
 *
 * Initialize SCSI host @shost associated with ATA port @ap.
 *
 * LOCKING:
 * Inherited from caller.
 */
static void ata_port_init_shost(struct ata_port *ap, struct Scsi_Host *shost)
{
	ap->scsi_host = shost;

	shost->unique_id = ap->id;
	shost->max_id = 16;
	shost->max_lun = 1;
	shost->max_channel = 1;
	shost->max_cmd_len = 12;
}

/**
 * ata_port_add - Attach low-level ATA driver to system
 * @ent: Information provided by low-level driver
 * @host: Collections of ports to which we add
 * @port_no: Port number associated with this host
 *
 * Attach low-level ATA driver to system.
 *
 * LOCKING:
 * PCI/etc. bus probe sem.
 *
 * RETURNS:
 * New ata_port on success, NULL on error.
 */
static struct ata_port * ata_port_add(const struct ata_probe_ent *ent,
				      struct ata_host *host,
				      unsigned int port_no)
{
	struct Scsi_Host *shost;
	struct ata_port *ap;

	DPRINTK("ENTER\n");

	if (!ent->port_ops->error_handler &&
	    !(ent->port_flags & (ATA_FLAG_SATA_RESET | ATA_FLAG_SRST))) {
		printk(KERN_ERR "ata%u: no reset mechanism available\n",
		       port_no);
		return NULL;
	}

	shost = scsi_host_alloc(ent->sht, sizeof(struct ata_port));
	if (!shost)
		return NULL;

	shost->transportt = &ata_scsi_transport_template;

	ap = ata_shost_to_port(shost);

	ata_port_init(ap, host, ent, port_no);
	ata_port_init_shost(ap, shost);

	return ap;
}

/**
 * ata_host_init - Initialize a host struct
 * @host: host to initialize
 * @dev: device host is attached to
 * @flags: host flags
 * @ops: port_ops
 *
 * LOCKING:
 * PCI/etc. bus probe sem.
 */
void ata_host_init(struct ata_host *host, struct device *dev,
		   unsigned long flags, const struct ata_port_operations *ops)
{
	spin_lock_init(&host->lock);
	host->dev = dev;
	host->flags = flags;
	host->ops = ops;
}

/**
 * ata_device_add - Register hardware device with ATA and SCSI layers
 * @ent: Probe information describing hardware device to be registered
 *
 * This function processes the information provided in the probe
 * information struct @ent, allocates the necessary ATA and SCSI
 * host information structures, initializes them, and registers
 * everything with requisite kernel subsystems.
 *
 * This function requests irqs, probes the ATA bus, and probes
 * the SCSI bus, registering devices along the way.
 *
 * LOCKING:
 * PCI/etc. bus probe sem.
 *
 * RETURNS:
 * Number of ports registered.  Zero on error (no ports registered).
 */
int ata_device_add(const struct ata_probe_ent *ent)
{
	unsigned int i;
	struct device *dev = ent->dev;
	struct ata_host *host;
	int rc;

	DPRINTK("ENTER\n");
	/* alloc a container for our list of ATA ports (buses) */
	host = kzalloc(sizeof(struct ata_host) +
		       (ent->n_ports * sizeof(void *)), GFP_KERNEL);
	if (!host)
		return 0;

	ata_host_init(host, dev, ent->_host_flags, ent->port_ops);
	host->n_ports = ent->n_ports;
	host->irq = ent->irq;
	host->irq2 = ent->irq2;
	host->mmio_base = ent->mmio_base;
	host->private_data = ent->private_data;

	/* register each port bound to this device */
	for (i = 0; i < host->n_ports; i++) {
		struct ata_port *ap;
		unsigned long xfer_mode_mask;
		int irq_line = ent->irq;

		ap = ata_port_add(ent, host, i);
		if (!ap)
			goto err_out;

		host->ports[i] = ap;

		/* dummy? */
		if (ent->dummy_port_mask & (1 << i)) {
			ata_port_printk(ap, KERN_INFO, "DUMMY\n");
			ap->ops = &ata_dummy_port_ops;
			continue;
		}

		/* start port */
		rc = ap->ops->port_start(ap);
		if (rc) {
			host->ports[i] = NULL;
			scsi_host_put(ap->scsi_host);
			goto err_out;
		}

		/* Report the secondary IRQ for second channel legacy */
		if (i == 1 && ent->irq2)
			irq_line = ent->irq2;

		xfer_mode_mask = (ap->udma_mask << ATA_SHIFT_UDMA) |
				 (ap->mwdma_mask << ATA_SHIFT_MWDMA) |
				 (ap->pio_mask << ATA_SHIFT_PIO);

		/* print per-port info to dmesg */
		ata_port_printk(ap, KERN_INFO, "%cATA max %s cmd 0x%lX "
				"ctl 0x%lX bmdma 0x%lX irq %d\n",
				ap->flags & ATA_FLAG_SATA ? 'S' : 'P',
				ata_mode_string(xfer_mode_mask),
				ap->ioaddr.cmd_addr,
				ap->ioaddr.ctl_addr,
				ap->ioaddr.bmdma_addr,
				irq_line);

		ata_chk_status(ap);
		host->ops->irq_clear(ap);
		ata_eh_freeze_port(ap);	/* freeze port before requesting IRQ */
	}

	/* obtain irq, that may be shared between channels */
	rc = request_irq(ent->irq, ent->port_ops->irq_handler, ent->irq_flags,
			 DRV_NAME, host);
	if (rc) {
		dev_printk(KERN_ERR, dev, "irq %lu request failed: %d\n",
			   ent->irq, rc);
		goto err_out;
	}

	/* do we have a second IRQ for the other channel, eg legacy mode */
	if (ent->irq2) {
		/* We will get weird core code crashes later if this is true
		   so trap it now */
		BUG_ON(ent->irq == ent->irq2);

		rc = request_irq(ent->irq2, ent->port_ops->irq_handler,
				 ent->irq_flags, DRV_NAME, host);
		if (rc) {
			dev_printk(KERN_ERR, dev, "irq %lu request failed: %d\n",
				   ent->irq2, rc);
			goto err_out_free_irq;
		}
	}

	/* perform each probe synchronously */
	DPRINTK("probe begin\n");
	for (i = 0; i < host->n_ports; i++) {
		struct ata_port *ap = host->ports[i];
		u32 scontrol;
		int rc;

		/* init sata_spd_limit to the current value */
		if (sata_scr_read(ap, SCR_CONTROL, &scontrol) == 0) {
			int spd = (scontrol >> 4) & 0xf;
			ap->hw_sata_spd_limit &= (1 << spd) - 1;
		}
		ap->sata_spd_limit = ap->hw_sata_spd_limit;

		rc = scsi_add_host(ap->scsi_host, dev);
		if (rc) {
			ata_port_printk(ap, KERN_ERR, "scsi_add_host failed\n");
			/* FIXME: do something useful here */
			/* FIXME: handle unconditional calls to
			 * scsi_scan_host and ata_host_remove, below,
			 * at the very least
			 */
		}

		if (ap->ops->error_handler) {
			struct ata_eh_info *ehi = &ap->eh_info;
			unsigned long flags;

			ata_port_probe(ap);

			/* kick EH for boot probing */
			spin_lock_irqsave(ap->lock, flags);

			ehi->probe_mask = (1 << ATA_MAX_DEVICES) - 1;
			ehi->action |= ATA_EH_SOFTRESET;
			ehi->flags |= ATA_EHI_NO_AUTOPSY | ATA_EHI_QUIET;

			ap->pflags |= ATA_PFLAG_LOADING;
			ata_port_schedule_eh(ap);

			spin_unlock_irqrestore(ap->lock, flags);

			/* wait for EH to finish */
			ata_port_wait_eh(ap);
		} else {
			DPRINTK("ata%u: bus probe begin\n", ap->id);
			rc = ata_bus_probe(ap);
			DPRINTK("ata%u: bus probe end\n", ap->id);

			if (rc) {
				/* FIXME: do something useful here?
				 * Current libata behavior will
				 * tear down everything when
				 * the module is removed
				 * or the h/w is unplugged.
				 */
			}
		}
	}

	/* probes are done, now scan each port's disk(s) */
	DPRINTK("host probe begin\n");
	for (i = 0; i < host->n_ports; i++) {
		struct ata_port *ap = host->ports[i];

		ata_scsi_scan_host(ap);
	}

	dev_set_drvdata(dev, host);

	VPRINTK("EXIT, returning %u\n", ent->n_ports);
	return ent->n_ports; /* success */

err_out_free_irq:
	free_irq(ent->irq, host);
err_out:
	for (i = 0; i < host->n_ports; i++) {
		struct ata_port *ap = host->ports[i];
		if (ap) {
			ap->ops->port_stop(ap);
			scsi_host_put(ap->scsi_host);
		}
	}

	kfree(host);
	VPRINTK("EXIT, returning 0\n");
	return 0;
}

/**
 * ata_port_detach - Detach ATA port in preparation of device removal
 * @ap: ATA port to be detached
 *
 * Detach all ATA devices and the associated SCSI devices of @ap;
 * then, remove the associated SCSI host.  @ap is guaranteed to
 * be quiescent on return from this function.
 *
 * LOCKING:
 * Kernel thread context (may sleep).
 */
void ata_port_detach(struct ata_port *ap)
{
	unsigned long flags;
	int i;

	if (!ap->ops->error_handler)
		goto skip_eh;

	/* tell EH we're leaving & flush EH */
	spin_lock_irqsave(ap->lock, flags);
	ap->pflags |= ATA_PFLAG_UNLOADING;
	spin_unlock_irqrestore(ap->lock, flags);

	ata_port_wait_eh(ap);

	/* EH is now guaranteed to see UNLOADING, so no new device
	 * will be attached.  Disable all existing devices.
	 */
	spin_lock_irqsave(ap->lock, flags);

	for (i = 0; i < ATA_MAX_DEVICES; i++)
		ata_dev_disable(&ap->device[i]);

	spin_unlock_irqrestore(ap->lock, flags);

	/* Final freeze & EH.  All in-flight commands are aborted.  EH
	 * will be skipped and retrials will be terminated with bad
	 * target.
	 */
	spin_lock_irqsave(ap->lock, flags);
	ata_port_freeze(ap);	/* won't be thawed */
	spin_unlock_irqrestore(ap->lock, flags);

	ata_port_wait_eh(ap);

	/* Flush hotplug task.  The sequence is similar to
	 * ata_port_flush_task().
	 */
	flush_workqueue(ata_aux_wq);
	cancel_delayed_work(&ap->hotplug_task);
	flush_workqueue(ata_aux_wq);

 skip_eh:
	/* remove the associated SCSI host */
	scsi_remove_host(ap->scsi_host);
}

/**
 * ata_host_remove - PCI layer callback for device removal
 * @host: ATA host set that was removed
 *
 * Unregister all objects associated with this host set.  Free those
 * objects.
 *
 * LOCKING:
 * Inherited from calling layer (may sleep).
 */
void ata_host_remove(struct ata_host *host)
{
	unsigned int i;

	for (i = 0; i < host->n_ports; i++)
		ata_port_detach(host->ports[i]);

	free_irq(host->irq, host);
	if (host->irq2)
		free_irq(host->irq2, host);

	for (i = 0; i < host->n_ports; i++) {
		struct ata_port *ap = host->ports[i];

		ata_scsi_release(ap->scsi_host);

		if ((ap->flags & ATA_FLAG_NO_LEGACY) == 0) {
			struct ata_ioports *ioaddr = &ap->ioaddr;

			/* FIXME: Add -ac IDE pci mods to remove these special cases */
			if (ioaddr->cmd_addr == ATA_PRIMARY_CMD)
				release_region(ATA_PRIMARY_CMD, 8);
			else if (ioaddr->cmd_addr == ATA_SECONDARY_CMD)
				release_region(ATA_SECONDARY_CMD, 8);
		}

		scsi_host_put(ap->scsi_host);
	}

	if (host->ops->host_stop)
		host->ops->host_stop(host);

	kfree(host);
}

/**
 * ata_scsi_release - SCSI layer callback hook for host unload
 * @shost: libata host to be unloaded
 *
 * Performs all duties necessary to shut down a libata port:
 * kill port kthread, disable port, and release resources.
 *
 * LOCKING:
 * Inherited from SCSI layer.
 *
 * RETURNS:
 * 1.
 */
int ata_scsi_release(struct Scsi_Host *shost)
{
	struct ata_port *ap = ata_shost_to_port(shost);

	DPRINTK("ENTER\n");

	ap->ops->port_disable(ap);
	ap->ops->port_stop(ap);

	DPRINTK("EXIT\n");
	return 1;
}

struct ata_probe_ent *
ata_probe_ent_alloc(struct device *dev, const struct ata_port_info *port)
{
	struct ata_probe_ent *probe_ent;

	probe_ent = kzalloc(sizeof(*probe_ent), GFP_KERNEL);
	if (!probe_ent) {
		printk(KERN_ERR DRV_NAME "(%s): out of memory\n",
		       kobject_name(&(dev->kobj)));
		return NULL;
	}

	INIT_LIST_HEAD(&probe_ent->node);
	probe_ent->dev = dev;

	probe_ent->sht = port->sht;
	probe_ent->port_flags = port->flags;
	probe_ent->pio_mask = port->pio_mask;
	probe_ent->mwdma_mask = port->mwdma_mask;
	probe_ent->udma_mask = port->udma_mask;
	probe_ent->port_ops = port->port_ops;

	return probe_ent;
}

/**
 * ata_std_ports - initialize ioaddr with standard port offsets.
 * @ioaddr: IO address structure to be initialized
 *
 * Utility function which initializes data_addr, error_addr,
 * feature_addr, nsect_addr, lbal_addr, lbam_addr, lbah_addr,
 * device_addr, status_addr, and command_addr to standard offsets
 * relative to cmd_addr.
 *
 * Does not set ctl_addr, altstatus_addr, bmdma_addr, or scr_addr.
 */
void ata_std_ports(struct ata_ioports *ioaddr)
{
	ioaddr->data_addr = ioaddr->cmd_addr + ATA_REG_DATA;
	ioaddr->error_addr = ioaddr->cmd_addr + ATA_REG_ERR;
	ioaddr->feature_addr = ioaddr->cmd_addr + ATA_REG_FEATURE;
	ioaddr->nsect_addr = ioaddr->cmd_addr + ATA_REG_NSECT;
	ioaddr->lbal_addr = ioaddr->cmd_addr + ATA_REG_LBAL;
	ioaddr->lbam_addr = ioaddr->cmd_addr + ATA_REG_LBAM;
	ioaddr->lbah_addr = ioaddr->cmd_addr + ATA_REG_LBAH;
	ioaddr->device_addr = ioaddr->cmd_addr + ATA_REG_DEVICE;
	ioaddr->status_addr = ioaddr->cmd_addr + ATA_REG_STATUS;
	ioaddr->command_addr = ioaddr->cmd_addr + ATA_REG_CMD;
}

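/*
 * Example (sketch of hypothetical bus glue; example_port_info,
 * example_irq and the base addresses are made-up names): the typical
 * registration flow ties ata_probe_ent_alloc(), ata_std_ports() and
 * ata_device_add() together.
 *
 *	probe_ent = ata_probe_ent_alloc(dev, &example_port_info);
 *	if (!probe_ent)
 *		return -ENOMEM;
 *
 *	probe_ent->n_ports = 1;
 *	probe_ent->irq = example_irq;
 *	probe_ent->port[0].cmd_addr = example_cmd_base;
 *	probe_ent->port[0].ctl_addr = example_ctl_base;
 *	ata_std_ports(&probe_ent->port[0]);
 *
 *	if (!ata_device_add(probe_ent))		// returns # ports, 0 == error
 *		return -ENODEV;
 *	kfree(probe_ent);
 */
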
#ifdef CONFIG_PCI

void ata_pci_host_stop (struct ata_host *host)
{
	struct pci_dev *pdev = to_pci_dev(host->dev);

	pci_iounmap(pdev, host->mmio_base);
}

/**
 * ata_pci_remove_one - PCI layer callback for device removal
 * @pdev: PCI device that was removed
 *
 * PCI layer indicates to libata via this hook that
 * hot-unplug or module unload event has occurred.
 * Handle this by unregistering all objects associated
 * with this PCI device.  Free those objects.  Then finally
 * release PCI resources and disable device.
 *
 * LOCKING:
 * Inherited from PCI layer (may sleep).
 */
void ata_pci_remove_one (struct pci_dev *pdev)
{
	struct device *dev = pci_dev_to_dev(pdev);
	struct ata_host *host = dev_get_drvdata(dev);

	ata_host_remove(host);

	pci_release_regions(pdev);
	pci_disable_device(pdev);
	dev_set_drvdata(dev, NULL);
}

/* move to PCI subsystem */
int pci_test_config_bits(struct pci_dev *pdev, const struct pci_bits *bits)
{
	unsigned long tmp = 0;

	switch (bits->width) {
	case 1: {
		u8 tmp8 = 0;
		pci_read_config_byte(pdev, bits->reg, &tmp8);
		tmp = tmp8;
		break;
	}
	case 2: {
		u16 tmp16 = 0;
		pci_read_config_word(pdev, bits->reg, &tmp16);
		tmp = tmp16;
		break;
	}
	case 4: {
		u32 tmp32 = 0;
		pci_read_config_dword(pdev, bits->reg, &tmp32);
		tmp = tmp32;
		break;
	}
	default:
		return -EINVAL;
	}

	tmp &= bits->mask;

	return (tmp == bits->val) ? 1 : 0;
}

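/*
 * Example (illustrative; the config offsets and bit values are
 * hypothetical): legacy IDE-class drivers use pci_test_config_bits()
 * with a table of "channel enable" bits before touching a port.
 *
 *	static const struct pci_bits example_enable_bits[] = {
 *		{ 0x41, 1, 0x80, 0x80 },	// primary:   byte 0x41 bit 7
 *		{ 0x43, 1, 0x80, 0x80 },	// secondary: byte 0x43 bit 7
 *	};
 *
 *	if (!pci_test_config_bits(pdev, &example_enable_bits[channel]))
 *		return -ENOENT;			// channel disabled in hw
 */
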
void ata_pci_device_do_suspend(struct pci_dev *pdev, pm_message_t mesg)
{
	pci_save_state(pdev);

	if (mesg.event == PM_EVENT_SUSPEND) {
		pci_disable_device(pdev);
		pci_set_power_state(pdev, PCI_D3hot);
	}
}

void ata_pci_device_do_resume(struct pci_dev *pdev)
{
	pci_set_power_state(pdev, PCI_D0);
	pci_restore_state(pdev);
	pci_enable_device(pdev);
	pci_set_master(pdev);
}

int ata_pci_device_suspend(struct pci_dev *pdev, pm_message_t mesg)
{
	struct ata_host *host = dev_get_drvdata(&pdev->dev);
	int rc;

	rc = ata_host_suspend(host, mesg);
	if (rc)
		return rc;

	ata_pci_device_do_suspend(pdev, mesg);

	return 0;
}

int ata_pci_device_resume(struct pci_dev *pdev)
{
	struct ata_host *host = dev_get_drvdata(&pdev->dev);

	ata_pci_device_do_resume(pdev);
	ata_host_resume(host);

	return 0;
}

#endif /* CONFIG_PCI */

static int __init ata_init(void)
{
	ata_probe_timeout *= HZ;
	ata_wq = create_workqueue("ata");
	if (!ata_wq)
		return -ENOMEM;

	ata_aux_wq = create_singlethread_workqueue("ata_aux");
	if (!ata_aux_wq) {
		destroy_workqueue(ata_wq);
		return -ENOMEM;
	}

	printk(KERN_DEBUG "libata version " DRV_VERSION " loaded.\n");
	return 0;
}

static void __exit ata_exit(void)
{
	destroy_workqueue(ata_wq);
	destroy_workqueue(ata_aux_wq);
}

module_init(ata_init);
module_exit(ata_exit);

static unsigned long ratelimit_time;
static DEFINE_SPINLOCK(ata_ratelimit_lock);

int ata_ratelimit(void)
{
	int rc;
	unsigned long flags;

	spin_lock_irqsave(&ata_ratelimit_lock, flags);

	if (time_after(jiffies, ratelimit_time)) {
		rc = 1;
		ratelimit_time = jiffies + (HZ/5);
	} else
		rc = 0;

	spin_unlock_irqrestore(&ata_ratelimit_lock, flags);

	return rc;
}

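/*
 * Example (illustrative): ata_ratelimit() caps noisy diagnostics,
 * typically emitted from interrupt context, to about five messages
 * per second:
 *
 *	if (ata_ratelimit())
 *		ata_port_printk(ap, KERN_WARNING,
 *				"spurious interrupt, status 0x%x\n", status);
 */
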
/**
 * ata_wait_register - wait until register value changes
 * @reg: IO-mapped register
 * @mask: Mask to apply to read register value
 * @val: Wait condition
 * @interval_msec: polling interval in milliseconds
 * @timeout_msec: timeout in milliseconds
 *
 * Waiting for some bits of register to change is a common
 * operation for ATA controllers.  This function reads 32bit LE
 * IO-mapped register @reg and tests for the following condition.
 *
 * (*@reg & mask) != val
 *
 * If the condition is met, it returns; otherwise, the process is
 * repeated after @interval_msec until timeout.
 *
 * LOCKING:
 * Kernel thread context (may sleep)
 *
 * RETURNS:
 * The final register value.
 */
u32 ata_wait_register(void __iomem *reg, u32 mask, u32 val,
		      unsigned long interval_msec,
		      unsigned long timeout_msec)
{
	unsigned long timeout;
	u32 tmp;

	tmp = ioread32(reg);

	/* Calculate timeout _after_ the first read to make sure
	 * preceding writes reach the controller before starting to
	 * eat away the timeout.
	 */
	timeout = jiffies + (timeout_msec * HZ) / 1000;

	while ((tmp & mask) == val && time_before(jiffies, timeout)) {
		msleep(interval_msec);
		tmp = ioread32(reg);
	}

	return tmp;
}

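/*
 * Example (sketch; EXAMPLE_EN and the 0x10 offset are hypothetical):
 * waiting for a controller to acknowledge an "engine off" request,
 * i.e. polling until the bit clears or the timeout expires.
 *
 *	tmp = ata_wait_register(mmio + 0x10, EXAMPLE_EN, EXAMPLE_EN,
 *				25, 500);	// 25ms polls, 500ms cap
 *	if (tmp & EXAMPLE_EN)
 *		return -EBUSY;			// timed out, bit still set
 */
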
/*
 * Dummy port_ops
 */
static void ata_dummy_noret(struct ata_port *ap) { }
static int ata_dummy_ret0(struct ata_port *ap) { return 0; }
static void ata_dummy_qc_noret(struct ata_queued_cmd *qc) { }

static u8 ata_dummy_check_status(struct ata_port *ap)
{
	return ATA_DRDY;	/* always ready, never busy */
}

static unsigned int ata_dummy_qc_issue(struct ata_queued_cmd *qc)
{
	return AC_ERR_SYSTEM;
}

const struct ata_port_operations ata_dummy_port_ops = {
	.port_disable		= ata_port_disable,
	.check_status		= ata_dummy_check_status,
	.check_altstatus	= ata_dummy_check_status,
	.dev_select		= ata_noop_dev_select,
	.qc_prep		= ata_noop_qc_prep,
	.qc_issue		= ata_dummy_qc_issue,
	.freeze			= ata_dummy_noret,
	.thaw			= ata_dummy_noret,
	.error_handler		= ata_dummy_noret,
	.post_internal_cmd	= ata_dummy_qc_noret,
	.irq_clear		= ata_dummy_noret,
	.port_start		= ata_dummy_ret0,
	.port_stop		= ata_dummy_noret,
};

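/*
 * Illustrative note: a LLD exposes a dummy (absent) port by setting
 * the matching bit in ata_probe_ent::dummy_port_mask; ata_device_add()
 * then points that port at ata_dummy_port_ops above, so it registers
 * cleanly but fails every command with AC_ERR_SYSTEM.
 *
 *	probe_ent->n_ports = 2;
 *	probe_ent->dummy_port_mask = 1 << 1;	// second port not wired up
 */
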
/*
 * libata is essentially a library of internal helper functions for
 * low-level ATA host controller drivers.  As such, the API/ABI is
 * likely to change as new drivers are added and updated.
 * Do not depend on ABI/API stability.
 */

EXPORT_SYMBOL_GPL(sata_deb_timing_normal);
EXPORT_SYMBOL_GPL(sata_deb_timing_hotplug);
EXPORT_SYMBOL_GPL(sata_deb_timing_long);
EXPORT_SYMBOL_GPL(ata_dummy_port_ops);
EXPORT_SYMBOL_GPL(ata_std_bios_param);
EXPORT_SYMBOL_GPL(ata_std_ports);
EXPORT_SYMBOL_GPL(ata_host_init);
EXPORT_SYMBOL_GPL(ata_device_add);
EXPORT_SYMBOL_GPL(ata_port_detach);
EXPORT_SYMBOL_GPL(ata_host_remove);
EXPORT_SYMBOL_GPL(ata_sg_init);
EXPORT_SYMBOL_GPL(ata_sg_init_one);
EXPORT_SYMBOL_GPL(ata_hsm_move);
EXPORT_SYMBOL_GPL(ata_qc_complete);
EXPORT_SYMBOL_GPL(ata_qc_complete_multiple);
EXPORT_SYMBOL_GPL(ata_qc_issue_prot);
EXPORT_SYMBOL_GPL(ata_tf_load);
EXPORT_SYMBOL_GPL(ata_tf_read);
EXPORT_SYMBOL_GPL(ata_noop_dev_select);
EXPORT_SYMBOL_GPL(ata_std_dev_select);
EXPORT_SYMBOL_GPL(ata_tf_to_fis);
EXPORT_SYMBOL_GPL(ata_tf_from_fis);
EXPORT_SYMBOL_GPL(ata_check_status);
EXPORT_SYMBOL_GPL(ata_altstatus);
EXPORT_SYMBOL_GPL(ata_exec_command);
EXPORT_SYMBOL_GPL(ata_port_start);
EXPORT_SYMBOL_GPL(ata_port_stop);
EXPORT_SYMBOL_GPL(ata_host_stop);
EXPORT_SYMBOL_GPL(ata_interrupt);
EXPORT_SYMBOL_GPL(ata_mmio_data_xfer);
EXPORT_SYMBOL_GPL(ata_pio_data_xfer);
EXPORT_SYMBOL_GPL(ata_pio_data_xfer_noirq);
EXPORT_SYMBOL_GPL(ata_qc_prep);
EXPORT_SYMBOL_GPL(ata_noop_qc_prep);
EXPORT_SYMBOL_GPL(ata_bmdma_setup);
EXPORT_SYMBOL_GPL(ata_bmdma_start);
EXPORT_SYMBOL_GPL(ata_bmdma_irq_clear);
EXPORT_SYMBOL_GPL(ata_bmdma_status);
EXPORT_SYMBOL_GPL(ata_bmdma_stop);
EXPORT_SYMBOL_GPL(ata_bmdma_freeze);
EXPORT_SYMBOL_GPL(ata_bmdma_thaw);
EXPORT_SYMBOL_GPL(ata_bmdma_drive_eh);
EXPORT_SYMBOL_GPL(ata_bmdma_error_handler);
EXPORT_SYMBOL_GPL(ata_bmdma_post_internal_cmd);
EXPORT_SYMBOL_GPL(ata_port_probe);
EXPORT_SYMBOL_GPL(sata_set_spd);
EXPORT_SYMBOL_GPL(sata_phy_debounce);
EXPORT_SYMBOL_GPL(sata_phy_resume);
EXPORT_SYMBOL_GPL(sata_phy_reset);
EXPORT_SYMBOL_GPL(__sata_phy_reset);
EXPORT_SYMBOL_GPL(ata_bus_reset);
EXPORT_SYMBOL_GPL(ata_std_prereset);
EXPORT_SYMBOL_GPL(ata_std_softreset);
EXPORT_SYMBOL_GPL(sata_std_hardreset);
EXPORT_SYMBOL_GPL(ata_std_postreset);
EXPORT_SYMBOL_GPL(ata_dev_revalidate);
EXPORT_SYMBOL_GPL(ata_dev_classify);
EXPORT_SYMBOL_GPL(ata_dev_pair);
EXPORT_SYMBOL_GPL(ata_port_disable);
EXPORT_SYMBOL_GPL(ata_ratelimit);
EXPORT_SYMBOL_GPL(ata_wait_register);
EXPORT_SYMBOL_GPL(ata_busy_sleep);
EXPORT_SYMBOL_GPL(ata_port_queue_task);
EXPORT_SYMBOL_GPL(ata_scsi_ioctl);
EXPORT_SYMBOL_GPL(ata_scsi_queuecmd);
EXPORT_SYMBOL_GPL(ata_scsi_slave_config);
EXPORT_SYMBOL_GPL(ata_scsi_slave_destroy);
EXPORT_SYMBOL_GPL(ata_scsi_change_queue_depth);
EXPORT_SYMBOL_GPL(ata_scsi_release);
EXPORT_SYMBOL_GPL(ata_host_intr);
EXPORT_SYMBOL_GPL(sata_scr_valid);
EXPORT_SYMBOL_GPL(sata_scr_read);
EXPORT_SYMBOL_GPL(sata_scr_write);
EXPORT_SYMBOL_GPL(sata_scr_write_flush);
EXPORT_SYMBOL_GPL(ata_port_online);
EXPORT_SYMBOL_GPL(ata_port_offline);
EXPORT_SYMBOL_GPL(ata_host_suspend);
EXPORT_SYMBOL_GPL(ata_host_resume);
EXPORT_SYMBOL_GPL(ata_id_string);
EXPORT_SYMBOL_GPL(ata_id_c_string);
EXPORT_SYMBOL_GPL(ata_scsi_simulate);

EXPORT_SYMBOL_GPL(ata_pio_need_iordy);
EXPORT_SYMBOL_GPL(ata_timing_compute);
EXPORT_SYMBOL_GPL(ata_timing_merge);

#ifdef CONFIG_PCI
EXPORT_SYMBOL_GPL(pci_test_config_bits);
EXPORT_SYMBOL_GPL(ata_pci_host_stop);
EXPORT_SYMBOL_GPL(ata_pci_init_native_mode);
EXPORT_SYMBOL_GPL(ata_pci_init_one);
EXPORT_SYMBOL_GPL(ata_pci_remove_one);
EXPORT_SYMBOL_GPL(ata_pci_device_do_suspend);
EXPORT_SYMBOL_GPL(ata_pci_device_do_resume);
EXPORT_SYMBOL_GPL(ata_pci_device_suspend);
EXPORT_SYMBOL_GPL(ata_pci_device_resume);
EXPORT_SYMBOL_GPL(ata_pci_default_filter);
EXPORT_SYMBOL_GPL(ata_pci_clear_simplex);
#endif /* CONFIG_PCI */

EXPORT_SYMBOL_GPL(ata_scsi_device_suspend);
EXPORT_SYMBOL_GPL(ata_scsi_device_resume);

EXPORT_SYMBOL_GPL(ata_eng_timeout);
EXPORT_SYMBOL_GPL(ata_port_schedule_eh);
EXPORT_SYMBOL_GPL(ata_port_abort);
EXPORT_SYMBOL_GPL(ata_port_freeze);
EXPORT_SYMBOL_GPL(ata_eh_freeze_port);
EXPORT_SYMBOL_GPL(ata_eh_thaw_port);
EXPORT_SYMBOL_GPL(ata_eh_qc_complete);
EXPORT_SYMBOL_GPL(ata_eh_qc_retry);
EXPORT_SYMBOL_GPL(ata_do_eh);