2 * libata-core.c - helper library for ATA
4 * Maintained by: Jeff Garzik <jgarzik@pobox.com>
5 * Please ALWAYS copy linux-ide@vger.kernel.org
8 * Copyright 2003-2004 Red Hat, Inc. All rights reserved.
9 * Copyright 2003-2004 Jeff Garzik
12 * This program is free software; you can redistribute it and/or modify
13 * it under the terms of the GNU General Public License as published by
14 * the Free Software Foundation; either version 2, or (at your option)
17 * This program is distributed in the hope that it will be useful,
18 * but WITHOUT ANY WARRANTY; without even the implied warranty of
19 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
20 * GNU General Public License for more details.
22 * You should have received a copy of the GNU General Public License
23 * along with this program; see the file COPYING. If not, write to
24 * the Free Software Foundation, 675 Mass Ave, Cambridge, MA 02139, USA.
27 * libata documentation is available via 'make {ps|pdf}docs',
28 * as Documentation/DocBook/libata.*
30 * Hardware documentation available from http://www.t13.org/ and
31 * http://www.sata-io.org/
35 #include <linux/kernel.h>
36 #include <linux/module.h>
37 #include <linux/pci.h>
38 #include <linux/init.h>
39 #include <linux/list.h>
41 #include <linux/highmem.h>
42 #include <linux/spinlock.h>
43 #include <linux/blkdev.h>
44 #include <linux/delay.h>
45 #include <linux/timer.h>
46 #include <linux/interrupt.h>
47 #include <linux/completion.h>
48 #include <linux/suspend.h>
49 #include <linux/workqueue.h>
50 #include <linux/jiffies.h>
51 #include <linux/scatterlist.h>
52 #include <scsi/scsi.h>
53 #include "scsi_priv.h"
54 #include <scsi/scsi_cmnd.h>
55 #include <scsi/scsi_host.h>
56 #include <linux/libata.h>
58 #include <asm/semaphore.h>
59 #include <asm/byteorder.h>
63 /* debounce timing parameters in msecs { interval, duration, timeout } */
64 const unsigned long sata_deb_timing_boot[] = { 5, 100, 2000 };
65 const unsigned long sata_deb_timing_eh[] = { 25, 500, 2000 };
66 const unsigned long sata_deb_timing_before_fsrst[] = { 100, 2000, 5000 };
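/* Illustrative reading of the table above (comment added for clarity):
 * with sata_deb_timing_boot ({ 5, 100, 2000 }), sata_phy_debounce()
 * samples SStatus every 5 ms, requires the value to hold steady for
 * 100 ms, and gives up after 2000 ms.  See sata_phy_debounce() below
 * for the exact semantics.
 */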
68 static unsigned int ata_dev_init_params(struct ata_device *dev,
69 u16 heads, u16 sectors);
70 static unsigned int ata_dev_set_xfermode(struct ata_device *dev);
71 static void ata_dev_xfermask(struct ata_device *dev);
73 static unsigned int ata_unique_id = 1;
74 static struct workqueue_struct *ata_wq;
76 struct workqueue_struct *ata_aux_wq;
78 int atapi_enabled = 1;
79 module_param(atapi_enabled, int, 0444);
80 MODULE_PARM_DESC(atapi_enabled, "Enable discovery of ATAPI devices (0=off, 1=on)");
83 module_param(atapi_dmadir, int, 0444);
84 MODULE_PARM_DESC(atapi_dmadir, "Enable ATAPI DMADIR bridge support (0=off, 1=on)");
87 module_param_named(fua, libata_fua, int, 0444);
88 MODULE_PARM_DESC(fua, "FUA support (0=off, 1=on)");
90 static int ata_probe_timeout = ATA_TMOUT_INTERNAL / HZ;
91 module_param(ata_probe_timeout, int, 0444);
92 MODULE_PARM_DESC(ata_probe_timeout, "Set ATA probing timeout (seconds)");
94 MODULE_AUTHOR("Jeff Garzik");
95 MODULE_DESCRIPTION("Library module for ATA devices");
96 MODULE_LICENSE("GPL");
97 MODULE_VERSION(DRV_VERSION);
101 * ata_tf_to_fis - Convert ATA taskfile to SATA FIS structure
102 * @tf: Taskfile to convert
103 * @fis: Buffer into which data will be output
104 * @pmp: Port multiplier port
106 * Converts a standard ATA taskfile to a Serial ATA
107 * FIS structure (Register - Host to Device).
110 * Inherited from caller.
113 void ata_tf_to_fis(const struct ata_taskfile *tf, u8 *fis, u8 pmp)
115 fis[0] = 0x27; /* Register - Host to Device FIS */
116 fis[1] = (pmp & 0xf) | (1 << 7); /* Port multiplier number,
117 bit 7 indicates Command FIS */
118 fis[2] = tf->command;
119 fis[3] = tf->feature;
126 fis[8] = tf->hob_lbal;
127 fis[9] = tf->hob_lbam;
128 fis[10] = tf->hob_lbah;
129 fis[11] = tf->hob_feature;
132 fis[13] = tf->hob_nsect;
143 * ata_tf_from_fis - Convert SATA FIS to ATA taskfile
144 * @fis: Buffer from which data will be input
145 * @tf: Taskfile to output
147 * Converts a serial ATA FIS structure to a standard ATA taskfile.
150 * Inherited from caller.
153 void ata_tf_from_fis(const u8 *fis, struct ata_taskfile *tf)
155 tf->command = fis[2]; /* status */
156 tf->feature = fis[3]; /* error */
163 tf->hob_lbal = fis[8];
164 tf->hob_lbam = fis[9];
165 tf->hob_lbah = fis[10];
168 tf->hob_nsect = fis[13];
171 static const u8 ata_rw_cmds[] = {
175 ATA_CMD_READ_MULTI_EXT,
176 ATA_CMD_WRITE_MULTI_EXT,
180 ATA_CMD_WRITE_MULTI_FUA_EXT,
184 ATA_CMD_PIO_READ_EXT,
185 ATA_CMD_PIO_WRITE_EXT,
198 ATA_CMD_WRITE_FUA_EXT
202 * ata_rwcmd_protocol - set taskfile r/w commands and protocol
203 * @qc: command to examine and configure
205 * Examine the device configuration and tf->flags to calculate
206 * the proper read/write commands and protocol to use.
211 int ata_rwcmd_protocol(struct ata_queued_cmd *qc)
213 struct ata_taskfile *tf = &qc->tf;
214 struct ata_device *dev = qc->dev;
217 int index, fua, lba48, write;
219 fua = (tf->flags & ATA_TFLAG_FUA) ? 4 : 0;
220 lba48 = (tf->flags & ATA_TFLAG_LBA48) ? 2 : 0;
221 write = (tf->flags & ATA_TFLAG_WRITE) ? 1 : 0;
223 if (dev->flags & ATA_DFLAG_PIO) {
224 tf->protocol = ATA_PROT_PIO;
225 index = dev->multi_count ? 0 : 8;
226 } else if (lba48 && (qc->ap->flags & ATA_FLAG_PIO_LBA48)) {
227 /* Unable to use DMA due to host limitation */
228 tf->protocol = ATA_PROT_PIO;
229 index = dev->multi_count ? 0 : 8;
231 tf->protocol = ATA_PROT_DMA;
235 cmd = ata_rw_cmds[index + fua + lba48 + write];
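/* Illustrative example (not from the original source): assuming the DMA
 * branch above sets index to 16 as in the upstream code, an LBA48 DMA
 * write picks ata_rw_cmds[16 + 0 + 2 + 1], i.e. ATA_CMD_WRITE_EXT,
 * while forcing PIO on a non-multi device would select from the block
 * starting at index 8 instead.
 */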
244 * ata_pack_xfermask - Pack pio, mwdma and udma masks into xfer_mask
245 * @pio_mask: pio_mask
246 * @mwdma_mask: mwdma_mask
247 * @udma_mask: udma_mask
249 * Pack @pio_mask, @mwdma_mask and @udma_mask into a single
250 * unsigned int xfer_mask.
258 static unsigned int ata_pack_xfermask(unsigned int pio_mask,
259 unsigned int mwdma_mask,
260 unsigned int udma_mask)
262 return ((pio_mask << ATA_SHIFT_PIO) & ATA_MASK_PIO) |
263 ((mwdma_mask << ATA_SHIFT_MWDMA) & ATA_MASK_MWDMA) |
264 ((udma_mask << ATA_SHIFT_UDMA) & ATA_MASK_UDMA);
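/* Quick illustration (added comment): packing pio_mask=0x1f,
 * mwdma_mask=0x07 and udma_mask=0x3f places each class in its own
 * ATA_SHIFT_* bit range of one word, so a single xfer_mask can describe
 * PIO0-4, MWDMA0-2 and UDMA0-5 at once; ata_unpack_xfermask() below is
 * the inverse operation.
 */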
268 * ata_unpack_xfermask - Unpack xfer_mask into pio, mwdma and udma masks
269 * @xfer_mask: xfer_mask to unpack
270 * @pio_mask: resulting pio_mask
271 * @mwdma_mask: resulting mwdma_mask
272 * @udma_mask: resulting udma_mask
274 * Unpack @xfer_mask into @pio_mask, @mwdma_mask and @udma_mask.
275 * Any NULL destination masks will be ignored.
277 static void ata_unpack_xfermask(unsigned int xfer_mask,
278 unsigned int *pio_mask,
279 unsigned int *mwdma_mask,
280 unsigned int *udma_mask)
283 *pio_mask = (xfer_mask & ATA_MASK_PIO) >> ATA_SHIFT_PIO;
285 *mwdma_mask = (xfer_mask & ATA_MASK_MWDMA) >> ATA_SHIFT_MWDMA;
287 *udma_mask = (xfer_mask & ATA_MASK_UDMA) >> ATA_SHIFT_UDMA;
290 static const struct ata_xfer_ent {
294 { ATA_SHIFT_PIO, ATA_BITS_PIO, XFER_PIO_0 },
295 { ATA_SHIFT_MWDMA, ATA_BITS_MWDMA, XFER_MW_DMA_0 },
296 { ATA_SHIFT_UDMA, ATA_BITS_UDMA, XFER_UDMA_0 },
301 * ata_xfer_mask2mode - Find matching XFER_* for the given xfer_mask
302 * @xfer_mask: xfer_mask of interest
304 * Return matching XFER_* value for @xfer_mask. Only the highest
305 * bit of @xfer_mask is considered.
311 * Matching XFER_* value, 0 if no match found.
313 static u8 ata_xfer_mask2mode(unsigned int xfer_mask)
315 int highbit = fls(xfer_mask) - 1;
316 const struct ata_xfer_ent *ent;
318 for (ent = ata_xfer_tbl; ent->shift >= 0; ent++)
319 if (highbit >= ent->shift && highbit < ent->shift + ent->bits)
320 return ent->base + highbit - ent->shift;
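/* Example (illustrative): if the highest bit set in @xfer_mask is the
 * MWDMA2 bit, the table walk above returns XFER_MW_DMA_0 + 2, i.e.
 * XFER_MW_DMA_2.
 */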
325 * ata_xfer_mode2mask - Find matching xfer_mask for XFER_*
326 * @xfer_mode: XFER_* of interest
328 * Return matching xfer_mask for @xfer_mode.
334 * Matching xfer_mask, 0 if no match found.
336 static unsigned int ata_xfer_mode2mask(u8 xfer_mode)
338 const struct ata_xfer_ent *ent;
340 for (ent = ata_xfer_tbl; ent->shift >= 0; ent++)
341 if (xfer_mode >= ent->base && xfer_mode < ent->base + ent->bits)
342 return 1 << (ent->shift + xfer_mode - ent->base);
347 * ata_xfer_mode2shift - Find matching xfer_shift for XFER_*
348 * @xfer_mode: XFER_* of interest
350 * Return matching xfer_shift for @xfer_mode.
356 * Matching xfer_shift, -1 if no match found.
358 static int ata_xfer_mode2shift(unsigned int xfer_mode)
360 const struct ata_xfer_ent *ent;
362 for (ent = ata_xfer_tbl; ent->shift >= 0; ent++)
363 if (xfer_mode >= ent->base && xfer_mode < ent->base + ent->bits)
369 * ata_mode_string - convert xfer_mask to string
370 * @xfer_mask: mask of bits supported; only highest bit counts.
372 * Determine string which represents the highest speed
373 * (highest bit in @xfer_mask).
379 * Constant C string representing highest speed listed in
380 * @xfer_mask, or the constant C string "<n/a>".
382 static const char *ata_mode_string(unsigned int xfer_mask)
384 static const char * const xfer_mode_str[] = {
404 highbit = fls(xfer_mask) - 1;
405 if (highbit >= 0 && highbit < ARRAY_SIZE(xfer_mode_str))
406 return xfer_mode_str[highbit];
410 static const char *sata_spd_string(unsigned int spd)
412 static const char * const spd_str[] = {
417 if (spd == 0 || (spd - 1) >= ARRAY_SIZE(spd_str))
419 return spd_str[spd - 1];
422 void ata_dev_disable(struct ata_device *dev)
424 if (ata_dev_enabled(dev) && ata_msg_drv(dev->ap)) {
425 ata_dev_printk(dev, KERN_WARNING, "disabled\n");
431 * ata_pio_devchk - PATA device presence detection
432 * @ap: ATA channel to examine
433 * @device: Device to examine (starting at zero)
435 * This technique was originally described in
436 * Hale Landis's ATADRVR (www.ata-atapi.com), and
437 * later found its way into the ATA/ATAPI spec.
439 * Write a pattern to the ATA shadow registers,
440 * and if a device is present, it will respond by
441 * correctly storing and echoing back the
442 * ATA shadow register contents.
448 static unsigned int ata_pio_devchk(struct ata_port *ap,
451 struct ata_ioports *ioaddr = &ap->ioaddr;
454 ap->ops->dev_select(ap, device);
456 outb(0x55, ioaddr->nsect_addr);
457 outb(0xaa, ioaddr->lbal_addr);
459 outb(0xaa, ioaddr->nsect_addr);
460 outb(0x55, ioaddr->lbal_addr);
462 outb(0x55, ioaddr->nsect_addr);
463 outb(0xaa, ioaddr->lbal_addr);
465 nsect = inb(ioaddr->nsect_addr);
466 lbal = inb(ioaddr->lbal_addr);
468 if ((nsect == 0x55) && (lbal == 0xaa))
469 return 1; /* we found a device */
471 return 0; /* nothing found */
475 * ata_mmio_devchk - PATA device presence detection
476 * @ap: ATA channel to examine
477 * @device: Device to examine (starting at zero)
479 * This technique was originally described in
480 * Hale Landis's ATADRVR (www.ata-atapi.com), and
481 * later found its way into the ATA/ATAPI spec.
483 * Write a pattern to the ATA shadow registers,
484 * and if a device is present, it will respond by
485 * correctly storing and echoing back the
486 * ATA shadow register contents.
492 static unsigned int ata_mmio_devchk(struct ata_port *ap,
495 struct ata_ioports *ioaddr = &ap->ioaddr;
498 ap->ops->dev_select(ap, device);
500 writeb(0x55, (void __iomem *) ioaddr->nsect_addr);
501 writeb(0xaa, (void __iomem *) ioaddr->lbal_addr);
503 writeb(0xaa, (void __iomem *) ioaddr->nsect_addr);
504 writeb(0x55, (void __iomem *) ioaddr->lbal_addr);
506 writeb(0x55, (void __iomem *) ioaddr->nsect_addr);
507 writeb(0xaa, (void __iomem *) ioaddr->lbal_addr);
509 nsect = readb((void __iomem *) ioaddr->nsect_addr);
510 lbal = readb((void __iomem *) ioaddr->lbal_addr);
512 if ((nsect == 0x55) && (lbal == 0xaa))
513 return 1; /* we found a device */
515 return 0; /* nothing found */
519 * ata_devchk - PATA device presence detection
520 * @ap: ATA channel to examine
521 * @device: Device to examine (starting at zero)
523 * Dispatch ATA device presence detection, depending
524 * on whether we are using PIO or MMIO to talk to the
525 * ATA shadow registers.
531 static unsigned int ata_devchk(struct ata_port *ap,
534 if (ap->flags & ATA_FLAG_MMIO)
535 return ata_mmio_devchk(ap, device);
536 return ata_pio_devchk(ap, device);
540 * ata_dev_classify - determine device type based on ATA-spec signature
541 * @tf: ATA taskfile register set for device to be identified
543 * Determine from taskfile register contents whether a device is
544 * ATA or ATAPI, as per "Signature and persistence" section
545 * of ATA/PI spec (volume 1, sect 5.14).
551 * Device type, %ATA_DEV_ATA, %ATA_DEV_ATAPI, or %ATA_DEV_UNKNOWN in
552 * the event of failure.
555 unsigned int ata_dev_classify(const struct ata_taskfile *tf)
557 /* Apple's open source Darwin code hints that some devices only
558 * put a proper signature into the LBA mid/high registers,
559 * so we only check those. It's sufficient for uniqueness.
562 if (((tf->lbam == 0) && (tf->lbah == 0)) ||
563 ((tf->lbam == 0x3c) && (tf->lbah == 0xc3))) {
564 DPRINTK("found ATA device by sig\n");
568 if (((tf->lbam == 0x14) && (tf->lbah == 0xeb)) ||
569 ((tf->lbam == 0x69) && (tf->lbah == 0x96))) {
570 DPRINTK("found ATAPI device by sig\n");
571 return ATA_DEV_ATAPI;
574 DPRINTK("unknown device\n");
575 return ATA_DEV_UNKNOWN;
579 * ata_dev_try_classify - Parse returned ATA device signature
580 * @ap: ATA channel to examine
581 * @device: Device to examine (starting at zero)
582 * @r_err: Value of error register on completion
584 * After an event -- SRST, E.D.D., or SATA COMRESET -- occurs,
585 * an ATA/ATAPI-defined set of values is placed in the ATA
586 * shadow registers, indicating the results of device detection
589 * Select the ATA device, and read the values from the ATA shadow
590 * registers. Then parse according to the Error register value,
591 * and the spec-defined values examined by ata_dev_classify().
597 * Device type - %ATA_DEV_ATA, %ATA_DEV_ATAPI or %ATA_DEV_NONE.
601 ata_dev_try_classify(struct ata_port *ap, unsigned int device, u8 *r_err)
603 struct ata_taskfile tf;
607 ap->ops->dev_select(ap, device);
609 memset(&tf, 0, sizeof(tf));
611 ap->ops->tf_read(ap, &tf);
616 /* see if device passed diags */
619 else if ((device == 0) && (err == 0x81))
624 /* determine if device is ATA or ATAPI */
625 class = ata_dev_classify(&tf);
627 if (class == ATA_DEV_UNKNOWN)
629 if ((class == ATA_DEV_ATA) && (ata_chk_status(ap) == 0))
635 * ata_id_string - Convert IDENTIFY DEVICE page into string
636 * @id: IDENTIFY DEVICE results we will examine
637 * @s: string into which data is output
638 * @ofs: offset into identify device page
639 * @len: length of string to return. must be an even number.
641 * The strings in the IDENTIFY DEVICE page are broken up into
642 * 16-bit chunks. Run through the string, and output each
643 * 8-bit chunk linearly, regardless of platform.
649 void ata_id_string(const u16 *id, unsigned char *s,
650 unsigned int ofs, unsigned int len)
669 * ata_id_c_string - Convert IDENTIFY DEVICE page into C string
670 * @id: IDENTIFY DEVICE results we will examine
671 * @s: string into which data is output
672 * @ofs: offset into identify device page
673 * @len: length of string to return. must be an odd number.
675 * This function is identical to ata_id_string except that it
676 * trims trailing spaces and terminates the resulting string with
677 * null. @len must be actual maximum length (even number) + 1.
682 void ata_id_c_string(const u16 *id, unsigned char *s,
683 unsigned int ofs, unsigned int len)
689 ata_id_string(id, s, ofs, len - 1);
691 p = s + strnlen(s, len - 1);
692 while (p > s && p[-1] == ' ')
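/* Typical (illustrative) use: copying the 40-character model string out
 * of IDENTIFY data into a 41-byte buffer, e.g.
 *	ata_id_c_string(id, model, ATA_ID_PROD_OFS, sizeof(model));
 * where sizeof(model) == 41 satisfies the "even length + 1" rule above
 * (the offset constant name is assumed, not taken from this file).
 */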
697 static u64 ata_id_n_sectors(const u16 *id)
699 if (ata_id_has_lba(id)) {
700 if (ata_id_has_lba48(id))
701 return ata_id_u64(id, 100);
703 return ata_id_u32(id, 60);
705 if (ata_id_current_chs_valid(id))
706 return ata_id_u32(id, 57);
708 return id[1] * id[3] * id[6];
713 * ata_noop_dev_select - Select device 0/1 on ATA bus
714 * @ap: ATA channel to manipulate
715 * @device: ATA device (numbered from zero) to select
717 * This function intentionally does nothing.
719 * May be used as the dev_select() entry in ata_port_operations.
724 void ata_noop_dev_select (struct ata_port *ap, unsigned int device)
730 * ata_std_dev_select - Select device 0/1 on ATA bus
731 * @ap: ATA channel to manipulate
732 * @device: ATA device (numbered from zero) to select
734 * Use the method defined in the ATA specification to
735 * make either device 0, or device 1, active on the
736 * ATA channel. Works with both PIO and MMIO.
738 * May be used as the dev_select() entry in ata_port_operations.
744 void ata_std_dev_select (struct ata_port *ap, unsigned int device)
749 tmp = ATA_DEVICE_OBS;
751 tmp = ATA_DEVICE_OBS | ATA_DEV1;
753 if (ap->flags & ATA_FLAG_MMIO) {
754 writeb(tmp, (void __iomem *) ap->ioaddr.device_addr);
756 outb(tmp, ap->ioaddr.device_addr);
758 ata_pause(ap); /* needed; also flushes, for mmio */
762 * ata_dev_select - Select device 0/1 on ATA bus
763 * @ap: ATA channel to manipulate
764 * @device: ATA device (numbered from zero) to select
765 * @wait: non-zero to wait for Status register BSY bit to clear
766 * @can_sleep: non-zero if context allows sleeping
768 * Use the method defined in the ATA specification to
769 * make either device 0, or device 1, active on the
772 * This is a high-level version of ata_std_dev_select(),
773 * which additionally provides the services of inserting
774 * the proper pauses and status polling, where needed.
780 void ata_dev_select(struct ata_port *ap, unsigned int device,
781 unsigned int wait, unsigned int can_sleep)
783 if (ata_msg_probe(ap))
784 ata_port_printk(ap, KERN_INFO, "ata_dev_select: ENTER, ata%u: "
785 "device %u, wait %u\n", ap->id, device, wait);
790 ap->ops->dev_select(ap, device);
793 if (can_sleep && ap->device[device].class == ATA_DEV_ATAPI)
800 * ata_dump_id - IDENTIFY DEVICE info debugging output
801 * @id: IDENTIFY DEVICE page to dump
803 * Dump selected 16-bit words from the given IDENTIFY DEVICE
810 static inline void ata_dump_id(const u16 *id)
812 DPRINTK("49==0x%04x "
822 DPRINTK("80==0x%04x "
832 DPRINTK("88==0x%04x "
839 * ata_id_xfermask - Compute xfermask from the given IDENTIFY data
840 * @id: IDENTIFY data to compute xfer mask from
842 * Compute the xfermask for this device. This is not as trivial
843 * as it seems if we must consider early devices correctly.
845 * FIXME: pre IDE drive timing (do we care ?).
853 static unsigned int ata_id_xfermask(const u16 *id)
855 unsigned int pio_mask, mwdma_mask, udma_mask;
857 /* Usual case. Word 53 indicates word 64 is valid */
858 if (id[ATA_ID_FIELD_VALID] & (1 << 1)) {
859 pio_mask = id[ATA_ID_PIO_MODES] & 0x03;
863 /* If word 64 isn't valid then Word 51 high byte holds
864 * the PIO timing number for the maximum. Turn it into
867 pio_mask = (2 << (id[ATA_ID_OLD_PIO_MODES] & 0xFF)) - 1 ;
869 /* But wait.. there's more. Design your standards by
870 * committee and you too can get a free iordy field to
871 * process. However, it's the speeds, not the modes, that
872 * are supported... Note drivers using the timing API
873 * will get this right anyway
877 mwdma_mask = id[ATA_ID_MWDMA_MODES] & 0x07;
880 if (id[ATA_ID_FIELD_VALID] & (1 << 2))
881 udma_mask = id[ATA_ID_UDMA_MODES] & 0xff;
883 return ata_pack_xfermask(pio_mask, mwdma_mask, udma_mask);
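/* Worked example for the legacy path above (illustrative): a drive whose
 * old-style PIO field reports mode 2 yields pio_mask = (2 << 2) - 1 =
 * 0x07, i.e. PIO0-2 are advertised as supported.
 */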
887 * ata_port_queue_task - Queue port_task
888 * @ap: The ata_port to queue port_task for
889 * @fn: workqueue function to be scheduled
890 * @data: data value to pass to workqueue function
891 * @delay: delay time for workqueue function
893 * Schedule @fn(@data) for execution after @delay jiffies using
894 * port_task. There is one port_task per port and it's the
895 * user (low level driver)'s responsibility to make sure that only
896 * one task is active at any given time.
898 * libata core layer takes care of synchronization between
899 * port_task and EH. ata_port_queue_task() may be ignored for EH
903 * Inherited from caller.
905 void ata_port_queue_task(struct ata_port *ap, void (*fn)(void *), void *data,
910 if (ap->pflags & ATA_PFLAG_FLUSH_PORT_TASK)
913 PREPARE_WORK(&ap->port_task, fn, data);
916 rc = queue_work(ata_wq, &ap->port_task);
918 rc = queue_delayed_work(ata_wq, &ap->port_task, delay);
920 /* rc == 0 means that another user is using port task */
925 * ata_port_flush_task - Flush port_task
926 * @ap: The ata_port to flush port_task for
928 * After this function completes, port_task is guaranteed not to
929 * be running or scheduled.
932 * Kernel thread context (may sleep)
934 void ata_port_flush_task(struct ata_port *ap)
940 spin_lock_irqsave(ap->lock, flags);
941 ap->pflags |= ATA_PFLAG_FLUSH_PORT_TASK;
942 spin_unlock_irqrestore(ap->lock, flags);
944 DPRINTK("flush #1\n");
945 flush_workqueue(ata_wq);
948 * At this point, if a task is running, it's guaranteed to see
949 * the FLUSH flag; thus, it will never queue pio tasks again.
952 if (!cancel_delayed_work(&ap->port_task)) {
954 ata_port_printk(ap, KERN_DEBUG, "%s: flush #2\n",
956 flush_workqueue(ata_wq);
959 spin_lock_irqsave(ap->lock, flags);
960 ap->pflags &= ~ATA_PFLAG_FLUSH_PORT_TASK;
961 spin_unlock_irqrestore(ap->lock, flags);
964 ata_port_printk(ap, KERN_DEBUG, "%s: EXIT\n", __FUNCTION__);
967 void ata_qc_complete_internal(struct ata_queued_cmd *qc)
969 struct completion *waiting = qc->private_data;
975 * ata_exec_internal - execute libata internal command
976 * @dev: Device to which the command is sent
977 * @tf: Taskfile registers for the command and the result
978 * @cdb: CDB for packet command
979 * @dma_dir: Data transfer direction of the command
980 * @buf: Data buffer of the command
981 * @buflen: Length of data buffer
983 * Executes libata internal command with timeout. @tf contains
984 * command on entry and result on return. Timeout and error
985 * conditions are reported via return value. No recovery action
986 * is taken after a command times out. It's the caller's duty to
987 * clean up after timeout.
990 * None. Should be called with kernel context, might sleep.
993 * Zero on success, AC_ERR_* mask on failure
995 unsigned ata_exec_internal(struct ata_device *dev,
996 struct ata_taskfile *tf, const u8 *cdb,
997 int dma_dir, void *buf, unsigned int buflen)
999 struct ata_port *ap = dev->ap;
1000 u8 command = tf->command;
1001 struct ata_queued_cmd *qc;
1002 unsigned int tag, preempted_tag;
1003 u32 preempted_sactive, preempted_qc_active;
1004 DECLARE_COMPLETION_ONSTACK(wait);
1005 unsigned long flags;
1006 unsigned int err_mask;
1009 spin_lock_irqsave(ap->lock, flags);
1011 /* no internal command while frozen */
1012 if (ap->pflags & ATA_PFLAG_FROZEN) {
1013 spin_unlock_irqrestore(ap->lock, flags);
1014 return AC_ERR_SYSTEM;
1017 /* initialize internal qc */
1019 /* XXX: Tag 0 is used for drivers with legacy EH as some
1020 * drivers choke if any other tag is given. This breaks
1021 * ata_tag_internal() test for those drivers. Don't use new
1022 * EH stuff without converting to it.
1024 if (ap->ops->error_handler)
1025 tag = ATA_TAG_INTERNAL;
1029 if (test_and_set_bit(tag, &ap->qc_allocated))
1031 qc = __ata_qc_from_tag(ap, tag);
1039 preempted_tag = ap->active_tag;
1040 preempted_sactive = ap->sactive;
1041 preempted_qc_active = ap->qc_active;
1042 ap->active_tag = ATA_TAG_POISON;
1046 /* prepare & issue qc */
1049 memcpy(qc->cdb, cdb, ATAPI_CDB_LEN);
1050 qc->flags |= ATA_QCFLAG_RESULT_TF;
1051 qc->dma_dir = dma_dir;
1052 if (dma_dir != DMA_NONE) {
1053 ata_sg_init_one(qc, buf, buflen);
1054 qc->nsect = buflen / ATA_SECT_SIZE;
1057 qc->private_data = &wait;
1058 qc->complete_fn = ata_qc_complete_internal;
1062 spin_unlock_irqrestore(ap->lock, flags);
1064 rc = wait_for_completion_timeout(&wait, ata_probe_timeout);
1066 ata_port_flush_task(ap);
1069 spin_lock_irqsave(ap->lock, flags);
1071 /* We're racing with irq here. If we lose, the
1072 * following test prevents us from completing the qc
1073 * twice. If we win, the port is frozen and will be
1074 * cleaned up by ->post_internal_cmd().
1076 if (qc->flags & ATA_QCFLAG_ACTIVE) {
1077 qc->err_mask |= AC_ERR_TIMEOUT;
1079 if (ap->ops->error_handler)
1080 ata_port_freeze(ap);
1082 ata_qc_complete(qc);
1084 if (ata_msg_warn(ap))
1085 ata_dev_printk(dev, KERN_WARNING,
1086 "qc timeout (cmd 0x%x)\n", command);
1089 spin_unlock_irqrestore(ap->lock, flags);
1092 /* do post_internal_cmd */
1093 if (ap->ops->post_internal_cmd)
1094 ap->ops->post_internal_cmd(qc);
1096 if (qc->flags & ATA_QCFLAG_FAILED && !qc->err_mask) {
1097 if (ata_msg_warn(ap))
1098 ata_dev_printk(dev, KERN_WARNING,
1099 "zero err_mask for failed "
1100 "internal command, assuming AC_ERR_OTHER\n");
1101 qc->err_mask |= AC_ERR_OTHER;
1105 spin_lock_irqsave(ap->lock, flags);
1107 *tf = qc->result_tf;
1108 err_mask = qc->err_mask;
1111 ap->active_tag = preempted_tag;
1112 ap->sactive = preempted_sactive;
1113 ap->qc_active = preempted_qc_active;
1115 /* XXX - Some LLDDs (sata_mv) disable port on command failure.
1116 * Until those drivers are fixed, we detect the condition
1117 * here, fail the command with AC_ERR_SYSTEM and reenable the
1120 * Note that this doesn't change any behavior as internal
1121 * command failure results in disabling the device in the
1122 * higher layer for LLDDs without new reset/EH callbacks.
1124 * Kill the following code as soon as those drivers are fixed.
1126 if (ap->flags & ATA_FLAG_DISABLED) {
1127 err_mask |= AC_ERR_SYSTEM;
1131 spin_unlock_irqrestore(ap->lock, flags);
1137 * ata_do_simple_cmd - execute simple internal command
1138 * @dev: Device to which the command is sent
1139 * @cmd: Opcode to execute
1141 * Execute a 'simple' command, that only consists of the opcode
1142 * 'cmd' itself, without filling any other registers
1145 * Kernel thread context (may sleep).
1148 * Zero on success, AC_ERR_* mask on failure
1150 unsigned int ata_do_simple_cmd(struct ata_device *dev, u8 cmd)
1152 struct ata_taskfile tf;
1154 ata_tf_init(dev, &tf);
1157 tf.flags |= ATA_TFLAG_DEVICE;
1158 tf.protocol = ATA_PROT_NODATA;
1160 return ata_exec_internal(dev, &tf, NULL, DMA_NONE, NULL, 0);
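/* The function above doubles as a minimal usage example of
 * ata_exec_internal(): a NODATA taskfile, no CDB and no data buffer,
 * with success or failure reported purely through the returned
 * AC_ERR_* mask.
 */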
1164 * ata_pio_need_iordy - check if iordy needed
1167 * Check if the current speed of the device requires IORDY. Used
1168 * by various controllers for chip configuration.
1171 unsigned int ata_pio_need_iordy(const struct ata_device *adev)
1174 int speed = adev->pio_mode - XFER_PIO_0;
1181 /* If we have no drive specific rule, then PIO 2 is non IORDY */
1183 if (adev->id[ATA_ID_FIELD_VALID] & 2) { /* EIDE */
1184 pio = adev->id[ATA_ID_EIDE_PIO];
1185 /* Is the speed faster than the drive allows non IORDY ? */
1187 /* This is cycle times not frequency - watch the logic! */
1188 if (pio > 240) /* PIO2 is 240nS per cycle */
1197 * ata_dev_read_id - Read ID data from the specified device
1198 * @dev: target device
1199 * @p_class: pointer to class of the target device (may be changed)
1200 * @post_reset: is this read ID post-reset?
1201 * @id: buffer to read IDENTIFY data into
1203 * Read ID data from the specified device. ATA_CMD_ID_ATA is
1204 * performed on ATA devices and ATA_CMD_ID_ATAPI on ATAPI
1205 * devices. This function also issues ATA_CMD_INIT_DEV_PARAMS
1206 * for pre-ATA4 drives.
1209 * Kernel thread context (may sleep)
1212 * 0 on success, -errno otherwise.
1214 int ata_dev_read_id(struct ata_device *dev, unsigned int *p_class,
1215 int post_reset, u16 *id)
1217 struct ata_port *ap = dev->ap;
1218 unsigned int class = *p_class;
1219 struct ata_taskfile tf;
1220 unsigned int err_mask = 0;
1224 if (ata_msg_ctl(ap))
1225 ata_dev_printk(dev, KERN_DEBUG, "%s: ENTER, host %u, dev %u\n",
1226 __FUNCTION__, ap->id, dev->devno);
1228 ata_dev_select(ap, dev->devno, 1, 1); /* select device 0/1 */
1231 ata_tf_init(dev, &tf);
1235 tf.command = ATA_CMD_ID_ATA;
1238 tf.command = ATA_CMD_ID_ATAPI;
1242 reason = "unsupported class";
1246 tf.protocol = ATA_PROT_PIO;
1248 err_mask = ata_exec_internal(dev, &tf, NULL, DMA_FROM_DEVICE,
1249 id, sizeof(id[0]) * ATA_ID_WORDS);
1252 reason = "I/O error";
1256 swap_buf_le16(id, ATA_ID_WORDS);
1259 if ((class == ATA_DEV_ATA) != (ata_id_is_ata(id) | ata_id_is_cfa(id))) {
1261 reason = "device reports illegal type";
1265 if (post_reset && class == ATA_DEV_ATA) {
1267 * The exact sequence expected by certain pre-ATA4 drives is:
1270 * INITIALIZE DEVICE PARAMETERS
1272 * Some drives were very specific about that exact sequence.
1274 if (ata_id_major_version(id) < 4 || !ata_id_has_lba(id)) {
1275 err_mask = ata_dev_init_params(dev, id[3], id[6]);
1278 reason = "INIT_DEV_PARAMS failed";
1282 /* current CHS translation info (id[53-58]) might be
1283 * changed. reread the identify device info.
1295 if (ata_msg_warn(ap))
1296 ata_dev_printk(dev, KERN_WARNING, "failed to IDENTIFY "
1297 "(%s, err_mask=0x%x)\n", reason, err_mask);
1301 static inline u8 ata_dev_knobble(struct ata_device *dev)
1303 return ((dev->ap->cbl == ATA_CBL_SATA) && (!ata_id_is_sata(dev->id)));
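/* The test above flags a PATA device sitting behind a SATA bridge: the
 * cable side reports SATA while the device's own IDENTIFY data does not.
 * ata_dev_configure() uses this to apply the UDMA5 / 200-sector bridge
 * limits further below.
 */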
1306 static void ata_dev_config_ncq(struct ata_device *dev,
1307 char *desc, size_t desc_sz)
1309 struct ata_port *ap = dev->ap;
1310 int hdepth = 0, ddepth = ata_id_queue_depth(dev->id);
1312 if (!ata_id_has_ncq(dev->id)) {
1317 if (ap->flags & ATA_FLAG_NCQ) {
1318 hdepth = min(ap->host->can_queue, ATA_MAX_QUEUE - 1);
1319 dev->flags |= ATA_DFLAG_NCQ;
1322 if (hdepth >= ddepth)
1323 snprintf(desc, desc_sz, "NCQ (depth %d)", ddepth);
1325 snprintf(desc, desc_sz, "NCQ (depth %d/%d)", hdepth, ddepth);
1328 static void ata_set_port_max_cmd_len(struct ata_port *ap)
1333 ap->host->max_cmd_len = 0;
1334 for (i = 0; i < ATA_MAX_DEVICES; i++)
1335 ap->host->max_cmd_len = max_t(unsigned int,
1336 ap->host->max_cmd_len,
1337 ap->device[i].cdb_len);
1342 * ata_dev_configure - Configure the specified ATA/ATAPI device
1343 * @dev: Target device to configure
1344 * @print_info: Enable device info printout
1346 * Configure @dev according to @dev->id. Generic and low-level
1347 * driver specific fixups are also applied.
1350 * Kernel thread context (may sleep)
1353 * 0 on success, -errno otherwise
1355 int ata_dev_configure(struct ata_device *dev, int print_info)
1357 struct ata_port *ap = dev->ap;
1358 const u16 *id = dev->id;
1359 unsigned int xfer_mask;
1362 if (!ata_dev_enabled(dev) && ata_msg_info(ap)) {
1363 ata_dev_printk(dev, KERN_INFO,
1364 "%s: ENTER/EXIT (host %u, dev %u) -- nodev\n",
1365 __FUNCTION__, ap->id, dev->devno);
1369 if (ata_msg_probe(ap))
1370 ata_dev_printk(dev, KERN_DEBUG, "%s: ENTER, host %u, dev %u\n",
1371 __FUNCTION__, ap->id, dev->devno);
1373 /* print device capabilities */
1374 if (ata_msg_probe(ap))
1375 ata_dev_printk(dev, KERN_DEBUG,
1376 "%s: cfg 49:%04x 82:%04x 83:%04x 84:%04x "
1377 "85:%04x 86:%04x 87:%04x 88:%04x\n",
1379 id[49], id[82], id[83], id[84],
1380 id[85], id[86], id[87], id[88]);
1382 /* initialize to-be-configured parameters */
1383 dev->flags &= ~ATA_DFLAG_CFG_MASK;
1384 dev->max_sectors = 0;
1392 * common ATA, ATAPI feature tests
1395 /* find max transfer mode; for printk only */
1396 xfer_mask = ata_id_xfermask(id);
1398 if (ata_msg_probe(ap))
1401 /* ATA-specific feature tests */
1402 if (dev->class == ATA_DEV_ATA) {
1403 dev->n_sectors = ata_id_n_sectors(id);
1405 if (ata_id_has_lba(id)) {
1406 const char *lba_desc;
1410 dev->flags |= ATA_DFLAG_LBA;
1411 if (ata_id_has_lba48(id)) {
1412 dev->flags |= ATA_DFLAG_LBA48;
1417 ata_dev_config_ncq(dev, ncq_desc, sizeof(ncq_desc));
1419 /* print device info to dmesg */
1420 if (ata_msg_info(ap))
1421 ata_dev_printk(dev, KERN_INFO, "ATA-%d, "
1422 "max %s, %Lu sectors: %s %s\n",
1423 ata_id_major_version(id),
1424 ata_mode_string(xfer_mask),
1425 (unsigned long long)dev->n_sectors,
1426 lba_desc, ncq_desc);
1430 /* Default translation */
1431 dev->cylinders = id[1];
1433 dev->sectors = id[6];
1435 if (ata_id_current_chs_valid(id)) {
1436 /* Current CHS translation is valid. */
1437 dev->cylinders = id[54];
1438 dev->heads = id[55];
1439 dev->sectors = id[56];
1442 /* print device info to dmesg */
1443 if (ata_msg_info(ap))
1444 ata_dev_printk(dev, KERN_INFO, "ATA-%d, "
1445 "max %s, %Lu sectors: CHS %u/%u/%u\n",
1446 ata_id_major_version(id),
1447 ata_mode_string(xfer_mask),
1448 (unsigned long long)dev->n_sectors,
1449 dev->cylinders, dev->heads,
1453 if (dev->id[59] & 0x100) {
1454 dev->multi_count = dev->id[59] & 0xff;
1455 if (ata_msg_info(ap))
1456 ata_dev_printk(dev, KERN_INFO,
1457 "ata%u: dev %u multi count %u\n",
1458 ap->id, dev->devno, dev->multi_count);
1464 /* ATAPI-specific feature tests */
1465 else if (dev->class == ATA_DEV_ATAPI) {
1466 char *cdb_intr_string = "";
1468 rc = atapi_cdb_len(id);
1469 if ((rc < 12) || (rc > ATAPI_CDB_LEN)) {
1470 if (ata_msg_warn(ap))
1471 ata_dev_printk(dev, KERN_WARNING,
1472 "unsupported CDB len\n");
1476 dev->cdb_len = (unsigned int) rc;
1478 if (ata_id_cdb_intr(dev->id)) {
1479 dev->flags |= ATA_DFLAG_CDB_INTR;
1480 cdb_intr_string = ", CDB intr";
1483 /* print device info to dmesg */
1484 if (ata_msg_info(ap))
1485 ata_dev_printk(dev, KERN_INFO, "ATAPI, max %s%s\n",
1486 ata_mode_string(xfer_mask),
1490 ata_set_port_max_cmd_len(ap);
1492 /* limit bridge transfers to udma5, 200 sectors */
1493 if (ata_dev_knobble(dev)) {
1494 if (ata_msg_info(ap))
1495 ata_dev_printk(dev, KERN_INFO,
1496 "applying bridge limits\n");
1497 dev->udma_mask &= ATA_UDMA5;
1498 dev->max_sectors = ATA_MAX_SECTORS;
1501 if (ap->ops->dev_config)
1502 ap->ops->dev_config(ap, dev);
1504 if (ata_msg_probe(ap))
1505 ata_dev_printk(dev, KERN_DEBUG, "%s: EXIT, drv_stat = 0x%x\n",
1506 __FUNCTION__, ata_chk_status(ap));
1510 if (ata_msg_probe(ap))
1511 ata_dev_printk(dev, KERN_DEBUG,
1512 "%s: EXIT, err\n", __FUNCTION__);
1517 * ata_bus_probe - Reset and probe ATA bus
1520 * Master ATA bus probing function. Initiates a hardware-dependent
1521 * bus reset, then attempts to identify any devices found on
1525 * PCI/etc. bus probe sem.
1528 * Zero on success, negative errno otherwise.
1531 static int ata_bus_probe(struct ata_port *ap)
1533 unsigned int classes[ATA_MAX_DEVICES];
1534 int tries[ATA_MAX_DEVICES];
1535 int i, rc, down_xfermask;
1536 struct ata_device *dev;
1540 for (i = 0; i < ATA_MAX_DEVICES; i++)
1541 tries[i] = ATA_PROBE_MAX_TRIES;
1546 /* reset and determine device classes */
1547 ap->ops->phy_reset(ap);
1549 for (i = 0; i < ATA_MAX_DEVICES; i++) {
1550 dev = &ap->device[i];
1552 if (!(ap->flags & ATA_FLAG_DISABLED) &&
1553 dev->class != ATA_DEV_UNKNOWN)
1554 classes[dev->devno] = dev->class;
1556 classes[dev->devno] = ATA_DEV_NONE;
1558 dev->class = ATA_DEV_UNKNOWN;
1563 /* after the reset the device state is PIO 0 and the controller
1564 state is undefined. Record the mode */
1566 for (i = 0; i < ATA_MAX_DEVICES; i++)
1567 ap->device[i].pio_mode = XFER_PIO_0;
1569 /* read IDENTIFY page and configure devices */
1570 for (i = 0; i < ATA_MAX_DEVICES; i++) {
1571 dev = &ap->device[i];
1574 dev->class = classes[i];
1576 if (!ata_dev_enabled(dev))
1579 rc = ata_dev_read_id(dev, &dev->class, 1, dev->id);
1583 rc = ata_dev_configure(dev, 1);
1588 /* configure transfer mode */
1589 rc = ata_set_mode(ap, &dev);
1595 for (i = 0; i < ATA_MAX_DEVICES; i++)
1596 if (ata_dev_enabled(&ap->device[i]))
1599 /* no device present, disable port */
1600 ata_port_disable(ap);
1601 ap->ops->port_disable(ap);
1608 tries[dev->devno] = 0;
1611 sata_down_spd_limit(ap);
1614 tries[dev->devno]--;
1615 if (down_xfermask &&
1616 ata_down_xfermask_limit(dev, tries[dev->devno] == 1))
1617 tries[dev->devno] = 0;
1620 if (!tries[dev->devno]) {
1621 ata_down_xfermask_limit(dev, 1);
1622 ata_dev_disable(dev);
1629 * ata_port_probe - Mark port as enabled
1630 * @ap: Port for which we indicate enablement
1632 * Modify @ap data structure such that the system
1633 * thinks that the entire port is enabled.
1635 * LOCKING: host_set lock, or some other form of
1639 void ata_port_probe(struct ata_port *ap)
1641 ap->flags &= ~ATA_FLAG_DISABLED;
1645 * sata_print_link_status - Print SATA link status
1646 * @ap: SATA port to printk link status about
1648 * This function prints link speed and status of a SATA link.
1653 static void sata_print_link_status(struct ata_port *ap)
1655 u32 sstatus, scontrol, tmp;
1657 if (sata_scr_read(ap, SCR_STATUS, &sstatus))
1659 sata_scr_read(ap, SCR_CONTROL, &scontrol);
1661 if (ata_port_online(ap)) {
1662 tmp = (sstatus >> 4) & 0xf;
1663 ata_port_printk(ap, KERN_INFO,
1664 "SATA link up %s (SStatus %X SControl %X)\n",
1665 sata_spd_string(tmp), sstatus, scontrol);
1667 ata_port_printk(ap, KERN_INFO,
1668 "SATA link down (SStatus %X SControl %X)\n",
1674 * __sata_phy_reset - Wake/reset a low-level SATA PHY
1675 * @ap: SATA port associated with target SATA PHY.
1677 * This function issues commands to standard SATA Sxxx
1678 * PHY registers, to wake up the phy (and device), and
1679 * clear any reset condition.
1682 * PCI/etc. bus probe sem.
1685 void __sata_phy_reset(struct ata_port *ap)
1688 unsigned long timeout = jiffies + (HZ * 5);
1690 if (ap->flags & ATA_FLAG_SATA_RESET) {
1691 /* issue phy wake/reset */
1692 sata_scr_write_flush(ap, SCR_CONTROL, 0x301);
1693 /* Couldn't find anything in SATA I/II specs, but
1694 * AHCI-1.1 10.4.2 says at least 1 ms. */
1697 /* phy wake/clear reset */
1698 sata_scr_write_flush(ap, SCR_CONTROL, 0x300);
1700 /* wait for phy to become ready, if necessary */
1703 sata_scr_read(ap, SCR_STATUS, &sstatus);
1704 if ((sstatus & 0xf) != 1)
1706 } while (time_before(jiffies, timeout));
1708 /* print link status */
1709 sata_print_link_status(ap);
1711 /* TODO: phy layer with polling, timeouts, etc. */
1712 if (!ata_port_offline(ap))
1715 ata_port_disable(ap);
1717 if (ap->flags & ATA_FLAG_DISABLED)
1720 if (ata_busy_sleep(ap, ATA_TMOUT_BOOT_QUICK, ATA_TMOUT_BOOT)) {
1721 ata_port_disable(ap);
1725 ap->cbl = ATA_CBL_SATA;
1729 * sata_phy_reset - Reset SATA bus.
1730 * @ap: SATA port associated with target SATA PHY.
1732 * This function resets the SATA bus, and then probes
1733 * the bus for devices.
1736 * PCI/etc. bus probe sem.
1739 void sata_phy_reset(struct ata_port *ap)
1741 __sata_phy_reset(ap);
1742 if (ap->flags & ATA_FLAG_DISABLED)
1748 * ata_dev_pair - return other device on cable
1751 * Obtain the other device on the same cable; if no such
1752 * device is present, NULL is returned.
1755 struct ata_device *ata_dev_pair(struct ata_device *adev)
1757 struct ata_port *ap = adev->ap;
1758 struct ata_device *pair = &ap->device[1 - adev->devno];
1759 if (!ata_dev_enabled(pair))
1765 * ata_port_disable - Disable port.
1766 * @ap: Port to be disabled.
1768 * Modify @ap data structure such that the system
1769 * thinks that the entire port is disabled, and should
1770 * never attempt to probe or communicate with devices
1773 * LOCKING: host_set lock, or some other form of
1777 void ata_port_disable(struct ata_port *ap)
1779 ap->device[0].class = ATA_DEV_NONE;
1780 ap->device[1].class = ATA_DEV_NONE;
1781 ap->flags |= ATA_FLAG_DISABLED;
1785 * sata_down_spd_limit - adjust SATA spd limit downward
1786 * @ap: Port to adjust SATA spd limit for
1788 * Adjust SATA spd limit of @ap downward. Note that this
1789 * function only adjusts the limit. The change must be applied
1790 * using sata_set_spd().
1793 * Inherited from caller.
1796 * 0 on success, negative errno on failure
1798 int sata_down_spd_limit(struct ata_port *ap)
1800 u32 sstatus, spd, mask;
1803 rc = sata_scr_read(ap, SCR_STATUS, &sstatus);
1807 mask = ap->sata_spd_limit;
1810 highbit = fls(mask) - 1;
1811 mask &= ~(1 << highbit);
1813 spd = (sstatus >> 4) & 0xf;
1817 mask &= (1 << spd) - 1;
1821 ap->sata_spd_limit = mask;
1823 ata_port_printk(ap, KERN_WARNING, "limiting SATA link speed to %s\n",
1824 sata_spd_string(fls(mask)));
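/* Example (illustrative): with sata_spd_limit allowing both 1.5 and
 * 3.0 Gbps (mask 0x3), clearing the highest bit above leaves mask 0x1
 * and the warning reports the new limit as "1.5 Gbps".
 */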
1829 static int __sata_set_spd_needed(struct ata_port *ap, u32 *scontrol)
1833 if (ap->sata_spd_limit == UINT_MAX)
1836 limit = fls(ap->sata_spd_limit);
1838 spd = (*scontrol >> 4) & 0xf;
1839 *scontrol = (*scontrol & ~0xf0) | ((limit & 0xf) << 4);
1841 return spd != limit;
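/* Illustrative note: the SPD limit field occupies bits 7:4 of SControl,
 * so with sata_spd_limit == 0x1 the code above programs a limit value of
 * 1 there, restricting the PHY to 1.5 Gbps once the new setting is
 * applied by a reset.
 */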
1845 * sata_set_spd_needed - is SATA spd configuration needed
1846 * @ap: Port in question
1848 * Test whether the spd limit in SControl matches
1849 * @ap->sata_spd_limit. This function is used to determine
1850 * whether hardreset is necessary to apply SATA spd
1854 * Inherited from caller.
1857 * 1 if SATA spd configuration is needed, 0 otherwise.
1859 int sata_set_spd_needed(struct ata_port *ap)
1863 if (sata_scr_read(ap, SCR_CONTROL, &scontrol))
1866 return __sata_set_spd_needed(ap, &scontrol);
1870 * sata_set_spd - set SATA spd according to spd limit
1871 * @ap: Port to set SATA spd for
1873 * Set SATA spd of @ap according to sata_spd_limit.
1876 * Inherited from caller.
1879 * 0 if spd doesn't need to be changed, 1 if spd has been
1880 * changed. Negative errno if SCR registers are inaccessible.
1882 int sata_set_spd(struct ata_port *ap)
1887 if ((rc = sata_scr_read(ap, SCR_CONTROL, &scontrol)))
1890 if (!__sata_set_spd_needed(ap, &scontrol))
1893 if ((rc = sata_scr_write(ap, SCR_CONTROL, scontrol)))
1900 * This mode timing computation functionality is ported over from
1901 * drivers/ide/ide-timing.h and was originally written by Vojtech Pavlik
1904 * PIO 0-5, MWDMA 0-2 and UDMA 0-6 timings (in nanoseconds).
1905 * These were taken from ATA/ATAPI-6 standard, rev 0a, except
1906 * for PIO 5, which is a nonstandard extension and UDMA6, which
1907 * is currently supported only by Maxtor drives.
1910 static const struct ata_timing ata_timing[] = {
1912 { XFER_UDMA_6, 0, 0, 0, 0, 0, 0, 0, 15 },
1913 { XFER_UDMA_5, 0, 0, 0, 0, 0, 0, 0, 20 },
1914 { XFER_UDMA_4, 0, 0, 0, 0, 0, 0, 0, 30 },
1915 { XFER_UDMA_3, 0, 0, 0, 0, 0, 0, 0, 45 },
1917 { XFER_UDMA_2, 0, 0, 0, 0, 0, 0, 0, 60 },
1918 { XFER_UDMA_1, 0, 0, 0, 0, 0, 0, 0, 80 },
1919 { XFER_UDMA_0, 0, 0, 0, 0, 0, 0, 0, 120 },
1921 /* { XFER_UDMA_SLOW, 0, 0, 0, 0, 0, 0, 0, 150 }, */
1923 { XFER_MW_DMA_2, 25, 0, 0, 0, 70, 25, 120, 0 },
1924 { XFER_MW_DMA_1, 45, 0, 0, 0, 80, 50, 150, 0 },
1925 { XFER_MW_DMA_0, 60, 0, 0, 0, 215, 215, 480, 0 },
1927 { XFER_SW_DMA_2, 60, 0, 0, 0, 120, 120, 240, 0 },
1928 { XFER_SW_DMA_1, 90, 0, 0, 0, 240, 240, 480, 0 },
1929 { XFER_SW_DMA_0, 120, 0, 0, 0, 480, 480, 960, 0 },
1931 /* { XFER_PIO_5, 20, 50, 30, 100, 50, 30, 100, 0 }, */
1932 { XFER_PIO_4, 25, 70, 25, 120, 70, 25, 120, 0 },
1933 { XFER_PIO_3, 30, 80, 70, 180, 80, 70, 180, 0 },
1935 { XFER_PIO_2, 30, 290, 40, 330, 100, 90, 240, 0 },
1936 { XFER_PIO_1, 50, 290, 93, 383, 125, 100, 383, 0 },
1937 { XFER_PIO_0, 70, 290, 240, 600, 165, 150, 600, 0 },
1939 /* { XFER_PIO_SLOW, 120, 290, 240, 960, 290, 240, 960, 0 }, */
1944 #define ENOUGH(v,unit) (((v)-1)/(unit)+1)
1945 #define EZ(v,unit) ((v)?ENOUGH(v,unit):0)
1947 static void ata_timing_quantize(const struct ata_timing *t, struct ata_timing *q, int T, int UT)
1949 q->setup = EZ(t->setup * 1000, T);
1950 q->act8b = EZ(t->act8b * 1000, T);
1951 q->rec8b = EZ(t->rec8b * 1000, T);
1952 q->cyc8b = EZ(t->cyc8b * 1000, T);
1953 q->active = EZ(t->active * 1000, T);
1954 q->recover = EZ(t->recover * 1000, T);
1955 q->cycle = EZ(t->cycle * 1000, T);
1956 q->udma = EZ(t->udma * 1000, UT);
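/* Rough illustration (added comment): the table entries are nanoseconds
 * and T/UT are bus clock periods (picoseconds in typical callers), so
 * quantizing the 600 ns PIO0 cycle with a 30 ns clock gives
 * ENOUGH(600000, 30000) == 20 clocks; EZ() keeps zero ("don't care")
 * entries at zero.
 */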
1959 void ata_timing_merge(const struct ata_timing *a, const struct ata_timing *b,
1960 struct ata_timing *m, unsigned int what)
1962 if (what & ATA_TIMING_SETUP ) m->setup = max(a->setup, b->setup);
1963 if (what & ATA_TIMING_ACT8B ) m->act8b = max(a->act8b, b->act8b);
1964 if (what & ATA_TIMING_REC8B ) m->rec8b = max(a->rec8b, b->rec8b);
1965 if (what & ATA_TIMING_CYC8B ) m->cyc8b = max(a->cyc8b, b->cyc8b);
1966 if (what & ATA_TIMING_ACTIVE ) m->active = max(a->active, b->active);
1967 if (what & ATA_TIMING_RECOVER) m->recover = max(a->recover, b->recover);
1968 if (what & ATA_TIMING_CYCLE ) m->cycle = max(a->cycle, b->cycle);
1969 if (what & ATA_TIMING_UDMA ) m->udma = max(a->udma, b->udma);
1972 static const struct ata_timing* ata_timing_find_mode(unsigned short speed)
1974 const struct ata_timing *t;
1976 for (t = ata_timing; t->mode != speed; t++)
1977 if (t->mode == 0xFF)
1982 int ata_timing_compute(struct ata_device *adev, unsigned short speed,
1983 struct ata_timing *t, int T, int UT)
1985 const struct ata_timing *s;
1986 struct ata_timing p;
1992 if (!(s = ata_timing_find_mode(speed)))
1995 memcpy(t, s, sizeof(*s));
1998 * If the drive is an EIDE drive, it can tell us it needs extended
1999 * PIO/MW_DMA cycle timing.
2002 if (adev->id[ATA_ID_FIELD_VALID] & 2) { /* EIDE drive */
2003 memset(&p, 0, sizeof(p));
2004 if(speed >= XFER_PIO_0 && speed <= XFER_SW_DMA_0) {
2005 if (speed <= XFER_PIO_2) p.cycle = p.cyc8b = adev->id[ATA_ID_EIDE_PIO];
2006 else p.cycle = p.cyc8b = adev->id[ATA_ID_EIDE_PIO_IORDY];
2007 } else if(speed >= XFER_MW_DMA_0 && speed <= XFER_MW_DMA_2) {
2008 p.cycle = adev->id[ATA_ID_EIDE_DMA_MIN];
2010 ata_timing_merge(&p, t, t, ATA_TIMING_CYCLE | ATA_TIMING_CYC8B);
2014 * Convert the timing to bus clock counts.
2017 ata_timing_quantize(t, t, T, UT);
2020 * Even in DMA/UDMA modes we still use PIO access for IDENTIFY,
2021 * S.M.A.R.T. and some other commands. We have to ensure that the
2022 * DMA cycle timing is no faster than the fastest PIO timing.
2025 if (speed > XFER_PIO_4) {
2026 ata_timing_compute(adev, adev->pio_mode, &p, T, UT);
2027 ata_timing_merge(&p, t, t, ATA_TIMING_ALL);
2031 * Lengthen active & recovery time so that cycle time is correct.
2034 if (t->act8b + t->rec8b < t->cyc8b) {
2035 t->act8b += (t->cyc8b - (t->act8b + t->rec8b)) / 2;
2036 t->rec8b = t->cyc8b - t->act8b;
2039 if (t->active + t->recover < t->cycle) {
2040 t->active += (t->cycle - (t->active + t->recover)) / 2;
2041 t->recover = t->cycle - t->active;
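/* Illustrative numbers: with active=70, recover=25 and a required cycle
 * of 120, the slack of 25 is split so that active becomes 82 and recover
 * becomes 38, keeping active + recover == cycle.
 */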
2048 * ata_down_xfermask_limit - adjust dev xfer masks downward
2049 * @dev: Device to adjust xfer masks
2050 * @force_pio0: Force PIO0
2052 * Adjust xfer masks of @dev downward. Note that this function
2053 * does not apply the change. Invoking ata_set_mode() afterwards
2054 * will apply the limit.
2057 * Inherited from caller.
2060 * 0 on success, negative errno on failure
2062 int ata_down_xfermask_limit(struct ata_device *dev, int force_pio0)
2064 unsigned long xfer_mask;
2067 xfer_mask = ata_pack_xfermask(dev->pio_mask, dev->mwdma_mask,
2072 /* don't gear down to MWDMA from UDMA, go directly to PIO */
2073 if (xfer_mask & ATA_MASK_UDMA)
2074 xfer_mask &= ~ATA_MASK_MWDMA;
2076 highbit = fls(xfer_mask) - 1;
2077 xfer_mask &= ~(1 << highbit);
2079 xfer_mask &= 1 << ATA_SHIFT_PIO;
2083 ata_unpack_xfermask(xfer_mask, &dev->pio_mask, &dev->mwdma_mask,
2086 ata_dev_printk(dev, KERN_WARNING, "limiting speed to %s\n",
2087 ata_mode_string(xfer_mask));
2095 static int ata_dev_set_mode(struct ata_device *dev)
2097 unsigned int err_mask;
2100 dev->flags &= ~ATA_DFLAG_PIO;
2101 if (dev->xfer_shift == ATA_SHIFT_PIO)
2102 dev->flags |= ATA_DFLAG_PIO;
2104 err_mask = ata_dev_set_xfermode(dev);
2106 ata_dev_printk(dev, KERN_ERR, "failed to set xfermode "
2107 "(err_mask=0x%x)\n", err_mask);
2111 rc = ata_dev_revalidate(dev, 0);
2115 DPRINTK("xfer_shift=%u, xfer_mode=0x%x\n",
2116 dev->xfer_shift, (int)dev->xfer_mode);
2118 ata_dev_printk(dev, KERN_INFO, "configured for %s\n",
2119 ata_mode_string(ata_xfer_mode2mask(dev->xfer_mode)));
2124 * ata_set_mode - Program timings and issue SET FEATURES - XFER
2125 * @ap: port on which timings will be programmed
2126 * @r_failed_dev: out parameter for failed device
2128 * Set ATA device disk transfer mode (PIO3, UDMA6, etc.). If
2129 * ata_set_mode() fails, pointer to the failing device is
2130 * returned in @r_failed_dev.
2133 * PCI/etc. bus probe sem.
2136 * 0 on success, negative errno otherwise
2138 int ata_set_mode(struct ata_port *ap, struct ata_device **r_failed_dev)
2140 struct ata_device *dev;
2141 int i, rc = 0, used_dma = 0, found = 0;
2143 /* has private set_mode? */
2144 if (ap->ops->set_mode) {
2145 /* FIXME: make ->set_mode handle no device case and
2146 * return error code and failing device on failure.
2148 for (i = 0; i < ATA_MAX_DEVICES; i++) {
2149 if (ata_dev_enabled(&ap->device[i])) {
2150 ap->ops->set_mode(ap);
2157 /* step 1: calculate xfer_mask */
2158 for (i = 0; i < ATA_MAX_DEVICES; i++) {
2159 unsigned int pio_mask, dma_mask;
2161 dev = &ap->device[i];
2163 if (!ata_dev_enabled(dev))
2166 ata_dev_xfermask(dev);
2168 pio_mask = ata_pack_xfermask(dev->pio_mask, 0, 0);
2169 dma_mask = ata_pack_xfermask(0, dev->mwdma_mask, dev->udma_mask);
2170 dev->pio_mode = ata_xfer_mask2mode(pio_mask);
2171 dev->dma_mode = ata_xfer_mask2mode(dma_mask);
2180 /* step 2: always set host PIO timings */
2181 for (i = 0; i < ATA_MAX_DEVICES; i++) {
2182 dev = &ap->device[i];
2183 if (!ata_dev_enabled(dev))
2186 if (!dev->pio_mode) {
2187 ata_dev_printk(dev, KERN_WARNING, "no PIO support\n");
2192 dev->xfer_mode = dev->pio_mode;
2193 dev->xfer_shift = ATA_SHIFT_PIO;
2194 if (ap->ops->set_piomode)
2195 ap->ops->set_piomode(ap, dev);
2198 /* step 3: set host DMA timings */
2199 for (i = 0; i < ATA_MAX_DEVICES; i++) {
2200 dev = &ap->device[i];
2202 if (!ata_dev_enabled(dev) || !dev->dma_mode)
2205 dev->xfer_mode = dev->dma_mode;
2206 dev->xfer_shift = ata_xfer_mode2shift(dev->dma_mode);
2207 if (ap->ops->set_dmamode)
2208 ap->ops->set_dmamode(ap, dev);
2211 /* step 4: update devices' xfer mode */
2212 for (i = 0; i < ATA_MAX_DEVICES; i++) {
2213 dev = &ap->device[i];
2215 if (!ata_dev_enabled(dev))
2218 rc = ata_dev_set_mode(dev);
2223 /* Record simplex status. If we selected DMA then the other
2224 * host channels are not permitted to do so.
2226 if (used_dma && (ap->host_set->flags & ATA_HOST_SIMPLEX))
2227 ap->host_set->simplex_claimed = 1;
2229 /* step5: chip specific finalisation */
2230 if (ap->ops->post_set_mode)
2231 ap->ops->post_set_mode(ap);
2235 *r_failed_dev = dev;
2240 * ata_tf_to_host - issue ATA taskfile to host controller
2241 * @ap: port to which command is being issued
2242 * @tf: ATA taskfile register set
2244 * Issues ATA taskfile register set to ATA host controller,
2245 * with proper synchronization with interrupt handler and
2249 * spin_lock_irqsave(host_set lock)
2252 static inline void ata_tf_to_host(struct ata_port *ap,
2253 const struct ata_taskfile *tf)
2255 ap->ops->tf_load(ap, tf);
2256 ap->ops->exec_command(ap, tf);
2260 * ata_busy_sleep - sleep until BSY clears, or timeout
2261 * @ap: port containing status register to be polled
2262 * @tmout_pat: impatience timeout
2263 * @tmout: overall timeout
2265 * Sleep until ATA Status register bit BSY clears,
2266 * or a timeout occurs.
2271 unsigned int ata_busy_sleep (struct ata_port *ap,
2272 unsigned long tmout_pat, unsigned long tmout)
2274 unsigned long timer_start, timeout;
2277 status = ata_busy_wait(ap, ATA_BUSY, 300);
2278 timer_start = jiffies;
2279 timeout = timer_start + tmout_pat;
2280 while ((status & ATA_BUSY) && (time_before(jiffies, timeout))) {
2282 status = ata_busy_wait(ap, ATA_BUSY, 3);
2285 if (status & ATA_BUSY)
2286 ata_port_printk(ap, KERN_WARNING,
2287 "port is slow to respond, please be patient\n");
2289 timeout = timer_start + tmout;
2290 while ((status & ATA_BUSY) && (time_before(jiffies, timeout))) {
2292 status = ata_chk_status(ap);
2295 if (status & ATA_BUSY) {
2296 ata_port_printk(ap, KERN_ERR, "port failed to respond "
2297 "(%lu secs)\n", tmout / HZ);
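/* (Illustrative) callers typically pass ATA_TMOUT_BOOT_QUICK as the
 * impatience timeout and ATA_TMOUT_BOOT as the hard limit, as
 * ata_bus_post_reset() below does: complain early, keep waiting longer.
 */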
2304 static void ata_bus_post_reset(struct ata_port *ap, unsigned int devmask)
2306 struct ata_ioports *ioaddr = &ap->ioaddr;
2307 unsigned int dev0 = devmask & (1 << 0);
2308 unsigned int dev1 = devmask & (1 << 1);
2309 unsigned long timeout;
2311 /* if device 0 was found in ata_devchk, wait for its
2315 ata_busy_sleep(ap, ATA_TMOUT_BOOT_QUICK, ATA_TMOUT_BOOT);
2317 /* if device 1 was found in ata_devchk, wait for
2318 * register access, then wait for BSY to clear
2320 timeout = jiffies + ATA_TMOUT_BOOT;
2324 ap->ops->dev_select(ap, 1);
2325 if (ap->flags & ATA_FLAG_MMIO) {
2326 nsect = readb((void __iomem *) ioaddr->nsect_addr);
2327 lbal = readb((void __iomem *) ioaddr->lbal_addr);
2329 nsect = inb(ioaddr->nsect_addr);
2330 lbal = inb(ioaddr->lbal_addr);
2332 if ((nsect == 1) && (lbal == 1))
2334 if (time_after(jiffies, timeout)) {
2338 msleep(50); /* give drive a breather */
2341 ata_busy_sleep(ap, ATA_TMOUT_BOOT_QUICK, ATA_TMOUT_BOOT);
2343 /* is all this really necessary? */
2344 ap->ops->dev_select(ap, 0);
2346 ap->ops->dev_select(ap, 1);
2348 ap->ops->dev_select(ap, 0);
2351 static unsigned int ata_bus_softreset(struct ata_port *ap,
2352 unsigned int devmask)
2354 struct ata_ioports *ioaddr = &ap->ioaddr;
2356 DPRINTK("ata%u: bus reset via SRST\n", ap->id);
2358 /* software reset. causes dev0 to be selected */
2359 if (ap->flags & ATA_FLAG_MMIO) {
2360 writeb(ap->ctl, (void __iomem *) ioaddr->ctl_addr);
2361 udelay(20); /* FIXME: flush */
2362 writeb(ap->ctl | ATA_SRST, (void __iomem *) ioaddr->ctl_addr);
2363 udelay(20); /* FIXME: flush */
2364 writeb(ap->ctl, (void __iomem *) ioaddr->ctl_addr);
2366 outb(ap->ctl, ioaddr->ctl_addr);
2368 outb(ap->ctl | ATA_SRST, ioaddr->ctl_addr);
2370 outb(ap->ctl, ioaddr->ctl_addr);
2373 /* spec mandates ">= 2ms" before checking status.
2374 * We wait 150ms, because that was the magic delay used for
2375 * ATAPI devices in Hale Landis's ATADRVR, for the period of time
2376 * between when the ATA command register is written, and then
2377 * status is checked. Because waiting for "a while" before
2378 * checking status is fine, post SRST, we perform this magic
2379 * delay here as well.
2381 * Old drivers/ide uses the 2mS rule and then waits for ready
2385 /* Before we perform post reset processing we want to see if
2386 * the bus shows 0xFF because the odd clown forgets the D7
2387 * pulldown resistor.
2389 if (ata_check_status(ap) == 0xFF) {
2390 ata_port_printk(ap, KERN_ERR, "SRST failed (status 0xFF)\n");
2391 return AC_ERR_OTHER;
2394 ata_bus_post_reset(ap, devmask);
2400 * ata_bus_reset - reset host port and associated ATA channel
2401 * @ap: port to reset
2403 * This is typically the first time we actually start issuing
2404 * commands to the ATA channel. We wait for BSY to clear, then
2405 * issue EXECUTE DEVICE DIAGNOSTIC command, polling for its
2406 * result. Determine what devices, if any, are on the channel
2407 * by looking at the device 0/1 error register. Look at the signature
2408 * stored in each device's taskfile registers, to determine if
2409 * the device is ATA or ATAPI.
2412 * PCI/etc. bus probe sem.
2413 * Obtains host_set lock.
2416 * Sets ATA_FLAG_DISABLED if bus reset fails.
2419 void ata_bus_reset(struct ata_port *ap)
2421 struct ata_ioports *ioaddr = &ap->ioaddr;
2422 unsigned int slave_possible = ap->flags & ATA_FLAG_SLAVE_POSS;
2424 unsigned int dev0, dev1 = 0, devmask = 0;
2426 DPRINTK("ENTER, host %u, port %u\n", ap->id, ap->port_no);
2428 /* determine if device 0/1 are present */
2429 if (ap->flags & ATA_FLAG_SATA_RESET)
2432 dev0 = ata_devchk(ap, 0);
2434 dev1 = ata_devchk(ap, 1);
2438 devmask |= (1 << 0);
2440 devmask |= (1 << 1);
2442 /* select device 0 again */
2443 ap->ops->dev_select(ap, 0);
2445 /* issue bus reset */
2446 if (ap->flags & ATA_FLAG_SRST)
2447 if (ata_bus_softreset(ap, devmask))
2451 * determine by signature whether we have ATA or ATAPI devices
2453 ap->device[0].class = ata_dev_try_classify(ap, 0, &err);
2454 if ((slave_possible) && (err != 0x81))
2455 ap->device[1].class = ata_dev_try_classify(ap, 1, &err);
2457 /* re-enable interrupts */
2458 if (ap->ioaddr.ctl_addr) /* FIXME: hack. create a hook instead */
2461 /* is double-select really necessary? */
2462 if (ap->device[1].class != ATA_DEV_NONE)
2463 ap->ops->dev_select(ap, 1);
2464 if (ap->device[0].class != ATA_DEV_NONE)
2465 ap->ops->dev_select(ap, 0);
2467 /* if no devices were detected, disable this port */
2468 if ((ap->device[0].class == ATA_DEV_NONE) &&
2469 (ap->device[1].class == ATA_DEV_NONE))
2472 if (ap->flags & (ATA_FLAG_SATA_RESET | ATA_FLAG_SRST)) {
2473 /* set up device control for ATA_FLAG_SATA_RESET */
2474 if (ap->flags & ATA_FLAG_MMIO)
2475 writeb(ap->ctl, (void __iomem *) ioaddr->ctl_addr);
2477 outb(ap->ctl, ioaddr->ctl_addr);
2484 ata_port_printk(ap, KERN_ERR, "disabling port\n");
2485 ap->ops->port_disable(ap);
2491 * sata_phy_debounce - debounce SATA phy status
2492 * @ap: ATA port to debounce SATA phy status for
2493 * @params: timing parameters { interval, duration, timeout } in msec
2495 * Make sure SStatus of @ap reaches stable state, determined by
2496 * holding the same value where DET is not 1 for @duration polled
2497 * every @interval, before @timeout. Timeout constrains the
2498 * beginning of the stable state. Because, after hot unplugging,
2499 * DET gets stuck at 1 on some controllers, this function waits
2500 * until timeout and then returns 0 if DET is stable at 1.
2503 * Kernel thread context (may sleep)
2506 * 0 on success, -errno on failure.
2508 int sata_phy_debounce(struct ata_port *ap, const unsigned long *params)
2510 unsigned long interval_msec = params[0];
2511 unsigned long duration = params[1] * HZ / 1000;
2512 unsigned long timeout = jiffies + params[2] * HZ / 1000;
2513 unsigned long last_jiffies;
2517 if ((rc = sata_scr_read(ap, SCR_STATUS, &cur)))
2522 last_jiffies = jiffies;
2525 msleep(interval_msec);
2526 if ((rc = sata_scr_read(ap, SCR_STATUS, &cur)))
2532 if (cur == 1 && time_before(jiffies, timeout))
2534 if (time_after(jiffies, last_jiffies + duration))
2539 /* unstable, start over */
2541 last_jiffies = jiffies;
2544 if (time_after(jiffies, timeout))
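/*
 * Illustrative sketch (not part of the original file): a caller that wants
 * a quiescent link before trusting SStatus can debounce it with one of the
 * timing tables declared at the top of this file. The helper name below is
 * an assumption for the example only.
 */
static int example_wait_for_stable_link(struct ata_port *ap)
{
	int rc = sata_phy_debounce(ap, sata_deb_timing_boot);

	if (rc)
		ata_port_printk(ap, KERN_WARNING,
				"link failed to stabilize (errno=%d)\n", rc);
	return rc;
}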
2550 * sata_phy_resume - resume SATA phy
2551 * @ap: ATA port to resume SATA phy for
2552 * @params: timing parameters { interval, duration, timeout } in msec
2554 * Resume SATA phy of @ap and debounce it.
2557 * Kernel thread context (may sleep)
2560 * 0 on success, -errno on failure.
2562 int sata_phy_resume(struct ata_port *ap, const unsigned long *params)
2567 if ((rc = sata_scr_read(ap, SCR_CONTROL, &scontrol)))
2570 scontrol = (scontrol & 0x0f0) | 0x300;
2572 if ((rc = sata_scr_write(ap, SCR_CONTROL, scontrol)))
2575 /* Some PHYs react badly if SStatus is pounded immediately
2576 * after resuming. Delay 200ms before debouncing.
2580 return sata_phy_debounce(ap, params);
2583 static void ata_wait_spinup(struct ata_port *ap)
2585 struct ata_eh_context *ehc = &ap->eh_context;
2586 unsigned long end, secs;
2589 /* first, debounce phy if SATA */
2590 if (ap->cbl == ATA_CBL_SATA) {
2591 rc = sata_phy_debounce(ap, sata_deb_timing_eh);
2593 /* if debounced successfully and offline, no need to wait */
2594 if ((rc == 0 || rc == -EOPNOTSUPP) && ata_port_offline(ap))
2598 /* okay, let's give the drive time to spin up */
2599 end = ehc->i.hotplug_timestamp + ATA_SPINUP_WAIT * HZ / 1000;
2600 secs = ((end - jiffies) + HZ - 1) / HZ;
2602 if (time_after(jiffies, end))
2606 ata_port_printk(ap, KERN_INFO, "waiting for device to spin up "
2607 "(%lu secs)\n", secs);
2609 schedule_timeout_uninterruptible(end - jiffies);
2613 * ata_std_prereset - prepare for reset
2614 * @ap: ATA port to be reset
2616 * @ap is about to be reset. Initialize it.
2619 * Kernel thread context (may sleep)
2622 * 0 on success, -errno otherwise.
2624 int ata_std_prereset(struct ata_port *ap)
2626 struct ata_eh_context *ehc = &ap->eh_context;
2627 const unsigned long *timing;
2630 /* handle link resume & hotplug spinup */
2631 if ((ehc->i.flags & ATA_EHI_RESUME_LINK) &&
2632 (ap->flags & ATA_FLAG_HRST_TO_RESUME))
2633 ehc->i.action |= ATA_EH_HARDRESET;
2635 if ((ehc->i.flags & ATA_EHI_HOTPLUGGED) &&
2636 (ap->flags & ATA_FLAG_SKIP_D2H_BSY))
2637 ata_wait_spinup(ap);
2639 /* if we're about to do hardreset, nothing more to do */
2640 if (ehc->i.action & ATA_EH_HARDRESET)
2643 /* if SATA, resume phy */
2644 if (ap->cbl == ATA_CBL_SATA) {
2645 if (ap->pflags & ATA_PFLAG_LOADING)
2646 timing = sata_deb_timing_boot;
2648 timing = sata_deb_timing_eh;
2650 rc = sata_phy_resume(ap, timing);
2651 if (rc && rc != -EOPNOTSUPP) {
2652 /* phy resume failed */
2653 ata_port_printk(ap, KERN_WARNING, "failed to resume "
2654 "link for reset (errno=%d)\n", rc);
2659 /* Wait for !BSY if the controller can wait for the first D2H
2660 * Reg FIS and we don't know that no device is attached.
2662 if (!(ap->flags & ATA_FLAG_SKIP_D2H_BSY) && !ata_port_offline(ap))
2663 ata_busy_sleep(ap, ATA_TMOUT_BOOT_QUICK, ATA_TMOUT_BOOT);
2669 * ata_std_softreset - reset host port via ATA SRST
2670 * @ap: port to reset
2671 * @classes: resulting classes of attached devices
2673 * Reset host port using ATA SRST.
2676 * Kernel thread context (may sleep)
2679 * 0 on success, -errno otherwise.
2681 int ata_std_softreset(struct ata_port *ap, unsigned int *classes)
2683 unsigned int slave_possible = ap->flags & ATA_FLAG_SLAVE_POSS;
2684 unsigned int devmask = 0, err_mask;
2689 if (ata_port_offline(ap)) {
2690 classes[0] = ATA_DEV_NONE;
2694 /* determine if device 0/1 are present */
2695 if (ata_devchk(ap, 0))
2696 devmask |= (1 << 0);
2697 if (slave_possible && ata_devchk(ap, 1))
2698 devmask |= (1 << 1);
2700 /* select device 0 again */
2701 ap->ops->dev_select(ap, 0);
2703 /* issue bus reset */
2704 DPRINTK("about to softreset, devmask=%x\n", devmask);
2705 err_mask = ata_bus_softreset(ap, devmask);
2707 ata_port_printk(ap, KERN_ERR, "SRST failed (err_mask=0x%x)\n",
2712 /* determine by signature whether we have ATA or ATAPI devices */
2713 classes[0] = ata_dev_try_classify(ap, 0, &err);
2714 if (slave_possible && err != 0x81)
2715 classes[1] = ata_dev_try_classify(ap, 1, &err);
2718 DPRINTK("EXIT, classes[0]=%u [1]=%u\n", classes[0], classes[1]);
2723 * sata_std_hardreset - reset host port via SATA phy reset
2724 * @ap: port to reset
2725 * @class: resulting class of attached device
2727 * SATA phy-reset host port using DET bits of SControl register.
2730 * Kernel thread context (may sleep)
2733 * 0 on success, -errno otherwise.
2735 int sata_std_hardreset(struct ata_port *ap, unsigned int *class)
2742 if (sata_set_spd_needed(ap)) {
2743 /* SATA spec says nothing about how to reconfigure
2744 * spd. To be on the safe side, turn off phy during
2745 * reconfiguration. This works for at least ICH7 AHCI
2748 if ((rc = sata_scr_read(ap, SCR_CONTROL, &scontrol)))
2751 scontrol = (scontrol & 0x0f0) | 0x302;
2753 if ((rc = sata_scr_write(ap, SCR_CONTROL, scontrol)))
2759 /* issue phy wake/reset */
2760 if ((rc = sata_scr_read(ap, SCR_CONTROL, &scontrol)))
2763 scontrol = (scontrol & 0x0f0) | 0x301;
2765 if ((rc = sata_scr_write_flush(ap, SCR_CONTROL, scontrol)))
2768 /* Couldn't find anything in SATA I/II specs, but AHCI-1.1
2769 * 10.4.2 says at least 1 ms.
2773 /* bring phy back */
2774 sata_phy_resume(ap, sata_deb_timing_eh);
2776 /* TODO: phy layer with polling, timeouts, etc. */
2777 if (ata_port_offline(ap)) {
2778 *class = ATA_DEV_NONE;
2779 DPRINTK("EXIT, link offline\n");
2783 if (ata_busy_sleep(ap, ATA_TMOUT_BOOT_QUICK, ATA_TMOUT_BOOT)) {
2784 ata_port_printk(ap, KERN_ERR,
2785 "COMRESET failed (device not ready)\n");
2789 ap->ops->dev_select(ap, 0); /* probably unnecessary */
2791 *class = ata_dev_try_classify(ap, 0, NULL);
2793 DPRINTK("EXIT, class=%u\n", *class);
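/*
 * Illustrative sketch (not part of the original file): the same SCR helpers
 * used by sata_std_hardreset() can also cap the negotiated link speed by
 * programming the SPD field (bits 7:4) of SControl; the new limit takes
 * effect on the next COMRESET. The helper name and the Gen-1 value are
 * assumptions for the example.
 */
static int example_limit_link_to_gen1(struct ata_port *ap)
{
	u32 scontrol;
	int rc;

	if ((rc = sata_scr_read(ap, SCR_CONTROL, &scontrol)))
		return rc;
	scontrol = (scontrol & ~0x0f0) | 0x010;	/* SPD = 1: at most 1.5 Gbps */
	return sata_scr_write(ap, SCR_CONTROL, scontrol);
}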
2798 * ata_std_postreset - standard postreset callback
2799 * @ap: the target ata_port
2800 * @classes: classes of attached devices
2802 * This function is invoked after a successful reset. Note that
2803 * the device might have been reset more than once using
2804 * different reset methods before postreset is invoked.
2807 * Kernel thread context (may sleep)
2809 void ata_std_postreset(struct ata_port *ap, unsigned int *classes)
2815 /* print link status */
2816 sata_print_link_status(ap);
2819 if (sata_scr_read(ap, SCR_ERROR, &serror) == 0)
2820 sata_scr_write(ap, SCR_ERROR, serror);
2822 /* re-enable interrupts */
2823 if (!ap->ops->error_handler) {
2824 /* FIXME: hack. create a hook instead */
2825 if (ap->ioaddr.ctl_addr)
2829 /* is double-select really necessary? */
2830 if (classes[0] != ATA_DEV_NONE)
2831 ap->ops->dev_select(ap, 1);
2832 if (classes[1] != ATA_DEV_NONE)
2833 ap->ops->dev_select(ap, 0);
2835 /* bail out if no device is present */
2836 if (classes[0] == ATA_DEV_NONE && classes[1] == ATA_DEV_NONE) {
2837 DPRINTK("EXIT, no device\n");
2841 /* set up device control */
2842 if (ap->ioaddr.ctl_addr) {
2843 if (ap->flags & ATA_FLAG_MMIO)
2844 writeb(ap->ctl, (void __iomem *) ap->ioaddr.ctl_addr);
2846 outb(ap->ctl, ap->ioaddr.ctl_addr);
2853 * ata_dev_same_device - Determine whether new ID matches configured device
2854 * @dev: device to compare against
2855 * @new_class: class of the new device
2856 * @new_id: IDENTIFY page of the new device
2858 * Compare @new_class and @new_id against @dev and determine
2859 * whether @dev is the device indicated by @new_class and
2866 * 1 if @dev matches @new_class and @new_id, 0 otherwise.
2868 static int ata_dev_same_device(struct ata_device *dev, unsigned int new_class,
2871 const u16 *old_id = dev->id;
2872 unsigned char model[2][41], serial[2][21];
2875 if (dev->class != new_class) {
2876 ata_dev_printk(dev, KERN_INFO, "class mismatch %d != %d\n",
2877 dev->class, new_class);
2881 ata_id_c_string(old_id, model[0], ATA_ID_PROD_OFS, sizeof(model[0]));
2882 ata_id_c_string(new_id, model[1], ATA_ID_PROD_OFS, sizeof(model[1]));
2883 ata_id_c_string(old_id, serial[0], ATA_ID_SERNO_OFS, sizeof(serial[0]));
2884 ata_id_c_string(new_id, serial[1], ATA_ID_SERNO_OFS, sizeof(serial[1]));
2885 new_n_sectors = ata_id_n_sectors(new_id);
2887 if (strcmp(model[0], model[1])) {
2888 ata_dev_printk(dev, KERN_INFO, "model number mismatch "
2889 "'%s' != '%s'\n", model[0], model[1]);
2893 if (strcmp(serial[0], serial[1])) {
2894 ata_dev_printk(dev, KERN_INFO, "serial number mismatch "
2895 "'%s' != '%s'\n", serial[0], serial[1]);
2899 if (dev->class == ATA_DEV_ATA && dev->n_sectors != new_n_sectors) {
2900 ata_dev_printk(dev, KERN_INFO, "n_sectors mismatch "
2902 (unsigned long long)dev->n_sectors,
2903 (unsigned long long)new_n_sectors);
2911 * ata_dev_revalidate - Revalidate ATA device
2912 * @dev: device to revalidate
2913 * @post_reset: is this revalidation after reset?
2915 * Re-read IDENTIFY page and make sure @dev is still attached to
2919 * Kernel thread context (may sleep)
2922 * 0 on success, negative errno otherwise
2924 int ata_dev_revalidate(struct ata_device *dev, int post_reset)
2926 unsigned int class = dev->class;
2927 u16 *id = (void *)dev->ap->sector_buf;
2930 if (!ata_dev_enabled(dev)) {
2936 rc = ata_dev_read_id(dev, &class, post_reset, id);
2940 /* is the device still there? */
2941 if (!ata_dev_same_device(dev, class, id)) {
2946 memcpy(dev->id, id, sizeof(id[0]) * ATA_ID_WORDS);
2948 /* configure device according to the new ID */
2949 rc = ata_dev_configure(dev, 0);
2954 ata_dev_printk(dev, KERN_ERR, "revalidation failed (errno=%d)\n", rc);
2958 static const char * const ata_dma_blacklist [] = {
2959 "WDC AC11000H", NULL,
2960 "WDC AC22100H", NULL,
2961 "WDC AC32500H", NULL,
2962 "WDC AC33100H", NULL,
2963 "WDC AC31600H", NULL,
2964 "WDC AC32100H", "24.09P07",
2965 "WDC AC23200L", "21.10N21",
2966 "Compaq CRD-8241B", NULL,
2971 "SanDisk SDP3B", NULL,
2972 "SanDisk SDP3B-64", NULL,
2973 "SANYO CD-ROM CRD", NULL,
2974 "HITACHI CDR-8", NULL,
2975 "HITACHI CDR-8335", NULL,
2976 "HITACHI CDR-8435", NULL,
2977 "Toshiba CD-ROM XM-6202B", NULL,
2978 "TOSHIBA CD-ROM XM-1702BC", NULL,
2980 "E-IDE CD-ROM CR-840", NULL,
2981 "CD-ROM Drive/F5A", NULL,
2982 "WPI CDD-820", NULL,
2983 "SAMSUNG CD-ROM SC-148C", NULL,
2984 "SAMSUNG CD-ROM SC", NULL,
2985 "SanDisk SDP3B-64", NULL,
2986 "ATAPI CD-ROM DRIVE 40X MAXIMUM",NULL,
2987 "_NEC DV5800A", NULL,
2988 "SAMSUNG CD-ROM SN-124", "N001"
2991 static int ata_strim(char *s, size_t len)
2993 len = strnlen(s, len);
2995 /* ATAPI specifies that empty space is blank-filled; remove blanks */
2996 while ((len > 0) && (s[len - 1] == ' ')) {
3003 static int ata_dma_blacklisted(const struct ata_device *dev)
3005 unsigned char model_num[40];
3006 unsigned char model_rev[16];
3007 unsigned int nlen, rlen;
3010 /* We don't support polling DMA.
3011 * Blacklist DMA for those ATAPI devices with CDB-intr (and use PIO)
3012 * if the LLDD handles interrupts only in the HSM_ST_LAST state.
3014 if ((dev->ap->flags & ATA_FLAG_PIO_POLLING) &&
3015 (dev->flags & ATA_DFLAG_CDB_INTR))
3018 ata_id_string(dev->id, model_num, ATA_ID_PROD_OFS,
3020 ata_id_string(dev->id, model_rev, ATA_ID_FW_REV_OFS,
3022 nlen = ata_strim(model_num, sizeof(model_num));
3023 rlen = ata_strim(model_rev, sizeof(model_rev));
3025 for (i = 0; i < ARRAY_SIZE(ata_dma_blacklist); i += 2) {
3026 if (!strncmp(ata_dma_blacklist[i], model_num, nlen)) {
3027 if (ata_dma_blacklist[i+1] == NULL)
3029 if (!strncmp(ata_dma_blacklist[i], model_rev, rlen))
3037 * ata_dev_xfermask - Compute supported xfermask of the given device
3038 * @dev: Device to compute xfermask for
3040 * Compute supported xfermask of @dev and store it in
3041 * dev->*_mask. This function is responsible for applying all
3042 * known limits including host controller limits, device
3045 * FIXME: The current implementation limits all transfer modes to
3046 * the fastest mode supported by the slowest device on the port. This is not
3047 * required on most controllers.
3052 static void ata_dev_xfermask(struct ata_device *dev)
3054 struct ata_port *ap = dev->ap;
3055 struct ata_host_set *hs = ap->host_set;
3056 unsigned long xfer_mask;
3059 xfer_mask = ata_pack_xfermask(ap->pio_mask,
3060 ap->mwdma_mask, ap->udma_mask);
3062 /* Apply cable rule here. Don't apply it early because when
3063 * we handle hot plug the cable type can itself change.
3065 if (ap->cbl == ATA_CBL_PATA40)
3066 xfer_mask &= ~(0xF8 << ATA_SHIFT_UDMA);
3068 /* FIXME: Use port-wide xfermask for now */
3069 for (i = 0; i < ATA_MAX_DEVICES; i++) {
3070 struct ata_device *d = &ap->device[i];
3072 if (ata_dev_absent(d))
3075 if (ata_dev_disabled(d)) {
3076 /* to avoid violating device selection timing */
3077 xfer_mask &= ata_pack_xfermask(d->pio_mask,
3078 UINT_MAX, UINT_MAX);
3082 xfer_mask &= ata_pack_xfermask(d->pio_mask,
3083 d->mwdma_mask, d->udma_mask);
3084 xfer_mask &= ata_id_xfermask(d->id);
3085 if (ata_dma_blacklisted(d))
3086 xfer_mask &= ~(ATA_MASK_MWDMA | ATA_MASK_UDMA);
3089 if (ata_dma_blacklisted(dev))
3090 ata_dev_printk(dev, KERN_WARNING,
3091 "device is on DMA blacklist, disabling DMA\n");
3093 if (hs->flags & ATA_HOST_SIMPLEX) {
3094 if (hs->simplex_claimed)
3095 xfer_mask &= ~(ATA_MASK_MWDMA | ATA_MASK_UDMA);
3098 if (ap->ops->mode_filter)
3099 xfer_mask = ap->ops->mode_filter(ap, dev, xfer_mask);
3101 ata_unpack_xfermask(xfer_mask, &dev->pio_mask,
3102 &dev->mwdma_mask, &dev->udma_mask);
3106 * ata_dev_set_xfermode - Issue SET FEATURES - XFER MODE command
3107 * @dev: Device to which command will be sent
3109 * Issue SET FEATURES - XFER MODE command to device @dev
3113 * PCI/etc. bus probe sem.
3116 * 0 on success, AC_ERR_* mask otherwise.
3119 static unsigned int ata_dev_set_xfermode(struct ata_device *dev)
3121 struct ata_taskfile tf;
3122 unsigned int err_mask;
3124 /* set up set-features taskfile */
3125 DPRINTK("set features - xfer mode\n");
3127 ata_tf_init(dev, &tf);
3128 tf.command = ATA_CMD_SET_FEATURES;
3129 tf.feature = SETFEATURES_XFER;
3130 tf.flags |= ATA_TFLAG_ISADDR | ATA_TFLAG_DEVICE;
3131 tf.protocol = ATA_PROT_NODATA;
3132 tf.nsect = dev->xfer_mode;
3134 err_mask = ata_exec_internal(dev, &tf, NULL, DMA_NONE, NULL, 0);
3136 DPRINTK("EXIT, err_mask=%x\n", err_mask);
3141 * ata_dev_init_params - Issue INIT DEV PARAMS command
3142 * @dev: Device to which command will be sent
3143 * @heads: Number of heads (taskfile parameter)
3144 * @sectors: Number of sectors (taskfile parameter)
3147 * Kernel thread context (may sleep)
3150 * 0 on success, AC_ERR_* mask otherwise.
3152 static unsigned int ata_dev_init_params(struct ata_device *dev,
3153 u16 heads, u16 sectors)
3155 struct ata_taskfile tf;
3156 unsigned int err_mask;
3158 /* Number of sectors per track 1-255. Number of heads 1-16 */
3159 if (sectors < 1 || sectors > 255 || heads < 1 || heads > 16)
3160 return AC_ERR_INVALID;
3162 /* set up init dev params taskfile */
3163 DPRINTK("init dev params \n");
3165 ata_tf_init(dev, &tf);
3166 tf.command = ATA_CMD_INIT_DEV_PARAMS;
3167 tf.flags |= ATA_TFLAG_ISADDR | ATA_TFLAG_DEVICE;
3168 tf.protocol = ATA_PROT_NODATA;
3170 tf.device |= (heads - 1) & 0x0f; /* max head = num. of heads - 1 */
3172 err_mask = ata_exec_internal(dev, &tf, NULL, DMA_NONE, NULL, 0);
3174 DPRINTK("EXIT, err_mask=%x\n", err_mask);
3179 * ata_sg_clean - Unmap DMA memory associated with command
3180 * @qc: Command containing DMA memory to be released
3182 * Unmap all mapped DMA memory associated with this command.
3185 * spin_lock_irqsave(host_set lock)
3188 static void ata_sg_clean(struct ata_queued_cmd *qc)
3190 struct ata_port *ap = qc->ap;
3191 struct scatterlist *sg = qc->__sg;
3192 int dir = qc->dma_dir;
3193 void *pad_buf = NULL;
3195 WARN_ON(!(qc->flags & ATA_QCFLAG_DMAMAP));
3196 WARN_ON(sg == NULL);
3198 if (qc->flags & ATA_QCFLAG_SINGLE)
3199 WARN_ON(qc->n_elem > 1);
3201 VPRINTK("unmapping %u sg elements\n", qc->n_elem);
3203 /* if we padded the buffer out to a 32-bit boundary, and the data
3204 * xfer direction is from-device, we must copy from the
3205 * pad buffer back into the supplied buffer
3207 if (qc->pad_len && !(qc->tf.flags & ATA_TFLAG_WRITE))
3208 pad_buf = ap->pad + (qc->tag * ATA_DMA_PAD_SZ);
3210 if (qc->flags & ATA_QCFLAG_SG) {
3212 dma_unmap_sg(ap->dev, sg, qc->n_elem, dir);
3213 /* restore last sg */
3214 sg[qc->orig_n_elem - 1].length += qc->pad_len;
3216 struct scatterlist *psg = &qc->pad_sgent;
3217 void *addr = kmap_atomic(psg->page, KM_IRQ0);
3218 memcpy(addr + psg->offset, pad_buf, qc->pad_len);
3219 kunmap_atomic(addr, KM_IRQ0);
3223 dma_unmap_single(ap->dev,
3224 sg_dma_address(&sg[0]), sg_dma_len(&sg[0]),
3227 sg->length += qc->pad_len;
3229 memcpy(qc->buf_virt + sg->length - qc->pad_len,
3230 pad_buf, qc->pad_len);
3233 qc->flags &= ~ATA_QCFLAG_DMAMAP;
3238 * ata_fill_sg - Fill PCI IDE PRD table
3239 * @qc: Metadata associated with taskfile to be transferred
3241 * Fill PCI IDE PRD (scatter-gather) table with segments
3242 * associated with the current disk command.
3245 * spin_lock_irqsave(host_set lock)
3248 static void ata_fill_sg(struct ata_queued_cmd *qc)
3250 struct ata_port *ap = qc->ap;
3251 struct scatterlist *sg;
3254 WARN_ON(qc->__sg == NULL);
3255 WARN_ON(qc->n_elem == 0 && qc->pad_len == 0);
3258 ata_for_each_sg(sg, qc) {
3262 /* determine if physical DMA addr spans 64K boundary.
3263 * Note h/w doesn't support 64-bit, so we unconditionally
3264 * truncate dma_addr_t to u32.
3266 addr = (u32) sg_dma_address(sg);
3267 sg_len = sg_dma_len(sg);
3270 offset = addr & 0xffff;
3272 if ((offset + sg_len) > 0x10000)
3273 len = 0x10000 - offset;
3275 ap->prd[idx].addr = cpu_to_le32(addr);
3276 ap->prd[idx].flags_len = cpu_to_le32(len & 0xffff);
3277 VPRINTK("PRD[%u] = (0x%X, 0x%X)\n", idx, addr, len);
3286 ap->prd[idx - 1].flags_len |= cpu_to_le32(ATA_PRD_EOT);
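/*
 * Illustrative sketch (not part of the original file): the 64K-boundary
 * split performed by ata_fill_sg() in isolation. For instance, a segment of
 * 0x2800 bytes at DMA address 0x1f000 becomes two PRD entries: 0x1000 bytes
 * at 0x1f000 and 0x1800 bytes at 0x20000. The helper name is made up.
 */
static unsigned int example_count_prd_entries(u32 addr, u32 sg_len)
{
	unsigned int nents = 0;

	while (sg_len) {
		u32 offset = addr & 0xffff;
		u32 len = sg_len;

		if ((offset + sg_len) > 0x10000)
			len = 0x10000 - offset;

		nents++;		/* one PRD entry of 'len' bytes at 'addr' */
		addr += len;
		sg_len -= len;
	}

	return nents;
}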
3289 * ata_check_atapi_dma - Check whether ATAPI DMA can be supported
3290 * @qc: Metadata associated with taskfile to check
3292 * Allow low-level driver to filter ATA PACKET commands, returning
3293 * a status indicating whether or not it is OK to use DMA for the
3294 * supplied PACKET command.
3297 * spin_lock_irqsave(host_set lock)
3299 * RETURNS: 0 when ATAPI DMA can be used
3302 int ata_check_atapi_dma(struct ata_queued_cmd *qc)
3304 struct ata_port *ap = qc->ap;
3305 int rc = 0; /* Assume ATAPI DMA is OK by default */
3307 if (ap->ops->check_atapi_dma)
3308 rc = ap->ops->check_atapi_dma(qc);
3313 * ata_qc_prep - Prepare taskfile for submission
3314 * @qc: Metadata associated with taskfile to be prepared
3316 * Prepare ATA taskfile for submission.
3319 * spin_lock_irqsave(host_set lock)
3321 void ata_qc_prep(struct ata_queued_cmd *qc)
3323 if (!(qc->flags & ATA_QCFLAG_DMAMAP))
3329 void ata_noop_qc_prep(struct ata_queued_cmd *qc) { }
3332 * ata_sg_init_one - Associate command with memory buffer
3333 * @qc: Command to be associated
3334 * @buf: Memory buffer
3335 * @buflen: Length of memory buffer, in bytes.
3337 * Initialize the data-related elements of queued_cmd @qc
3338 * to point to a single memory buffer, @buf of byte length @buflen.
3341 * spin_lock_irqsave(host_set lock)
3344 void ata_sg_init_one(struct ata_queued_cmd *qc, void *buf, unsigned int buflen)
3346 struct scatterlist *sg;
3348 qc->flags |= ATA_QCFLAG_SINGLE;
3350 memset(&qc->sgent, 0, sizeof(qc->sgent));
3351 qc->__sg = &qc->sgent;
3353 qc->orig_n_elem = 1;
3355 qc->nbytes = buflen;
3358 sg_init_one(sg, buf, buflen);
3362 * ata_sg_init - Associate command with scatter-gather table.
3363 * @qc: Command to be associated
3364 * @sg: Scatter-gather table.
3365 * @n_elem: Number of elements in s/g table.
3367 * Initialize the data-related elements of queued_cmd @qc
3368 * to point to a scatter-gather table @sg, containing @n_elem
3372 * spin_lock_irqsave(host_set lock)
3375 void ata_sg_init(struct ata_queued_cmd *qc, struct scatterlist *sg,
3376 unsigned int n_elem)
3378 qc->flags |= ATA_QCFLAG_SG;
3380 qc->n_elem = n_elem;
3381 qc->orig_n_elem = n_elem;
3385 * ata_sg_setup_one - DMA-map the memory buffer associated with a command.
3386 * @qc: Command with memory buffer to be mapped.
3388 * DMA-map the memory buffer associated with queued_cmd @qc.
3391 * spin_lock_irqsave(host_set lock)
3394 * Zero on success, negative on error.
3397 static int ata_sg_setup_one(struct ata_queued_cmd *qc)
3399 struct ata_port *ap = qc->ap;
3400 int dir = qc->dma_dir;
3401 struct scatterlist *sg = qc->__sg;
3402 dma_addr_t dma_address;
3405 /* we must lengthen transfers to end on a 32-bit boundary */
3406 qc->pad_len = sg->length & 3;
3408 void *pad_buf = ap->pad + (qc->tag * ATA_DMA_PAD_SZ);
3409 struct scatterlist *psg = &qc->pad_sgent;
3411 WARN_ON(qc->dev->class != ATA_DEV_ATAPI);
3413 memset(pad_buf, 0, ATA_DMA_PAD_SZ);
3415 if (qc->tf.flags & ATA_TFLAG_WRITE)
3416 memcpy(pad_buf, qc->buf_virt + sg->length - qc->pad_len,
3419 sg_dma_address(psg) = ap->pad_dma + (qc->tag * ATA_DMA_PAD_SZ);
3420 sg_dma_len(psg) = ATA_DMA_PAD_SZ;
3422 sg->length -= qc->pad_len;
3423 if (sg->length == 0)
3426 DPRINTK("padding done, sg->length=%u pad_len=%u\n",
3427 sg->length, qc->pad_len);
3435 dma_address = dma_map_single(ap->dev, qc->buf_virt,
3437 if (dma_mapping_error(dma_address)) {
3439 sg->length += qc->pad_len;
3443 sg_dma_address(sg) = dma_address;
3444 sg_dma_len(sg) = sg->length;
3447 DPRINTK("mapped buffer of %d bytes for %s\n", sg_dma_len(sg),
3448 qc->tf.flags & ATA_TFLAG_WRITE ? "write" : "read");
3454 * ata_sg_setup - DMA-map the scatter-gather table associated with a command.
3455 * @qc: Command with scatter-gather table to be mapped.
3457 * DMA-map the scatter-gather table associated with queued_cmd @qc.
3460 * spin_lock_irqsave(host_set lock)
3463 * Zero on success, negative on error.
3467 static int ata_sg_setup(struct ata_queued_cmd *qc)
3469 struct ata_port *ap = qc->ap;
3470 struct scatterlist *sg = qc->__sg;
3471 struct scatterlist *lsg = &sg[qc->n_elem - 1];
3472 int n_elem, pre_n_elem, dir, trim_sg = 0;
3474 VPRINTK("ENTER, ata%u\n", ap->id);
3475 WARN_ON(!(qc->flags & ATA_QCFLAG_SG));
3477 /* we must lengthen transfers to end on a 32-bit boundary */
3478 qc->pad_len = lsg->length & 3;
3480 void *pad_buf = ap->pad + (qc->tag * ATA_DMA_PAD_SZ);
3481 struct scatterlist *psg = &qc->pad_sgent;
3482 unsigned int offset;
3484 WARN_ON(qc->dev->class != ATA_DEV_ATAPI);
3486 memset(pad_buf, 0, ATA_DMA_PAD_SZ);
3489 * psg->page/offset are used to copy to-be-written
3490 * data in this function or read data in ata_sg_clean.
3492 offset = lsg->offset + lsg->length - qc->pad_len;
3493 psg->page = nth_page(lsg->page, offset >> PAGE_SHIFT);
3494 psg->offset = offset_in_page(offset);
3496 if (qc->tf.flags & ATA_TFLAG_WRITE) {
3497 void *addr = kmap_atomic(psg->page, KM_IRQ0);
3498 memcpy(pad_buf, addr + psg->offset, qc->pad_len);
3499 kunmap_atomic(addr, KM_IRQ0);
3502 sg_dma_address(psg) = ap->pad_dma + (qc->tag * ATA_DMA_PAD_SZ);
3503 sg_dma_len(psg) = ATA_DMA_PAD_SZ;
3505 lsg->length -= qc->pad_len;
3506 if (lsg->length == 0)
3509 DPRINTK("padding done, sg[%d].length=%u pad_len=%u\n",
3510 qc->n_elem - 1, lsg->length, qc->pad_len);
3513 pre_n_elem = qc->n_elem;
3514 if (trim_sg && pre_n_elem)
3523 n_elem = dma_map_sg(ap->dev, sg, pre_n_elem, dir);
3525 /* restore last sg */
3526 lsg->length += qc->pad_len;
3530 DPRINTK("%d sg elements mapped\n", n_elem);
3533 qc->n_elem = n_elem;
3539 * swap_buf_le16 - swap halves of 16-bit words in place
3540 * @buf: Buffer to swap
3541 * @buf_words: Number of 16-bit words in buffer.
3543 * Swap halves of 16-bit words if needed to convert from
3544 * little-endian byte order to native cpu byte order, or
3548 * Inherited from caller.
3550 void swap_buf_le16(u16 *buf, unsigned int buf_words)
3555 for (i = 0; i < buf_words; i++)
3556 buf[i] = le16_to_cpu(buf[i]);
3557 #endif /* __BIG_ENDIAN */
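/*
 * Illustrative sketch (not part of the original file): IDENTIFY data is
 * returned as 256 little-endian words, so PIO paths convert it to CPU byte
 * order before the ata_id_*() accessors are used; on little-endian machines
 * the conversion compiles away. The helper name is made up for the example.
 */
static void example_fixup_identify_data(u16 *id)
{
	swap_buf_le16(id, ATA_ID_WORDS);
}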
3561 * ata_mmio_data_xfer - Transfer data by MMIO
3562 * @adev: device for this I/O
3564 * @buflen: buffer length
3565 * @write_data: read/write
3567 * Transfer data from/to the device data register by MMIO.
3570 * Inherited from caller.
3573 void ata_mmio_data_xfer(struct ata_device *adev, unsigned char *buf,
3574 unsigned int buflen, int write_data)
3576 struct ata_port *ap = adev->ap;
3578 unsigned int words = buflen >> 1;
3579 u16 *buf16 = (u16 *) buf;
3580 void __iomem *mmio = (void __iomem *)ap->ioaddr.data_addr;
3582 /* Transfer multiple of 2 bytes */
3584 for (i = 0; i < words; i++)
3585 writew(le16_to_cpu(buf16[i]), mmio);
3587 for (i = 0; i < words; i++)
3588 buf16[i] = cpu_to_le16(readw(mmio));
3591 /* Transfer trailing 1 byte, if any. */
3592 if (unlikely(buflen & 0x01)) {
3593 u16 align_buf[1] = { 0 };
3594 unsigned char *trailing_buf = buf + buflen - 1;
3597 memcpy(align_buf, trailing_buf, 1);
3598 writew(le16_to_cpu(align_buf[0]), mmio);
3600 align_buf[0] = cpu_to_le16(readw(mmio));
3601 memcpy(trailing_buf, align_buf, 1);
3607 * ata_pio_data_xfer - Transfer data by PIO
3608 * @adev: device to target
3610 * @buflen: buffer length
3611 * @write_data: read/write
3613 * Transfer data from/to the device data register by PIO.
3616 * Inherited from caller.
3619 void ata_pio_data_xfer(struct ata_device *adev, unsigned char *buf,
3620 unsigned int buflen, int write_data)
3622 struct ata_port *ap = adev->ap;
3623 unsigned int words = buflen >> 1;
3625 /* Transfer multiple of 2 bytes */
3627 outsw(ap->ioaddr.data_addr, buf, words);
3629 insw(ap->ioaddr.data_addr, buf, words);
3631 /* Transfer trailing 1 byte, if any. */
3632 if (unlikely(buflen & 0x01)) {
3633 u16 align_buf[1] = { 0 };
3634 unsigned char *trailing_buf = buf + buflen - 1;
3637 memcpy(align_buf, trailing_buf, 1);
3638 outw(le16_to_cpu(align_buf[0]), ap->ioaddr.data_addr);
3640 align_buf[0] = cpu_to_le16(inw(ap->ioaddr.data_addr));
3641 memcpy(trailing_buf, align_buf, 1);
3647 * ata_pio_data_xfer_noirq - Transfer data by PIO
3648 * @adev: device to target
3650 * @buflen: buffer length
3651 * @write_data: read/write
3653 * Transfer data from/to the device data register by PIO. Do the
3654 * transfer with interrupts disabled.
3657 * Inherited from caller.
3660 void ata_pio_data_xfer_noirq(struct ata_device *adev, unsigned char *buf,
3661 unsigned int buflen, int write_data)
3663 unsigned long flags;
3664 local_irq_save(flags);
3665 ata_pio_data_xfer(adev, buf, buflen, write_data);
3666 local_irq_restore(flags);
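/*
 * Illustrative sketch (not part of the original file): a low-level driver
 * picks one of the three helpers above as its ->data_xfer hook depending on
 * whether the data register is memory-mapped and whether transfers must run
 * with local interrupts disabled. Only that one field is shown; the ops
 * fragment is an assumption for the example.
 */
static const struct ata_port_operations example_pio_ops_fragment = {
	.data_xfer	= ata_pio_data_xfer_noirq,	/* port I/O, IRQs off */
};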
3671 * ata_pio_sector - Transfer ATA_SECT_SIZE (512 bytes) of data.
3672 * @qc: Command on going
3674 * Transfer ATA_SECT_SIZE of data from/to the ATA device.
3677 * Inherited from caller.
3680 static void ata_pio_sector(struct ata_queued_cmd *qc)
3682 int do_write = (qc->tf.flags & ATA_TFLAG_WRITE);
3683 struct scatterlist *sg = qc->__sg;
3684 struct ata_port *ap = qc->ap;
3686 unsigned int offset;
3689 if (qc->cursect == (qc->nsect - 1))
3690 ap->hsm_task_state = HSM_ST_LAST;
3692 page = sg[qc->cursg].page;
3693 offset = sg[qc->cursg].offset + qc->cursg_ofs * ATA_SECT_SIZE;
3695 /* get the current page and offset */
3696 page = nth_page(page, (offset >> PAGE_SHIFT));
3697 offset %= PAGE_SIZE;
3699 DPRINTK("data %s\n", qc->tf.flags & ATA_TFLAG_WRITE ? "write" : "read");
3701 if (PageHighMem(page)) {
3702 unsigned long flags;
3704 /* FIXME: use a bounce buffer */
3705 local_irq_save(flags);
3706 buf = kmap_atomic(page, KM_IRQ0);
3708 /* do the actual data transfer */
3709 ap->ops->data_xfer(qc->dev, buf + offset, ATA_SECT_SIZE, do_write);
3711 kunmap_atomic(buf, KM_IRQ0);
3712 local_irq_restore(flags);
3714 buf = page_address(page);
3715 ap->ops->data_xfer(qc->dev, buf + offset, ATA_SECT_SIZE, do_write);
3721 if ((qc->cursg_ofs * ATA_SECT_SIZE) == (&sg[qc->cursg])->length) {
3728 * ata_pio_sectors - Transfer one or many 512-byte sectors.
3729 * @qc: Command on going
3731 * Transfer one or many ATA_SECT_SIZE of data from/to the
3732 * ATA device for the DRQ request.
3735 * Inherited from caller.
3738 static void ata_pio_sectors(struct ata_queued_cmd *qc)
3740 if (is_multi_taskfile(&qc->tf)) {
3741 /* READ/WRITE MULTIPLE */
3744 WARN_ON(qc->dev->multi_count == 0);
3746 nsect = min(qc->nsect - qc->cursect, qc->dev->multi_count);
3754 * atapi_send_cdb - Write CDB bytes to hardware
3755 * @ap: Port to which ATAPI device is attached.
3756 * @qc: Taskfile currently active
3758 * When the device has indicated its readiness to accept
3759 * a CDB, this function is called. Send the CDB.
3765 static void atapi_send_cdb(struct ata_port *ap, struct ata_queued_cmd *qc)
3768 DPRINTK("send cdb\n");
3769 WARN_ON(qc->dev->cdb_len < 12);
3771 ap->ops->data_xfer(qc->dev, qc->cdb, qc->dev->cdb_len, 1);
3772 ata_altstatus(ap); /* flush */
3774 switch (qc->tf.protocol) {
3775 case ATA_PROT_ATAPI:
3776 ap->hsm_task_state = HSM_ST;
3778 case ATA_PROT_ATAPI_NODATA:
3779 ap->hsm_task_state = HSM_ST_LAST;
3781 case ATA_PROT_ATAPI_DMA:
3782 ap->hsm_task_state = HSM_ST_LAST;
3783 /* initiate bmdma */
3784 ap->ops->bmdma_start(qc);
3790 * __atapi_pio_bytes - Transfer data from/to the ATAPI device.
3791 * @qc: Command on going
3792 * @bytes: number of bytes
3794 * Transfer data from/to the ATAPI device.
3797 * Inherited from caller.
3801 static void __atapi_pio_bytes(struct ata_queued_cmd *qc, unsigned int bytes)
3803 int do_write = (qc->tf.flags & ATA_TFLAG_WRITE);
3804 struct scatterlist *sg = qc->__sg;
3805 struct ata_port *ap = qc->ap;
3808 unsigned int offset, count;
3810 if (qc->curbytes + bytes >= qc->nbytes)
3811 ap->hsm_task_state = HSM_ST_LAST;
3814 if (unlikely(qc->cursg >= qc->n_elem)) {
3816 * The end of qc->sg is reached and the device expects
3817 * more data to transfer. In order not to overrun qc->sg
3818 * and to fulfill the length specified in the byte count register,
3819 * - for the read case, discard trailing data from the device
3820 * - for the write case, pad with zero data to the device
3822 u16 pad_buf[1] = { 0 };
3823 unsigned int words = bytes >> 1;
3826 if (words) /* warning if bytes > 1 */
3827 ata_dev_printk(qc->dev, KERN_WARNING,
3828 "%u bytes trailing data\n", bytes);
3830 for (i = 0; i < words; i++)
3831 ap->ops->data_xfer(qc->dev, (unsigned char*)pad_buf, 2, do_write);
3833 ap->hsm_task_state = HSM_ST_LAST;
3837 sg = &qc->__sg[qc->cursg];
3840 offset = sg->offset + qc->cursg_ofs;
3842 /* get the current page and offset */
3843 page = nth_page(page, (offset >> PAGE_SHIFT));
3844 offset %= PAGE_SIZE;
3846 /* don't overrun current sg */
3847 count = min(sg->length - qc->cursg_ofs, bytes);
3849 /* don't cross page boundaries */
3850 count = min(count, (unsigned int)PAGE_SIZE - offset);
3852 DPRINTK("data %s\n", qc->tf.flags & ATA_TFLAG_WRITE ? "write" : "read");
3854 if (PageHighMem(page)) {
3855 unsigned long flags;
3857 /* FIXME: use bounce buffer */
3858 local_irq_save(flags);
3859 buf = kmap_atomic(page, KM_IRQ0);
3861 /* do the actual data transfer */
3862 ap->ops->data_xfer(qc->dev, buf + offset, count, do_write);
3864 kunmap_atomic(buf, KM_IRQ0);
3865 local_irq_restore(flags);
3867 buf = page_address(page);
3868 ap->ops->data_xfer(qc->dev, buf + offset, count, do_write);
3872 qc->curbytes += count;
3873 qc->cursg_ofs += count;
3875 if (qc->cursg_ofs == sg->length) {
3885 * atapi_pio_bytes - Transfer data from/to the ATAPI device.
3886 * @qc: Command on going
3888 * Transfer data from/to the ATAPI device.
3891 * Inherited from caller.
3894 static void atapi_pio_bytes(struct ata_queued_cmd *qc)
3896 struct ata_port *ap = qc->ap;
3897 struct ata_device *dev = qc->dev;
3898 unsigned int ireason, bc_lo, bc_hi, bytes;
3899 int i_write, do_write = (qc->tf.flags & ATA_TFLAG_WRITE) ? 1 : 0;
3901 /* Abuse qc->result_tf for temp storage of intermediate TF
3902 * here to save some kernel stack usage.
3903 * For normal completion, qc->result_tf is not relevant. For
3904 * error, qc->result_tf is later overwritten by ata_qc_complete().
3905 * So, the correctness of qc->result_tf is not affected.
3907 ap->ops->tf_read(ap, &qc->result_tf);
3908 ireason = qc->result_tf.nsect;
3909 bc_lo = qc->result_tf.lbam;
3910 bc_hi = qc->result_tf.lbah;
3911 bytes = (bc_hi << 8) | bc_lo;
3913 /* shall be cleared to zero, indicating xfer of data */
3914 if (ireason & (1 << 0))
3917 /* make sure transfer direction matches expected */
3918 i_write = ((ireason & (1 << 1)) == 0) ? 1 : 0;
3919 if (do_write != i_write)
3922 VPRINTK("ata%u: xfering %d bytes\n", ap->id, bytes);
3924 __atapi_pio_bytes(qc, bytes);
3929 ata_dev_printk(dev, KERN_INFO, "ATAPI check failed\n");
3930 qc->err_mask |= AC_ERR_HSM;
3931 ap->hsm_task_state = HSM_ST_ERR;
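/*
 * Illustrative sketch (not part of the original file): the checks above
 * decode the ATAPI interrupt reason register. Bit 0 (CoD) must be clear for
 * a data phase, and bit 1 (IO) gives the direction the device expects,
 * which has to agree with the direction of the queued command. The helper
 * name is made up for the example.
 */
static inline int example_atapi_drq_phase_ok(u8 ireason, int do_write)
{
	int i_write = ((ireason & (1 << 1)) == 0) ? 1 : 0;	/* IO=0: host->device */

	return !(ireason & (1 << 0)) && (do_write == i_write);
}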
3935 * ata_hsm_ok_in_wq - Check if the qc can be handled in the workqueue.
3936 * @ap: the target ata_port
3940 * 1 if ok in workqueue, 0 otherwise.
3943 static inline int ata_hsm_ok_in_wq(struct ata_port *ap, struct ata_queued_cmd *qc)
3945 if (qc->tf.flags & ATA_TFLAG_POLLING)
3948 if (ap->hsm_task_state == HSM_ST_FIRST) {
3949 if (qc->tf.protocol == ATA_PROT_PIO &&
3950 (qc->tf.flags & ATA_TFLAG_WRITE))
3953 if (is_atapi_taskfile(&qc->tf) &&
3954 !(qc->dev->flags & ATA_DFLAG_CDB_INTR))
3962 * ata_hsm_qc_complete - finish a qc running on standard HSM
3963 * @qc: Command to complete
3964 * @in_wq: 1 if called from workqueue, 0 otherwise
3966 * Finish @qc which is running on standard HSM.
3969 * If @in_wq is zero, spin_lock_irqsave(host_set lock).
3970 * Otherwise, none on entry and grabs host lock.
3972 static void ata_hsm_qc_complete(struct ata_queued_cmd *qc, int in_wq)
3974 struct ata_port *ap = qc->ap;
3975 unsigned long flags;
3977 if (ap->ops->error_handler) {
3979 spin_lock_irqsave(ap->lock, flags);
3981 /* EH might have kicked in while host_set lock
3984 qc = ata_qc_from_tag(ap, qc->tag);
3986 if (likely(!(qc->err_mask & AC_ERR_HSM))) {
3988 ata_qc_complete(qc);
3990 ata_port_freeze(ap);
3993 spin_unlock_irqrestore(ap->lock, flags);
3995 if (likely(!(qc->err_mask & AC_ERR_HSM)))
3996 ata_qc_complete(qc);
3998 ata_port_freeze(ap);
4002 spin_lock_irqsave(ap->lock, flags);
4004 ata_qc_complete(qc);
4005 spin_unlock_irqrestore(ap->lock, flags);
4007 ata_qc_complete(qc);
4010 ata_altstatus(ap); /* flush */
4014 * ata_hsm_move - move the HSM to the next state.
4015 * @ap: the target ata_port
4017 * @status: current device status
4018 * @in_wq: 1 if called from workqueue, 0 otherwise
4021 * 1 when poll next status needed, 0 otherwise.
4023 int ata_hsm_move(struct ata_port *ap, struct ata_queued_cmd *qc,
4024 u8 status, int in_wq)
4026 unsigned long flags = 0;
4029 WARN_ON((qc->flags & ATA_QCFLAG_ACTIVE) == 0);
4031 /* Make sure ata_qc_issue_prot() does not throw things
4032 * like DMA polling into the workqueue. Notice that
4033 * in_wq is not equivalent to (qc->tf.flags & ATA_TFLAG_POLLING).
4035 WARN_ON(in_wq != ata_hsm_ok_in_wq(ap, qc));
4038 DPRINTK("ata%u: protocol %d task_state %d (dev_stat 0x%X)\n",
4039 ap->id, qc->tf.protocol, ap->hsm_task_state, status);
4041 switch (ap->hsm_task_state) {
4043 /* Send first data block or PACKET CDB */
4045 /* If polling, we will stay in the work queue after
4046 * sending the data. Otherwise, interrupt handler
4047 * takes over after sending the data.
4049 poll_next = (qc->tf.flags & ATA_TFLAG_POLLING);
4051 /* check device status */
4052 if (unlikely((status & ATA_DRQ) == 0)) {
4053 /* handle BSY=0, DRQ=0 as error */
4054 if (likely(status & (ATA_ERR | ATA_DF)))
4055 /* device stops HSM for abort/error */
4056 qc->err_mask |= AC_ERR_DEV;
4058 /* HSM violation. Let EH handle this */
4059 qc->err_mask |= AC_ERR_HSM;
4061 ap->hsm_task_state = HSM_ST_ERR;
4065 /* Device should not ask for data transfer (DRQ=1)
4066 * when it finds something wrong.
4067 * We ignore DRQ here and stop the HSM by
4068 * changing hsm_task_state to HSM_ST_ERR and
4069 * let the EH abort the command or reset the device.
4071 if (unlikely(status & (ATA_ERR | ATA_DF))) {
4072 printk(KERN_WARNING "ata%d: DRQ=1 with device error, dev_stat 0x%X\n",
4074 qc->err_mask |= AC_ERR_HSM;
4075 ap->hsm_task_state = HSM_ST_ERR;
4079 /* Send the CDB (atapi) or the first data block (ata pio out).
4080 * During the state transition, interrupt handler shouldn't
4081 * be invoked before the data transfer is complete and
4082 * hsm_task_state is changed. Hence, the following locking.
4085 spin_lock_irqsave(ap->lock, flags);
4087 if (qc->tf.protocol == ATA_PROT_PIO) {
4088 /* PIO data out protocol.
4089 * send first data block.
4092 /* ata_pio_sectors() might change the state
4093 * to HSM_ST_LAST. so, the state is changed here
4094 * before ata_pio_sectors().
4096 ap->hsm_task_state = HSM_ST;
4097 ata_pio_sectors(qc);
4098 ata_altstatus(ap); /* flush */
4101 atapi_send_cdb(ap, qc);
4104 spin_unlock_irqrestore(ap->lock, flags);
4106 /* if polling, ata_pio_task() handles the rest.
4107 * otherwise, interrupt handler takes over from here.
4112 /* complete command or read/write the data register */
4113 if (qc->tf.protocol == ATA_PROT_ATAPI) {
4114 /* ATAPI PIO protocol */
4115 if ((status & ATA_DRQ) == 0) {
4116 /* No more data to transfer or device error.
4117 * Device error will be tagged in HSM_ST_LAST.
4119 ap->hsm_task_state = HSM_ST_LAST;
4123 /* Device should not ask for data transfer (DRQ=1)
4124 * when it finds something wrong.
4125 * We ignore DRQ here and stop the HSM by
4126 * changing hsm_task_state to HSM_ST_ERR and
4127 * let the EH abort the command or reset the device.
4129 if (unlikely(status & (ATA_ERR | ATA_DF))) {
4130 printk(KERN_WARNING "ata%d: DRQ=1 with device error, dev_stat 0x%X\n",
4132 qc->err_mask |= AC_ERR_HSM;
4133 ap->hsm_task_state = HSM_ST_ERR;
4137 atapi_pio_bytes(qc);
4139 if (unlikely(ap->hsm_task_state == HSM_ST_ERR))
4140 /* bad ireason reported by device */
4144 /* ATA PIO protocol */
4145 if (unlikely((status & ATA_DRQ) == 0)) {
4146 /* handle BSY=0, DRQ=0 as error */
4147 if (likely(status & (ATA_ERR | ATA_DF)))
4148 /* device stops HSM for abort/error */
4149 qc->err_mask |= AC_ERR_DEV;
4151 /* HSM violation. Let EH handle this */
4152 qc->err_mask |= AC_ERR_HSM;
4154 ap->hsm_task_state = HSM_ST_ERR;
4158 /* For PIO reads, some devices may ask for
4159 * data transfer (DRQ=1) along with ERR=1.
4160 * We respect DRQ here and transfer one
4161 * block of junk data before changing the
4162 * hsm_task_state to HSM_ST_ERR.
4164 * For PIO writes, ERR=1 DRQ=1 doesn't make
4165 * sense since the data block has been
4166 * transferred to the device.
4168 if (unlikely(status & (ATA_ERR | ATA_DF))) {
4169 /* data might be corrupted */
4170 qc->err_mask |= AC_ERR_DEV;
4172 if (!(qc->tf.flags & ATA_TFLAG_WRITE)) {
4173 ata_pio_sectors(qc);
4175 status = ata_wait_idle(ap);
4178 if (status & (ATA_BUSY | ATA_DRQ))
4179 qc->err_mask |= AC_ERR_HSM;
4181 /* ata_pio_sectors() might change the
4182 * state to HSM_ST_LAST. so, the state
4183 * is changed after ata_pio_sectors().
4185 ap->hsm_task_state = HSM_ST_ERR;
4189 ata_pio_sectors(qc);
4191 if (ap->hsm_task_state == HSM_ST_LAST &&
4192 (!(qc->tf.flags & ATA_TFLAG_WRITE))) {
4195 status = ata_wait_idle(ap);
4200 ata_altstatus(ap); /* flush */
4205 if (unlikely(!ata_ok(status))) {
4206 qc->err_mask |= __ac_err_mask(status);
4207 ap->hsm_task_state = HSM_ST_ERR;
4211 /* no more data to transfer */
4212 DPRINTK("ata%u: dev %u command complete, drv_stat 0x%x\n",
4213 ap->id, qc->dev->devno, status);
4215 WARN_ON(qc->err_mask);
4217 ap->hsm_task_state = HSM_ST_IDLE;
4219 /* complete taskfile transaction */
4220 ata_hsm_qc_complete(qc, in_wq);
4226 /* make sure qc->err_mask is available to
4227 * know what's wrong and recover
4229 WARN_ON(qc->err_mask == 0);
4231 ap->hsm_task_state = HSM_ST_IDLE;
4233 /* complete taskfile transaction */
4234 ata_hsm_qc_complete(qc, in_wq);
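/*
 * Illustrative summary (not part of the original file) of the task states
 * driven by ata_hsm_move() above:
 *
 *   HSM_ST_FIRST - send the CDB or the first PIO data block
 *   HSM_ST       - transfer further data blocks on each DRQ assertion
 *   HSM_ST_LAST  - wait for the final status and complete the qc
 *   HSM_ST_ERR   - record qc->err_mask and complete/freeze via EH
 *   HSM_ST_IDLE  - no command in flight
 */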
4246 static void ata_pio_task(void *_data)
4248 struct ata_queued_cmd *qc = _data;
4249 struct ata_port *ap = qc->ap;
4254 WARN_ON(ap->hsm_task_state == HSM_ST_IDLE);
4257 * This is purely heuristic. This is a fast path.
4258 * Sometimes when we enter, BSY will be cleared in
4259 * a chk-status or two. If not, the drive is probably seeking
4260 * or something. Snooze for a couple msecs, then
4261 * chk-status again. If still busy, queue delayed work.
4263 status = ata_busy_wait(ap, ATA_BUSY, 5);
4264 if (status & ATA_BUSY) {
4266 status = ata_busy_wait(ap, ATA_BUSY, 10);
4267 if (status & ATA_BUSY) {
4268 ata_port_queue_task(ap, ata_pio_task, qc, ATA_SHORT_PAUSE);
4274 poll_next = ata_hsm_move(ap, qc, status, 1);
4276 /* another command or interrupt handler
4277 * may be running at this point.
4284 * ata_qc_new - Request an available ATA command, for queueing
4285 * @ap: Port associated with device @dev
4286 * @dev: Device from whom we request an available command structure
4292 static struct ata_queued_cmd *ata_qc_new(struct ata_port *ap)
4294 struct ata_queued_cmd *qc = NULL;
4297 /* no command while frozen */
4298 if (unlikely(ap->pflags & ATA_PFLAG_FROZEN))
4301 /* the last tag is reserved for internal command. */
4302 for (i = 0; i < ATA_MAX_QUEUE - 1; i++)
4303 if (!test_and_set_bit(i, &ap->qc_allocated)) {
4304 qc = __ata_qc_from_tag(ap, i);
4315 * ata_qc_new_init - Request an available ATA command, and initialize it
4316 * @dev: Device from whom we request an available command structure
4322 struct ata_queued_cmd *ata_qc_new_init(struct ata_device *dev)
4324 struct ata_port *ap = dev->ap;
4325 struct ata_queued_cmd *qc;
4327 qc = ata_qc_new(ap);
4340 * ata_qc_free - free unused ata_queued_cmd
4341 * @qc: Command to complete
4343 * Designed to free unused ata_queued_cmd object
4344 * in case something prevents using it.
4347 * spin_lock_irqsave(host_set lock)
4349 void ata_qc_free(struct ata_queued_cmd *qc)
4351 struct ata_port *ap = qc->ap;
4354 WARN_ON(qc == NULL); /* ata_qc_from_tag _might_ return NULL */
4358 if (likely(ata_tag_valid(tag))) {
4359 qc->tag = ATA_TAG_POISON;
4360 clear_bit(tag, &ap->qc_allocated);
4364 void __ata_qc_complete(struct ata_queued_cmd *qc)
4366 struct ata_port *ap = qc->ap;
4368 WARN_ON(qc == NULL); /* ata_qc_from_tag _might_ return NULL */
4369 WARN_ON(!(qc->flags & ATA_QCFLAG_ACTIVE));
4371 if (likely(qc->flags & ATA_QCFLAG_DMAMAP))
4374 /* command should be marked inactive atomically with qc completion */
4375 if (qc->tf.protocol == ATA_PROT_NCQ)
4376 ap->sactive &= ~(1 << qc->tag);
4378 ap->active_tag = ATA_TAG_POISON;
4380 /* atapi: mark qc as inactive to prevent the interrupt handler
4381 * from completing the command twice later, before the error handler
4382 * is called. (when rc != 0 and atapi request sense is needed)
4384 qc->flags &= ~ATA_QCFLAG_ACTIVE;
4385 ap->qc_active &= ~(1 << qc->tag);
4387 /* call completion callback */
4388 qc->complete_fn(qc);
4392 * ata_qc_complete - Complete an active ATA command
4393 * @qc: Command to complete
4394 * @err_mask: ATA Status register contents
4396 * Indicate to the mid and upper layers that an ATA
4397 * command has completed, with either an ok or not-ok status.
4400 * spin_lock_irqsave(host_set lock)
4402 void ata_qc_complete(struct ata_queued_cmd *qc)
4404 struct ata_port *ap = qc->ap;
4406 /* XXX: New EH and old EH use different mechanisms to
4407 * synchronize EH with regular execution path.
4409 * In new EH, a failed qc is marked with ATA_QCFLAG_FAILED.
4410 * Normal execution path is responsible for not accessing a
4411 * failed qc. libata core enforces the rule by returning NULL
4412 * from ata_qc_from_tag() for failed qcs.
4414 * Old EH depends on ata_qc_complete() nullifying completion
4415 * requests if ATA_QCFLAG_EH_SCHEDULED is set. Old EH does
4416 * not synchronize with interrupt handler. Only PIO task is
4419 if (ap->ops->error_handler) {
4420 WARN_ON(ap->pflags & ATA_PFLAG_FROZEN);
4422 if (unlikely(qc->err_mask))
4423 qc->flags |= ATA_QCFLAG_FAILED;
4425 if (unlikely(qc->flags & ATA_QCFLAG_FAILED)) {
4426 if (!ata_tag_internal(qc->tag)) {
4427 /* always fill result TF for failed qc */
4428 ap->ops->tf_read(ap, &qc->result_tf);
4429 ata_qc_schedule_eh(qc);
4434 /* read result TF if requested */
4435 if (qc->flags & ATA_QCFLAG_RESULT_TF)
4436 ap->ops->tf_read(ap, &qc->result_tf);
4438 __ata_qc_complete(qc);
4440 if (qc->flags & ATA_QCFLAG_EH_SCHEDULED)
4443 /* read result TF if failed or requested */
4444 if (qc->err_mask || qc->flags & ATA_QCFLAG_RESULT_TF)
4445 ap->ops->tf_read(ap, &qc->result_tf);
4447 __ata_qc_complete(qc);
4452 * ata_qc_complete_multiple - Complete multiple qcs successfully
4453 * @ap: port in question
4454 * @qc_active: new qc_active mask
4455 * @finish_qc: LLDD callback invoked before completing a qc
4457 * Complete in-flight commands. This function is meant to be
4458 * called from low-level driver's interrupt routine to complete
4459 * requests normally. ap->qc_active and @qc_active are compared
4460 * and commands are completed accordingly.
4463 * spin_lock_irqsave(host_set lock)
4466 * Number of completed commands on success, -errno otherwise.
4468 int ata_qc_complete_multiple(struct ata_port *ap, u32 qc_active,
4469 void (*finish_qc)(struct ata_queued_cmd *))
4475 done_mask = ap->qc_active ^ qc_active;
4477 if (unlikely(done_mask & qc_active)) {
4478 ata_port_printk(ap, KERN_ERR, "illegal qc_active transition "
4479 "(%08x->%08x)\n", ap->qc_active, qc_active);
4483 for (i = 0; i < ATA_MAX_QUEUE; i++) {
4484 struct ata_queued_cmd *qc;
4486 if (!(done_mask & (1 << i)))
4489 if ((qc = ata_qc_from_tag(ap, i))) {
4492 ata_qc_complete(qc);
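/*
 * Illustrative sketch (not part of the original file): an NCQ-capable LLDD
 * interrupt handler typically reads the controller's current view of the
 * active tag mask and lets ata_qc_complete_multiple() retire every command
 * that has finished. The helper name and the hw_sactive parameter are
 * assumptions for the example.
 */
static void example_ncq_irq(struct ata_port *ap, u32 hw_sactive)
{
	int nr_done = ata_qc_complete_multiple(ap, hw_sactive, NULL);

	if (nr_done < 0)
		ata_port_printk(ap, KERN_ERR,
				"qc completion failed (errno=%d)\n", nr_done);
}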
4500 static inline int ata_should_dma_map(struct ata_queued_cmd *qc)
4502 struct ata_port *ap = qc->ap;
4504 switch (qc->tf.protocol) {
4507 case ATA_PROT_ATAPI_DMA:
4510 case ATA_PROT_ATAPI:
4512 if (ap->flags & ATA_FLAG_PIO_DMA)
4525 * ata_qc_issue - issue taskfile to device
4526 * @qc: command to issue to device
4528 * Prepare an ATA command for submission to the device.
4529 * This includes mapping the data into a DMA-able
4530 * area, filling in the S/G table, and finally
4531 * writing the taskfile to hardware, starting the command.
4534 * spin_lock_irqsave(host_set lock)
4536 void ata_qc_issue(struct ata_queued_cmd *qc)
4538 struct ata_port *ap = qc->ap;
4540 /* Make sure only one non-NCQ command is outstanding. The
4541 * check is skipped for old EH because it reuses active qc to
4542 * request ATAPI sense.
4544 WARN_ON(ap->ops->error_handler && ata_tag_valid(ap->active_tag));
4546 if (qc->tf.protocol == ATA_PROT_NCQ) {
4547 WARN_ON(ap->sactive & (1 << qc->tag));
4548 ap->sactive |= 1 << qc->tag;
4550 WARN_ON(ap->sactive);
4551 ap->active_tag = qc->tag;
4554 qc->flags |= ATA_QCFLAG_ACTIVE;
4555 ap->qc_active |= 1 << qc->tag;
4557 if (ata_should_dma_map(qc)) {
4558 if (qc->flags & ATA_QCFLAG_SG) {
4559 if (ata_sg_setup(qc))
4561 } else if (qc->flags & ATA_QCFLAG_SINGLE) {
4562 if (ata_sg_setup_one(qc))
4566 qc->flags &= ~ATA_QCFLAG_DMAMAP;
4569 ap->ops->qc_prep(qc);
4571 qc->err_mask |= ap->ops->qc_issue(qc);
4572 if (unlikely(qc->err_mask))
4577 qc->flags &= ~ATA_QCFLAG_DMAMAP;
4578 qc->err_mask |= AC_ERR_SYSTEM;
4580 ata_qc_complete(qc);
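/*
 * Illustrative sketch (not part of the original file): how the qc helpers
 * above fit together for a simple single-buffer command. The caller is
 * assumed to hold the host_set lock; taskfile set-up is elided and the
 * helper, buffer and length names are assumptions for the example.
 */
static void example_issue_single_buffer(struct ata_device *dev, void *buf,
					unsigned int buflen)
{
	struct ata_queued_cmd *qc;

	qc = ata_qc_new_init(dev);		/* allocate a tag, init the qc */
	if (!qc)
		return;				/* port frozen or no free tag */

	ata_sg_init_one(qc, buf, buflen);	/* single-buffer data phase */

	/* ... fill in qc->tf, qc->dma_dir and qc->flags here ... */

	ata_qc_issue(qc);			/* map, prep and start the command */
}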
4584 * ata_qc_issue_prot - issue taskfile to device in proto-dependent manner
4585 * @qc: command to issue to device
4587 * Using various libata functions and hooks, this function
4588 * starts an ATA command. ATA commands are grouped into
4589 * classes called "protocols", and issuing each type of protocol
4590 * is slightly different.
4592 * May be used as the qc_issue() entry in ata_port_operations.
4595 * spin_lock_irqsave(host_set lock)
4598 * Zero on success, AC_ERR_* mask on failure
4601 unsigned int ata_qc_issue_prot(struct ata_queued_cmd *qc)
4603 struct ata_port *ap = qc->ap;
4605 /* Use polling pio if the LLD doesn't handle
4606 * interrupt driven pio and atapi CDB interrupt.
4608 if (ap->flags & ATA_FLAG_PIO_POLLING) {
4609 switch (qc->tf.protocol) {
4611 case ATA_PROT_ATAPI:
4612 case ATA_PROT_ATAPI_NODATA:
4613 qc->tf.flags |= ATA_TFLAG_POLLING;
4615 case ATA_PROT_ATAPI_DMA:
4616 if (qc->dev->flags & ATA_DFLAG_CDB_INTR)
4617 /* see ata_dma_blacklisted() */
4625 /* select the device */
4626 ata_dev_select(ap, qc->dev->devno, 1, 0);
4628 /* start the command */
4629 switch (qc->tf.protocol) {
4630 case ATA_PROT_NODATA:
4631 if (qc->tf.flags & ATA_TFLAG_POLLING)
4632 ata_qc_set_polling(qc);
4634 ata_tf_to_host(ap, &qc->tf);
4635 ap->hsm_task_state = HSM_ST_LAST;
4637 if (qc->tf.flags & ATA_TFLAG_POLLING)
4638 ata_port_queue_task(ap, ata_pio_task, qc, 0);
4643 WARN_ON(qc->tf.flags & ATA_TFLAG_POLLING);
4645 ap->ops->tf_load(ap, &qc->tf); /* load tf registers */
4646 ap->ops->bmdma_setup(qc); /* set up bmdma */
4647 ap->ops->bmdma_start(qc); /* initiate bmdma */
4648 ap->hsm_task_state = HSM_ST_LAST;
4652 if (qc->tf.flags & ATA_TFLAG_POLLING)
4653 ata_qc_set_polling(qc);
4655 ata_tf_to_host(ap, &qc->tf);
4657 if (qc->tf.flags & ATA_TFLAG_WRITE) {
4658 /* PIO data out protocol */
4659 ap->hsm_task_state = HSM_ST_FIRST;
4660 ata_port_queue_task(ap, ata_pio_task, qc, 0);
4662 /* always send first data block using
4663 * the ata_pio_task() codepath.
4666 /* PIO data in protocol */
4667 ap->hsm_task_state = HSM_ST;
4669 if (qc->tf.flags & ATA_TFLAG_POLLING)
4670 ata_port_queue_task(ap, ata_pio_task, qc, 0);
4672 /* if polling, ata_pio_task() handles the rest.
4673 * otherwise, interrupt handler takes over from here.
4679 case ATA_PROT_ATAPI:
4680 case ATA_PROT_ATAPI_NODATA:
4681 if (qc->tf.flags & ATA_TFLAG_POLLING)
4682 ata_qc_set_polling(qc);
4684 ata_tf_to_host(ap, &qc->tf);
4686 ap->hsm_task_state = HSM_ST_FIRST;
4688 /* send cdb by polling if no cdb interrupt */
4689 if ((!(qc->dev->flags & ATA_DFLAG_CDB_INTR)) ||
4690 (qc->tf.flags & ATA_TFLAG_POLLING))
4691 ata_port_queue_task(ap, ata_pio_task, qc, 0);
4694 case ATA_PROT_ATAPI_DMA:
4695 WARN_ON(qc->tf.flags & ATA_TFLAG_POLLING);
4697 ap->ops->tf_load(ap, &qc->tf); /* load tf registers */
4698 ap->ops->bmdma_setup(qc); /* set up bmdma */
4699 ap->hsm_task_state = HSM_ST_FIRST;
4701 /* send cdb by polling if no cdb interrupt */
4702 if (!(qc->dev->flags & ATA_DFLAG_CDB_INTR))
4703 ata_port_queue_task(ap, ata_pio_task, qc, 0);
4708 return AC_ERR_SYSTEM;
4715 * ata_host_intr - Handle host interrupt for given (port, task)
4716 * @ap: Port on which interrupt arrived (possibly...)
4717 * @qc: Taskfile currently active in engine
4719 * Handle host interrupt for given queued command. Currently,
4720 * only DMA interrupts are handled. All other commands are
4721 * handled via polling with interrupts disabled (nIEN bit).
4724 * spin_lock_irqsave(host_set lock)
4727 * One if interrupt was handled, zero if not (shared irq).
4730 inline unsigned int ata_host_intr (struct ata_port *ap,
4731 struct ata_queued_cmd *qc)
4733 u8 status, host_stat = 0;
4735 VPRINTK("ata%u: protocol %d task_state %d\n",
4736 ap->id, qc->tf.protocol, ap->hsm_task_state);
4738 /* Check whether we are expecting interrupt in this state */
4739 switch (ap->hsm_task_state) {
4741 /* Some pre-ATAPI-4 devices assert INTRQ
4742 * at this state when ready to receive CDB.
4745 /* Checking the ATA_DFLAG_CDB_INTR flag is enough here.
4746 * The flag was turned on only for atapi devices.
4747 * No need to check is_atapi_taskfile(&qc->tf) again.
4749 if (!(qc->dev->flags & ATA_DFLAG_CDB_INTR))
4753 if (qc->tf.protocol == ATA_PROT_DMA ||
4754 qc->tf.protocol == ATA_PROT_ATAPI_DMA) {
4755 /* check status of DMA engine */
4756 host_stat = ap->ops->bmdma_status(ap);
4757 VPRINTK("ata%u: host_stat 0x%X\n", ap->id, host_stat);
4759 /* if it's not our irq... */
4760 if (!(host_stat & ATA_DMA_INTR))
4763 /* before we do anything else, clear DMA-Start bit */
4764 ap->ops->bmdma_stop(qc);
4766 if (unlikely(host_stat & ATA_DMA_ERR)) {
4767 /* error when transferring data to/from memory */
4768 qc->err_mask |= AC_ERR_HOST_BUS;
4769 ap->hsm_task_state = HSM_ST_ERR;
4779 /* check altstatus */
4780 status = ata_altstatus(ap);
4781 if (status & ATA_BUSY)
4784 /* check main status, clearing INTRQ */
4785 status = ata_chk_status(ap);
4786 if (unlikely(status & ATA_BUSY))
4789 /* ack bmdma irq events */
4790 ap->ops->irq_clear(ap);
4792 ata_hsm_move(ap, qc, status, 0);
4793 return 1; /* irq handled */
4796 ap->stats.idle_irq++;
4799 if ((ap->stats.idle_irq % 1000) == 0) {
4800 ata_irq_ack(ap, 0); /* debug trap */
4801 ata_port_printk(ap, KERN_WARNING, "irq trap\n");
4805 return 0; /* irq not handled */
4809 * ata_interrupt - Default ATA host interrupt handler
4810 * @irq: irq line (unused)
4811 * @dev_instance: pointer to our ata_host_set information structure
4814 * Default interrupt handler for PCI IDE devices. Calls
4815 * ata_host_intr() for each port that is not disabled.
4818 * Obtains host_set lock during operation.
4821 * IRQ_NONE or IRQ_HANDLED.
4824 irqreturn_t ata_interrupt (int irq, void *dev_instance, struct pt_regs *regs)
4826 struct ata_host_set *host_set = dev_instance;
4828 unsigned int handled = 0;
4829 unsigned long flags;
4831 /* TODO: make _irqsave conditional on x86 PCI IDE legacy mode */
4832 spin_lock_irqsave(&host_set->lock, flags);
4834 for (i = 0; i < host_set->n_ports; i++) {
4835 struct ata_port *ap;
4837 ap = host_set->ports[i];
4839 !(ap->flags & ATA_FLAG_DISABLED)) {
4840 struct ata_queued_cmd *qc;
4842 qc = ata_qc_from_tag(ap, ap->active_tag);
4843 if (qc && (!(qc->tf.flags & ATA_TFLAG_POLLING)) &&
4844 (qc->flags & ATA_QCFLAG_ACTIVE))
4845 handled |= ata_host_intr(ap, qc);
4849 spin_unlock_irqrestore(&host_set->lock, flags);
4851 return IRQ_RETVAL(handled);
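/*
 * Illustrative sketch (not part of the original file): ata_interrupt() is
 * meant to be registered as a (possibly shared) interrupt handler with the
 * host_set as the cookie. The shared-IRQ flag name depends on the kernel
 * version (SA_SHIRQ on kernels of this vintage, IRQF_SHARED later) and the
 * devname string is an assumption for the example.
 */
static int example_request_ata_irq(unsigned int irq,
				   struct ata_host_set *host_set)
{
	return request_irq(irq, ata_interrupt, SA_SHIRQ, "libata-example",
			   host_set);
}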
4855 * sata_scr_valid - test whether SCRs are accessible
4856 * @ap: ATA port to test SCR accessibility for
4858 * Test whether SCRs are accessible for @ap.
4864 * 1 if SCRs are accessible, 0 otherwise.
4866 int sata_scr_valid(struct ata_port *ap)
4868 return ap->cbl == ATA_CBL_SATA && ap->ops->scr_read;
4872 * sata_scr_read - read SCR register of the specified port
4873 * @ap: ATA port to read SCR for
4875 * @val: Place to store read value
4877 * Read SCR register @reg of @ap into *@val. This function is
4878 * guaranteed to succeed if the cable type of the port is SATA
4879 * and the port implements ->scr_read.
4885 * 0 on success, negative errno on failure.
4887 int sata_scr_read(struct ata_port *ap, int reg, u32 *val)
4889 if (sata_scr_valid(ap)) {
4890 *val = ap->ops->scr_read(ap, reg);
4897 * sata_scr_write - write SCR register of the specified port
4898 * @ap: ATA port to write SCR for
4899 * @reg: SCR to write
4900 * @val: value to write
4902 * Write @val to SCR register @reg of @ap. This function is
4903 * guaranteed to succeed if the cable type of the port is SATA
4904 * and the port implements ->scr_read.
4910 * 0 on success, negative errno on failure.
4912 int sata_scr_write(struct ata_port *ap, int reg, u32 val)
4914 if (sata_scr_valid(ap)) {
4915 ap->ops->scr_write(ap, reg, val);
4922 * sata_scr_write_flush - write SCR register of the specified port and flush
4923 * @ap: ATA port to write SCR for
4924 * @reg: SCR to write
4925 * @val: value to write
4927 * This function is identical to sata_scr_write() except that this
4928 * function performs flush after writing to the register.
4934 * 0 on success, negative errno on failure.
4936 int sata_scr_write_flush(struct ata_port *ap, int reg, u32 val)
4938 if (sata_scr_valid(ap)) {
4939 ap->ops->scr_write(ap, reg, val);
4940 ap->ops->scr_read(ap, reg);
4947 * ata_port_online - test whether the given port is online
4948 * @ap: ATA port to test
4950 * Test whether @ap is online. Note that this function returns 0
4951 * if online status of @ap cannot be obtained, so
4952 * ata_port_online(ap) != !ata_port_offline(ap).
4958 * 1 if the port online status is available and online.
4960 int ata_port_online(struct ata_port *ap)
4964 if (!sata_scr_read(ap, SCR_STATUS, &sstatus) && (sstatus & 0xf) == 0x3)
4970 * ata_port_offline - test whether the given port is offline
4971 * @ap: ATA port to test
4973 * Test whether @ap is offline. Note that this function returns
4974 * 0 if offline status of @ap cannot be obtained, so
4975 * ata_port_online(ap) != !ata_port_offline(ap).
4981 * 1 if the port offline status is available and offline.
4983 int ata_port_offline(struct ata_port *ap)
4987 if (!sata_scr_read(ap, SCR_STATUS, &sstatus) && (sstatus & 0xf) != 0x3)
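/*
 * Illustrative sketch (not part of the original file): typical use of the
 * SCR helpers above is to sample the link state and clear sticky SError
 * bits, skipping ports whose SCRs are not accessible. The helper name is
 * made up for the example.
 */
static void example_sample_link_state(struct ata_port *ap)
{
	u32 sstatus, serror;

	if (!sata_scr_valid(ap))
		return;					/* PATA or no ->scr_read */

	if (sata_scr_read(ap, SCR_STATUS, &sstatus) == 0)
		ata_port_printk(ap, KERN_INFO, "SStatus 0x%x (DET 0x%x)\n",
				sstatus, sstatus & 0xf);

	if (sata_scr_read(ap, SCR_ERROR, &serror) == 0)
		sata_scr_write(ap, SCR_ERROR, serror);	/* write 1s to clear */
}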
4992 int ata_flush_cache(struct ata_device *dev)
4994 unsigned int err_mask;
4997 if (!ata_try_flush_cache(dev))
5000 if (ata_id_has_flush_ext(dev->id))
5001 cmd = ATA_CMD_FLUSH_EXT;
5003 cmd = ATA_CMD_FLUSH;
5005 err_mask = ata_do_simple_cmd(dev, cmd);
5007 ata_dev_printk(dev, KERN_ERR, "failed to flush cache\n");
5014 static int ata_standby_drive(struct ata_device *dev)
5016 unsigned int err_mask;
5018 err_mask = ata_do_simple_cmd(dev, ATA_CMD_STANDBYNOW1);
5020 ata_dev_printk(dev, KERN_ERR, "failed to standby drive "
5021 "(err_mask=0x%x)\n", err_mask);
5028 static int ata_start_drive(struct ata_device *dev)
5030 unsigned int err_mask;
5032 err_mask = ata_do_simple_cmd(dev, ATA_CMD_IDLEIMMEDIATE);
5034 ata_dev_printk(dev, KERN_ERR, "failed to start drive "
5035 "(err_mask=0x%x)\n", err_mask);
5043 * ata_device_resume - wake up a previously suspended device
5044 * @dev: the device to resume
5046 * Kick the drive back into action by sending it an IDLE IMMEDIATE
5047 * command and making sure its transfer mode matches between drive and host.
5051 int ata_device_resume(struct ata_device *dev)
5053 struct ata_port *ap = dev->ap;
5055 if (ap->pflags & ATA_PFLAG_SUSPENDED) {
5056 struct ata_device *failed_dev;
5058 ata_busy_sleep(ap, ATA_TMOUT_BOOT_QUICK, ATA_TMOUT_BOOT);
5059 ata_busy_wait(ap, ATA_BUSY | ATA_DRQ, 200000);
5061 ap->pflags &= ~ATA_PFLAG_SUSPENDED;
5062 while (ata_set_mode(ap, &failed_dev))
5063 ata_dev_disable(failed_dev);
5065 if (!ata_dev_enabled(dev))
5067 if (dev->class == ATA_DEV_ATA)
5068 ata_start_drive(dev);
5074 * ata_device_suspend - prepare a device for suspend
5075 * @dev: the device to suspend
5076 * @state: target power management state
5078 * Flush the cache on the drive, if appropriate, then issue a
5079 * standbynow command.
5081 int ata_device_suspend(struct ata_device *dev, pm_message_t state)
5083 struct ata_port *ap = dev->ap;
5085 if (!ata_dev_enabled(dev))
5087 if (dev->class == ATA_DEV_ATA)
5088 ata_flush_cache(dev);
5090 if (state.event != PM_EVENT_FREEZE)
5091 ata_standby_drive(dev);
5092 ap->pflags |= ATA_PFLAG_SUSPENDED;
5097 * ata_port_start - Set port up for DMA.
5098 * @ap: Port to initialize
5100 * Called just after data structures for each port are
5101 * initialized. Allocates space for PRD table.
5103 * May be used as the port_start() entry in ata_port_operations.
5106 * Inherited from caller.
5109 int ata_port_start (struct ata_port *ap)
5111 struct device *dev = ap->dev;
5114 ap->prd = dma_alloc_coherent(dev, ATA_PRD_TBL_SZ, &ap->prd_dma, GFP_KERNEL);
5118 rc = ata_pad_alloc(ap, dev);
5120 dma_free_coherent(dev, ATA_PRD_TBL_SZ, ap->prd, ap->prd_dma);
5124 DPRINTK("prd alloc, virt %p, dma %llx\n", ap->prd, (unsigned long long) ap->prd_dma);
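/*
 * Illustrative sketch (hypothetical driver code, not part of libata):
 * a LLDD with extra per-port DMA state can layer its own port_start()
 * on top of ata_port_start() and undo the PRD/pad allocation with
 * ata_port_stop() if its private setup fails.
 *
 *	static int example_port_start(struct ata_port *ap)
 *	{
 *		int rc = ata_port_start(ap);
 *
 *		if (rc)
 *			return rc;
 *		rc = example_alloc_private(ap);
 *		if (rc)
 *			ata_port_stop(ap);
 *		return rc;
 *	}
 */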
5131 * ata_port_stop - Undo ata_port_start()
5132 * @ap: Port to shut down
5134 * Frees the PRD table.
5136 * May be used as the port_stop() entry in ata_port_operations.
5139 * Inherited from caller.
5142 void ata_port_stop (struct ata_port *ap)
5144 struct device *dev = ap->dev;
5146 dma_free_coherent(dev, ATA_PRD_TBL_SZ, ap->prd, ap->prd_dma);
5147 ata_pad_free(ap, dev);
5150 void ata_host_stop (struct ata_host_set *host_set)
5152 if (host_set->mmio_base)
5153 iounmap(host_set->mmio_base);
5158 * ata_host_remove - Unregister SCSI host structure with upper layers
5159 * @ap: Port to unregister
5160 * @do_unregister: 1 if we fully unregister, 0 to just stop the port
5163 * Inherited from caller.
5166 static void ata_host_remove(struct ata_port *ap, unsigned int do_unregister)
5168 struct Scsi_Host *sh = ap->host;
5173 scsi_remove_host(sh);
5175 ap->ops->port_stop(ap);
5179 * ata_dev_init - Initialize an ata_device structure
5180 * @dev: Device structure to initialize
5182 * Initialize @dev in preparation for probing.
5185 * Inherited from caller.
5187 void ata_dev_init(struct ata_device *dev)
5189 struct ata_port *ap = dev->ap;
5190 unsigned long flags;
5192 /* SATA spd limit is bound to the first device */
5193 ap->sata_spd_limit = ap->hw_sata_spd_limit;
5195 /* High bits of dev->flags are used to record warm plug
5196 * requests which occur asynchronously. Synchronize using
5199 spin_lock_irqsave(ap->lock, flags);
5200 dev->flags &= ~ATA_DFLAG_INIT_MASK;
5201 spin_unlock_irqrestore(ap->lock, flags);
5203 memset((void *)dev + ATA_DEVICE_CLEAR_OFFSET, 0,
5204 sizeof(*dev) - ATA_DEVICE_CLEAR_OFFSET);
5205 dev->pio_mask = UINT_MAX;
5206 dev->mwdma_mask = UINT_MAX;
5207 dev->udma_mask = UINT_MAX;
5211 * ata_host_init - Initialize an ata_port structure
5212 * @ap: Structure to initialize
5213 * @host: associated SCSI mid-layer structure
5214 * @host_set: Collection of hosts to which @ap belongs
5215 * @ent: Probe information provided by low-level driver
5216 * @port_no: Port number associated with this ata_port
5218 * Initialize a new ata_port structure, and its associated
5222 * Inherited from caller.
5224 static void ata_host_init(struct ata_port *ap, struct Scsi_Host *host,
5225 struct ata_host_set *host_set,
5226 const struct ata_probe_ent *ent, unsigned int port_no)
5232 host->max_channel = 1;
5233 host->unique_id = ata_unique_id++;
5234 host->max_cmd_len = 12;
5236 ap->lock = &host_set->lock;
5237 ap->flags = ATA_FLAG_DISABLED;
5238 ap->id = host->unique_id;
5240 ap->ctl = ATA_DEVCTL_OBS;
5241 ap->host_set = host_set;
5243 ap->port_no = port_no;
5245 ent->legacy_mode ? ent->hard_port_no : port_no;
5246 ap->pio_mask = ent->pio_mask;
5247 ap->mwdma_mask = ent->mwdma_mask;
5248 ap->udma_mask = ent->udma_mask;
5249 ap->flags |= ent->host_flags;
5250 ap->ops = ent->port_ops;
5251 ap->hw_sata_spd_limit = UINT_MAX;
5252 ap->active_tag = ATA_TAG_POISON;
5253 ap->last_ctl = 0xFF;
5255 #if defined(ATA_VERBOSE_DEBUG)
5256 /* turn on all debugging levels */
5257 ap->msg_enable = 0x00FF;
5258 #elif defined(ATA_DEBUG)
5259 ap->msg_enable = ATA_MSG_DRV | ATA_MSG_INFO | ATA_MSG_CTL | ATA_MSG_WARN | ATA_MSG_ERR;
5261 ap->msg_enable = ATA_MSG_DRV | ATA_MSG_ERR | ATA_MSG_WARN;
5264 INIT_WORK(&ap->port_task, NULL, NULL);
5265 INIT_WORK(&ap->hotplug_task, ata_scsi_hotplug, ap);
5266 INIT_WORK(&ap->scsi_rescan_task, ata_scsi_dev_rescan, ap);
5267 INIT_LIST_HEAD(&ap->eh_done_q);
5268 init_waitqueue_head(&ap->eh_wait_q);
5270 /* set cable type */
5271 ap->cbl = ATA_CBL_NONE;
5272 if (ap->flags & ATA_FLAG_SATA)
5273 ap->cbl = ATA_CBL_SATA;
5275 for (i = 0; i < ATA_MAX_DEVICES; i++) {
5276 struct ata_device *dev = &ap->device[i];
5283 ap->stats.unhandled_irq = 1;
5284 ap->stats.idle_irq = 1;
5287 memcpy(&ap->ioaddr, &ent->port[port_no], sizeof(struct ata_ioports));
5291 * ata_host_add - Attach low-level ATA driver to system
5292 * @ent: Information provided by low-level driver
5293 * @host_set: Collections of ports to which we add
5294 * @port_no: Port number associated with this host
5296 * Attach low-level ATA driver to system.
5299 * PCI/etc. bus probe sem.
5302 * New ata_port on success, or NULL on error.
5305 static struct ata_port * ata_host_add(const struct ata_probe_ent *ent,
5306 struct ata_host_set *host_set,
5307 unsigned int port_no)
5309 struct Scsi_Host *host;
5310 struct ata_port *ap;
5315 if (!ent->port_ops->error_handler &&
5316 !(ent->host_flags & (ATA_FLAG_SATA_RESET | ATA_FLAG_SRST))) {
5317 printk(KERN_ERR "ata%u: no reset mechanism available\n",
5322 host = scsi_host_alloc(ent->sht, sizeof(struct ata_port));
5326 host->transportt = &ata_scsi_transport_template;
5328 ap = ata_shost_to_port(host);
5330 ata_host_init(ap, host, host_set, ent, port_no);
5332 rc = ap->ops->port_start(ap);
5339 scsi_host_put(host);
5344 * ata_device_add - Register hardware device with ATA and SCSI layers
5345 * @ent: Probe information describing hardware device to be registered
5347 * This function processes the information provided in the probe
5348 * information struct @ent, allocates the necessary ATA and SCSI
5349 * host information structures, initializes them, and registers
5350 * everything with requisite kernel subsystems.
5352 * This function requests irqs, probes the ATA bus, and probes
5356 * PCI/etc. bus probe sem.
5359 * Number of ports registered. Zero on error (no ports registered).
5361 int ata_device_add(const struct ata_probe_ent *ent)
5363 unsigned int count = 0, i;
5364 struct device *dev = ent->dev;
5365 struct ata_host_set *host_set;
5369 /* alloc a container for our list of ATA ports (buses) */
5370 host_set = kzalloc(sizeof(struct ata_host_set) +
5371 (ent->n_ports * sizeof(void *)), GFP_KERNEL);
5374 spin_lock_init(&host_set->lock);
5376 host_set->dev = dev;
5377 host_set->n_ports = ent->n_ports;
5378 host_set->irq = ent->irq;
5379 host_set->mmio_base = ent->mmio_base;
5380 host_set->private_data = ent->private_data;
5381 host_set->ops = ent->port_ops;
5382 host_set->flags = ent->host_set_flags;
5384 /* register each port bound to this device */
5385 for (i = 0; i < ent->n_ports; i++) {
5386 struct ata_port *ap;
5387 unsigned long xfer_mode_mask;
5389 ap = ata_host_add(ent, host_set, i);
5393 host_set->ports[i] = ap;
5394 xfer_mode_mask =(ap->udma_mask << ATA_SHIFT_UDMA) |
5395 (ap->mwdma_mask << ATA_SHIFT_MWDMA) |
5396 (ap->pio_mask << ATA_SHIFT_PIO);
5398 /* print per-port info to dmesg */
5399 ata_port_printk(ap, KERN_INFO, "%cATA max %s cmd 0x%lX "
5400 "ctl 0x%lX bmdma 0x%lX irq %lu\n",
5401 ap->flags & ATA_FLAG_SATA ? 'S' : 'P',
5402 ata_mode_string(xfer_mode_mask),
5403 ap->ioaddr.cmd_addr,
5404 ap->ioaddr.ctl_addr,
5405 ap->ioaddr.bmdma_addr,
5409 host_set->ops->irq_clear(ap);
5410 ata_eh_freeze_port(ap); /* freeze port before requesting IRQ */
5417 /* obtain irq, that is shared between channels */
5418 rc = request_irq(ent->irq, ent->port_ops->irq_handler, ent->irq_flags,
5419 DRV_NAME, host_set);
5421 dev_printk(KERN_ERR, dev, "irq %lu request failed: %d\n",
5426 /* perform each probe synchronously */
5427 DPRINTK("probe begin\n");
5428 for (i = 0; i < count; i++) {
5429 struct ata_port *ap;
5433 ap = host_set->ports[i];
5435 /* init sata_spd_limit to the current value */
5436 if (sata_scr_read(ap, SCR_CONTROL, &scontrol) == 0) {
5437 int spd = (scontrol >> 4) & 0xf;
5438 ap->hw_sata_spd_limit &= (1 << spd) - 1;
5440 ap->sata_spd_limit = ap->hw_sata_spd_limit;
5442 rc = scsi_add_host(ap->host, dev);
5444 ata_port_printk(ap, KERN_ERR, "scsi_add_host failed\n");
5445 /* FIXME: do something useful here */
5446 /* FIXME: handle unconditional calls to
5447 * scsi_scan_host and ata_host_remove, below,
5452 if (ap->ops->error_handler) {
5453 unsigned long flags;
5457 /* kick EH for boot probing */
5458 spin_lock_irqsave(ap->lock, flags);
5460 ap->eh_info.probe_mask = (1 << ATA_MAX_DEVICES) - 1;
5461 ap->eh_info.action |= ATA_EH_SOFTRESET;
5463 ap->pflags |= ATA_PFLAG_LOADING;
5464 ata_port_schedule_eh(ap);
5466 spin_unlock_irqrestore(ap->lock, flags);
5468 /* wait for EH to finish */
5469 ata_port_wait_eh(ap);
5471 DPRINTK("ata%u: bus probe begin\n", ap->id);
5472 rc = ata_bus_probe(ap);
5473 DPRINTK("ata%u: bus probe end\n", ap->id);
5476 /* FIXME: do something useful here?
5477 * Current libata behavior will
5478 * tear down everything when
5479 * the module is removed
5480 * or the h/w is unplugged.
5486 /* probes are done, now scan each port's disk(s) */
5487 DPRINTK("host probe begin\n");
5488 for (i = 0; i < count; i++) {
5489 struct ata_port *ap = host_set->ports[i];
5491 ata_scsi_scan_host(ap);
5494 dev_set_drvdata(dev, host_set);
5496 VPRINTK("EXIT, returning %u\n", ent->n_ports);
5497 return ent->n_ports; /* success */
5500 for (i = 0; i < count; i++) {
5501 ata_host_remove(host_set->ports[i], 1);
5502 scsi_host_put(host_set->ports[i]->host);
5506 VPRINTK("EXIT, returning 0\n");
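/*
 * Illustrative sketch (hypothetical driver, error handling omitted):
 * a native-mode LLDD typically fills a struct ata_probe_ent in its
 * PCI ->probe() routine, once the per-port I/O addresses have been
 * filled in (see ata_std_ports() below), and hands it to
 * ata_device_add().  example_sht, example_ops and the mask values
 * are assumptions, not taken from any particular driver.
 *
 *	probe_ent->dev = pci_dev_to_dev(pdev);
 *	probe_ent->sht = &example_sht;
 *	probe_ent->port_ops = &example_ops;
 *	probe_ent->n_ports = 2;
 *	probe_ent->irq = pdev->irq;
 *	probe_ent->pio_mask = 0x1f;
 *	probe_ent->mwdma_mask = 0x07;
 *	probe_ent->udma_mask = 0x7f;
 *
 *	if (!ata_device_add(probe_ent))
 *		return -ENODEV;
 */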
5511 * ata_port_detach - Detach ATA port in preparation for device removal
5512 * @ap: ATA port to be detached
5514 * Detach all ATA devices and the associated SCSI devices of @ap;
5515 * then, remove the associated SCSI host. @ap is guaranteed to
5516 * be quiescent on return from this function.
5519 * Kernel thread context (may sleep).
5521 void ata_port_detach(struct ata_port *ap)
5523 unsigned long flags;
5526 if (!ap->ops->error_handler)
5529 /* tell EH we're leaving & flush EH */
5530 spin_lock_irqsave(ap->lock, flags);
5531 ap->pflags |= ATA_PFLAG_UNLOADING;
5532 spin_unlock_irqrestore(ap->lock, flags);
5534 ata_port_wait_eh(ap);
5536 /* EH is now guaranteed to see UNLOADING, so no new device
5537 * will be attached. Disable all existing devices.
5539 spin_lock_irqsave(ap->lock, flags);
5541 for (i = 0; i < ATA_MAX_DEVICES; i++)
5542 ata_dev_disable(&ap->device[i]);
5544 spin_unlock_irqrestore(ap->lock, flags);
5546 /* Final freeze & EH. All in-flight commands are aborted. EH
5547 * will be skipped and retries will be terminated with bad
5550 spin_lock_irqsave(ap->lock, flags);
5551 ata_port_freeze(ap); /* won't be thawed */
5552 spin_unlock_irqrestore(ap->lock, flags);
5554 ata_port_wait_eh(ap);
5556 /* Flush hotplug task. The sequence is similar to
5557 * ata_port_flush_task().
5559 flush_workqueue(ata_aux_wq);
5560 cancel_delayed_work(&ap->hotplug_task);
5561 flush_workqueue(ata_aux_wq);
5563 /* remove the associated SCSI host */
5564 scsi_remove_host(ap->host);
5568 * ata_host_set_remove - PCI layer callback for device removal
5569 * @host_set: ATA host set that was removed
5571 * Unregister all objects associated with this host set. Free those objects.
5575 * Inherited from calling layer (may sleep).
5578 void ata_host_set_remove(struct ata_host_set *host_set)
5582 for (i = 0; i < host_set->n_ports; i++)
5583 ata_port_detach(host_set->ports[i]);
5585 free_irq(host_set->irq, host_set);
5587 for (i = 0; i < host_set->n_ports; i++) {
5588 struct ata_port *ap = host_set->ports[i];
5590 ata_scsi_release(ap->host);
5592 if ((ap->flags & ATA_FLAG_NO_LEGACY) == 0) {
5593 struct ata_ioports *ioaddr = &ap->ioaddr;
5595 if (ioaddr->cmd_addr == 0x1f0)
5596 release_region(0x1f0, 8);
5597 else if (ioaddr->cmd_addr == 0x170)
5598 release_region(0x170, 8);
5601 scsi_host_put(ap->host);
5604 if (host_set->ops->host_stop)
5605 host_set->ops->host_stop(host_set);
5611 * ata_scsi_release - SCSI layer callback hook for host unload
5612 * @host: libata host to be unloaded
5614 * Performs all duties necessary to shut down a libata port...
5615 * Kill port kthread, disable port, and release resources.
5618 * Inherited from SCSI layer.
5624 int ata_scsi_release(struct Scsi_Host *host)
5626 struct ata_port *ap = ata_shost_to_port(host);
5630 ap->ops->port_disable(ap);
5631 ata_host_remove(ap, 0);
5638 * ata_std_ports - initialize ioaddr with standard port offsets.
5639 * @ioaddr: IO address structure to be initialized
5641 * Utility function which initializes data_addr, error_addr,
5642 * feature_addr, nsect_addr, lbal_addr, lbam_addr, lbah_addr,
5643 * device_addr, status_addr, and command_addr to standard offsets
5644 * relative to cmd_addr.
5646 * Does not set ctl_addr, altstatus_addr, bmdma_addr, or scr_addr.
5649 void ata_std_ports(struct ata_ioports *ioaddr)
5651 ioaddr->data_addr = ioaddr->cmd_addr + ATA_REG_DATA;
5652 ioaddr->error_addr = ioaddr->cmd_addr + ATA_REG_ERR;
5653 ioaddr->feature_addr = ioaddr->cmd_addr + ATA_REG_FEATURE;
5654 ioaddr->nsect_addr = ioaddr->cmd_addr + ATA_REG_NSECT;
5655 ioaddr->lbal_addr = ioaddr->cmd_addr + ATA_REG_LBAL;
5656 ioaddr->lbam_addr = ioaddr->cmd_addr + ATA_REG_LBAM;
5657 ioaddr->lbah_addr = ioaddr->cmd_addr + ATA_REG_LBAH;
5658 ioaddr->device_addr = ioaddr->cmd_addr + ATA_REG_DEVICE;
5659 ioaddr->status_addr = ioaddr->cmd_addr + ATA_REG_STATUS;
5660 ioaddr->command_addr = ioaddr->cmd_addr + ATA_REG_CMD;
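/*
 * Illustrative sketch: a driver using the classic legacy I/O ranges
 * only fills in the command block base and control register address,
 * then lets ata_std_ports() derive the rest.  0x1f0/0x3f6 are the
 * traditional primary-channel addresses, shown purely as an example.
 *
 *	probe_ent->port[0].cmd_addr = 0x1f0;
 *	probe_ent->port[0].altstatus_addr =
 *	probe_ent->port[0].ctl_addr = 0x3f6;
 *	ata_std_ports(&probe_ent->port[0]);
 */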
5666 void ata_pci_host_stop (struct ata_host_set *host_set)
5668 struct pci_dev *pdev = to_pci_dev(host_set->dev);
5670 pci_iounmap(pdev, host_set->mmio_base);
5674 * ata_pci_remove_one - PCI layer callback for device removal
5675 * @pdev: PCI device that was removed
5677 * PCI layer indicates to libata via this hook that
5678 * hot-unplug or module unload event has occurred.
5679 * Handle this by unregistering all objects associated
5680 * with this PCI device. Free those objects. Then finally
5681 * release PCI resources and disable device.
5684 * Inherited from PCI layer (may sleep).
5687 void ata_pci_remove_one (struct pci_dev *pdev)
5689 struct device *dev = pci_dev_to_dev(pdev);
5690 struct ata_host_set *host_set = dev_get_drvdata(dev);
5691 struct ata_host_set *host_set2 = host_set->next;
5693 ata_host_set_remove(host_set);
5695 ata_host_set_remove(host_set2);
5697 pci_release_regions(pdev);
5698 pci_disable_device(pdev);
5699 dev_set_drvdata(dev, NULL);
5702 /* move to PCI subsystem */
5703 int pci_test_config_bits(struct pci_dev *pdev, const struct pci_bits *bits)
5705 unsigned long tmp = 0;
5707 switch (bits->width) {
5710 pci_read_config_byte(pdev, bits->reg, &tmp8);
5716 pci_read_config_word(pdev, bits->reg, &tmp16);
5722 pci_read_config_dword(pdev, bits->reg, &tmp32);
5733 return (tmp == bits->val) ? 1 : 0;
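/*
 * Illustrative sketch (hypothetical register layout): a driver
 * describes the "port enable" bits it cares about with struct
 * pci_bits { reg, width-in-bytes, mask, val } and skips ports whose
 * config-space bits do not match.
 *
 *	static const struct pci_bits example_enable_bits[] = {
 *		{ 0x41, 1, 0x80, 0x80 },
 *		{ 0x43, 1, 0x80, 0x80 },
 *	};
 *
 *	if (!pci_test_config_bits(pdev, &example_enable_bits[port_no]))
 *		return -ENOENT;
 */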
5736 int ata_pci_device_suspend(struct pci_dev *pdev, pm_message_t state)
5738 pci_save_state(pdev);
5739 pci_disable_device(pdev);
5740 pci_set_power_state(pdev, PCI_D3hot);
5744 int ata_pci_device_resume(struct pci_dev *pdev)
5746 pci_set_power_state(pdev, PCI_D0);
5747 pci_restore_state(pdev);
5748 pci_enable_device(pdev);
5749 pci_set_master(pdev);
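/*
 * Illustrative sketch (hypothetical driver): LLDDs without
 * controller-specific power-management work can point their
 * pci_driver suspend/resume hooks straight at these helpers.
 *
 *	static struct pci_driver example_pci_driver = {
 *		.name		= "example",
 *		.id_table	= example_pci_tbl,
 *		.probe		= example_init_one,
 *		.remove		= ata_pci_remove_one,
 *		.suspend	= ata_pci_device_suspend,
 *		.resume		= ata_pci_device_resume,
 *	};
 */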
5752 #endif /* CONFIG_PCI */
5755 static int __init ata_init(void)
5757 ata_probe_timeout *= HZ;
5758 ata_wq = create_workqueue("ata");
5762 ata_aux_wq = create_singlethread_workqueue("ata_aux");
5764 destroy_workqueue(ata_wq);
5768 printk(KERN_DEBUG "libata version " DRV_VERSION " loaded.\n");
5772 static void __exit ata_exit(void)
5774 destroy_workqueue(ata_wq);
5775 destroy_workqueue(ata_aux_wq);
5778 module_init(ata_init);
5779 module_exit(ata_exit);
5781 static unsigned long ratelimit_time;
5782 static DEFINE_SPINLOCK(ata_ratelimit_lock);
5784 int ata_ratelimit(void)
5787 unsigned long flags;
5789 spin_lock_irqsave(&ata_ratelimit_lock, flags);
5791 if (time_after(jiffies, ratelimit_time)) {
5793 ratelimit_time = jiffies + (HZ/5);
5797 spin_unlock_irqrestore(&ata_ratelimit_lock, flags);
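/*
 * Illustrative usage sketch: ata_ratelimit() returns non-zero at most
 * once per HZ/5 jiffies, so chatty warnings from interrupt context
 * can be throttled without being lost entirely.
 *
 *	if (ata_ratelimit())
 *		ata_port_printk(ap, KERN_WARNING,
 *				"spurious interrupt\n");
 */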
5803 * ata_wait_register - wait until register value changes
5804 * @reg: IO-mapped register
5805 * @mask: Mask to apply to read register value
5806 * @val: Wait condition
5807 * @interval_msec: polling interval in milliseconds
5808 * @timeout_msec: timeout in milliseconds
5810 * Waiting for some bits of a register to change is a common
5811 * operation for ATA controllers. This function reads the 32-bit LE
5812 * IO-mapped register @reg and tests for the following condition.
5814 * (*@reg & mask) != val
5816 * If the condition is met, it returns; otherwise, the process is
5817 * repeated after @interval_msec until timeout.
5820 * Kernel thread context (may sleep)
5823 * The final register value.
5825 u32 ata_wait_register(void __iomem *reg, u32 mask, u32 val,
5826 unsigned long interval_msec,
5827 unsigned long timeout_msec)
5829 unsigned long timeout;
5832 tmp = ioread32(reg);
5834 /* Calculate timeout _after_ the first read to make sure
5835 * preceding writes reach the controller before starting to
5836 * eat away the timeout.
5838 timeout = jiffies + (timeout_msec * HZ) / 1000;
5840 while ((tmp & mask) == val && time_before(jiffies, timeout)) {
5841 msleep(interval_msec);
5842 tmp = ioread32(reg);
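/*
 * Illustrative usage sketch (EXAMPLE_STATUS and the busy bit are
 * hypothetical): waiting up to 500ms, polling every 10ms, for a busy
 * bit to clear.  The wait ends as soon as (value & mask) != val.
 *
 *	u32 status;
 *
 *	status = ata_wait_register(mmio + EXAMPLE_STATUS,
 *				   0x1, 0x1, 10, 500);
 *	if (status & 0x1)
 *		return -EBUSY;
 */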
5849 * libata is essentially a library of internal helper functions for
5850 * low-level ATA host controller drivers. As such, the API/ABI is
5851 * likely to change as new drivers are added and updated.
5852 * Do not depend on ABI/API stability.
5855 EXPORT_SYMBOL_GPL(sata_deb_timing_boot);
5856 EXPORT_SYMBOL_GPL(sata_deb_timing_eh);
5857 EXPORT_SYMBOL_GPL(sata_deb_timing_before_fsrst);
5858 EXPORT_SYMBOL_GPL(ata_std_bios_param);
5859 EXPORT_SYMBOL_GPL(ata_std_ports);
5860 EXPORT_SYMBOL_GPL(ata_device_add);
5861 EXPORT_SYMBOL_GPL(ata_port_detach);
5862 EXPORT_SYMBOL_GPL(ata_host_set_remove);
5863 EXPORT_SYMBOL_GPL(ata_sg_init);
5864 EXPORT_SYMBOL_GPL(ata_sg_init_one);
5865 EXPORT_SYMBOL_GPL(ata_hsm_move);
5866 EXPORT_SYMBOL_GPL(ata_qc_complete);
5867 EXPORT_SYMBOL_GPL(ata_qc_complete_multiple);
5868 EXPORT_SYMBOL_GPL(ata_qc_issue_prot);
5869 EXPORT_SYMBOL_GPL(ata_tf_load);
5870 EXPORT_SYMBOL_GPL(ata_tf_read);
5871 EXPORT_SYMBOL_GPL(ata_noop_dev_select);
5872 EXPORT_SYMBOL_GPL(ata_std_dev_select);
5873 EXPORT_SYMBOL_GPL(ata_tf_to_fis);
5874 EXPORT_SYMBOL_GPL(ata_tf_from_fis);
5875 EXPORT_SYMBOL_GPL(ata_check_status);
5876 EXPORT_SYMBOL_GPL(ata_altstatus);
5877 EXPORT_SYMBOL_GPL(ata_exec_command);
5878 EXPORT_SYMBOL_GPL(ata_port_start);
5879 EXPORT_SYMBOL_GPL(ata_port_stop);
5880 EXPORT_SYMBOL_GPL(ata_host_stop);
5881 EXPORT_SYMBOL_GPL(ata_interrupt);
5882 EXPORT_SYMBOL_GPL(ata_mmio_data_xfer);
5883 EXPORT_SYMBOL_GPL(ata_pio_data_xfer);
5884 EXPORT_SYMBOL_GPL(ata_pio_data_xfer_noirq);
5885 EXPORT_SYMBOL_GPL(ata_qc_prep);
5886 EXPORT_SYMBOL_GPL(ata_noop_qc_prep);
5887 EXPORT_SYMBOL_GPL(ata_bmdma_setup);
5888 EXPORT_SYMBOL_GPL(ata_bmdma_start);
5889 EXPORT_SYMBOL_GPL(ata_bmdma_irq_clear);
5890 EXPORT_SYMBOL_GPL(ata_bmdma_status);
5891 EXPORT_SYMBOL_GPL(ata_bmdma_stop);
5892 EXPORT_SYMBOL_GPL(ata_bmdma_freeze);
5893 EXPORT_SYMBOL_GPL(ata_bmdma_thaw);
5894 EXPORT_SYMBOL_GPL(ata_bmdma_drive_eh);
5895 EXPORT_SYMBOL_GPL(ata_bmdma_error_handler);
5896 EXPORT_SYMBOL_GPL(ata_bmdma_post_internal_cmd);
5897 EXPORT_SYMBOL_GPL(ata_port_probe);
5898 EXPORT_SYMBOL_GPL(sata_set_spd);
5899 EXPORT_SYMBOL_GPL(sata_phy_debounce);
5900 EXPORT_SYMBOL_GPL(sata_phy_resume);
5901 EXPORT_SYMBOL_GPL(sata_phy_reset);
5902 EXPORT_SYMBOL_GPL(__sata_phy_reset);
5903 EXPORT_SYMBOL_GPL(ata_bus_reset);
5904 EXPORT_SYMBOL_GPL(ata_std_prereset);
5905 EXPORT_SYMBOL_GPL(ata_std_softreset);
5906 EXPORT_SYMBOL_GPL(sata_std_hardreset);
5907 EXPORT_SYMBOL_GPL(ata_std_postreset);
5908 EXPORT_SYMBOL_GPL(ata_dev_revalidate);
5909 EXPORT_SYMBOL_GPL(ata_dev_classify);
5910 EXPORT_SYMBOL_GPL(ata_dev_pair);
5911 EXPORT_SYMBOL_GPL(ata_port_disable);
5912 EXPORT_SYMBOL_GPL(ata_ratelimit);
5913 EXPORT_SYMBOL_GPL(ata_wait_register);
5914 EXPORT_SYMBOL_GPL(ata_busy_sleep);
5915 EXPORT_SYMBOL_GPL(ata_port_queue_task);
5916 EXPORT_SYMBOL_GPL(ata_scsi_ioctl);
5917 EXPORT_SYMBOL_GPL(ata_scsi_queuecmd);
5918 EXPORT_SYMBOL_GPL(ata_scsi_slave_config);
5919 EXPORT_SYMBOL_GPL(ata_scsi_slave_destroy);
5920 EXPORT_SYMBOL_GPL(ata_scsi_change_queue_depth);
5921 EXPORT_SYMBOL_GPL(ata_scsi_release);
5922 EXPORT_SYMBOL_GPL(ata_host_intr);
5923 EXPORT_SYMBOL_GPL(sata_scr_valid);
5924 EXPORT_SYMBOL_GPL(sata_scr_read);
5925 EXPORT_SYMBOL_GPL(sata_scr_write);
5926 EXPORT_SYMBOL_GPL(sata_scr_write_flush);
5927 EXPORT_SYMBOL_GPL(ata_port_online);
5928 EXPORT_SYMBOL_GPL(ata_port_offline);
5929 EXPORT_SYMBOL_GPL(ata_id_string);
5930 EXPORT_SYMBOL_GPL(ata_id_c_string);
5931 EXPORT_SYMBOL_GPL(ata_scsi_simulate);
5933 EXPORT_SYMBOL_GPL(ata_pio_need_iordy);
5934 EXPORT_SYMBOL_GPL(ata_timing_compute);
5935 EXPORT_SYMBOL_GPL(ata_timing_merge);
5938 EXPORT_SYMBOL_GPL(pci_test_config_bits);
5939 EXPORT_SYMBOL_GPL(ata_pci_host_stop);
5940 EXPORT_SYMBOL_GPL(ata_pci_init_native_mode);
5941 EXPORT_SYMBOL_GPL(ata_pci_init_one);
5942 EXPORT_SYMBOL_GPL(ata_pci_remove_one);
5943 EXPORT_SYMBOL_GPL(ata_pci_device_suspend);
5944 EXPORT_SYMBOL_GPL(ata_pci_device_resume);
5945 EXPORT_SYMBOL_GPL(ata_pci_default_filter);
5946 EXPORT_SYMBOL_GPL(ata_pci_clear_simplex);
5947 #endif /* CONFIG_PCI */
5949 EXPORT_SYMBOL_GPL(ata_device_suspend);
5950 EXPORT_SYMBOL_GPL(ata_device_resume);
5951 EXPORT_SYMBOL_GPL(ata_scsi_device_suspend);
5952 EXPORT_SYMBOL_GPL(ata_scsi_device_resume);
5954 EXPORT_SYMBOL_GPL(ata_eng_timeout);
5955 EXPORT_SYMBOL_GPL(ata_port_schedule_eh);
5956 EXPORT_SYMBOL_GPL(ata_port_abort);
5957 EXPORT_SYMBOL_GPL(ata_port_freeze);
5958 EXPORT_SYMBOL_GPL(ata_eh_freeze_port);
5959 EXPORT_SYMBOL_GPL(ata_eh_thaw_port);
5960 EXPORT_SYMBOL_GPL(ata_eh_qc_complete);
5961 EXPORT_SYMBOL_GPL(ata_eh_qc_retry);
5962 EXPORT_SYMBOL_GPL(ata_do_eh);