2 * libata-core.c - helper library for ATA
4 * Maintained by: Jeff Garzik <jgarzik@pobox.com>
5 * Please ALWAYS copy linux-ide@vger.kernel.org
8 * Copyright 2003-2004 Red Hat, Inc. All rights reserved.
9 * Copyright 2003-2004 Jeff Garzik
12 * This program is free software; you can redistribute it and/or modify
13 * it under the terms of the GNU General Public License as published by
14 * the Free Software Foundation; either version 2, or (at your option)
17 * This program is distributed in the hope that it will be useful,
18 * but WITHOUT ANY WARRANTY; without even the implied warranty of
19 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
20 * GNU General Public License for more details.
22 * You should have received a copy of the GNU General Public License
23 * along with this program; see the file COPYING. If not, write to
24 * the Free Software Foundation, 675 Mass Ave, Cambridge, MA 02139, USA.
27 * libata documentation is available via 'make {ps|pdf}docs',
28 * as Documentation/DocBook/libata.*
30 * Hardware documentation available from http://www.t13.org/ and
31 * http://www.sata-io.org/
35 #include <linux/config.h>
36 #include <linux/kernel.h>
37 #include <linux/module.h>
38 #include <linux/pci.h>
39 #include <linux/init.h>
40 #include <linux/list.h>
42 #include <linux/highmem.h>
43 #include <linux/spinlock.h>
44 #include <linux/blkdev.h>
45 #include <linux/delay.h>
46 #include <linux/timer.h>
47 #include <linux/interrupt.h>
48 #include <linux/completion.h>
49 #include <linux/suspend.h>
50 #include <linux/workqueue.h>
51 #include <linux/jiffies.h>
52 #include <linux/scatterlist.h>
53 #include <scsi/scsi.h>
54 #include "scsi_priv.h"
55 #include <scsi/scsi_cmnd.h>
56 #include <scsi/scsi_host.h>
57 #include <linux/libata.h>
59 #include <asm/semaphore.h>
60 #include <asm/byteorder.h>
64 /* debounce timing parameters in msecs { interval, duration, timeout } */
65 const unsigned long sata_deb_timing_boot[] = { 5, 100, 2000 };
66 const unsigned long sata_deb_timing_eh[] = { 25, 500, 2000 };
67 const unsigned long sata_deb_timing_before_fsrst[] = { 100, 2000, 5000 };
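/*
 * Example (illustrative, not part of the original source): these triplets
 * are consumed by sata_phy_debounce()/sata_phy_resume() below, e.g.
 *
 *	rc = sata_phy_resume(ap, sata_deb_timing_eh);
 *
 * which polls SStatus every 25ms, requires DET to stay stable (and not
 * stuck at 1) for 500ms, and gives up 2000ms after it starts.
 */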
69 static unsigned int ata_dev_init_params(struct ata_device *dev,
70 u16 heads, u16 sectors);
71 static unsigned int ata_dev_set_xfermode(struct ata_device *dev);
72 static void ata_dev_xfermask(struct ata_device *dev);
74 static unsigned int ata_unique_id = 1;
75 static struct workqueue_struct *ata_wq;
77 struct workqueue_struct *ata_aux_wq;
79 int atapi_enabled = 1;
80 module_param(atapi_enabled, int, 0444);
81 MODULE_PARM_DESC(atapi_enabled, "Enable discovery of ATAPI devices (0=off, 1=on)");
84 module_param(atapi_dmadir, int, 0444);
85 MODULE_PARM_DESC(atapi_dmadir, "Enable ATAPI DMADIR bridge support (0=off, 1=on)");
88 module_param_named(fua, libata_fua, int, 0444);
89 MODULE_PARM_DESC(fua, "FUA support (0=off, 1=on)");
91 static int ata_probe_timeout = ATA_TMOUT_INTERNAL / HZ;
92 module_param(ata_probe_timeout, int, 0444);
93 MODULE_PARM_DESC(ata_probe_timeout, "Set ATA probing timeout (seconds)");
95 MODULE_AUTHOR("Jeff Garzik");
96 MODULE_DESCRIPTION("Library module for ATA devices");
97 MODULE_LICENSE("GPL");
98 MODULE_VERSION(DRV_VERSION);
102 * ata_tf_to_fis - Convert ATA taskfile to SATA FIS structure
103 * @tf: Taskfile to convert
104 * @fis: Buffer into which data will output
105 * @pmp: Port multiplier port
107 * Converts a standard ATA taskfile to a Serial ATA
108 * FIS structure (Register - Host to Device).
111 * Inherited from caller.
114 void ata_tf_to_fis(const struct ata_taskfile *tf, u8 *fis, u8 pmp)
116 fis[0] = 0x27; /* Register - Host to Device FIS */
117 fis[1] = (pmp & 0xf) | (1 << 7); /* Port multiplier number,
118 bit 7 indicates Command FIS */
119 fis[2] = tf->command;
120 fis[3] = tf->feature;
127 fis[8] = tf->hob_lbal;
128 fis[9] = tf->hob_lbam;
129 fis[10] = tf->hob_lbah;
130 fis[11] = tf->hob_feature;
133 fis[13] = tf->hob_nsect;
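/*
 * Worked example (illustrative, not part of the driver): for an LBA48 read
 * of LBA 0x12345678 with tf->command == ATA_CMD_READ_EXT (0x25) and no port
 * multiplier, the bytes above come out as
 *
 *	fis[0]  = 0x27			Register - Host to Device FIS
 *	fis[1]  = 0x80			pmp 0, bit 7 set = Command FIS
 *	fis[2]  = 0x25			the command opcode
 *	fis[8]  = 0x12			hob_lbal = LBA bits 31:24
 *	fis[13] = tf->hob_nsect		sector count bits 15:8
 *
 * i.e. the current taskfile registers occupy the low FIS bytes and the HOB
 * ("previous") registers occupy bytes 8-13, matching the SATA Register
 * Host to Device FIS layout.
 */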
144 * ata_tf_from_fis - Convert SATA FIS to ATA taskfile
145 * @fis: Buffer from which data will be input
146 * @tf: Taskfile to output
148 * Converts a serial ATA FIS structure to a standard ATA taskfile.
151 * Inherited from caller.
154 void ata_tf_from_fis(const u8 *fis, struct ata_taskfile *tf)
156 tf->command = fis[2]; /* status */
157 tf->feature = fis[3]; /* error */
164 tf->hob_lbal = fis[8];
165 tf->hob_lbam = fis[9];
166 tf->hob_lbah = fis[10];
169 tf->hob_nsect = fis[13];
172 static const u8 ata_rw_cmds[] = {
176 ATA_CMD_READ_MULTI_EXT,
177 ATA_CMD_WRITE_MULTI_EXT,
181 ATA_CMD_WRITE_MULTI_FUA_EXT,
185 ATA_CMD_PIO_READ_EXT,
186 ATA_CMD_PIO_WRITE_EXT,
199 ATA_CMD_WRITE_FUA_EXT
203 * ata_rwcmd_protocol - set taskfile r/w commands and protocol
204 * @qc: command to examine and configure
206 * Examine the device configuration and tf->flags to calculate
207 * the proper read/write commands and protocol to use.
212 int ata_rwcmd_protocol(struct ata_queued_cmd *qc)
214 struct ata_taskfile *tf = &qc->tf;
215 struct ata_device *dev = qc->dev;
218 int index, fua, lba48, write;
220 fua = (tf->flags & ATA_TFLAG_FUA) ? 4 : 0;
221 lba48 = (tf->flags & ATA_TFLAG_LBA48) ? 2 : 0;
222 write = (tf->flags & ATA_TFLAG_WRITE) ? 1 : 0;
224 if (dev->flags & ATA_DFLAG_PIO) {
225 tf->protocol = ATA_PROT_PIO;
226 index = dev->multi_count ? 0 : 8;
227 } else if (lba48 && (qc->ap->flags & ATA_FLAG_PIO_LBA48)) {
228 /* Unable to use DMA due to host limitation */
229 tf->protocol = ATA_PROT_PIO;
230 index = dev->multi_count ? 0 : 8;
232 tf->protocol = ATA_PROT_DMA;
236 cmd = ata_rw_cmds[index + fua + lba48 + write];
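/*
 * Worked example (illustrative): the lookup index is built from three
 * independent bits on top of the multi/PIO/DMA base. A multi-sector PIO
 * write with FUA and LBA48 gives index = 0 (multi) + 4 (fua) + 2 (lba48) +
 * 1 (write) = 7, selecting ATA_CMD_WRITE_MULTI_FUA_EXT; a plain LBA48 PIO
 * read without multi-count gives 8 + 0 + 2 + 0 = 10, i.e.
 * ATA_CMD_PIO_READ_EXT.
 */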
245 * ata_pack_xfermask - Pack pio, mwdma and udma masks into xfer_mask
246 * @pio_mask: pio_mask
247 * @mwdma_mask: mwdma_mask
248 * @udma_mask: udma_mask
250 * Pack @pio_mask, @mwdma_mask and @udma_mask into a single
251 * unsigned int xfer_mask.
259 static unsigned int ata_pack_xfermask(unsigned int pio_mask,
260 unsigned int mwdma_mask,
261 unsigned int udma_mask)
263 return ((pio_mask << ATA_SHIFT_PIO) & ATA_MASK_PIO) |
264 ((mwdma_mask << ATA_SHIFT_MWDMA) & ATA_MASK_MWDMA) |
265 ((udma_mask << ATA_SHIFT_UDMA) & ATA_MASK_UDMA);
269 * ata_unpack_xfermask - Unpack xfer_mask into pio, mwdma and udma masks
270 * @xfer_mask: xfer_mask to unpack
271 * @pio_mask: resulting pio_mask
272 * @mwdma_mask: resulting mwdma_mask
273 * @udma_mask: resulting udma_mask
275 * Unpack @xfer_mask into @pio_mask, @mwdma_mask and @udma_mask.
276 * Any NULL destination masks will be ignored.
278 static void ata_unpack_xfermask(unsigned int xfer_mask,
279 unsigned int *pio_mask,
280 unsigned int *mwdma_mask,
281 unsigned int *udma_mask)
284 *pio_mask = (xfer_mask & ATA_MASK_PIO) >> ATA_SHIFT_PIO;
286 *mwdma_mask = (xfer_mask & ATA_MASK_MWDMA) >> ATA_SHIFT_MWDMA;
288 *udma_mask = (xfer_mask & ATA_MASK_UDMA) >> ATA_SHIFT_UDMA;
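/*
 * Usage sketch (illustrative, not from the original source): the two helpers
 * are inverses, each transfer type occupying its own bit range of the packed
 * word:
 *
 *	unsigned int xfer_mask, pio, mwdma, udma;
 *
 *	xfer_mask = ata_pack_xfermask(0x1f, 0x07, 0x3f);
 *	ata_unpack_xfermask(xfer_mask, &pio, &mwdma, &udma);
 *
 * afterwards pio == 0x1f (PIO0-4), mwdma == 0x07 (MWDMA0-2) and
 * udma == 0x3f (UDMA0-5) again.
 */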
291 static const struct ata_xfer_ent {
295 { ATA_SHIFT_PIO, ATA_BITS_PIO, XFER_PIO_0 },
296 { ATA_SHIFT_MWDMA, ATA_BITS_MWDMA, XFER_MW_DMA_0 },
297 { ATA_SHIFT_UDMA, ATA_BITS_UDMA, XFER_UDMA_0 },
302 * ata_xfer_mask2mode - Find matching XFER_* for the given xfer_mask
303 * @xfer_mask: xfer_mask of interest
305 * Return matching XFER_* value for @xfer_mask. Only the highest
306 * bit of @xfer_mask is considered.
312 * Matching XFER_* value, 0 if no match found.
314 static u8 ata_xfer_mask2mode(unsigned int xfer_mask)
316 int highbit = fls(xfer_mask) - 1;
317 const struct ata_xfer_ent *ent;
319 for (ent = ata_xfer_tbl; ent->shift >= 0; ent++)
320 if (highbit >= ent->shift && highbit < ent->shift + ent->bits)
321 return ent->base + highbit - ent->shift;
326 * ata_xfer_mode2mask - Find matching xfer_mask for XFER_*
327 * @xfer_mode: XFER_* of interest
329 * Return matching xfer_mask for @xfer_mode.
335 * Matching xfer_mask, 0 if no match found.
337 static unsigned int ata_xfer_mode2mask(u8 xfer_mode)
339 const struct ata_xfer_ent *ent;
341 for (ent = ata_xfer_tbl; ent->shift >= 0; ent++)
342 if (xfer_mode >= ent->base && xfer_mode < ent->base + ent->bits)
343 return 1 << (ent->shift + xfer_mode - ent->base);
348 * ata_xfer_mode2shift - Find matching xfer_shift for XFER_*
349 * @xfer_mode: XFER_* of interest
351 * Return matching xfer_shift for @xfer_mode.
357 * Matching xfer_shift, -1 if no match found.
359 static int ata_xfer_mode2shift(unsigned int xfer_mode)
361 const struct ata_xfer_ent *ent;
363 for (ent = ata_xfer_tbl; ent->shift >= 0; ent++)
364 if (xfer_mode >= ent->base && xfer_mode < ent->base + ent->bits)
370 * ata_mode_string - convert xfer_mask to string
371 * @xfer_mask: mask of bits supported; only highest bit counts.
373 * Determine string which represents the highest speed
374 * (highest bit in @xfer_mask).
380 * Constant C string representing highest speed listed in
381 * @xfer_mask, or the constant C string "<n/a>".
383 static const char *ata_mode_string(unsigned int xfer_mask)
385 static const char * const xfer_mode_str[] = {
405 highbit = fls(xfer_mask) - 1;
406 if (highbit >= 0 && highbit < ARRAY_SIZE(xfer_mode_str))
407 return xfer_mode_str[highbit];
411 static const char *sata_spd_string(unsigned int spd)
413 static const char * const spd_str[] = {
418 if (spd == 0 || (spd - 1) >= ARRAY_SIZE(spd_str))
420 return spd_str[spd - 1];
423 void ata_dev_disable(struct ata_device *dev)
425 if (ata_dev_enabled(dev) && ata_msg_drv(dev->ap)) {
426 ata_dev_printk(dev, KERN_WARNING, "disabled\n");
432 * ata_pio_devchk - PATA device presence detection
433 * @ap: ATA channel to examine
434 * @device: Device to examine (starting at zero)
436 * This technique was originally described in
437 * Hale Landis's ATADRVR (www.ata-atapi.com), and
438 * later found its way into the ATA/ATAPI spec.
440 * Write a pattern to the ATA shadow registers,
441 * and if a device is present, it will respond by
442 * correctly storing and echoing back the
443 * ATA shadow register contents.
449 static unsigned int ata_pio_devchk(struct ata_port *ap,
452 struct ata_ioports *ioaddr = &ap->ioaddr;
455 ap->ops->dev_select(ap, device);
457 outb(0x55, ioaddr->nsect_addr);
458 outb(0xaa, ioaddr->lbal_addr);
460 outb(0xaa, ioaddr->nsect_addr);
461 outb(0x55, ioaddr->lbal_addr);
463 outb(0x55, ioaddr->nsect_addr);
464 outb(0xaa, ioaddr->lbal_addr);
466 nsect = inb(ioaddr->nsect_addr);
467 lbal = inb(ioaddr->lbal_addr);
469 if ((nsect == 0x55) && (lbal == 0xaa))
470 return 1; /* we found a device */
472 return 0; /* nothing found */
476 * ata_mmio_devchk - PATA device presence detection
477 * @ap: ATA channel to examine
478 * @device: Device to examine (starting at zero)
480 * This technique was originally described in
481 * Hale Landis's ATADRVR (www.ata-atapi.com), and
482 * later found its way into the ATA/ATAPI spec.
484 * Write a pattern to the ATA shadow registers,
485 * and if a device is present, it will respond by
486 * correctly storing and echoing back the
487 * ATA shadow register contents.
493 static unsigned int ata_mmio_devchk(struct ata_port *ap,
496 struct ata_ioports *ioaddr = &ap->ioaddr;
499 ap->ops->dev_select(ap, device);
501 writeb(0x55, (void __iomem *) ioaddr->nsect_addr);
502 writeb(0xaa, (void __iomem *) ioaddr->lbal_addr);
504 writeb(0xaa, (void __iomem *) ioaddr->nsect_addr);
505 writeb(0x55, (void __iomem *) ioaddr->lbal_addr);
507 writeb(0x55, (void __iomem *) ioaddr->nsect_addr);
508 writeb(0xaa, (void __iomem *) ioaddr->lbal_addr);
510 nsect = readb((void __iomem *) ioaddr->nsect_addr);
511 lbal = readb((void __iomem *) ioaddr->lbal_addr);
513 if ((nsect == 0x55) && (lbal == 0xaa))
514 return 1; /* we found a device */
516 return 0; /* nothing found */
520 * ata_devchk - PATA device presence detection
521 * @ap: ATA channel to examine
522 * @device: Device to examine (starting at zero)
524 * Dispatch ATA device presence detection, depending
525 * on whether we are using PIO or MMIO to talk to the
526 * ATA shadow registers.
532 static unsigned int ata_devchk(struct ata_port *ap,
535 if (ap->flags & ATA_FLAG_MMIO)
536 return ata_mmio_devchk(ap, device);
537 return ata_pio_devchk(ap, device);
541 * ata_dev_classify - determine device type based on ATA-spec signature
542 * @tf: ATA taskfile register set for device to be identified
544 * Determine from taskfile register contents whether a device is
545 * ATA or ATAPI, as per "Signature and persistence" section
546 * of ATA/PI spec (volume 1, sect 5.14).
552 * Device type, %ATA_DEV_ATA, %ATA_DEV_ATAPI, or %ATA_DEV_UNKNOWN
553 * in the event of failure.
556 unsigned int ata_dev_classify(const struct ata_taskfile *tf)
558 /* Apple's open source Darwin code hints that some devices only
559 * put a proper signature into the LBA mid/high registers,
560 * so we check only those. It's sufficient for uniqueness.
563 if (((tf->lbam == 0) && (tf->lbah == 0)) ||
564 ((tf->lbam == 0x3c) && (tf->lbah == 0xc3))) {
565 DPRINTK("found ATA device by sig\n");
569 if (((tf->lbam == 0x14) && (tf->lbah == 0xeb)) ||
570 ((tf->lbam == 0x69) && (tf->lbah == 0x96))) {
571 DPRINTK("found ATAPI device by sig\n");
572 return ATA_DEV_ATAPI;
575 DPRINTK("unknown device\n");
576 return ATA_DEV_UNKNOWN;
580 * ata_dev_try_classify - Parse returned ATA device signature
581 * @ap: ATA channel to examine
582 * @device: Device to examine (starting at zero)
583 * @r_err: Value of error register on completion
585 * After an event -- SRST, E.D.D., or SATA COMRESET -- occurs,
586 * an ATA/ATAPI-defined set of values is placed in the ATA
587 * shadow registers, indicating the results of device detection
590 * Select the ATA device, and read the values from the ATA shadow
591 * registers. Then parse according to the Error register value,
592 * and the spec-defined values examined by ata_dev_classify().
598 * Device type - %ATA_DEV_ATA, %ATA_DEV_ATAPI or %ATA_DEV_NONE.
602 ata_dev_try_classify(struct ata_port *ap, unsigned int device, u8 *r_err)
604 struct ata_taskfile tf;
608 ap->ops->dev_select(ap, device);
610 memset(&tf, 0, sizeof(tf));
612 ap->ops->tf_read(ap, &tf);
617 /* see if device passed diags */
620 else if ((device == 0) && (err == 0x81))
625 /* determine if device is ATA or ATAPI */
626 class = ata_dev_classify(&tf);
628 if (class == ATA_DEV_UNKNOWN)
630 if ((class == ATA_DEV_ATA) && (ata_chk_status(ap) == 0))
636 * ata_id_string - Convert IDENTIFY DEVICE page into string
637 * @id: IDENTIFY DEVICE results we will examine
638 * @s: string into which data is output
639 * @ofs: offset into identify device page
640 * @len: length of string to return. must be an even number.
642 * The strings in the IDENTIFY DEVICE page are broken up into
643 * 16-bit chunks. Run through the string, and output each
644 * 8-bit chunk linearly, regardless of platform.
650 void ata_id_string(const u16 *id, unsigned char *s,
651 unsigned int ofs, unsigned int len)
670 * ata_id_c_string - Convert IDENTIFY DEVICE page into C string
671 * @id: IDENTIFY DEVICE results we will examine
672 * @s: string into which data is output
673 * @ofs: offset into identify device page
674 * @len: length of string to return. must be an odd number.
676 * This function is identical to ata_id_string except that it
677 * trims trailing spaces and terminates the resulting string with
678 * null. @len must be actual maximum length (even number) + 1.
683 void ata_id_c_string(const u16 *id, unsigned char *s,
684 unsigned int ofs, unsigned int len)
690 ata_id_string(id, s, ofs, len - 1);
692 p = s + strnlen(s, len - 1);
693 while (p > s && p[-1] == ' ')
698 static u64 ata_id_n_sectors(const u16 *id)
700 if (ata_id_has_lba(id)) {
701 if (ata_id_has_lba48(id))
702 return ata_id_u64(id, 100);
704 return ata_id_u32(id, 60);
706 if (ata_id_current_chs_valid(id))
707 return ata_id_u32(id, 57);
709 return id[1] * id[3] * id[6];
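/*
 * Worked example (illustrative): a legacy CHS-only drive reporting
 * 1048 cylinders (word 1), 16 heads (word 3) and 63 sectors per track
 * (word 6) yields 1048 * 16 * 63 = 1056384 sectors, i.e. roughly 516 MiB
 * at 512 bytes per sector.
 */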
714 * ata_noop_dev_select - Select device 0/1 on ATA bus
715 * @ap: ATA channel to manipulate
716 * @device: ATA device (numbered from zero) to select
718 * This function performs no action.
720 * May be used as the dev_select() entry in ata_port_operations.
725 void ata_noop_dev_select (struct ata_port *ap, unsigned int device)
731 * ata_std_dev_select - Select device 0/1 on ATA bus
732 * @ap: ATA channel to manipulate
733 * @device: ATA device (numbered from zero) to select
735 * Use the method defined in the ATA specification to
736 * make either device 0, or device 1, active on the
737 * ATA channel. Works with both PIO and MMIO.
739 * May be used as the dev_select() entry in ata_port_operations.
745 void ata_std_dev_select (struct ata_port *ap, unsigned int device)
750 tmp = ATA_DEVICE_OBS;
752 tmp = ATA_DEVICE_OBS | ATA_DEV1;
754 if (ap->flags & ATA_FLAG_MMIO) {
755 writeb(tmp, (void __iomem *) ap->ioaddr.device_addr);
757 outb(tmp, ap->ioaddr.device_addr);
759 ata_pause(ap); /* needed; also flushes, for mmio */
763 * ata_dev_select - Select device 0/1 on ATA bus
764 * @ap: ATA channel to manipulate
765 * @device: ATA device (numbered from zero) to select
766 * @wait: non-zero to wait for Status register BSY bit to clear
767 * @can_sleep: non-zero if context allows sleeping
769 * Use the method defined in the ATA specification to
770 * make either device 0, or device 1, active on the
773 * This is a high-level version of ata_std_dev_select(),
774 * which additionally provides the services of inserting
775 * the proper pauses and status polling, where needed.
781 void ata_dev_select(struct ata_port *ap, unsigned int device,
782 unsigned int wait, unsigned int can_sleep)
784 if (ata_msg_probe(ap)) {
785 ata_port_printk(ap, KERN_INFO, "ata_dev_select: ENTER, ata%u: "
786 "device %u, wait %u\n",
787 ap->id, device, wait);
793 ap->ops->dev_select(ap, device);
796 if (can_sleep && ap->device[device].class == ATA_DEV_ATAPI)
803 * ata_dump_id - IDENTIFY DEVICE info debugging output
804 * @id: IDENTIFY DEVICE page to dump
806 * Dump selected 16-bit words from the given IDENTIFY DEVICE
813 static inline void ata_dump_id(const u16 *id)
815 DPRINTK("49==0x%04x "
825 DPRINTK("80==0x%04x "
835 DPRINTK("88==0x%04x "
842 * ata_id_xfermask - Compute xfermask from the given IDENTIFY data
843 * @id: IDENTIFY data to compute xfer mask from
845 * Compute the xfermask for this device. This is not as trivial
846 * as it seems if we must consider early devices correctly.
848 * FIXME: pre IDE drive timing (do we care ?).
856 static unsigned int ata_id_xfermask(const u16 *id)
858 unsigned int pio_mask, mwdma_mask, udma_mask;
860 /* Usual case. Word 53 indicates word 64 is valid */
861 if (id[ATA_ID_FIELD_VALID] & (1 << 1)) {
862 pio_mask = id[ATA_ID_PIO_MODES] & 0x03;
866 /* If word 64 isn't valid then Word 51 high byte holds
867 * the PIO timing number for the maximum. Turn it into
870 pio_mask = (2 << (id[ATA_ID_OLD_PIO_MODES] & 0xFF)) - 1 ;
872 /* But wait.. there's more. Design your standards by
873 * committee and you too can get a free iordy field to
874 * process. However it's the speeds, not the modes, that
875 * are supported... Note drivers using the timing API
876 * will get this right anyway
880 mwdma_mask = id[ATA_ID_MWDMA_MODES] & 0x07;
883 if (id[ATA_ID_FIELD_VALID] & (1 << 2))
884 udma_mask = id[ATA_ID_UDMA_MODES] & 0xff;
886 return ata_pack_xfermask(pio_mask, mwdma_mask, udma_mask);
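/*
 * Worked example (illustrative): a drive supporting MWDMA 0-2 and UDMA 0-5
 * reports 0x0007 in word 63 (ATA_ID_MWDMA_MODES) and 0x003f in the low byte
 * of word 88 (ATA_ID_UDMA_MODES, only trusted when word 53 bit 2 is set),
 * so mwdma_mask = 0x07 and udma_mask = 0x3f before the three masks are
 * folded together by ata_pack_xfermask().
 */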
890 * ata_port_queue_task - Queue port_task
891 * @ap: The ata_port to queue port_task for
892 * @fn: workqueue function to be scheduled
893 * @data: data value to pass to workqueue function
894 * @delay: delay time for workqueue function
896 * Schedule @fn(@data) for execution after @delay jiffies using
897 * port_task. There is one port_task per port and it's the
898 * user's (low-level driver's) responsibility to make sure that only
899 * one task is active at any given time.
901 * libata core layer takes care of synchronization between
902 * port_task and EH. ata_port_queue_task() may be ignored for EH
906 * Inherited from caller.
908 void ata_port_queue_task(struct ata_port *ap, void (*fn)(void *), void *data,
913 if (ap->flags & ATA_FLAG_FLUSH_PORT_TASK)
916 PREPARE_WORK(&ap->port_task, fn, data);
919 rc = queue_work(ata_wq, &ap->port_task);
921 rc = queue_delayed_work(ata_wq, &ap->port_task, delay);
923 /* rc == 0 means that another user is using port task */
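/*
 * Usage sketch (illustrative; "my_pio_task" is a hypothetical LLDD function,
 * not part of libata): a driver that runs its PIO state machine from process
 * context can reschedule itself like this:
 *
 *	static void my_pio_task(void *data)
 *	{
 *		struct ata_port *ap = data;
 *
 *		...transfer some data, then poll again in about 10ms...
 *		ata_port_queue_task(ap, my_pio_task, ap, msecs_to_jiffies(10));
 *	}
 *
 * Only one such task may be active per port at any given time.
 */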
928 * ata_port_flush_task - Flush port_task
929 * @ap: The ata_port to flush port_task for
931 * After this function completes, port_task is guaranteed not to
932 * be running or scheduled.
935 * Kernel thread context (may sleep)
937 void ata_port_flush_task(struct ata_port *ap)
943 spin_lock_irqsave(ap->lock, flags);
944 ap->flags |= ATA_FLAG_FLUSH_PORT_TASK;
945 spin_unlock_irqrestore(ap->lock, flags);
947 DPRINTK("flush #1\n");
948 flush_workqueue(ata_wq);
951 * At this point, if a task is running, it's guaranteed to see
952 * the FLUSH flag; thus, it will never queue pio tasks again.
955 if (!cancel_delayed_work(&ap->port_task)) {
957 ata_port_printk(ap, KERN_DEBUG, "%s: flush #2\n", __FUNCTION__);
958 flush_workqueue(ata_wq);
961 spin_lock_irqsave(ap->lock, flags);
962 ap->flags &= ~ATA_FLAG_FLUSH_PORT_TASK;
963 spin_unlock_irqrestore(ap->lock, flags);
966 ata_port_printk(ap, KERN_DEBUG, "%s: EXIT\n", __FUNCTION__);
969 void ata_qc_complete_internal(struct ata_queued_cmd *qc)
971 struct completion *waiting = qc->private_data;
977 * ata_exec_internal - execute libata internal command
978 * @dev: Device to which the command is sent
979 * @tf: Taskfile registers for the command and the result
980 * @cdb: CDB for packet command
981 * @dma_dir: Data transfer direction of the command
982 * @buf: Data buffer of the command
983 * @buflen: Length of data buffer
985 * Executes libata internal command with timeout. @tf contains
986 * command on entry and result on return. Timeout and error
987 * conditions are reported via return value. No recovery action
988 * is taken after a command times out. It's the caller's duty to
989 * clean up after timeout.
992 * None. Should be called with kernel context, might sleep.
995 * Zero on success, AC_ERR_* mask on failure
997 unsigned ata_exec_internal(struct ata_device *dev,
998 struct ata_taskfile *tf, const u8 *cdb,
999 int dma_dir, void *buf, unsigned int buflen)
1001 struct ata_port *ap = dev->ap;
1002 u8 command = tf->command;
1003 struct ata_queued_cmd *qc;
1004 unsigned int tag, preempted_tag;
1005 u32 preempted_sactive, preempted_qc_active;
1006 DECLARE_COMPLETION(wait);
1007 unsigned long flags;
1008 unsigned int err_mask;
1011 spin_lock_irqsave(ap->lock, flags);
1013 /* no internal command while frozen */
1014 if (ap->flags & ATA_FLAG_FROZEN) {
1015 spin_unlock_irqrestore(ap->lock, flags);
1016 return AC_ERR_SYSTEM;
1019 /* initialize internal qc */
1021 /* XXX: Tag 0 is used for drivers with legacy EH as some
1022 * drivers choke if any other tag is given. This breaks
1023 * ata_tag_internal() test for those drivers. Don't use new
1024 * EH stuff without converting to it.
1026 if (ap->ops->error_handler)
1027 tag = ATA_TAG_INTERNAL;
1031 if (test_and_set_bit(tag, &ap->qc_allocated))
1033 qc = __ata_qc_from_tag(ap, tag);
1041 preempted_tag = ap->active_tag;
1042 preempted_sactive = ap->sactive;
1043 preempted_qc_active = ap->qc_active;
1044 ap->active_tag = ATA_TAG_POISON;
1048 /* prepare & issue qc */
1051 memcpy(qc->cdb, cdb, ATAPI_CDB_LEN);
1052 qc->flags |= ATA_QCFLAG_RESULT_TF;
1053 qc->dma_dir = dma_dir;
1054 if (dma_dir != DMA_NONE) {
1055 ata_sg_init_one(qc, buf, buflen);
1056 qc->nsect = buflen / ATA_SECT_SIZE;
1059 qc->private_data = &wait;
1060 qc->complete_fn = ata_qc_complete_internal;
1064 spin_unlock_irqrestore(ap->lock, flags);
1066 rc = wait_for_completion_timeout(&wait, ata_probe_timeout);
1068 ata_port_flush_task(ap);
1071 spin_lock_irqsave(ap->lock, flags);
1073 /* We're racing with irq here. If we lose, the
1074 * following test prevents us from completing the qc
1075 * twice. If we win, the port is frozen and will be
1076 * cleaned up by ->post_internal_cmd().
1078 if (qc->flags & ATA_QCFLAG_ACTIVE) {
1079 qc->err_mask |= AC_ERR_TIMEOUT;
1081 if (ap->ops->error_handler)
1082 ata_port_freeze(ap);
1084 ata_qc_complete(qc);
1086 if (ata_msg_warn(ap))
1087 ata_dev_printk(dev, KERN_WARNING,
1088 "qc timeout (cmd 0x%x)\n", command);
1091 spin_unlock_irqrestore(ap->lock, flags);
1094 /* do post_internal_cmd */
1095 if (ap->ops->post_internal_cmd)
1096 ap->ops->post_internal_cmd(qc);
1098 if (qc->flags & ATA_QCFLAG_FAILED && !qc->err_mask) {
1099 if (ata_msg_warn(ap))
1100 ata_dev_printk(dev, KERN_WARNING,
1101 "zero err_mask for failed "
1102 "internal command, assuming AC_ERR_OTHER\n");
1103 qc->err_mask |= AC_ERR_OTHER;
1107 spin_lock_irqsave(ap->lock, flags);
1109 *tf = qc->result_tf;
1110 err_mask = qc->err_mask;
1113 ap->active_tag = preempted_tag;
1114 ap->sactive = preempted_sactive;
1115 ap->qc_active = preempted_qc_active;
1117 /* XXX - Some LLDDs (sata_mv) disable port on command failure.
1118 * Until those drivers are fixed, we detect the condition
1119 * here, fail the command with AC_ERR_SYSTEM and reenable the
1122 * Note that this doesn't change any behavior as internal
1123 * command failure results in disabling the device in the
1124 * higher layer for LLDDs without new reset/EH callbacks.
1126 * Kill the following code as soon as those drivers are fixed.
1128 if (ap->flags & ATA_FLAG_DISABLED) {
1129 err_mask |= AC_ERR_SYSTEM;
1133 spin_unlock_irqrestore(ap->lock, flags);
1139 * ata_do_simple_cmd - execute simple internal command
1140 * @dev: Device to which the command is sent
1141 * @cmd: Opcode to execute
1143 * Execute a 'simple' command, that only consists of the opcode
1144 * 'cmd' itself, without filling any other registers
1147 * Kernel thread context (may sleep).
1150 * Zero on success, AC_ERR_* mask on failure
1152 unsigned int ata_do_simple_cmd(struct ata_device *dev, u8 cmd)
1154 struct ata_taskfile tf;
1156 ata_tf_init(dev, &tf);
1159 tf.flags |= ATA_TFLAG_DEVICE;
1160 tf.protocol = ATA_PROT_NODATA;
1162 return ata_exec_internal(dev, &tf, NULL, DMA_NONE, NULL, 0);
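/*
 * Usage sketch (illustrative): a power-management path can spin a disk down
 * with nothing but the opcode, e.g.
 *
 *	unsigned int err_mask = ata_do_simple_cmd(dev, ATA_CMD_STANDBYNOW1);
 *	if (err_mask)
 *		ata_dev_printk(dev, KERN_ERR, "STANDBY IMMEDIATE failed\n");
 *
 * since STANDBY IMMEDIATE needs no other taskfile registers filled in.
 */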
1166 * ata_pio_need_iordy - check if iordy needed
1169 * Check if the current speed of the device requires IORDY. Used
1170 * by various controllers for chip configuration.
1173 unsigned int ata_pio_need_iordy(const struct ata_device *adev)
1176 int speed = adev->pio_mode - XFER_PIO_0;
1183 /* If we have no drive specific rule, then PIO 2 is non IORDY */
1185 if (adev->id[ATA_ID_FIELD_VALID] & 2) { /* EIDE */
1186 pio = adev->id[ATA_ID_EIDE_PIO];
1187 /* Is the speed faster than the drive allows non IORDY ? */
1189 /* This is cycle times not frequency - watch the logic! */
1190 if (pio > 240) /* PIO2 is 240nS per cycle */
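/*
 * Worked example (illustrative): a drive whose word 67 (ATA_ID_EIDE_PIO,
 * minimum PIO cycle time without IORDY) reads 383ns can only manage roughly
 * PIO1 timing without flow control; 383 > 240 (the PIO2 cycle time), so
 * IORDY is reported as required.
 */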
1199 * ata_dev_read_id - Read ID data from the specified device
1200 * @dev: target device
1201 * @p_class: pointer to class of the target device (may be changed)
1202 * @post_reset: is this read ID post-reset?
1203 * @id: buffer to read IDENTIFY data into
1205 * Read ID data from the specified device. ATA_CMD_ID_ATA is
1206 * performed on ATA devices and ATA_CMD_ID_ATAPI on ATAPI
1207 * devices. This function also issues ATA_CMD_INIT_DEV_PARAMS
1208 * for pre-ATA4 drives.
1211 * Kernel thread context (may sleep)
1214 * 0 on success, -errno otherwise.
1216 int ata_dev_read_id(struct ata_device *dev, unsigned int *p_class,
1217 int post_reset, u16 *id)
1219 struct ata_port *ap = dev->ap;
1220 unsigned int class = *p_class;
1221 struct ata_taskfile tf;
1222 unsigned int err_mask = 0;
1226 if (ata_msg_ctl(ap))
1227 ata_dev_printk(dev, KERN_DEBUG, "%s: ENTER, host %u, dev %u\n",
1228 __FUNCTION__, ap->id, dev->devno);
1230 ata_dev_select(ap, dev->devno, 1, 1); /* select device 0/1 */
1233 ata_tf_init(dev, &tf);
1237 tf.command = ATA_CMD_ID_ATA;
1240 tf.command = ATA_CMD_ID_ATAPI;
1244 reason = "unsupported class";
1248 tf.protocol = ATA_PROT_PIO;
1250 err_mask = ata_exec_internal(dev, &tf, NULL, DMA_FROM_DEVICE,
1251 id, sizeof(id[0]) * ATA_ID_WORDS);
1254 reason = "I/O error";
1258 swap_buf_le16(id, ATA_ID_WORDS);
1261 if ((class == ATA_DEV_ATA) != (ata_id_is_ata(id) | ata_id_is_cfa(id))) {
1263 reason = "device reports illegal type";
1267 if (post_reset && class == ATA_DEV_ATA) {
1269 * The exact sequence expected by certain pre-ATA4 drives is:
1272 * INITIALIZE DEVICE PARAMETERS
1274 * Some drives were very specific about that exact sequence.
1276 if (ata_id_major_version(id) < 4 || !ata_id_has_lba(id)) {
1277 err_mask = ata_dev_init_params(dev, id[3], id[6]);
1280 reason = "INIT_DEV_PARAMS failed";
1284 /* current CHS translation info (id[53-58]) might be
1285 * changed. reread the identify device info.
1297 if (ata_msg_warn(ap))
1298 ata_dev_printk(dev, KERN_WARNING, "failed to IDENTIFY "
1299 "(%s, err_mask=0x%x)\n", reason, err_mask);
1303 static inline u8 ata_dev_knobble(struct ata_device *dev)
1305 return ((dev->ap->cbl == ATA_CBL_SATA) && (!ata_id_is_sata(dev->id)));
1308 static void ata_dev_config_ncq(struct ata_device *dev,
1309 char *desc, size_t desc_sz)
1311 struct ata_port *ap = dev->ap;
1312 int hdepth = 0, ddepth = ata_id_queue_depth(dev->id);
1314 if (!ata_id_has_ncq(dev->id)) {
1319 if (ap->flags & ATA_FLAG_NCQ) {
1320 hdepth = min(ap->host->can_queue, ATA_MAX_QUEUE - 1);
1321 dev->flags |= ATA_DFLAG_NCQ;
1324 if (hdepth >= ddepth)
1325 snprintf(desc, desc_sz, "NCQ (depth %d)", ddepth);
1327 snprintf(desc, desc_sz, "NCQ (depth %d/%d)", hdepth, ddepth);
1331 * ata_dev_configure - Configure the specified ATA/ATAPI device
1332 * @dev: Target device to configure
1333 * @print_info: Enable device info printout
1335 * Configure @dev according to @dev->id. Generic and low-level
1336 * driver specific fixups are also applied.
1339 * Kernel thread context (may sleep)
1342 * 0 on success, -errno otherwise
1344 int ata_dev_configure(struct ata_device *dev, int print_info)
1346 struct ata_port *ap = dev->ap;
1347 const u16 *id = dev->id;
1348 unsigned int xfer_mask;
1351 if (!ata_dev_enabled(dev) && ata_msg_info(ap)) {
1352 ata_dev_printk(dev, KERN_INFO, "%s: ENTER/EXIT (host %u, dev %u) -- nodev\n",
1353 __FUNCTION__, ap->id, dev->devno);
1357 if (ata_msg_probe(ap))
1358 ata_dev_printk(dev, KERN_DEBUG, "%s: ENTER, host %u, dev %u\n",
1359 __FUNCTION__, ap->id, dev->devno);
1361 /* print device capabilities */
1362 if (ata_msg_probe(ap))
1363 ata_dev_printk(dev, KERN_DEBUG, "%s: cfg 49:%04x 82:%04x 83:%04x "
1364 "84:%04x 85:%04x 86:%04x 87:%04x 88:%04x\n",
1366 id[49], id[82], id[83], id[84],
1367 id[85], id[86], id[87], id[88]);
1369 /* initialize to-be-configured parameters */
1370 dev->flags &= ~ATA_DFLAG_CFG_MASK;
1371 dev->max_sectors = 0;
1379 * common ATA, ATAPI feature tests
1382 /* find max transfer mode; for printk only */
1383 xfer_mask = ata_id_xfermask(id);
1385 if (ata_msg_probe(ap))
1388 /* ATA-specific feature tests */
1389 if (dev->class == ATA_DEV_ATA) {
1390 dev->n_sectors = ata_id_n_sectors(id);
1392 if (ata_id_has_lba(id)) {
1393 const char *lba_desc;
1397 dev->flags |= ATA_DFLAG_LBA;
1398 if (ata_id_has_lba48(id)) {
1399 dev->flags |= ATA_DFLAG_LBA48;
1404 ata_dev_config_ncq(dev, ncq_desc, sizeof(ncq_desc));
1406 /* print device info to dmesg */
1407 if (ata_msg_info(ap))
1408 ata_dev_printk(dev, KERN_INFO, "ATA-%d, "
1409 "max %s, %Lu sectors: %s %s\n",
1410 ata_id_major_version(id),
1411 ata_mode_string(xfer_mask),
1412 (unsigned long long)dev->n_sectors,
1413 lba_desc, ncq_desc);
1417 /* Default translation */
1418 dev->cylinders = id[1];
1420 dev->sectors = id[6];
1422 if (ata_id_current_chs_valid(id)) {
1423 /* Current CHS translation is valid. */
1424 dev->cylinders = id[54];
1425 dev->heads = id[55];
1426 dev->sectors = id[56];
1429 /* print device info to dmesg */
1430 if (ata_msg_info(ap))
1431 ata_dev_printk(dev, KERN_INFO, "ATA-%d, "
1432 "max %s, %Lu sectors: CHS %u/%u/%u\n",
1433 ata_id_major_version(id),
1434 ata_mode_string(xfer_mask),
1435 (unsigned long long)dev->n_sectors,
1436 dev->cylinders, dev->heads, dev->sectors);
1439 if (dev->id[59] & 0x100) {
1440 dev->multi_count = dev->id[59] & 0xff;
1441 if (ata_msg_info(ap))
1442 ata_dev_printk(dev, KERN_INFO, "ata%u: dev %u multi count %u\n",
1443 ap->id, dev->devno, dev->multi_count);
1449 /* ATAPI-specific feature tests */
1450 else if (dev->class == ATA_DEV_ATAPI) {
1451 char *cdb_intr_string = "";
1453 rc = atapi_cdb_len(id);
1454 if ((rc < 12) || (rc > ATAPI_CDB_LEN)) {
1455 if (ata_msg_warn(ap))
1456 ata_dev_printk(dev, KERN_WARNING,
1457 "unsupported CDB len\n");
1461 dev->cdb_len = (unsigned int) rc;
1463 if (ata_id_cdb_intr(dev->id)) {
1464 dev->flags |= ATA_DFLAG_CDB_INTR;
1465 cdb_intr_string = ", CDB intr";
1468 /* print device info to dmesg */
1469 if (ata_msg_info(ap))
1470 ata_dev_printk(dev, KERN_INFO, "ATAPI, max %s%s\n",
1471 ata_mode_string(xfer_mask),
1475 ap->host->max_cmd_len = 0;
1476 for (i = 0; i < ATA_MAX_DEVICES; i++)
1477 ap->host->max_cmd_len = max_t(unsigned int,
1478 ap->host->max_cmd_len,
1479 ap->device[i].cdb_len);
1481 /* limit bridge transfers to udma5, 200 sectors */
1482 if (ata_dev_knobble(dev)) {
1483 if (ata_msg_info(ap))
1484 ata_dev_printk(dev, KERN_INFO,
1485 "applying bridge limits\n");
1486 dev->udma_mask &= ATA_UDMA5;
1487 dev->max_sectors = ATA_MAX_SECTORS;
1490 if (ap->ops->dev_config)
1491 ap->ops->dev_config(ap, dev);
1493 if (ata_msg_probe(ap))
1494 ata_dev_printk(dev, KERN_DEBUG, "%s: EXIT, drv_stat = 0x%x\n",
1495 __FUNCTION__, ata_chk_status(ap));
1499 if (ata_msg_probe(ap))
1500 ata_dev_printk(dev, KERN_DEBUG,
1501 "%s: EXIT, err\n", __FUNCTION__);
1506 * ata_bus_probe - Reset and probe ATA bus
1509 * Master ATA bus probing function. Initiates a hardware-dependent
1510 * bus reset, then attempts to identify any devices found on
1514 * PCI/etc. bus probe sem.
1517 * Zero on success, negative errno otherwise.
1520 static int ata_bus_probe(struct ata_port *ap)
1522 unsigned int classes[ATA_MAX_DEVICES];
1523 int tries[ATA_MAX_DEVICES];
1524 int i, rc, down_xfermask;
1525 struct ata_device *dev;
1529 for (i = 0; i < ATA_MAX_DEVICES; i++)
1530 tries[i] = ATA_PROBE_MAX_TRIES;
1535 /* reset and determine device classes */
1536 ap->ops->phy_reset(ap);
1538 for (i = 0; i < ATA_MAX_DEVICES; i++) {
1539 dev = &ap->device[i];
1541 if (!(ap->flags & ATA_FLAG_DISABLED) &&
1542 dev->class != ATA_DEV_UNKNOWN)
1543 classes[dev->devno] = dev->class;
1545 classes[dev->devno] = ATA_DEV_NONE;
1547 dev->class = ATA_DEV_UNKNOWN;
1552 /* after the reset the device state is PIO 0 and the controller
1553 state is undefined. Record the mode */
1555 for (i = 0; i < ATA_MAX_DEVICES; i++)
1556 ap->device[i].pio_mode = XFER_PIO_0;
1558 /* read IDENTIFY page and configure devices */
1559 for (i = 0; i < ATA_MAX_DEVICES; i++) {
1560 dev = &ap->device[i];
1563 dev->class = classes[i];
1565 if (!ata_dev_enabled(dev))
1568 rc = ata_dev_read_id(dev, &dev->class, 1, dev->id);
1572 rc = ata_dev_configure(dev, 1);
1577 /* configure transfer mode */
1578 rc = ata_set_mode(ap, &dev);
1584 for (i = 0; i < ATA_MAX_DEVICES; i++)
1585 if (ata_dev_enabled(&ap->device[i]))
1588 /* no device present, disable port */
1589 ata_port_disable(ap);
1590 ap->ops->port_disable(ap);
1597 tries[dev->devno] = 0;
1600 sata_down_spd_limit(ap);
1603 tries[dev->devno]--;
1604 if (down_xfermask &&
1605 ata_down_xfermask_limit(dev, tries[dev->devno] == 1))
1606 tries[dev->devno] = 0;
1609 if (!tries[dev->devno]) {
1610 ata_down_xfermask_limit(dev, 1);
1611 ata_dev_disable(dev);
1618 * ata_port_probe - Mark port as enabled
1619 * @ap: Port for which we indicate enablement
1621 * Modify @ap data structure such that the system
1622 * thinks that the entire port is enabled.
1624 * LOCKING: host_set lock, or some other form of
1628 void ata_port_probe(struct ata_port *ap)
1630 ap->flags &= ~ATA_FLAG_DISABLED;
1634 * sata_print_link_status - Print SATA link status
1635 * @ap: SATA port to printk link status about
1637 * This function prints link speed and status of a SATA link.
1642 static void sata_print_link_status(struct ata_port *ap)
1644 u32 sstatus, scontrol, tmp;
1646 if (sata_scr_read(ap, SCR_STATUS, &sstatus))
1648 sata_scr_read(ap, SCR_CONTROL, &scontrol);
1650 if (ata_port_online(ap)) {
1651 tmp = (sstatus >> 4) & 0xf;
1652 ata_port_printk(ap, KERN_INFO,
1653 "SATA link up %s (SStatus %X SControl %X)\n",
1654 sata_spd_string(tmp), sstatus, scontrol);
1656 ata_port_printk(ap, KERN_INFO,
1657 "SATA link down (SStatus %X SControl %X)\n",
1663 * __sata_phy_reset - Wake/reset a low-level SATA PHY
1664 * @ap: SATA port associated with target SATA PHY.
1666 * This function issues commands to standard SATA Sxxx
1667 * PHY registers, to wake up the phy (and device), and
1668 * clear any reset condition.
1671 * PCI/etc. bus probe sem.
1674 void __sata_phy_reset(struct ata_port *ap)
1677 unsigned long timeout = jiffies + (HZ * 5);
1679 if (ap->flags & ATA_FLAG_SATA_RESET) {
1680 /* issue phy wake/reset */
1681 sata_scr_write_flush(ap, SCR_CONTROL, 0x301);
1682 /* Couldn't find anything in SATA I/II specs, but
1683 * AHCI-1.1 10.4.2 says at least 1 ms. */
1686 /* phy wake/clear reset */
1687 sata_scr_write_flush(ap, SCR_CONTROL, 0x300);
1689 /* wait for phy to become ready, if necessary */
1692 sata_scr_read(ap, SCR_STATUS, &sstatus);
1693 if ((sstatus & 0xf) != 1)
1695 } while (time_before(jiffies, timeout));
1697 /* print link status */
1698 sata_print_link_status(ap);
1700 /* TODO: phy layer with polling, timeouts, etc. */
1701 if (!ata_port_offline(ap))
1704 ata_port_disable(ap);
1706 if (ap->flags & ATA_FLAG_DISABLED)
1709 if (ata_busy_sleep(ap, ATA_TMOUT_BOOT_QUICK, ATA_TMOUT_BOOT)) {
1710 ata_port_disable(ap);
1714 ap->cbl = ATA_CBL_SATA;
1718 * sata_phy_reset - Reset SATA bus.
1719 * @ap: SATA port associated with target SATA PHY.
1721 * This function resets the SATA bus, and then probes
1722 * the bus for devices.
1725 * PCI/etc. bus probe sem.
1728 void sata_phy_reset(struct ata_port *ap)
1730 __sata_phy_reset(ap);
1731 if (ap->flags & ATA_FLAG_DISABLED)
1737 * ata_dev_pair - return other device on cable
1740 * Obtain the other device on the same cable, or NULL if none
1741 * is present.
1744 struct ata_device *ata_dev_pair(struct ata_device *adev)
1746 struct ata_port *ap = adev->ap;
1747 struct ata_device *pair = &ap->device[1 - adev->devno];
1748 if (!ata_dev_enabled(pair))
1754 * ata_port_disable - Disable port.
1755 * @ap: Port to be disabled.
1757 * Modify @ap data structure such that the system
1758 * thinks that the entire port is disabled, and should
1759 * never attempt to probe or communicate with devices
1762 * LOCKING: host_set lock, or some other form of
1766 void ata_port_disable(struct ata_port *ap)
1768 ap->device[0].class = ATA_DEV_NONE;
1769 ap->device[1].class = ATA_DEV_NONE;
1770 ap->flags |= ATA_FLAG_DISABLED;
1774 * sata_down_spd_limit - adjust SATA spd limit downward
1775 * @ap: Port to adjust SATA spd limit for
1777 * Adjust SATA spd limit of @ap downward. Note that this
1778 * function only adjusts the limit. The change must be applied
1779 * using sata_set_spd().
1782 * Inherited from caller.
1785 * 0 on success, negative errno on failure
1787 int sata_down_spd_limit(struct ata_port *ap)
1789 u32 sstatus, spd, mask;
1792 rc = sata_scr_read(ap, SCR_STATUS, &sstatus);
1796 mask = ap->sata_spd_limit;
1799 highbit = fls(mask) - 1;
1800 mask &= ~(1 << highbit);
1802 spd = (sstatus >> 4) & 0xf;
1806 mask &= (1 << spd) - 1;
1810 ap->sata_spd_limit = mask;
1812 ata_port_printk(ap, KERN_WARNING, "limiting SATA link speed to %s\n",
1813 sata_spd_string(fls(mask)));
1818 static int __sata_set_spd_needed(struct ata_port *ap, u32 *scontrol)
1822 if (ap->sata_spd_limit == UINT_MAX)
1825 limit = fls(ap->sata_spd_limit);
1827 spd = (*scontrol >> 4) & 0xf;
1828 *scontrol = (*scontrol & ~0xf0) | ((limit & 0xf) << 4);
1830 return spd != limit;
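/*
 * Worked example (illustrative): with ap->sata_spd_limit == 1 (1.5 Gbps
 * only), limit = fls(1) = 1, so an SControl of 0x300 becomes
 * (0x300 & ~0xf0) | (1 << 4) = 0x310; the SPD field in bits 7:4 then tells
 * the PHY not to negotiate a faster link speed.
 */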
1834 * sata_set_spd_needed - is SATA spd configuration needed
1835 * @ap: Port in question
1837 * Test whether the spd limit in SControl matches
1838 * @ap->sata_spd_limit. This function is used to determine
1839 * whether hardreset is necessary to apply SATA spd
1843 * Inherited from caller.
1846 * 1 if SATA spd configuration is needed, 0 otherwise.
1848 int sata_set_spd_needed(struct ata_port *ap)
1852 if (sata_scr_read(ap, SCR_CONTROL, &scontrol))
1855 return __sata_set_spd_needed(ap, &scontrol);
1859 * sata_set_spd - set SATA spd according to spd limit
1860 * @ap: Port to set SATA spd for
1862 * Set SATA spd of @ap according to sata_spd_limit.
1865 * Inherited from caller.
1868 * 0 if spd doesn't need to be changed, 1 if spd has been
1869 * changed. Negative errno if SCR registers are inaccessible.
1871 int sata_set_spd(struct ata_port *ap)
1876 if ((rc = sata_scr_read(ap, SCR_CONTROL, &scontrol)))
1879 if (!__sata_set_spd_needed(ap, &scontrol))
1882 if ((rc = sata_scr_write(ap, SCR_CONTROL, scontrol)))
1889 * This mode timing computation functionality is ported over from
1890 * drivers/ide/ide-timing.h and was originally written by Vojtech Pavlik
1893 * PIO 0-5, MWDMA 0-2 and UDMA 0-6 timings (in nanoseconds).
1894 * These were taken from ATA/ATAPI-6 standard, rev 0a, except
1895 * for PIO 5, which is a nonstandard extension and UDMA6, which
1896 * is currently supported only by Maxtor drives.
1899 static const struct ata_timing ata_timing[] = {
1901 { XFER_UDMA_6, 0, 0, 0, 0, 0, 0, 0, 15 },
1902 { XFER_UDMA_5, 0, 0, 0, 0, 0, 0, 0, 20 },
1903 { XFER_UDMA_4, 0, 0, 0, 0, 0, 0, 0, 30 },
1904 { XFER_UDMA_3, 0, 0, 0, 0, 0, 0, 0, 45 },
1906 { XFER_UDMA_2, 0, 0, 0, 0, 0, 0, 0, 60 },
1907 { XFER_UDMA_1, 0, 0, 0, 0, 0, 0, 0, 80 },
1908 { XFER_UDMA_0, 0, 0, 0, 0, 0, 0, 0, 120 },
1910 /* { XFER_UDMA_SLOW, 0, 0, 0, 0, 0, 0, 0, 150 }, */
1912 { XFER_MW_DMA_2, 25, 0, 0, 0, 70, 25, 120, 0 },
1913 { XFER_MW_DMA_1, 45, 0, 0, 0, 80, 50, 150, 0 },
1914 { XFER_MW_DMA_0, 60, 0, 0, 0, 215, 215, 480, 0 },
1916 { XFER_SW_DMA_2, 60, 0, 0, 0, 120, 120, 240, 0 },
1917 { XFER_SW_DMA_1, 90, 0, 0, 0, 240, 240, 480, 0 },
1918 { XFER_SW_DMA_0, 120, 0, 0, 0, 480, 480, 960, 0 },
1920 /* { XFER_PIO_5, 20, 50, 30, 100, 50, 30, 100, 0 }, */
1921 { XFER_PIO_4, 25, 70, 25, 120, 70, 25, 120, 0 },
1922 { XFER_PIO_3, 30, 80, 70, 180, 80, 70, 180, 0 },
1924 { XFER_PIO_2, 30, 290, 40, 330, 100, 90, 240, 0 },
1925 { XFER_PIO_1, 50, 290, 93, 383, 125, 100, 383, 0 },
1926 { XFER_PIO_0, 70, 290, 240, 600, 165, 150, 600, 0 },
1928 /* { XFER_PIO_SLOW, 120, 290, 240, 960, 290, 240, 960, 0 }, */
1933 #define ENOUGH(v,unit) (((v)-1)/(unit)+1)
1934 #define EZ(v,unit) ((v)?ENOUGH(v,unit):0)
1936 static void ata_timing_quantize(const struct ata_timing *t, struct ata_timing *q, int T, int UT)
1938 q->setup = EZ(t->setup * 1000, T);
1939 q->act8b = EZ(t->act8b * 1000, T);
1940 q->rec8b = EZ(t->rec8b * 1000, T);
1941 q->cyc8b = EZ(t->cyc8b * 1000, T);
1942 q->active = EZ(t->active * 1000, T);
1943 q->recover = EZ(t->recover * 1000, T);
1944 q->cycle = EZ(t->cycle * 1000, T);
1945 q->udma = EZ(t->udma * 1000, UT);
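/*
 * Worked example (illustrative): ENOUGH() is a round-up division, so with a
 * 33 MHz bus clock (T = 30000, in the same picosecond units as
 * t->setup * 1000) a 25ns setup time quantizes to ENOUGH(25000, 30000) = 1
 * clock and a 70ns active time to ENOUGH(70000, 30000) = 3 clocks; EZ()
 * simply keeps zero entries (fields a mode does not use) at zero.
 */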
1948 void ata_timing_merge(const struct ata_timing *a, const struct ata_timing *b,
1949 struct ata_timing *m, unsigned int what)
1951 if (what & ATA_TIMING_SETUP ) m->setup = max(a->setup, b->setup);
1952 if (what & ATA_TIMING_ACT8B ) m->act8b = max(a->act8b, b->act8b);
1953 if (what & ATA_TIMING_REC8B ) m->rec8b = max(a->rec8b, b->rec8b);
1954 if (what & ATA_TIMING_CYC8B ) m->cyc8b = max(a->cyc8b, b->cyc8b);
1955 if (what & ATA_TIMING_ACTIVE ) m->active = max(a->active, b->active);
1956 if (what & ATA_TIMING_RECOVER) m->recover = max(a->recover, b->recover);
1957 if (what & ATA_TIMING_CYCLE ) m->cycle = max(a->cycle, b->cycle);
1958 if (what & ATA_TIMING_UDMA ) m->udma = max(a->udma, b->udma);
1961 static const struct ata_timing* ata_timing_find_mode(unsigned short speed)
1963 const struct ata_timing *t;
1965 for (t = ata_timing; t->mode != speed; t++)
1966 if (t->mode == 0xFF)
1971 int ata_timing_compute(struct ata_device *adev, unsigned short speed,
1972 struct ata_timing *t, int T, int UT)
1974 const struct ata_timing *s;
1975 struct ata_timing p;
1981 if (!(s = ata_timing_find_mode(speed)))
1984 memcpy(t, s, sizeof(*s));
1987 * If the drive is an EIDE drive, it can tell us it needs extended
1988 * PIO/MW_DMA cycle timing.
1991 if (adev->id[ATA_ID_FIELD_VALID] & 2) { /* EIDE drive */
1992 memset(&p, 0, sizeof(p));
1993 if(speed >= XFER_PIO_0 && speed <= XFER_SW_DMA_0) {
1994 if (speed <= XFER_PIO_2) p.cycle = p.cyc8b = adev->id[ATA_ID_EIDE_PIO];
1995 else p.cycle = p.cyc8b = adev->id[ATA_ID_EIDE_PIO_IORDY];
1996 } else if(speed >= XFER_MW_DMA_0 && speed <= XFER_MW_DMA_2) {
1997 p.cycle = adev->id[ATA_ID_EIDE_DMA_MIN];
1999 ata_timing_merge(&p, t, t, ATA_TIMING_CYCLE | ATA_TIMING_CYC8B);
2003 * Convert the timing to bus clock counts.
2006 ata_timing_quantize(t, t, T, UT);
2009 * Even in DMA/UDMA modes we still use PIO access for IDENTIFY,
2010 * S.M.A.R.T. and some other commands. We have to ensure that the
2011 * DMA cycle timing is slower than or equal to the fastest PIO timing.
2014 if (speed > XFER_PIO_4) {
2015 ata_timing_compute(adev, adev->pio_mode, &p, T, UT);
2016 ata_timing_merge(&p, t, t, ATA_TIMING_ALL);
2020 * Lengthen active & recovery time so that cycle time is correct.
2023 if (t->act8b + t->rec8b < t->cyc8b) {
2024 t->act8b += (t->cyc8b - (t->act8b + t->rec8b)) / 2;
2025 t->rec8b = t->cyc8b - t->act8b;
2028 if (t->active + t->recover < t->cycle) {
2029 t->active += (t->cycle - (t->active + t->recover)) / 2;
2030 t->recover = t->cycle - t->active;
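/*
 * Usage sketch (illustrative; "my_set_piomode" and the 30000 (ps) bus clock
 * are assumptions, not part of libata): a PATA LLDD typically feeds the
 * result of ata_timing_compute() into its chip-specific timing registers:
 *
 *	static void my_set_piomode(struct ata_port *ap, struct ata_device *adev)
 *	{
 *		struct ata_timing t;
 *
 *		if (ata_timing_compute(adev, adev->pio_mode, &t, 30000, 1000))
 *			return;
 *		...program t.active and t.recover into the controller...
 *	}
 */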
2037 * ata_down_xfermask_limit - adjust dev xfer masks downward
2038 * @dev: Device to adjust xfer masks
2039 * @force_pio0: Force PIO0
2041 * Adjust xfer masks of @dev downward. Note that this function
2042 * does not apply the change. Invoking ata_set_mode() afterwards
2043 * will apply the limit.
2046 * Inherited from caller.
2049 * 0 on success, negative errno on failure
2051 int ata_down_xfermask_limit(struct ata_device *dev, int force_pio0)
2053 unsigned long xfer_mask;
2056 xfer_mask = ata_pack_xfermask(dev->pio_mask, dev->mwdma_mask,
2061 /* don't gear down to MWDMA from UDMA, go directly to PIO */
2062 if (xfer_mask & ATA_MASK_UDMA)
2063 xfer_mask &= ~ATA_MASK_MWDMA;
2065 highbit = fls(xfer_mask) - 1;
2066 xfer_mask &= ~(1 << highbit);
2068 xfer_mask &= 1 << ATA_SHIFT_PIO;
2072 ata_unpack_xfermask(xfer_mask, &dev->pio_mask, &dev->mwdma_mask,
2075 ata_dev_printk(dev, KERN_WARNING, "limiting speed to %s\n",
2076 ata_mode_string(xfer_mask));
2084 static int ata_dev_set_mode(struct ata_device *dev)
2086 unsigned int err_mask;
2089 dev->flags &= ~ATA_DFLAG_PIO;
2090 if (dev->xfer_shift == ATA_SHIFT_PIO)
2091 dev->flags |= ATA_DFLAG_PIO;
2093 err_mask = ata_dev_set_xfermode(dev);
2095 ata_dev_printk(dev, KERN_ERR, "failed to set xfermode "
2096 "(err_mask=0x%x)\n", err_mask);
2100 rc = ata_dev_revalidate(dev, 0);
2104 DPRINTK("xfer_shift=%u, xfer_mode=0x%x\n",
2105 dev->xfer_shift, (int)dev->xfer_mode);
2107 ata_dev_printk(dev, KERN_INFO, "configured for %s\n",
2108 ata_mode_string(ata_xfer_mode2mask(dev->xfer_mode)));
2113 * ata_set_mode - Program timings and issue SET FEATURES - XFER
2114 * @ap: port on which timings will be programmed
2115 * @r_failed_dev: out parameter for failed device
2117 * Set ATA device disk transfer mode (PIO3, UDMA6, etc.). If
2118 * ata_set_mode() fails, pointer to the failing device is
2119 * returned in @r_failed_dev.
2122 * PCI/etc. bus probe sem.
2125 * 0 on success, negative errno otherwise
2127 int ata_set_mode(struct ata_port *ap, struct ata_device **r_failed_dev)
2129 struct ata_device *dev;
2130 int i, rc = 0, used_dma = 0, found = 0;
2132 /* has private set_mode? */
2133 if (ap->ops->set_mode) {
2134 /* FIXME: make ->set_mode handle no device case and
2135 * return error code and failing device on failure.
2137 for (i = 0; i < ATA_MAX_DEVICES; i++) {
2138 if (ata_dev_enabled(&ap->device[i])) {
2139 ap->ops->set_mode(ap);
2146 /* step 1: calculate xfer_mask */
2147 for (i = 0; i < ATA_MAX_DEVICES; i++) {
2148 unsigned int pio_mask, dma_mask;
2150 dev = &ap->device[i];
2152 if (!ata_dev_enabled(dev))
2155 ata_dev_xfermask(dev);
2157 pio_mask = ata_pack_xfermask(dev->pio_mask, 0, 0);
2158 dma_mask = ata_pack_xfermask(0, dev->mwdma_mask, dev->udma_mask);
2159 dev->pio_mode = ata_xfer_mask2mode(pio_mask);
2160 dev->dma_mode = ata_xfer_mask2mode(dma_mask);
2169 /* step 2: always set host PIO timings */
2170 for (i = 0; i < ATA_MAX_DEVICES; i++) {
2171 dev = &ap->device[i];
2172 if (!ata_dev_enabled(dev))
2175 if (!dev->pio_mode) {
2176 ata_dev_printk(dev, KERN_WARNING, "no PIO support\n");
2181 dev->xfer_mode = dev->pio_mode;
2182 dev->xfer_shift = ATA_SHIFT_PIO;
2183 if (ap->ops->set_piomode)
2184 ap->ops->set_piomode(ap, dev);
2187 /* step 3: set host DMA timings */
2188 for (i = 0; i < ATA_MAX_DEVICES; i++) {
2189 dev = &ap->device[i];
2191 if (!ata_dev_enabled(dev) || !dev->dma_mode)
2194 dev->xfer_mode = dev->dma_mode;
2195 dev->xfer_shift = ata_xfer_mode2shift(dev->dma_mode);
2196 if (ap->ops->set_dmamode)
2197 ap->ops->set_dmamode(ap, dev);
2200 /* step 4: update devices' xfer mode */
2201 for (i = 0; i < ATA_MAX_DEVICES; i++) {
2202 dev = &ap->device[i];
2204 if (!ata_dev_enabled(dev))
2207 rc = ata_dev_set_mode(dev);
2212 /* Record simplex status. If we selected DMA then the other
2213 * host channels are not permitted to do so.
2215 if (used_dma && (ap->host_set->flags & ATA_HOST_SIMPLEX))
2216 ap->host_set->simplex_claimed = 1;
2218 /* step 5: chip-specific finalisation */
2219 if (ap->ops->post_set_mode)
2220 ap->ops->post_set_mode(ap);
2224 *r_failed_dev = dev;
2229 * ata_tf_to_host - issue ATA taskfile to host controller
2230 * @ap: port to which command is being issued
2231 * @tf: ATA taskfile register set
2233 * Issues ATA taskfile register set to ATA host controller,
2234 * with proper synchronization with interrupt handler and
2238 * spin_lock_irqsave(host_set lock)
2241 static inline void ata_tf_to_host(struct ata_port *ap,
2242 const struct ata_taskfile *tf)
2244 ap->ops->tf_load(ap, tf);
2245 ap->ops->exec_command(ap, tf);
2249 * ata_busy_sleep - sleep until BSY clears, or timeout
2250 * @ap: port containing status register to be polled
2251 * @tmout_pat: impatience timeout
2252 * @tmout: overall timeout
2254 * Sleep until ATA Status register bit BSY clears,
2255 * or a timeout occurs.
2260 unsigned int ata_busy_sleep (struct ata_port *ap,
2261 unsigned long tmout_pat, unsigned long tmout)
2263 unsigned long timer_start, timeout;
2266 status = ata_busy_wait(ap, ATA_BUSY, 300);
2267 timer_start = jiffies;
2268 timeout = timer_start + tmout_pat;
2269 while ((status & ATA_BUSY) && (time_before(jiffies, timeout))) {
2271 status = ata_busy_wait(ap, ATA_BUSY, 3);
2274 if (status & ATA_BUSY)
2275 ata_port_printk(ap, KERN_WARNING,
2276 "port is slow to respond, please be patient\n");
2278 timeout = timer_start + tmout;
2279 while ((status & ATA_BUSY) && (time_before(jiffies, timeout))) {
2281 status = ata_chk_status(ap);
2284 if (status & ATA_BUSY) {
2285 ata_port_printk(ap, KERN_ERR, "port failed to respond "
2286 "(%lu secs)\n", tmout / HZ);
2293 static void ata_bus_post_reset(struct ata_port *ap, unsigned int devmask)
2295 struct ata_ioports *ioaddr = &ap->ioaddr;
2296 unsigned int dev0 = devmask & (1 << 0);
2297 unsigned int dev1 = devmask & (1 << 1);
2298 unsigned long timeout;
2300 /* if device 0 was found in ata_devchk, wait for its
2304 ata_busy_sleep(ap, ATA_TMOUT_BOOT_QUICK, ATA_TMOUT_BOOT);
2306 /* if device 1 was found in ata_devchk, wait for
2307 * register access, then wait for BSY to clear
2309 timeout = jiffies + ATA_TMOUT_BOOT;
2313 ap->ops->dev_select(ap, 1);
2314 if (ap->flags & ATA_FLAG_MMIO) {
2315 nsect = readb((void __iomem *) ioaddr->nsect_addr);
2316 lbal = readb((void __iomem *) ioaddr->lbal_addr);
2318 nsect = inb(ioaddr->nsect_addr);
2319 lbal = inb(ioaddr->lbal_addr);
2321 if ((nsect == 1) && (lbal == 1))
2323 if (time_after(jiffies, timeout)) {
2327 msleep(50); /* give drive a breather */
2330 ata_busy_sleep(ap, ATA_TMOUT_BOOT_QUICK, ATA_TMOUT_BOOT);
2332 /* is all this really necessary? */
2333 ap->ops->dev_select(ap, 0);
2335 ap->ops->dev_select(ap, 1);
2337 ap->ops->dev_select(ap, 0);
2340 static unsigned int ata_bus_softreset(struct ata_port *ap,
2341 unsigned int devmask)
2343 struct ata_ioports *ioaddr = &ap->ioaddr;
2345 DPRINTK("ata%u: bus reset via SRST\n", ap->id);
2347 /* software reset. causes dev0 to be selected */
2348 if (ap->flags & ATA_FLAG_MMIO) {
2349 writeb(ap->ctl, (void __iomem *) ioaddr->ctl_addr);
2350 udelay(20); /* FIXME: flush */
2351 writeb(ap->ctl | ATA_SRST, (void __iomem *) ioaddr->ctl_addr);
2352 udelay(20); /* FIXME: flush */
2353 writeb(ap->ctl, (void __iomem *) ioaddr->ctl_addr);
2355 outb(ap->ctl, ioaddr->ctl_addr);
2357 outb(ap->ctl | ATA_SRST, ioaddr->ctl_addr);
2359 outb(ap->ctl, ioaddr->ctl_addr);
2362 /* spec mandates ">= 2ms" before checking status.
2363 * We wait 150ms, because that was the magic delay used for
2364 * ATAPI devices in Hale Landis's ATADRVR, for the period of time
2365 * between when the ATA command register is written, and then
2366 * status is checked. Because waiting for "a while" before
2367 * checking status is fine, post SRST, we perform this magic
2368 * delay here as well.
2370 * Old drivers/ide uses the 2ms rule and then waits for ready
2374 /* Before we perform post reset processing we want to see if
2375 * the bus shows 0xFF because the odd clown forgets the D7
2376 * pulldown resistor.
2378 if (ata_check_status(ap) == 0xFF) {
2379 ata_port_printk(ap, KERN_ERR, "SRST failed (status 0xFF)\n");
2380 return AC_ERR_OTHER;
2383 ata_bus_post_reset(ap, devmask);
2389 * ata_bus_reset - reset host port and associated ATA channel
2390 * @ap: port to reset
2392 * This is typically the first time we actually start issuing
2393 * commands to the ATA channel. We wait for BSY to clear, then
2394 * issue EXECUTE DEVICE DIAGNOSTIC command, polling for its
2395 * result. Determine what devices, if any, are on the channel
2396 * by looking at the device 0/1 error register. Look at the signature
2397 * stored in each device's taskfile registers, to determine if
2398 * the device is ATA or ATAPI.
2401 * PCI/etc. bus probe sem.
2402 * Obtains host_set lock.
2405 * Sets ATA_FLAG_DISABLED if bus reset fails.
2408 void ata_bus_reset(struct ata_port *ap)
2410 struct ata_ioports *ioaddr = &ap->ioaddr;
2411 unsigned int slave_possible = ap->flags & ATA_FLAG_SLAVE_POSS;
2413 unsigned int dev0, dev1 = 0, devmask = 0;
2415 DPRINTK("ENTER, host %u, port %u\n", ap->id, ap->port_no);
2417 /* determine if device 0/1 are present */
2418 if (ap->flags & ATA_FLAG_SATA_RESET)
2421 dev0 = ata_devchk(ap, 0);
2423 dev1 = ata_devchk(ap, 1);
2427 devmask |= (1 << 0);
2429 devmask |= (1 << 1);
2431 /* select device 0 again */
2432 ap->ops->dev_select(ap, 0);
2434 /* issue bus reset */
2435 if (ap->flags & ATA_FLAG_SRST)
2436 if (ata_bus_softreset(ap, devmask))
2440 * determine by signature whether we have ATA or ATAPI devices
2442 ap->device[0].class = ata_dev_try_classify(ap, 0, &err);
2443 if ((slave_possible) && (err != 0x81))
2444 ap->device[1].class = ata_dev_try_classify(ap, 1, &err);
2446 /* re-enable interrupts */
2447 if (ap->ioaddr.ctl_addr) /* FIXME: hack. create a hook instead */
2450 /* is double-select really necessary? */
2451 if (ap->device[1].class != ATA_DEV_NONE)
2452 ap->ops->dev_select(ap, 1);
2453 if (ap->device[0].class != ATA_DEV_NONE)
2454 ap->ops->dev_select(ap, 0);
2456 /* if no devices were detected, disable this port */
2457 if ((ap->device[0].class == ATA_DEV_NONE) &&
2458 (ap->device[1].class == ATA_DEV_NONE))
2461 if (ap->flags & (ATA_FLAG_SATA_RESET | ATA_FLAG_SRST)) {
2462 /* set up device control for ATA_FLAG_SATA_RESET */
2463 if (ap->flags & ATA_FLAG_MMIO)
2464 writeb(ap->ctl, (void __iomem *) ioaddr->ctl_addr);
2466 outb(ap->ctl, ioaddr->ctl_addr);
2473 ata_port_printk(ap, KERN_ERR, "disabling port\n");
2474 ap->ops->port_disable(ap);
2480 * sata_phy_debounce - debounce SATA phy status
2481 * @ap: ATA port to debounce SATA phy status for
2482 * @params: timing parameters { interval, duration, timeout } in msec
2484 * Make sure SStatus of @ap reaches a stable state, determined by
2485 * holding the same value (where DET is not 1) for @duration, polled
2486 * every @interval, before @timeout. The timeout constrains the
2487 * beginning of the stable state. Because DET gets stuck at 1 on
2488 * some controllers after hot unplugging, this function waits
2489 * until the timeout and then returns 0 if DET is stable at 1.
2492 * Kernel thread context (may sleep)
2495 * 0 on success, -errno on failure.
2497 int sata_phy_debounce(struct ata_port *ap, const unsigned long *params)
2499 unsigned long interval_msec = params[0];
2500 unsigned long duration = params[1] * HZ / 1000;
2501 unsigned long timeout = jiffies + params[2] * HZ / 1000;
2502 unsigned long last_jiffies;
2506 if ((rc = sata_scr_read(ap, SCR_STATUS, &cur)))
2511 last_jiffies = jiffies;
2514 msleep(interval_msec);
2515 if ((rc = sata_scr_read(ap, SCR_STATUS, &cur)))
2521 if (cur == 1 && time_before(jiffies, timeout))
2523 if (time_after(jiffies, last_jiffies + duration))
2528 /* unstable, start over */
2530 last_jiffies = jiffies;
2533 if (time_after(jiffies, timeout))
2539 * sata_phy_resume - resume SATA phy
2540 * @ap: ATA port to resume SATA phy for
2541 * @params: timing parameters { interval, duration, timeout } in msec
2543 * Resume SATA phy of @ap and debounce it.
2546 * Kernel thread context (may sleep)
2549 * 0 on success, -errno on failure.
2551 int sata_phy_resume(struct ata_port *ap, const unsigned long *params)
2556 if ((rc = sata_scr_read(ap, SCR_CONTROL, &scontrol)))
2559 scontrol = (scontrol & 0x0f0) | 0x300;
2561 if ((rc = sata_scr_write(ap, SCR_CONTROL, scontrol)))
2564 /* Some PHYs react badly if SStatus is pounded immediately
2565 * after resuming. Delay 200ms before debouncing.
2569 return sata_phy_debounce(ap, params);
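/*
 * Illustrative sketch, not part of the original file: a minimal caller of
 * sata_phy_resume().  The helper name example_resume_link() and the choice
 * of sata_deb_timing_eh are assumptions made for demonstration only.
 */
static int example_resume_link(struct ata_port *ap)
{
	int rc;

	/* bring the phy out of power-down and debounce SStatus */
	rc = sata_phy_resume(ap, sata_deb_timing_eh);
	if (rc && rc != -EOPNOTSUPP)
		return rc;		/* SCR access failed */

	/* -EOPNOTSUPP means no SCR access; assume the link is usable */
	return ata_port_offline(ap) ? -ENODEV : 0;
}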
2572 static void ata_wait_spinup(struct ata_port *ap)
2574 struct ata_eh_context *ehc = &ap->eh_context;
2575 unsigned long end, secs;
2578 /* first, debounce phy if SATA */
2579 if (ap->cbl == ATA_CBL_SATA) {
2580 rc = sata_phy_debounce(ap, sata_deb_timing_eh);
2582 /* if debounced successfully and offline, no need to wait */
2583 if ((rc == 0 || rc == -EOPNOTSUPP) && ata_port_offline(ap))
2587 /* okay, let's give the drive time to spin up */
2588 end = ehc->i.hotplug_timestamp + ATA_SPINUP_WAIT * HZ / 1000;
2589 secs = ((end - jiffies) + HZ - 1) / HZ;
2591 if (time_after(jiffies, end))
2595 ata_port_printk(ap, KERN_INFO, "waiting for device to spin up "
2596 "(%lu secs)\n", secs);
2598 schedule_timeout_uninterruptible(end - jiffies);
2602 * ata_std_prereset - prepare for reset
2603 * @ap: ATA port to be reset
2605 * @ap is about to be reset. Initialize it.
2608 * Kernel thread context (may sleep)
2611 * 0 on success, -errno otherwise.
2613 int ata_std_prereset(struct ata_port *ap)
2615 struct ata_eh_context *ehc = &ap->eh_context;
2616 const unsigned long *timing;
2620 if (ehc->i.flags & ATA_EHI_HOTPLUGGED) {
2621 if (ap->flags & ATA_FLAG_HRST_TO_RESUME)
2622 ehc->i.action |= ATA_EH_HARDRESET;
2623 if (ap->flags & ATA_FLAG_SKIP_D2H_BSY)
2624 ata_wait_spinup(ap);
2627 /* if we're about to do hardreset, nothing more to do */
2628 if (ehc->i.action & ATA_EH_HARDRESET)
2631 /* if SATA, resume phy */
2632 if (ap->cbl == ATA_CBL_SATA) {
2633 if (ap->flags & ATA_FLAG_LOADING)
2634 timing = sata_deb_timing_boot;
2636 timing = sata_deb_timing_eh;
2638 rc = sata_phy_resume(ap, timing);
2639 if (rc && rc != -EOPNOTSUPP) {
2640 /* phy resume failed */
2641 ata_port_printk(ap, KERN_WARNING, "failed to resume "
2642 "link for reset (errno=%d)\n", rc);
2647 /* Wait for !BSY if the controller can wait for the first D2H
2648 * Reg FIS and we don't know that no device is attached.
2650 if (!(ap->flags & ATA_FLAG_SKIP_D2H_BSY) && !ata_port_offline(ap))
2651 ata_busy_sleep(ap, ATA_TMOUT_BOOT_QUICK, ATA_TMOUT_BOOT);
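/*
 * Illustrative sketch, not part of the original file: how the standard
 * prereset/softreset/postreset helpers chain together.  Real drivers let
 * the EH core drive this sequence; the helper name
 * example_std_reset_sequence() is an assumption.
 */
static int example_std_reset_sequence(struct ata_port *ap)
{
	unsigned int classes[ATA_MAX_DEVICES] = { ATA_DEV_UNKNOWN };
	int rc;

	rc = ata_std_prereset(ap);		/* wake phy, wait for !BSY */
	if (rc)
		return rc;

	rc = ata_std_softreset(ap, classes);	/* SRST and classify devices */
	if (rc)
		return rc;

	ata_std_postreset(ap, classes);		/* clear SError, set up devctl */
	return 0;
}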
2657 * ata_std_softreset - reset host port via ATA SRST
2658 * @ap: port to reset
2659 * @classes: resulting classes of attached devices
2661 * Reset host port using ATA SRST.
2664 * Kernel thread context (may sleep)
2667 * 0 on success, -errno otherwise.
2669 int ata_std_softreset(struct ata_port *ap, unsigned int *classes)
2671 unsigned int slave_possible = ap->flags & ATA_FLAG_SLAVE_POSS;
2672 unsigned int devmask = 0, err_mask;
2677 if (ata_port_offline(ap)) {
2678 classes[0] = ATA_DEV_NONE;
2682 /* determine if device 0/1 are present */
2683 if (ata_devchk(ap, 0))
2684 devmask |= (1 << 0);
2685 if (slave_possible && ata_devchk(ap, 1))
2686 devmask |= (1 << 1);
2688 /* select device 0 again */
2689 ap->ops->dev_select(ap, 0);
2691 /* issue bus reset */
2692 DPRINTK("about to softreset, devmask=%x\n", devmask);
2693 err_mask = ata_bus_softreset(ap, devmask);
2695 ata_port_printk(ap, KERN_ERR, "SRST failed (err_mask=0x%x)\n",
2700 /* determine by signature whether we have ATA or ATAPI devices */
2701 classes[0] = ata_dev_try_classify(ap, 0, &err);
2702 if (slave_possible && err != 0x81)
2703 classes[1] = ata_dev_try_classify(ap, 1, &err);
2706 DPRINTK("EXIT, classes[0]=%u [1]=%u\n", classes[0], classes[1]);
2711 * sata_std_hardreset - reset host port via SATA phy reset
2712 * @ap: port to reset
2713 * @class: resulting class of attached device
2715 * SATA phy-reset host port using DET bits of SControl register.
2718 * Kernel thread context (may sleep)
2721 * 0 on success, -errno otherwise.
2723 int sata_std_hardreset(struct ata_port *ap, unsigned int *class)
2730 if (sata_set_spd_needed(ap)) {
2731 /* SATA spec says nothing about how to reconfigure
2732 * spd. To be on the safe side, turn off phy during
2733 * reconfiguration. This works for at least ICH7 AHCI
2736 if ((rc = sata_scr_read(ap, SCR_CONTROL, &scontrol)))
2739 scontrol = (scontrol & 0x0f0) | 0x302;
2741 if ((rc = sata_scr_write(ap, SCR_CONTROL, scontrol)))
2747 /* issue phy wake/reset */
2748 if ((rc = sata_scr_read(ap, SCR_CONTROL, &scontrol)))
2751 scontrol = (scontrol & 0x0f0) | 0x301;
2753 if ((rc = sata_scr_write_flush(ap, SCR_CONTROL, scontrol)))
2756 /* Couldn't find anything in SATA I/II specs, but AHCI-1.1
2757 * 10.4.2 says at least 1 ms.
2761 /* bring phy back */
2762 sata_phy_resume(ap, sata_deb_timing_eh);
2764 /* TODO: phy layer with polling, timeouts, etc. */
2765 if (ata_port_offline(ap)) {
2766 *class = ATA_DEV_NONE;
2767 DPRINTK("EXIT, link offline\n");
2771 if (ata_busy_sleep(ap, ATA_TMOUT_BOOT_QUICK, ATA_TMOUT_BOOT)) {
2772 ata_port_printk(ap, KERN_ERR,
2773 "COMRESET failed (device not ready)\n");
2777 ap->ops->dev_select(ap, 0); /* probably unnecessary */
2779 *class = ata_dev_try_classify(ap, 0, NULL);
2781 DPRINTK("EXIT, class=%u\n", *class);
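/*
 * Illustrative sketch, not part of the original file: the SControl value
 * written above for COMRESET, decoded.  Bits 3:0 (DET) = 0x1 request
 * interface initialization, bits 7:4 (SPD) are preserved by the 0x0f0
 * mask, and bits 11:8 (IPM) = 0x3 disable partial/slumber transitions.
 * The helper name is an assumption.
 */
static inline u32 example_scontrol_comreset(u32 scontrol)
{
	return (scontrol & 0x0f0) | 0x301;
}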
2786 * ata_std_postreset - standard postreset callback
2787 * @ap: the target ata_port
2788 * @classes: classes of attached devices
2790 * This function is invoked after a successful reset. Note that
2791 * the device might have been reset more than once using
2792 * different reset methods before postreset is invoked.
2795 * Kernel thread context (may sleep)
2797 void ata_std_postreset(struct ata_port *ap, unsigned int *classes)
2803 /* print link status */
2804 sata_print_link_status(ap);
2807 if (sata_scr_read(ap, SCR_ERROR, &serror) == 0)
2808 sata_scr_write(ap, SCR_ERROR, serror);
2810 /* re-enable interrupts */
2811 if (!ap->ops->error_handler) {
2812 /* FIXME: hack. create a hook instead */
2813 if (ap->ioaddr.ctl_addr)
2817 /* is double-select really necessary? */
2818 if (classes[0] != ATA_DEV_NONE)
2819 ap->ops->dev_select(ap, 1);
2820 if (classes[1] != ATA_DEV_NONE)
2821 ap->ops->dev_select(ap, 0);
2823 /* bail out if no device is present */
2824 if (classes[0] == ATA_DEV_NONE && classes[1] == ATA_DEV_NONE) {
2825 DPRINTK("EXIT, no device\n");
2829 /* set up device control */
2830 if (ap->ioaddr.ctl_addr) {
2831 if (ap->flags & ATA_FLAG_MMIO)
2832 writeb(ap->ctl, (void __iomem *) ap->ioaddr.ctl_addr);
2834 outb(ap->ctl, ap->ioaddr.ctl_addr);
2841 * ata_dev_same_device - Determine whether new ID matches configured device
2842 * @dev: device to compare against
2843 * @new_class: class of the new device
2844 * @new_id: IDENTIFY page of the new device
2846 * Compare @new_class and @new_id against @dev and determine
2847 * whether @dev is the device indicated by @new_class and @new_id.
2854 * 1 if @dev matches @new_class and @new_id, 0 otherwise.
2856 static int ata_dev_same_device(struct ata_device *dev, unsigned int new_class,
2859 const u16 *old_id = dev->id;
2860 unsigned char model[2][41], serial[2][21];
2863 if (dev->class != new_class) {
2864 ata_dev_printk(dev, KERN_INFO, "class mismatch %d != %d\n",
2865 dev->class, new_class);
2869 ata_id_c_string(old_id, model[0], ATA_ID_PROD_OFS, sizeof(model[0]));
2870 ata_id_c_string(new_id, model[1], ATA_ID_PROD_OFS, sizeof(model[1]));
2871 ata_id_c_string(old_id, serial[0], ATA_ID_SERNO_OFS, sizeof(serial[0]));
2872 ata_id_c_string(new_id, serial[1], ATA_ID_SERNO_OFS, sizeof(serial[1]));
2873 new_n_sectors = ata_id_n_sectors(new_id);
2875 if (strcmp(model[0], model[1])) {
2876 ata_dev_printk(dev, KERN_INFO, "model number mismatch "
2877 "'%s' != '%s'\n", model[0], model[1]);
2881 if (strcmp(serial[0], serial[1])) {
2882 ata_dev_printk(dev, KERN_INFO, "serial number mismatch "
2883 "'%s' != '%s'\n", serial[0], serial[1]);
2887 if (dev->class == ATA_DEV_ATA && dev->n_sectors != new_n_sectors) {
2888 ata_dev_printk(dev, KERN_INFO, "n_sectors mismatch "
2890 (unsigned long long)dev->n_sectors,
2891 (unsigned long long)new_n_sectors);
2899 * ata_dev_revalidate - Revalidate ATA device
2900 * @dev: device to revalidate
2901 * @post_reset: is this revalidation after reset?
2903 * Re-read IDENTIFY page and make sure @dev is still attached to the port.
2907 * Kernel thread context (may sleep)
2910 * 0 on success, negative errno otherwise
2912 int ata_dev_revalidate(struct ata_device *dev, int post_reset)
2914 unsigned int class = dev->class;
2915 u16 *id = (void *)dev->ap->sector_buf;
2918 if (!ata_dev_enabled(dev)) {
2924 rc = ata_dev_read_id(dev, &class, post_reset, id);
2928 /* is the device still there? */
2929 if (!ata_dev_same_device(dev, class, id)) {
2934 memcpy(dev->id, id, sizeof(id[0]) * ATA_ID_WORDS);
2936 /* configure device according to the new ID */
2937 rc = ata_dev_configure(dev, 0);
2942 ata_dev_printk(dev, KERN_ERR, "revalidation failed (errno=%d)\n", rc);
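/*
 * Illustrative sketch, not part of the original file: revalidating every
 * enabled device on a port and disabling the ones that fail.  The helper
 * name example_revalidate_port() is an assumption.
 */
static void example_revalidate_port(struct ata_port *ap)
{
	int i;

	for (i = 0; i < ATA_MAX_DEVICES; i++) {
		struct ata_device *dev = &ap->device[i];

		if (!ata_dev_enabled(dev))
			continue;

		/* re-read IDENTIFY (not following a reset) and reconfigure */
		if (ata_dev_revalidate(dev, 0))
			ata_dev_disable(dev);
	}
}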
2946 static const char * const ata_dma_blacklist [] = {
2947 "WDC AC11000H", NULL,
2948 "WDC AC22100H", NULL,
2949 "WDC AC32500H", NULL,
2950 "WDC AC33100H", NULL,
2951 "WDC AC31600H", NULL,
2952 "WDC AC32100H", "24.09P07",
2953 "WDC AC23200L", "21.10N21",
2954 "Compaq CRD-8241B", NULL,
2959 "SanDisk SDP3B", NULL,
2960 "SanDisk SDP3B-64", NULL,
2961 "SANYO CD-ROM CRD", NULL,
2962 "HITACHI CDR-8", NULL,
2963 "HITACHI CDR-8335", NULL,
2964 "HITACHI CDR-8435", NULL,
2965 "Toshiba CD-ROM XM-6202B", NULL,
2966 "TOSHIBA CD-ROM XM-1702BC", NULL,
2968 "E-IDE CD-ROM CR-840", NULL,
2969 "CD-ROM Drive/F5A", NULL,
2970 "WPI CDD-820", NULL,
2971 "SAMSUNG CD-ROM SC-148C", NULL,
2972 "SAMSUNG CD-ROM SC", NULL,
2973 "SanDisk SDP3B-64", NULL,
2974 "ATAPI CD-ROM DRIVE 40X MAXIMUM",NULL,
2975 "_NEC DV5800A", NULL,
2976 "SAMSUNG CD-ROM SN-124", "N001"
2979 static int ata_strim(char *s, size_t len)
2981 len = strnlen(s, len);
2983 /* ATAPI specifies that empty space is blank-filled; remove blanks */
2984 while ((len > 0) && (s[len - 1] == ' ')) {
2991 static int ata_dma_blacklisted(const struct ata_device *dev)
2993 unsigned char model_num[40];
2994 unsigned char model_rev[16];
2995 unsigned int nlen, rlen;
2998 /* We don't support polling DMA.
2999 * DMA blacklist those ATAPI devices with CDB-intr (and use PIO)
3000 * if the LLDD handles only interrupts in the HSM_ST_LAST state.
3002 if ((dev->ap->flags & ATA_FLAG_PIO_POLLING) &&
3003 (dev->flags & ATA_DFLAG_CDB_INTR))
3006 ata_id_string(dev->id, model_num, ATA_ID_PROD_OFS,
3008 ata_id_string(dev->id, model_rev, ATA_ID_FW_REV_OFS,
3010 nlen = ata_strim(model_num, sizeof(model_num));
3011 rlen = ata_strim(model_rev, sizeof(model_rev));
3013 for (i = 0; i < ARRAY_SIZE(ata_dma_blacklist); i += 2) {
3014 if (!strncmp(ata_dma_blacklist[i], model_num, nlen)) {
3015 if (ata_dma_blacklist[i+1] == NULL)
3017 if (!strncmp(ata_dma_blacklist[i], model_rev, rlen))
3025 * ata_dev_xfermask - Compute supported xfermask of the given device
3026 * @dev: Device to compute xfermask for
3028 * Compute supported xfermask of @dev and store it in
3029 * dev->*_mask. This function is responsible for applying all
3030 * known limits including host controller limits, device blacklist, etc.
3033 * FIXME: The current implementation limits all transfer modes to
3034 * the fastest of the lowest device on the port. This is not
3035 * required on most controllers.
3040 static void ata_dev_xfermask(struct ata_device *dev)
3042 struct ata_port *ap = dev->ap;
3043 struct ata_host_set *hs = ap->host_set;
3044 unsigned long xfer_mask;
3047 xfer_mask = ata_pack_xfermask(ap->pio_mask,
3048 ap->mwdma_mask, ap->udma_mask);
3050 /* Apply cable rule here. Don't apply it early because when
3051 * we handle hot plug the cable type can itself change.
3053 if (ap->cbl == ATA_CBL_PATA40)
3054 xfer_mask &= ~(0xF8 << ATA_SHIFT_UDMA);
3056 /* FIXME: Use port-wide xfermask for now */
3057 for (i = 0; i < ATA_MAX_DEVICES; i++) {
3058 struct ata_device *d = &ap->device[i];
3060 if (ata_dev_absent(d))
3063 if (ata_dev_disabled(d)) {
3064 /* to avoid violating device selection timing */
3065 xfer_mask &= ata_pack_xfermask(d->pio_mask,
3066 UINT_MAX, UINT_MAX);
3070 xfer_mask &= ata_pack_xfermask(d->pio_mask,
3071 d->mwdma_mask, d->udma_mask);
3072 xfer_mask &= ata_id_xfermask(d->id);
3073 if (ata_dma_blacklisted(d))
3074 xfer_mask &= ~(ATA_MASK_MWDMA | ATA_MASK_UDMA);
3077 if (ata_dma_blacklisted(dev))
3078 ata_dev_printk(dev, KERN_WARNING,
3079 "device is on DMA blacklist, disabling DMA\n");
3081 if (hs->flags & ATA_HOST_SIMPLEX) {
3082 if (hs->simplex_claimed)
3083 xfer_mask &= ~(ATA_MASK_MWDMA | ATA_MASK_UDMA);
3086 if (ap->ops->mode_filter)
3087 xfer_mask = ap->ops->mode_filter(ap, dev, xfer_mask);
3089 ata_unpack_xfermask(xfer_mask, &dev->pio_mask,
3090 &dev->mwdma_mask, &dev->udma_mask);
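/*
 * Illustrative sketch, not part of the original file: the pack/unpack
 * helpers used above combine the three per-type masks into one value so
 * limits can be applied with simple bit operations.  This hypothetical
 * helper caps a device at UDMA/33 the same way the 40-wire cable rule does.
 */
static void example_limit_to_udma33(struct ata_device *dev)
{
	unsigned long xfer_mask;

	xfer_mask = ata_pack_xfermask(dev->pio_mask, dev->mwdma_mask,
				      dev->udma_mask);

	/* clear UDMA modes above UDMA2 (UDMA/33) */
	xfer_mask &= ~(0xF8 << ATA_SHIFT_UDMA);

	ata_unpack_xfermask(xfer_mask, &dev->pio_mask, &dev->mwdma_mask,
			    &dev->udma_mask);
}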
3094 * ata_dev_set_xfermode - Issue SET FEATURES - XFER MODE command
3095 * @dev: Device to which command will be sent
3097 * Issue SET FEATURES - XFER MODE command to device @dev on port @ap.
3101 * PCI/etc. bus probe sem.
3104 * 0 on success, AC_ERR_* mask otherwise.
3107 static unsigned int ata_dev_set_xfermode(struct ata_device *dev)
3109 struct ata_taskfile tf;
3110 unsigned int err_mask;
3112 /* set up set-features taskfile */
3113 DPRINTK("set features - xfer mode\n");
3115 ata_tf_init(dev, &tf);
3116 tf.command = ATA_CMD_SET_FEATURES;
3117 tf.feature = SETFEATURES_XFER;
3118 tf.flags |= ATA_TFLAG_ISADDR | ATA_TFLAG_DEVICE;
3119 tf.protocol = ATA_PROT_NODATA;
3120 tf.nsect = dev->xfer_mode;
3122 err_mask = ata_exec_internal(dev, &tf, NULL, DMA_NONE, NULL, 0);
3124 DPRINTK("EXIT, err_mask=%x\n", err_mask);
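/*
 * Illustrative sketch, not part of the original file: any simple non-data
 * command can be issued through ata_exec_internal() the same way.  This
 * hypothetical helper enables the device's write cache; the subcommand
 * value 0x02 comes from the ATA SET FEATURES specification.
 */
static unsigned int example_dev_enable_wcache(struct ata_device *dev)
{
	struct ata_taskfile tf;

	ata_tf_init(dev, &tf);
	tf.command = ATA_CMD_SET_FEATURES;
	tf.feature = 0x02;	/* SET FEATURES subcommand: enable write cache */
	tf.flags |= ATA_TFLAG_ISADDR | ATA_TFLAG_DEVICE;
	tf.protocol = ATA_PROT_NODATA;

	return ata_exec_internal(dev, &tf, NULL, DMA_NONE, NULL, 0);
}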
3129 * ata_dev_init_params - Issue INIT DEV PARAMS command
3130 * @dev: Device to which command will be sent
3131 * @heads: Number of heads (taskfile parameter)
3132 * @sectors: Number of sectors (taskfile parameter)
3135 * Kernel thread context (may sleep)
3138 * 0 on success, AC_ERR_* mask otherwise.
3140 static unsigned int ata_dev_init_params(struct ata_device *dev,
3141 u16 heads, u16 sectors)
3143 struct ata_taskfile tf;
3144 unsigned int err_mask;
3146 /* Number of sectors per track 1-255. Number of heads 1-16 */
3147 if (sectors < 1 || sectors > 255 || heads < 1 || heads > 16)
3148 return AC_ERR_INVALID;
3150 /* set up init dev params taskfile */
3151 DPRINTK("init dev params \n");
3153 ata_tf_init(dev, &tf);
3154 tf.command = ATA_CMD_INIT_DEV_PARAMS;
3155 tf.flags |= ATA_TFLAG_ISADDR | ATA_TFLAG_DEVICE;
3156 tf.protocol = ATA_PROT_NODATA;
3158 tf.device |= (heads - 1) & 0x0f; /* max head = num. of heads - 1 */
3160 err_mask = ata_exec_internal(dev, &tf, NULL, DMA_NONE, NULL, 0);
3162 DPRINTK("EXIT, err_mask=%x\n", err_mask);
3167 * ata_sg_clean - Unmap DMA memory associated with command
3168 * @qc: Command containing DMA memory to be released
3170 * Unmap all mapped DMA memory associated with this command.
3173 * spin_lock_irqsave(host_set lock)
3176 static void ata_sg_clean(struct ata_queued_cmd *qc)
3178 struct ata_port *ap = qc->ap;
3179 struct scatterlist *sg = qc->__sg;
3180 int dir = qc->dma_dir;
3181 void *pad_buf = NULL;
3183 WARN_ON(!(qc->flags & ATA_QCFLAG_DMAMAP));
3184 WARN_ON(sg == NULL);
3186 if (qc->flags & ATA_QCFLAG_SINGLE)
3187 WARN_ON(qc->n_elem > 1);
3189 VPRINTK("unmapping %u sg elements\n", qc->n_elem);
3191 /* if we padded the buffer out to 32-bit bound, and data
3192 * xfer direction is from-device, we must copy from the
3193 * pad buffer back into the supplied buffer
3195 if (qc->pad_len && !(qc->tf.flags & ATA_TFLAG_WRITE))
3196 pad_buf = ap->pad + (qc->tag * ATA_DMA_PAD_SZ);
3198 if (qc->flags & ATA_QCFLAG_SG) {
3200 dma_unmap_sg(ap->dev, sg, qc->n_elem, dir);
3201 /* restore last sg */
3202 sg[qc->orig_n_elem - 1].length += qc->pad_len;
3204 struct scatterlist *psg = &qc->pad_sgent;
3205 void *addr = kmap_atomic(psg->page, KM_IRQ0);
3206 memcpy(addr + psg->offset, pad_buf, qc->pad_len);
3207 kunmap_atomic(addr, KM_IRQ0);
3211 dma_unmap_single(ap->dev,
3212 sg_dma_address(&sg[0]), sg_dma_len(&sg[0]),
3215 sg->length += qc->pad_len;
3217 memcpy(qc->buf_virt + sg->length - qc->pad_len,
3218 pad_buf, qc->pad_len);
3221 qc->flags &= ~ATA_QCFLAG_DMAMAP;
3226 * ata_fill_sg - Fill PCI IDE PRD table
3227 * @qc: Metadata associated with taskfile to be transferred
3229 * Fill PCI IDE PRD (scatter-gather) table with segments
3230 * associated with the current disk command.
3233 * spin_lock_irqsave(host_set lock)
3236 static void ata_fill_sg(struct ata_queued_cmd *qc)
3238 struct ata_port *ap = qc->ap;
3239 struct scatterlist *sg;
3242 WARN_ON(qc->__sg == NULL);
3243 WARN_ON(qc->n_elem == 0 && qc->pad_len == 0);
3246 ata_for_each_sg(sg, qc) {
3250 /* determine if physical DMA addr spans 64K boundary.
3251 * Note h/w doesn't support 64-bit, so we unconditionally
3252 * truncate dma_addr_t to u32.
3254 addr = (u32) sg_dma_address(sg);
3255 sg_len = sg_dma_len(sg);
3258 offset = addr & 0xffff;
3260 if ((offset + sg_len) > 0x10000)
3261 len = 0x10000 - offset;
3263 ap->prd[idx].addr = cpu_to_le32(addr);
3264 ap->prd[idx].flags_len = cpu_to_le32(len & 0xffff);
3265 VPRINTK("PRD[%u] = (0x%X, 0x%X)\n", idx, addr, len);
3274 ap->prd[idx - 1].flags_len |= cpu_to_le32(ATA_PRD_EOT);
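/*
 * Illustrative sketch, not part of the original file: the 64K-boundary rule
 * enforced above, expressed as a hypothetical helper that counts how many
 * PRD entries one DMA segment needs once no entry may cross a 64K boundary.
 */
static unsigned int example_prd_entries_needed(u32 addr, u32 sg_len)
{
	unsigned int nents = 0;

	while (sg_len) {
		u32 offset = addr & 0xffff;
		u32 len = sg_len;

		if (offset + sg_len > 0x10000)
			len = 0x10000 - offset;		/* stop at the boundary */

		addr += len;
		sg_len -= len;
		nents++;
	}

	return nents;
}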
3277 * ata_check_atapi_dma - Check whether ATAPI DMA can be supported
3278 * @qc: Metadata associated with taskfile to check
3280 * Allow low-level driver to filter ATA PACKET commands, returning
3281 * a status indicating whether or not it is OK to use DMA for the
3282 * supplied PACKET command.
3285 * spin_lock_irqsave(host_set lock)
3287 * RETURNS: 0 when ATAPI DMA can be used
3290 int ata_check_atapi_dma(struct ata_queued_cmd *qc)
3292 struct ata_port *ap = qc->ap;
3293 int rc = 0; /* Assume ATAPI DMA is OK by default */
3295 if (ap->ops->check_atapi_dma)
3296 rc = ap->ops->check_atapi_dma(qc);
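/*
 * Illustrative sketch, not part of the original file: a hypothetical
 * ->check_atapi_dma hook.  A nonzero return tells libata to fall back to
 * PIO for the PACKET command; the "multiple of 16 bytes" policy here is
 * invented purely for illustration.
 */
static int example_check_atapi_dma(struct ata_queued_cmd *qc)
{
	return (qc->nbytes & 15) ? 1 : 0;
}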
3301 * ata_qc_prep - Prepare taskfile for submission
3302 * @qc: Metadata associated with taskfile to be prepared
3304 * Prepare ATA taskfile for submission.
3307 * spin_lock_irqsave(host_set lock)
3309 void ata_qc_prep(struct ata_queued_cmd *qc)
3311 if (!(qc->flags & ATA_QCFLAG_DMAMAP))
3317 void ata_noop_qc_prep(struct ata_queued_cmd *qc) { }
3320 * ata_sg_init_one - Associate command with memory buffer
3321 * @qc: Command to be associated
3322 * @buf: Memory buffer
3323 * @buflen: Length of memory buffer, in bytes.
3325 * Initialize the data-related elements of queued_cmd @qc
3326 * to point to a single memory buffer, @buf of byte length @buflen.
3329 * spin_lock_irqsave(host_set lock)
3332 void ata_sg_init_one(struct ata_queued_cmd *qc, void *buf, unsigned int buflen)
3334 struct scatterlist *sg;
3336 qc->flags |= ATA_QCFLAG_SINGLE;
3338 memset(&qc->sgent, 0, sizeof(qc->sgent));
3339 qc->__sg = &qc->sgent;
3341 qc->orig_n_elem = 1;
3343 qc->nbytes = buflen;
3346 sg_init_one(sg, buf, buflen);
3350 * ata_sg_init - Associate command with scatter-gather table.
3351 * @qc: Command to be associated
3352 * @sg: Scatter-gather table.
3353 * @n_elem: Number of elements in s/g table.
3355 * Initialize the data-related elements of queued_cmd @qc
3356 * to point to a scatter-gather table @sg, containing @n_elem elements.
3360 * spin_lock_irqsave(host_set lock)
3363 void ata_sg_init(struct ata_queued_cmd *qc, struct scatterlist *sg,
3364 unsigned int n_elem)
3366 qc->flags |= ATA_QCFLAG_SG;
3368 qc->n_elem = n_elem;
3369 qc->orig_n_elem = n_elem;
3373 * ata_sg_setup_one - DMA-map the memory buffer associated with a command.
3374 * @qc: Command with memory buffer to be mapped.
3376 * DMA-map the memory buffer associated with queued_cmd @qc.
3379 * spin_lock_irqsave(host_set lock)
3382 * Zero on success, negative on error.
3385 static int ata_sg_setup_one(struct ata_queued_cmd *qc)
3387 struct ata_port *ap = qc->ap;
3388 int dir = qc->dma_dir;
3389 struct scatterlist *sg = qc->__sg;
3390 dma_addr_t dma_address;
3393 /* we must lengthen transfers to end on a 32-bit boundary */
3394 qc->pad_len = sg->length & 3;
3396 void *pad_buf = ap->pad + (qc->tag * ATA_DMA_PAD_SZ);
3397 struct scatterlist *psg = &qc->pad_sgent;
3399 WARN_ON(qc->dev->class != ATA_DEV_ATAPI);
3401 memset(pad_buf, 0, ATA_DMA_PAD_SZ);
3403 if (qc->tf.flags & ATA_TFLAG_WRITE)
3404 memcpy(pad_buf, qc->buf_virt + sg->length - qc->pad_len,
3407 sg_dma_address(psg) = ap->pad_dma + (qc->tag * ATA_DMA_PAD_SZ);
3408 sg_dma_len(psg) = ATA_DMA_PAD_SZ;
3410 sg->length -= qc->pad_len;
3411 if (sg->length == 0)
3414 DPRINTK("padding done, sg->length=%u pad_len=%u\n",
3415 sg->length, qc->pad_len);
3423 dma_address = dma_map_single(ap->dev, qc->buf_virt,
3425 if (dma_mapping_error(dma_address)) {
3427 sg->length += qc->pad_len;
3431 sg_dma_address(sg) = dma_address;
3432 sg_dma_len(sg) = sg->length;
3435 DPRINTK("mapped buffer of %d bytes for %s\n", sg_dma_len(sg),
3436 qc->tf.flags & ATA_TFLAG_WRITE ? "write" : "read");
3442 * ata_sg_setup - DMA-map the scatter-gather table associated with a command.
3443 * @qc: Command with scatter-gather table to be mapped.
3445 * DMA-map the scatter-gather table associated with queued_cmd @qc.
3448 * spin_lock_irqsave(host_set lock)
3451 * Zero on success, negative on error.
3455 static int ata_sg_setup(struct ata_queued_cmd *qc)
3457 struct ata_port *ap = qc->ap;
3458 struct scatterlist *sg = qc->__sg;
3459 struct scatterlist *lsg = &sg[qc->n_elem - 1];
3460 int n_elem, pre_n_elem, dir, trim_sg = 0;
3462 VPRINTK("ENTER, ata%u\n", ap->id);
3463 WARN_ON(!(qc->flags & ATA_QCFLAG_SG));
3465 /* we must lengthen transfers to end on a 32-bit boundary */
3466 qc->pad_len = lsg->length & 3;
3468 void *pad_buf = ap->pad + (qc->tag * ATA_DMA_PAD_SZ);
3469 struct scatterlist *psg = &qc->pad_sgent;
3470 unsigned int offset;
3472 WARN_ON(qc->dev->class != ATA_DEV_ATAPI);
3474 memset(pad_buf, 0, ATA_DMA_PAD_SZ);
3477 * psg->page/offset are used to copy to-be-written
3478 * data in this function or read data in ata_sg_clean.
3480 offset = lsg->offset + lsg->length - qc->pad_len;
3481 psg->page = nth_page(lsg->page, offset >> PAGE_SHIFT);
3482 psg->offset = offset_in_page(offset);
3484 if (qc->tf.flags & ATA_TFLAG_WRITE) {
3485 void *addr = kmap_atomic(psg->page, KM_IRQ0);
3486 memcpy(pad_buf, addr + psg->offset, qc->pad_len);
3487 kunmap_atomic(addr, KM_IRQ0);
3490 sg_dma_address(psg) = ap->pad_dma + (qc->tag * ATA_DMA_PAD_SZ);
3491 sg_dma_len(psg) = ATA_DMA_PAD_SZ;
3493 lsg->length -= qc->pad_len;
3494 if (lsg->length == 0)
3497 DPRINTK("padding done, sg[%d].length=%u pad_len=%u\n",
3498 qc->n_elem - 1, lsg->length, qc->pad_len);
3501 pre_n_elem = qc->n_elem;
3502 if (trim_sg && pre_n_elem)
3511 n_elem = dma_map_sg(ap->dev, sg, pre_n_elem, dir);
3513 /* restore last sg */
3514 lsg->length += qc->pad_len;
3518 DPRINTK("%d sg elements mapped\n", n_elem);
3521 qc->n_elem = n_elem;
3527 * swap_buf_le16 - swap halves of 16-bit words in place
3528 * @buf: Buffer to swap
3529 * @buf_words: Number of 16-bit words in buffer.
3531 * Swap halves of 16-bit words if needed to convert from
3532 * little-endian byte order to native cpu byte order, or
3536 * Inherited from caller.
3538 void swap_buf_le16(u16 *buf, unsigned int buf_words)
3543 for (i = 0; i < buf_words; i++)
3544 buf[i] = le16_to_cpu(buf[i]);
3545 #endif /* __BIG_ENDIAN */
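/*
 * Illustrative sketch, not part of the original file: IDENTIFY data arrives
 * as 256 little-endian words, so PIO readers run the raw buffer through
 * swap_buf_le16() before interpreting it.  The helper name is an assumption.
 */
static void example_fixup_id_buffer(u16 *id)
{
	swap_buf_le16(id, ATA_ID_WORDS);	/* no-op on little-endian CPUs */
}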
3549 * ata_mmio_data_xfer - Transfer data by MMIO
3550 * @adev: device for this I/O
3552 * @buflen: buffer length
3553 * @write_data: read/write
3555 * Transfer data from/to the device data register by MMIO.
3558 * Inherited from caller.
3561 void ata_mmio_data_xfer(struct ata_device *adev, unsigned char *buf,
3562 unsigned int buflen, int write_data)
3564 struct ata_port *ap = adev->ap;
3566 unsigned int words = buflen >> 1;
3567 u16 *buf16 = (u16 *) buf;
3568 void __iomem *mmio = (void __iomem *)ap->ioaddr.data_addr;
3570 /* Transfer multiple of 2 bytes */
3572 for (i = 0; i < words; i++)
3573 writew(le16_to_cpu(buf16[i]), mmio);
3575 for (i = 0; i < words; i++)
3576 buf16[i] = cpu_to_le16(readw(mmio));
3579 /* Transfer trailing 1 byte, if any. */
3580 if (unlikely(buflen & 0x01)) {
3581 u16 align_buf[1] = { 0 };
3582 unsigned char *trailing_buf = buf + buflen - 1;
3585 memcpy(align_buf, trailing_buf, 1);
3586 writew(le16_to_cpu(align_buf[0]), mmio);
3588 align_buf[0] = cpu_to_le16(readw(mmio));
3589 memcpy(trailing_buf, align_buf, 1);
3595 * ata_pio_data_xfer - Transfer data by PIO
3596 * @adev: device to target
3598 * @buflen: buffer length
3599 * @write_data: read/write
3601 * Transfer data from/to the device data register by PIO.
3604 * Inherited from caller.
3607 void ata_pio_data_xfer(struct ata_device *adev, unsigned char *buf,
3608 unsigned int buflen, int write_data)
3610 struct ata_port *ap = adev->ap;
3611 unsigned int words = buflen >> 1;
3613 /* Transfer multiple of 2 bytes */
3615 outsw(ap->ioaddr.data_addr, buf, words);
3617 insw(ap->ioaddr.data_addr, buf, words);
3619 /* Transfer trailing 1 byte, if any. */
3620 if (unlikely(buflen & 0x01)) {
3621 u16 align_buf[1] = { 0 };
3622 unsigned char *trailing_buf = buf + buflen - 1;
3625 memcpy(align_buf, trailing_buf, 1);
3626 outw(le16_to_cpu(align_buf[0]), ap->ioaddr.data_addr);
3628 align_buf[0] = cpu_to_le16(inw(ap->ioaddr.data_addr));
3629 memcpy(trailing_buf, align_buf, 1);
3635 * ata_pio_data_xfer_noirq - Transfer data by PIO
3636 * @adev: device to target
3638 * @buflen: buffer length
3639 * @write_data: read/write
3641 * Transfer data from/to the device data register by PIO. Do the
3642 * transfer with interrupts disabled.
3645 * Inherited from caller.
3648 void ata_pio_data_xfer_noirq(struct ata_device *adev, unsigned char *buf,
3649 unsigned int buflen, int write_data)
3651 unsigned long flags;
3652 local_irq_save(flags);
3653 ata_pio_data_xfer(adev, buf, buflen, write_data);
3654 local_irq_restore(flags);
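/*
 * Illustrative sketch, not part of the original file: an LLDD selects one of
 * the helpers above as its ->data_xfer hook.  Only that field is shown;
 * "example_pio_port_ops" and the omission of all other hooks are assumptions.
 */
static const struct ata_port_operations example_pio_port_ops = {
	/* PIO through I/O ports; MMIO ports would use ata_mmio_data_xfer */
	.data_xfer	= ata_pio_data_xfer,
};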
3659 * ata_pio_sector - Transfer ATA_SECT_SIZE (512 bytes) of data.
3660 * @qc: Command on going
3662 * Transfer ATA_SECT_SIZE of data from/to the ATA device.
3665 * Inherited from caller.
3668 static void ata_pio_sector(struct ata_queued_cmd *qc)
3670 int do_write = (qc->tf.flags & ATA_TFLAG_WRITE);
3671 struct scatterlist *sg = qc->__sg;
3672 struct ata_port *ap = qc->ap;
3674 unsigned int offset;
3677 if (qc->cursect == (qc->nsect - 1))
3678 ap->hsm_task_state = HSM_ST_LAST;
3680 page = sg[qc->cursg].page;
3681 offset = sg[qc->cursg].offset + qc->cursg_ofs * ATA_SECT_SIZE;
3683 /* get the current page and offset */
3684 page = nth_page(page, (offset >> PAGE_SHIFT));
3685 offset %= PAGE_SIZE;
3687 DPRINTK("data %s\n", qc->tf.flags & ATA_TFLAG_WRITE ? "write" : "read");
3689 if (PageHighMem(page)) {
3690 unsigned long flags;
3692 /* FIXME: use a bounce buffer */
3693 local_irq_save(flags);
3694 buf = kmap_atomic(page, KM_IRQ0);
3696 /* do the actual data transfer */
3697 ap->ops->data_xfer(qc->dev, buf + offset, ATA_SECT_SIZE, do_write);
3699 kunmap_atomic(buf, KM_IRQ0);
3700 local_irq_restore(flags);
3702 buf = page_address(page);
3703 ap->ops->data_xfer(qc->dev, buf + offset, ATA_SECT_SIZE, do_write);
3709 if ((qc->cursg_ofs * ATA_SECT_SIZE) == (&sg[qc->cursg])->length) {
3716 * ata_pio_sectors - Transfer one or many 512-byte sectors.
3717 * @qc: Command on going
3719 * Transfer one or many ATA_SECT_SIZE of data from/to the
3720 * ATA device for the DRQ request.
3723 * Inherited from caller.
3726 static void ata_pio_sectors(struct ata_queued_cmd *qc)
3728 if (is_multi_taskfile(&qc->tf)) {
3729 /* READ/WRITE MULTIPLE */
3732 WARN_ON(qc->dev->multi_count == 0);
3734 nsect = min(qc->nsect - qc->cursect, qc->dev->multi_count);
3742 * atapi_send_cdb - Write CDB bytes to hardware
3743 * @ap: Port to which ATAPI device is attached.
3744 * @qc: Taskfile currently active
3746 * When device has indicated its readiness to accept
3747 * a CDB, this function is called. Send the CDB.
3753 static void atapi_send_cdb(struct ata_port *ap, struct ata_queued_cmd *qc)
3756 DPRINTK("send cdb\n");
3757 WARN_ON(qc->dev->cdb_len < 12);
3759 ap->ops->data_xfer(qc->dev, qc->cdb, qc->dev->cdb_len, 1);
3760 ata_altstatus(ap); /* flush */
3762 switch (qc->tf.protocol) {
3763 case ATA_PROT_ATAPI:
3764 ap->hsm_task_state = HSM_ST;
3766 case ATA_PROT_ATAPI_NODATA:
3767 ap->hsm_task_state = HSM_ST_LAST;
3769 case ATA_PROT_ATAPI_DMA:
3770 ap->hsm_task_state = HSM_ST_LAST;
3771 /* initiate bmdma */
3772 ap->ops->bmdma_start(qc);
3778 * __atapi_pio_bytes - Transfer data from/to the ATAPI device.
3779 * @qc: Command on going
3780 * @bytes: number of bytes
3782 * Transfer data from/to the ATAPI device.
3785 * Inherited from caller.
3789 static void __atapi_pio_bytes(struct ata_queued_cmd *qc, unsigned int bytes)
3791 int do_write = (qc->tf.flags & ATA_TFLAG_WRITE);
3792 struct scatterlist *sg = qc->__sg;
3793 struct ata_port *ap = qc->ap;
3796 unsigned int offset, count;
3798 if (qc->curbytes + bytes >= qc->nbytes)
3799 ap->hsm_task_state = HSM_ST_LAST;
3802 if (unlikely(qc->cursg >= qc->n_elem)) {
3804 * The end of qc->sg is reached and the device expects
3805 * more data to transfer. In order not to overrun qc->sg
3806 * and fulfill length specified in the byte count register,
3807 * - for read case, discard trailing data from the device
3808 * - for write case, pad zero data to the device
3810 u16 pad_buf[1] = { 0 };
3811 unsigned int words = bytes >> 1;
3814 if (words) /* warning if bytes > 1 */
3815 ata_dev_printk(qc->dev, KERN_WARNING,
3816 "%u bytes trailing data\n", bytes);
3818 for (i = 0; i < words; i++)
3819 ap->ops->data_xfer(qc->dev, (unsigned char*)pad_buf, 2, do_write);
3821 ap->hsm_task_state = HSM_ST_LAST;
3825 sg = &qc->__sg[qc->cursg];
3828 offset = sg->offset + qc->cursg_ofs;
3830 /* get the current page and offset */
3831 page = nth_page(page, (offset >> PAGE_SHIFT));
3832 offset %= PAGE_SIZE;
3834 /* don't overrun current sg */
3835 count = min(sg->length - qc->cursg_ofs, bytes);
3837 /* don't cross page boundaries */
3838 count = min(count, (unsigned int)PAGE_SIZE - offset);
3840 DPRINTK("data %s\n", qc->tf.flags & ATA_TFLAG_WRITE ? "write" : "read");
3842 if (PageHighMem(page)) {
3843 unsigned long flags;
3845 /* FIXME: use bounce buffer */
3846 local_irq_save(flags);
3847 buf = kmap_atomic(page, KM_IRQ0);
3849 /* do the actual data transfer */
3850 ap->ops->data_xfer(qc->dev, buf + offset, count, do_write);
3852 kunmap_atomic(buf, KM_IRQ0);
3853 local_irq_restore(flags);
3855 buf = page_address(page);
3856 ap->ops->data_xfer(qc->dev, buf + offset, count, do_write);
3860 qc->curbytes += count;
3861 qc->cursg_ofs += count;
3863 if (qc->cursg_ofs == sg->length) {
3873 * atapi_pio_bytes - Transfer data from/to the ATAPI device.
3874 * @qc: Command on going
3876 * Transfer data from/to the ATAPI device.
3879 * Inherited from caller.
3882 static void atapi_pio_bytes(struct ata_queued_cmd *qc)
3884 struct ata_port *ap = qc->ap;
3885 struct ata_device *dev = qc->dev;
3886 unsigned int ireason, bc_lo, bc_hi, bytes;
3887 int i_write, do_write = (qc->tf.flags & ATA_TFLAG_WRITE) ? 1 : 0;
3889 /* Abuse qc->result_tf for temp storage of intermediate TF
3890 * here to save some kernel stack usage.
3891 * For normal completion, qc->result_tf is not relevant. For
3892 * error, qc->result_tf is later overwritten by ata_qc_complete().
3893 * So, the correctness of qc->result_tf is not affected.
3895 ap->ops->tf_read(ap, &qc->result_tf);
3896 ireason = qc->result_tf.nsect;
3897 bc_lo = qc->result_tf.lbam;
3898 bc_hi = qc->result_tf.lbah;
3899 bytes = (bc_hi << 8) | bc_lo;
3901 /* shall be cleared to zero, indicating xfer of data */
3902 if (ireason & (1 << 0))
3905 /* make sure transfer direction matches expected */
3906 i_write = ((ireason & (1 << 1)) == 0) ? 1 : 0;
3907 if (do_write != i_write)
3910 VPRINTK("ata%u: xfering %d bytes\n", ap->id, bytes);
3912 __atapi_pio_bytes(qc, bytes);
3917 ata_dev_printk(dev, KERN_INFO, "ATAPI check failed\n");
3918 qc->err_mask |= AC_ERR_HSM;
3919 ap->hsm_task_state = HSM_ST_ERR;
3923 * ata_hsm_ok_in_wq - Check if the qc can be handled in the workqueue.
3924 * @ap: the target ata_port
3928 * 1 if ok in workqueue, 0 otherwise.
3931 static inline int ata_hsm_ok_in_wq(struct ata_port *ap, struct ata_queued_cmd *qc)
3933 if (qc->tf.flags & ATA_TFLAG_POLLING)
3936 if (ap->hsm_task_state == HSM_ST_FIRST) {
3937 if (qc->tf.protocol == ATA_PROT_PIO &&
3938 (qc->tf.flags & ATA_TFLAG_WRITE))
3941 if (is_atapi_taskfile(&qc->tf) &&
3942 !(qc->dev->flags & ATA_DFLAG_CDB_INTR))
3950 * ata_hsm_qc_complete - finish a qc running on standard HSM
3951 * @qc: Command to complete
3952 * @in_wq: 1 if called from workqueue, 0 otherwise
3954 * Finish @qc which is running on standard HSM.
3957 * If @in_wq is zero, spin_lock_irqsave(host_set lock).
3958 * Otherwise, none on entry and grabs host lock.
3960 static void ata_hsm_qc_complete(struct ata_queued_cmd *qc, int in_wq)
3962 struct ata_port *ap = qc->ap;
3963 unsigned long flags;
3965 if (ap->ops->error_handler) {
3967 spin_lock_irqsave(ap->lock, flags);
3969 /* EH might have kicked in while host_set lock is released. */
3972 qc = ata_qc_from_tag(ap, qc->tag);
3974 if (likely(!(qc->err_mask & AC_ERR_HSM))) {
3976 ata_qc_complete(qc);
3978 ata_port_freeze(ap);
3981 spin_unlock_irqrestore(ap->lock, flags);
3983 if (likely(!(qc->err_mask & AC_ERR_HSM)))
3984 ata_qc_complete(qc);
3986 ata_port_freeze(ap);
3990 spin_lock_irqsave(ap->lock, flags);
3992 ata_qc_complete(qc);
3993 spin_unlock_irqrestore(ap->lock, flags);
3995 ata_qc_complete(qc);
3998 ata_altstatus(ap); /* flush */
4002 * ata_hsm_move - move the HSM to the next state.
4003 * @ap: the target ata_port
4005 * @status: current device status
4006 * @in_wq: 1 if called from workqueue, 0 otherwise
4009 * 1 when poll next status needed, 0 otherwise.
4011 int ata_hsm_move(struct ata_port *ap, struct ata_queued_cmd *qc,
4012 u8 status, int in_wq)
4014 unsigned long flags = 0;
4017 WARN_ON((qc->flags & ATA_QCFLAG_ACTIVE) == 0);
4019 /* Make sure ata_qc_issue_prot() does not throw things
4020 * like DMA polling into the workqueue. Notice that
4021 * in_wq is not equivalent to (qc->tf.flags & ATA_TFLAG_POLLING).
4023 WARN_ON(in_wq != ata_hsm_ok_in_wq(ap, qc));
4026 DPRINTK("ata%u: protocol %d task_state %d (dev_stat 0x%X)\n",
4027 ap->id, qc->tf.protocol, ap->hsm_task_state, status);
4029 switch (ap->hsm_task_state) {
4031 /* Send first data block or PACKET CDB */
4033 /* If polling, we will stay in the work queue after
4034 * sending the data. Otherwise, interrupt handler
4035 * takes over after sending the data.
4037 poll_next = (qc->tf.flags & ATA_TFLAG_POLLING);
4039 /* check device status */
4040 if (unlikely((status & ATA_DRQ) == 0)) {
4041 /* handle BSY=0, DRQ=0 as error */
4042 if (likely(status & (ATA_ERR | ATA_DF)))
4043 /* device stops HSM for abort/error */
4044 qc->err_mask |= AC_ERR_DEV;
4046 /* HSM violation. Let EH handle this */
4047 qc->err_mask |= AC_ERR_HSM;
4049 ap->hsm_task_state = HSM_ST_ERR;
4053 /* Device should not ask for data transfer (DRQ=1)
4054 * when it finds something wrong.
4055 * We ignore DRQ here and stop the HSM by
4056 * changing hsm_task_state to HSM_ST_ERR and
4057 * let the EH abort the command or reset the device.
4059 if (unlikely(status & (ATA_ERR | ATA_DF))) {
4060 printk(KERN_WARNING "ata%d: DRQ=1 with device error, dev_stat 0x%X\n",
4062 qc->err_mask |= AC_ERR_HSM;
4063 ap->hsm_task_state = HSM_ST_ERR;
4067 /* Send the CDB (atapi) or the first data block (ata pio out).
4068 * During the state transition, interrupt handler shouldn't
4069 * be invoked before the data transfer is complete and
4070 * hsm_task_state is changed. Hence, the following locking.
4073 spin_lock_irqsave(ap->lock, flags);
4075 if (qc->tf.protocol == ATA_PROT_PIO) {
4076 /* PIO data out protocol.
4077 * send first data block.
4080 /* ata_pio_sectors() might change the state
4081 * to HSM_ST_LAST. so, the state is changed here
4082 * before ata_pio_sectors().
4084 ap->hsm_task_state = HSM_ST;
4085 ata_pio_sectors(qc);
4086 ata_altstatus(ap); /* flush */
4089 atapi_send_cdb(ap, qc);
4092 spin_unlock_irqrestore(ap->lock, flags);
4094 /* if polling, ata_pio_task() handles the rest.
4095 * otherwise, interrupt handler takes over from here.
4100 /* complete command or read/write the data register */
4101 if (qc->tf.protocol == ATA_PROT_ATAPI) {
4102 /* ATAPI PIO protocol */
4103 if ((status & ATA_DRQ) == 0) {
4104 /* No more data to transfer or device error.
4105 * Device error will be tagged in HSM_ST_LAST.
4107 ap->hsm_task_state = HSM_ST_LAST;
4111 /* Device should not ask for data transfer (DRQ=1)
4112 * when it finds something wrong.
4113 * We ignore DRQ here and stop the HSM by
4114 * changing hsm_task_state to HSM_ST_ERR and
4115 * let the EH abort the command or reset the device.
4117 if (unlikely(status & (ATA_ERR | ATA_DF))) {
4118 printk(KERN_WARNING "ata%d: DRQ=1 with device error, dev_stat 0x%X\n",
4120 qc->err_mask |= AC_ERR_HSM;
4121 ap->hsm_task_state = HSM_ST_ERR;
4125 atapi_pio_bytes(qc);
4127 if (unlikely(ap->hsm_task_state == HSM_ST_ERR))
4128 /* bad ireason reported by device */
4132 /* ATA PIO protocol */
4133 if (unlikely((status & ATA_DRQ) == 0)) {
4134 /* handle BSY=0, DRQ=0 as error */
4135 if (likely(status & (ATA_ERR | ATA_DF)))
4136 /* device stops HSM for abort/error */
4137 qc->err_mask |= AC_ERR_DEV;
4139 /* HSM violation. Let EH handle this */
4140 qc->err_mask |= AC_ERR_HSM;
4142 ap->hsm_task_state = HSM_ST_ERR;
4146 /* For PIO reads, some devices may ask for
4147 * data transfer (DRQ=1) along with ERR=1.
4148 * We respect DRQ here and transfer one
4149 * block of junk data before changing the
4150 * hsm_task_state to HSM_ST_ERR.
4152 * For PIO writes, ERR=1 DRQ=1 doesn't make
4153 * sense since the data block has been
4154 * transferred to the device.
4156 if (unlikely(status & (ATA_ERR | ATA_DF))) {
4157 /* data might be corrupted */
4158 qc->err_mask |= AC_ERR_DEV;
4160 if (!(qc->tf.flags & ATA_TFLAG_WRITE)) {
4161 ata_pio_sectors(qc);
4163 status = ata_wait_idle(ap);
4166 if (status & (ATA_BUSY | ATA_DRQ))
4167 qc->err_mask |= AC_ERR_HSM;
4169 /* ata_pio_sectors() might change the
4170 * state to HSM_ST_LAST. so, the state
4171 * is changed after ata_pio_sectors().
4173 ap->hsm_task_state = HSM_ST_ERR;
4177 ata_pio_sectors(qc);
4179 if (ap->hsm_task_state == HSM_ST_LAST &&
4180 (!(qc->tf.flags & ATA_TFLAG_WRITE))) {
4183 status = ata_wait_idle(ap);
4188 ata_altstatus(ap); /* flush */
4193 if (unlikely(!ata_ok(status))) {
4194 qc->err_mask |= __ac_err_mask(status);
4195 ap->hsm_task_state = HSM_ST_ERR;
4199 /* no more data to transfer */
4200 DPRINTK("ata%u: dev %u command complete, drv_stat 0x%x\n",
4201 ap->id, qc->dev->devno, status);
4203 WARN_ON(qc->err_mask);
4205 ap->hsm_task_state = HSM_ST_IDLE;
4207 /* complete taskfile transaction */
4208 ata_hsm_qc_complete(qc, in_wq);
4214 /* make sure qc->err_mask is available to
4215 * know what's wrong and recover
4217 WARN_ON(qc->err_mask == 0);
4219 ap->hsm_task_state = HSM_ST_IDLE;
4221 /* complete taskfile transaction */
4222 ata_hsm_qc_complete(qc, in_wq);
4234 static void ata_pio_task(void *_data)
4236 struct ata_queued_cmd *qc = _data;
4237 struct ata_port *ap = qc->ap;
4242 WARN_ON(ap->hsm_task_state == HSM_ST_IDLE);
4245 * This is purely heuristic. This is a fast path.
4246 * Sometimes when we enter, BSY will be cleared in
4247 * a chk-status or two. If not, the drive is probably seeking
4248 * or something. Snooze for a couple msecs, then
4249 * chk-status again. If still busy, queue delayed work.
4251 status = ata_busy_wait(ap, ATA_BUSY, 5);
4252 if (status & ATA_BUSY) {
4254 status = ata_busy_wait(ap, ATA_BUSY, 10);
4255 if (status & ATA_BUSY) {
4256 ata_port_queue_task(ap, ata_pio_task, qc, ATA_SHORT_PAUSE);
4262 poll_next = ata_hsm_move(ap, qc, status, 1);
4264 /* another command or interrupt handler
4265 * may be running at this point.
4272 * ata_qc_new - Request an available ATA command, for queueing
4273 * @ap: Port associated with device @dev
4274 * @dev: Device from whom we request an available command structure
4280 static struct ata_queued_cmd *ata_qc_new(struct ata_port *ap)
4282 struct ata_queued_cmd *qc = NULL;
4285 /* no command while frozen */
4286 if (unlikely(ap->flags & ATA_FLAG_FROZEN))
4289 /* the last tag is reserved for internal command. */
4290 for (i = 0; i < ATA_MAX_QUEUE - 1; i++)
4291 if (!test_and_set_bit(i, &ap->qc_allocated)) {
4292 qc = __ata_qc_from_tag(ap, i);
4303 * ata_qc_new_init - Request an available ATA command, and initialize it
4304 * @dev: Device from whom we request an available command structure
4310 struct ata_queued_cmd *ata_qc_new_init(struct ata_device *dev)
4312 struct ata_port *ap = dev->ap;
4313 struct ata_queued_cmd *qc;
4315 qc = ata_qc_new(ap);
4328 * ata_qc_free - free unused ata_queued_cmd
4329 * @qc: Command to complete
4331 * Designed to free unused ata_queued_cmd object
4332 * in case something prevents using it.
4335 * spin_lock_irqsave(host_set lock)
4337 void ata_qc_free(struct ata_queued_cmd *qc)
4339 struct ata_port *ap = qc->ap;
4342 WARN_ON(qc == NULL); /* ata_qc_from_tag _might_ return NULL */
4346 if (likely(ata_tag_valid(tag))) {
4347 qc->tag = ATA_TAG_POISON;
4348 clear_bit(tag, &ap->qc_allocated);
4352 void __ata_qc_complete(struct ata_queued_cmd *qc)
4354 struct ata_port *ap = qc->ap;
4356 WARN_ON(qc == NULL); /* ata_qc_from_tag _might_ return NULL */
4357 WARN_ON(!(qc->flags & ATA_QCFLAG_ACTIVE));
4359 if (likely(qc->flags & ATA_QCFLAG_DMAMAP))
4362 /* command should be marked inactive atomically with qc completion */
4363 if (qc->tf.protocol == ATA_PROT_NCQ)
4364 ap->sactive &= ~(1 << qc->tag);
4366 ap->active_tag = ATA_TAG_POISON;
4368 /* atapi: mark qc as inactive to prevent the interrupt handler
4369 * from completing the command twice later, before the error handler
4370 * is called. (when rc != 0 and atapi request sense is needed)
4372 qc->flags &= ~ATA_QCFLAG_ACTIVE;
4373 ap->qc_active &= ~(1 << qc->tag);
4375 /* call completion callback */
4376 qc->complete_fn(qc);
4380 * ata_qc_complete - Complete an active ATA command
4381 * @qc: Command to complete
4384 * Indicate to the mid and upper layers that an ATA
4385 * command has completed, with either an ok or not-ok status.
4388 * spin_lock_irqsave(host_set lock)
4390 void ata_qc_complete(struct ata_queued_cmd *qc)
4392 struct ata_port *ap = qc->ap;
4394 /* XXX: New EH and old EH use different mechanisms to
4395 * synchronize EH with regular execution path.
4397 * In new EH, a failed qc is marked with ATA_QCFLAG_FAILED.
4398 * Normal execution path is responsible for not accessing a
4399 * failed qc. libata core enforces the rule by returning NULL
4400 * from ata_qc_from_tag() for failed qcs.
4402 * Old EH depends on ata_qc_complete() nullifying completion
4403 * requests if ATA_QCFLAG_EH_SCHEDULED is set. Old EH does
4404 * not synchronize with interrupt handler. Only PIO task is taken care of.
4407 if (ap->ops->error_handler) {
4408 WARN_ON(ap->flags & ATA_FLAG_FROZEN);
4410 if (unlikely(qc->err_mask))
4411 qc->flags |= ATA_QCFLAG_FAILED;
4413 if (unlikely(qc->flags & ATA_QCFLAG_FAILED)) {
4414 if (!ata_tag_internal(qc->tag)) {
4415 /* always fill result TF for failed qc */
4416 ap->ops->tf_read(ap, &qc->result_tf);
4417 ata_qc_schedule_eh(qc);
4422 /* read result TF if requested */
4423 if (qc->flags & ATA_QCFLAG_RESULT_TF)
4424 ap->ops->tf_read(ap, &qc->result_tf);
4426 __ata_qc_complete(qc);
4428 if (qc->flags & ATA_QCFLAG_EH_SCHEDULED)
4431 /* read result TF if failed or requested */
4432 if (qc->err_mask || qc->flags & ATA_QCFLAG_RESULT_TF)
4433 ap->ops->tf_read(ap, &qc->result_tf);
4435 __ata_qc_complete(qc);
4440 * ata_qc_complete_multiple - Complete multiple qcs successfully
4441 * @ap: port in question
4442 * @qc_active: new qc_active mask
4443 * @finish_qc: LLDD callback invoked before completing a qc
4445 * Complete in-flight commands. This function is meant to be
4446 * called from low-level driver's interrupt routine to complete
4447 * requests normally. ap->qc_active and @qc_active are compared
4448 * and commands are completed accordingly.
4451 * spin_lock_irqsave(host_set lock)
4454 * Number of completed commands on success, -errno otherwise.
4456 int ata_qc_complete_multiple(struct ata_port *ap, u32 qc_active,
4457 void (*finish_qc)(struct ata_queued_cmd *))
4463 done_mask = ap->qc_active ^ qc_active;
4465 if (unlikely(done_mask & qc_active)) {
4466 ata_port_printk(ap, KERN_ERR, "illegal qc_active transition "
4467 "(%08x->%08x)\n", ap->qc_active, qc_active);
4471 for (i = 0; i < ATA_MAX_QUEUE; i++) {
4472 struct ata_queued_cmd *qc;
4474 if (!(done_mask & (1 << i)))
4477 if ((qc = ata_qc_from_tag(ap, i))) {
4480 ata_qc_complete(qc);
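/*
 * Illustrative sketch, not part of the original file: a hypothetical NCQ
 * capable LLDD completes all finished commands at once by handing the
 * controller's "still active" tag bitmap to ata_qc_complete_multiple().
 * How hw_active_tags is obtained is left out on purpose.
 */
static void example_ncq_complete(struct ata_port *ap, u32 hw_active_tags)
{
	/* tags set in ap->qc_active but clear in hw_active_tags complete now */
	ata_qc_complete_multiple(ap, hw_active_tags, NULL);
}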
4488 static inline int ata_should_dma_map(struct ata_queued_cmd *qc)
4490 struct ata_port *ap = qc->ap;
4492 switch (qc->tf.protocol) {
4495 case ATA_PROT_ATAPI_DMA:
4498 case ATA_PROT_ATAPI:
4500 if (ap->flags & ATA_FLAG_PIO_DMA)
4513 * ata_qc_issue - issue taskfile to device
4514 * @qc: command to issue to device
4516 * Prepare an ATA command for submission to the device.
4517 * This includes mapping the data into a DMA-able
4518 * area, filling in the S/G table, and finally
4519 * writing the taskfile to hardware, starting the command.
4522 * spin_lock_irqsave(host_set lock)
4524 void ata_qc_issue(struct ata_queued_cmd *qc)
4526 struct ata_port *ap = qc->ap;
4528 /* Make sure only one non-NCQ command is outstanding. The
4529 * check is skipped for old EH because it reuses active qc to
4530 * request ATAPI sense.
4532 WARN_ON(ap->ops->error_handler && ata_tag_valid(ap->active_tag));
4534 if (qc->tf.protocol == ATA_PROT_NCQ) {
4535 WARN_ON(ap->sactive & (1 << qc->tag));
4536 ap->sactive |= 1 << qc->tag;
4538 WARN_ON(ap->sactive);
4539 ap->active_tag = qc->tag;
4542 qc->flags |= ATA_QCFLAG_ACTIVE;
4543 ap->qc_active |= 1 << qc->tag;
4545 if (ata_should_dma_map(qc)) {
4546 if (qc->flags & ATA_QCFLAG_SG) {
4547 if (ata_sg_setup(qc))
4549 } else if (qc->flags & ATA_QCFLAG_SINGLE) {
4550 if (ata_sg_setup_one(qc))
4554 qc->flags &= ~ATA_QCFLAG_DMAMAP;
4557 ap->ops->qc_prep(qc);
4559 qc->err_mask |= ap->ops->qc_issue(qc);
4560 if (unlikely(qc->err_mask))
4565 qc->flags &= ~ATA_QCFLAG_DMAMAP;
4566 qc->err_mask |= AC_ERR_SYSTEM;
4568 ata_qc_complete(qc);
4572 * ata_qc_issue_prot - issue taskfile to device in proto-dependent manner
4573 * @qc: command to issue to device
4575 * Using various libata functions and hooks, this function
4576 * starts an ATA command. ATA commands are grouped into
4577 * classes called "protocols", and issuing each type of protocol
4578 * is slightly different.
4580 * May be used as the qc_issue() entry in ata_port_operations.
4583 * spin_lock_irqsave(host_set lock)
4586 * Zero on success, AC_ERR_* mask on failure
4589 unsigned int ata_qc_issue_prot(struct ata_queued_cmd *qc)
4591 struct ata_port *ap = qc->ap;
4593 /* Use polling pio if the LLD doesn't handle
4594 * interrupt driven pio and atapi CDB interrupt.
4596 if (ap->flags & ATA_FLAG_PIO_POLLING) {
4597 switch (qc->tf.protocol) {
4599 case ATA_PROT_ATAPI:
4600 case ATA_PROT_ATAPI_NODATA:
4601 qc->tf.flags |= ATA_TFLAG_POLLING;
4603 case ATA_PROT_ATAPI_DMA:
4604 if (qc->dev->flags & ATA_DFLAG_CDB_INTR)
4605 /* see ata_dma_blacklisted() */
4613 /* select the device */
4614 ata_dev_select(ap, qc->dev->devno, 1, 0);
4616 /* start the command */
4617 switch (qc->tf.protocol) {
4618 case ATA_PROT_NODATA:
4619 if (qc->tf.flags & ATA_TFLAG_POLLING)
4620 ata_qc_set_polling(qc);
4622 ata_tf_to_host(ap, &qc->tf);
4623 ap->hsm_task_state = HSM_ST_LAST;
4625 if (qc->tf.flags & ATA_TFLAG_POLLING)
4626 ata_port_queue_task(ap, ata_pio_task, qc, 0);
4631 WARN_ON(qc->tf.flags & ATA_TFLAG_POLLING);
4633 ap->ops->tf_load(ap, &qc->tf); /* load tf registers */
4634 ap->ops->bmdma_setup(qc); /* set up bmdma */
4635 ap->ops->bmdma_start(qc); /* initiate bmdma */
4636 ap->hsm_task_state = HSM_ST_LAST;
4640 if (qc->tf.flags & ATA_TFLAG_POLLING)
4641 ata_qc_set_polling(qc);
4643 ata_tf_to_host(ap, &qc->tf);
4645 if (qc->tf.flags & ATA_TFLAG_WRITE) {
4646 /* PIO data out protocol */
4647 ap->hsm_task_state = HSM_ST_FIRST;
4648 ata_port_queue_task(ap, ata_pio_task, qc, 0);
4650 /* always send first data block using
4651 * the ata_pio_task() codepath.
4654 /* PIO data in protocol */
4655 ap->hsm_task_state = HSM_ST;
4657 if (qc->tf.flags & ATA_TFLAG_POLLING)
4658 ata_port_queue_task(ap, ata_pio_task, qc, 0);
4660 /* if polling, ata_pio_task() handles the rest.
4661 * otherwise, interrupt handler takes over from here.
4667 case ATA_PROT_ATAPI:
4668 case ATA_PROT_ATAPI_NODATA:
4669 if (qc->tf.flags & ATA_TFLAG_POLLING)
4670 ata_qc_set_polling(qc);
4672 ata_tf_to_host(ap, &qc->tf);
4674 ap->hsm_task_state = HSM_ST_FIRST;
4676 /* send cdb by polling if no cdb interrupt */
4677 if ((!(qc->dev->flags & ATA_DFLAG_CDB_INTR)) ||
4678 (qc->tf.flags & ATA_TFLAG_POLLING))
4679 ata_port_queue_task(ap, ata_pio_task, qc, 0);
4682 case ATA_PROT_ATAPI_DMA:
4683 WARN_ON(qc->tf.flags & ATA_TFLAG_POLLING);
4685 ap->ops->tf_load(ap, &qc->tf); /* load tf registers */
4686 ap->ops->bmdma_setup(qc); /* set up bmdma */
4687 ap->hsm_task_state = HSM_ST_FIRST;
4689 /* send cdb by polling if no cdb interrupt */
4690 if (!(qc->dev->flags & ATA_DFLAG_CDB_INTR))
4691 ata_port_queue_task(ap, ata_pio_task, qc, 0);
4696 return AC_ERR_SYSTEM;
4703 * ata_host_intr - Handle host interrupt for given (port, task)
4704 * @ap: Port on which interrupt arrived (possibly...)
4705 * @qc: Taskfile currently active in engine
4707 * Handle host interrupt for given queued command. Currently,
4708 * only DMA interrupts are handled. All other commands are
4709 * handled via polling with interrupts disabled (nIEN bit).
4712 * spin_lock_irqsave(host_set lock)
4715 * One if interrupt was handled, zero if not (shared irq).
4718 inline unsigned int ata_host_intr (struct ata_port *ap,
4719 struct ata_queued_cmd *qc)
4721 u8 status, host_stat = 0;
4723 VPRINTK("ata%u: protocol %d task_state %d\n",
4724 ap->id, qc->tf.protocol, ap->hsm_task_state);
4726 /* Check whether we are expecting interrupt in this state */
4727 switch (ap->hsm_task_state) {
4729 /* Some pre-ATAPI-4 devices assert INTRQ
4730 * at this state when ready to receive CDB.
4733 /* Checking the ATA_DFLAG_CDB_INTR flag is enough here.
4734 * The flag was turned on only for atapi devices.
4735 * No need to check is_atapi_taskfile(&qc->tf) again.
4737 if (!(qc->dev->flags & ATA_DFLAG_CDB_INTR))
4741 if (qc->tf.protocol == ATA_PROT_DMA ||
4742 qc->tf.protocol == ATA_PROT_ATAPI_DMA) {
4743 /* check status of DMA engine */
4744 host_stat = ap->ops->bmdma_status(ap);
4745 VPRINTK("ata%u: host_stat 0x%X\n", ap->id, host_stat);
4747 /* if it's not our irq... */
4748 if (!(host_stat & ATA_DMA_INTR))
4751 /* before we do anything else, clear DMA-Start bit */
4752 ap->ops->bmdma_stop(qc);
4754 if (unlikely(host_stat & ATA_DMA_ERR)) {
4755 /* error when transferring data to/from memory */
4756 qc->err_mask |= AC_ERR_HOST_BUS;
4757 ap->hsm_task_state = HSM_ST_ERR;
4767 /* check altstatus */
4768 status = ata_altstatus(ap);
4769 if (status & ATA_BUSY)
4772 /* check main status, clearing INTRQ */
4773 status = ata_chk_status(ap);
4774 if (unlikely(status & ATA_BUSY))
4777 /* ack bmdma irq events */
4778 ap->ops->irq_clear(ap);
4780 ata_hsm_move(ap, qc, status, 0);
4781 return 1; /* irq handled */
4784 ap->stats.idle_irq++;
4787 if ((ap->stats.idle_irq % 1000) == 0) {
4788 ata_irq_ack(ap, 0); /* debug trap */
4789 ata_port_printk(ap, KERN_WARNING, "irq trap\n");
4793 return 0; /* irq not handled */
4797 * ata_interrupt - Default ATA host interrupt handler
4798 * @irq: irq line (unused)
4799 * @dev_instance: pointer to our ata_host_set information structure
4802 * Default interrupt handler for PCI IDE devices. Calls
4803 * ata_host_intr() for each port that is not disabled.
4806 * Obtains host_set lock during operation.
4809 * IRQ_NONE or IRQ_HANDLED.
4812 irqreturn_t ata_interrupt (int irq, void *dev_instance, struct pt_regs *regs)
4814 struct ata_host_set *host_set = dev_instance;
4816 unsigned int handled = 0;
4817 unsigned long flags;
4819 /* TODO: make _irqsave conditional on x86 PCI IDE legacy mode */
4820 spin_lock_irqsave(&host_set->lock, flags);
4822 for (i = 0; i < host_set->n_ports; i++) {
4823 struct ata_port *ap;
4825 ap = host_set->ports[i];
4827 !(ap->flags & ATA_FLAG_DISABLED)) {
4828 struct ata_queued_cmd *qc;
4830 qc = ata_qc_from_tag(ap, ap->active_tag);
4831 if (qc && (!(qc->tf.flags & ATA_TFLAG_POLLING)) &&
4832 (qc->flags & ATA_QCFLAG_ACTIVE))
4833 handled |= ata_host_intr(ap, qc);
4837 spin_unlock_irqrestore(&host_set->lock, flags);
4839 return IRQ_RETVAL(handled);
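/*
 * Illustrative sketch, not part of the original file: a driver that wants
 * the default handler above simply points its ->irq_handler hook at
 * ata_interrupt.  "example_intr_ops" is an assumption and every other
 * mandatory hook is omitted for brevity.
 */
static const struct ata_port_operations example_intr_ops = {
	.irq_handler	= ata_interrupt,
};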
4843 * sata_scr_valid - test whether SCRs are accessible
4844 * @ap: ATA port to test SCR accessibility for
4846 * Test whether SCRs are accessible for @ap.
4852 * 1 if SCRs are accessible, 0 otherwise.
4854 int sata_scr_valid(struct ata_port *ap)
4856 return ap->cbl == ATA_CBL_SATA && ap->ops->scr_read;
4860 * sata_scr_read - read SCR register of the specified port
4861 * @ap: ATA port to read SCR for
4863 * @val: Place to store read value
4865 * Read SCR register @reg of @ap into *@val. This function is
4866 * guaranteed to succeed if the cable type of the port is SATA
4867 * and the port implements ->scr_read.
4873 * 0 on success, negative errno on failure.
4875 int sata_scr_read(struct ata_port *ap, int reg, u32 *val)
4877 if (sata_scr_valid(ap)) {
4878 *val = ap->ops->scr_read(ap, reg);
4885 * sata_scr_write - write SCR register of the specified port
4886 * @ap: ATA port to write SCR for
4887 * @reg: SCR to write
4888 * @val: value to write
4890 * Write @val to SCR register @reg of @ap. This function is
4891 * guaranteed to succeed if the cable type of the port is SATA
4892 * and the port implements ->scr_read.
4898 * 0 on success, negative errno on failure.
4900 int sata_scr_write(struct ata_port *ap, int reg, u32 val)
4902 if (sata_scr_valid(ap)) {
4903 ap->ops->scr_write(ap, reg, val);
4910 * sata_scr_write_flush - write SCR register of the specified port and flush
4911 * @ap: ATA port to write SCR for
4912 * @reg: SCR to write
4913 * @val: value to write
4915 * This function is identical to sata_scr_write() except that this
4916 * function performs a flush after writing to the register.
4922 * 0 on success, negative errno on failure.
4924 int sata_scr_write_flush(struct ata_port *ap, int reg, u32 val)
4926 if (sata_scr_valid(ap)) {
4927 ap->ops->scr_write(ap, reg, val);
4928 ap->ops->scr_read(ap, reg);
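/*
 * Illustrative sketch, not part of the original file: SError is
 * write-1-to-clear, so the usual idiom (also used in ata_std_postreset
 * above) reads the register and writes the value straight back.  The
 * helper name is an assumption.
 */
static void example_clear_serror(struct ata_port *ap)
{
	u32 serror;

	if (sata_scr_read(ap, SCR_ERROR, &serror) == 0)
		sata_scr_write(ap, SCR_ERROR, serror);
}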
4935 * ata_port_online - test whether the given port is online
4936 * @ap: ATA port to test
4938 * Test whether @ap is online. Note that this function returns 0
4939 * if online status of @ap cannot be obtained, so
4940 * ata_port_online(ap) != !ata_port_offline(ap).
4946 * 1 if the port online status is available and online.
4948 int ata_port_online(struct ata_port *ap)
4952 if (!sata_scr_read(ap, SCR_STATUS, &sstatus) && (sstatus & 0xf) == 0x3)
4958 * ata_port_offline - test whether the given port is offline
4959 * @ap: ATA port to test
4961 * Test whether @ap is offline. Note that this function returns
4962 * 0 if offline status of @ap cannot be obtained, so
4963 * ata_port_online(ap) != !ata_port_offline(ap).
4969 * 1 if the port offline status is available and offline.
4971 int ata_port_offline(struct ata_port *ap)
4975 if (!sata_scr_read(ap, SCR_STATUS, &sstatus) && (sstatus & 0xf) != 0x3)
4980 int ata_flush_cache(struct ata_device *dev)
4982 unsigned int err_mask;
4985 if (!ata_try_flush_cache(dev))
4988 if (ata_id_has_flush_ext(dev->id))
4989 cmd = ATA_CMD_FLUSH_EXT;
4991 cmd = ATA_CMD_FLUSH;
4993 err_mask = ata_do_simple_cmd(dev, cmd);
4995 ata_dev_printk(dev, KERN_ERR, "failed to flush cache\n");
5002 static int ata_standby_drive(struct ata_device *dev)
5004 unsigned int err_mask;
5006 err_mask = ata_do_simple_cmd(dev, ATA_CMD_STANDBYNOW1);
5008 ata_dev_printk(dev, KERN_ERR, "failed to standby drive "
5009 "(err_mask=0x%x)\n", err_mask);
5016 static int ata_start_drive(struct ata_device *dev)
5018 unsigned int err_mask;
5020 err_mask = ata_do_simple_cmd(dev, ATA_CMD_IDLEIMMEDIATE);
5022 ata_dev_printk(dev, KERN_ERR, "failed to start drive "
5023 "(err_mask=0x%x)\n", err_mask);
5031 * ata_device_resume - wake up a previously suspended device
5032 * @dev: the device to resume
5034 * Kick the drive back into action by sending it an idle immediate
5035 * command and making sure its transfer mode matches between drive and host.
5039 int ata_device_resume(struct ata_device *dev)
5041 struct ata_port *ap = dev->ap;
5043 if (ap->flags & ATA_FLAG_SUSPENDED) {
5044 struct ata_device *failed_dev;
5046 ata_busy_sleep(ap, ATA_TMOUT_BOOT_QUICK, ATA_TMOUT_BOOT);
5047 ata_busy_wait(ap, ATA_BUSY | ATA_DRQ, 200000);
5049 ap->flags &= ~ATA_FLAG_SUSPENDED;
5050 while (ata_set_mode(ap, &failed_dev))
5051 ata_dev_disable(failed_dev);
5053 if (!ata_dev_enabled(dev))
5055 if (dev->class == ATA_DEV_ATA)
5056 ata_start_drive(dev);
5062 * ata_device_suspend - prepare a device for suspend
5063 * @dev: the device to suspend
5064 * @state: target power management state
5066 * Flush the cache on the drive, if appropriate, then issue a
5067 * standbynow command.
5069 int ata_device_suspend(struct ata_device *dev, pm_message_t state)
5071 struct ata_port *ap = dev->ap;
5073 if (!ata_dev_enabled(dev))
5075 if (dev->class == ATA_DEV_ATA)
5076 ata_flush_cache(dev);
5078 if (state.event != PM_EVENT_FREEZE)
5079 ata_standby_drive(dev);
5080 ap->flags |= ATA_FLAG_SUSPENDED;
5085 * ata_port_start - Set port up for DMA.
5086 * @ap: Port to initialize
5088 * Called just after data structures for each port are
5089 * initialized. Allocates space for PRD table.
5091 * May be used as the port_start() entry in ata_port_operations.
5094 * Inherited from caller.
5097 int ata_port_start(struct ata_port *ap)
5099 struct device *dev = ap->dev;
5102 ap->prd = dma_alloc_coherent(dev, ATA_PRD_TBL_SZ, &ap->prd_dma, GFP_KERNEL);
5106 rc = ata_pad_alloc(ap, dev);
5108 dma_free_coherent(dev, ATA_PRD_TBL_SZ, ap->prd, ap->prd_dma);
5112 DPRINTK("prd alloc, virt %p, dma %llx\n", ap->prd, (unsigned long long) ap->prd_dma);
5119 * ata_port_stop - Undo ata_port_start()
5120 * @ap: Port to shut down
5122 * Frees the PRD table.
5124 * May be used as the port_stop() entry in ata_port_operations.
5127 * Inherited from caller.
5130 void ata_port_stop(struct ata_port *ap)
5132 struct device *dev = ap->dev;
5134 dma_free_coherent(dev, ATA_PRD_TBL_SZ, ap->prd, ap->prd_dma);
5135 ata_pad_free(ap, dev);
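/*
 * Illustrative sketch (hypothetical driver code): a low-level driver's
 * port_start() typically calls ata_port_start() for the common PRD/pad
 * allocations, then performs controller-specific setup.  The
 * my_port_priv structure and my_port_start() name are assumptions.
 */
struct my_port_priv {
	u32 saved_ctl;			/* purely illustrative per-port state */
};

static int my_port_start(struct ata_port *ap)
{
	struct my_port_priv *pp;
	int rc;

	rc = ata_port_start(ap);	/* allocates ap->prd and the DMA pad */
	if (rc)
		return rc;

	pp = kzalloc(sizeof(*pp), GFP_KERNEL);
	if (!pp) {
		ata_port_stop(ap);	/* undo ata_port_start() on failure */
		return -ENOMEM;
	}

	ap->private_data = pp;
	return 0;
}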
5138 void ata_host_stop(struct ata_host_set *host_set)
5140 if (host_set->mmio_base)
5141 iounmap(host_set->mmio_base);
5146 * ata_host_remove - Unregister SCSI host structure with upper layers
5147 * @ap: Port to unregister
5148 * @do_unregister: 1 if we fully unregister, 0 to just stop the port
5151 * Inherited from caller.
5154 static void ata_host_remove(struct ata_port *ap, unsigned int do_unregister)
5156 struct Scsi_Host *sh = ap->host;
5161 scsi_remove_host(sh);
5163 ap->ops->port_stop(ap);
5167 * ata_dev_init - Initialize an ata_device structure
5168 * @dev: Device structure to initialize
5170 * Initialize @dev in preparation for probing.
5173 * Inherited from caller.
5175 void ata_dev_init(struct ata_device *dev)
5177 struct ata_port *ap = dev->ap;
5178 unsigned long flags;
5180 /* SATA spd limit is bound to the first device */
5181 ap->sata_spd_limit = ap->hw_sata_spd_limit;
5183 /* High bits of dev->flags are used to record warm plug
5184 * requests which occur asynchronously. Synchronize using the host_set lock.
5187 spin_lock_irqsave(ap->lock, flags);
5188 dev->flags &= ~ATA_DFLAG_INIT_MASK;
5189 spin_unlock_irqrestore(ap->lock, flags);
5191 memset((void *)dev + ATA_DEVICE_CLEAR_OFFSET, 0,
5192 sizeof(*dev) - ATA_DEVICE_CLEAR_OFFSET);
5193 dev->pio_mask = UINT_MAX;
5194 dev->mwdma_mask = UINT_MAX;
5195 dev->udma_mask = UINT_MAX;
5199 * ata_host_init - Initialize an ata_port structure
5200 * @ap: Structure to initialize
5201 * @host: associated SCSI mid-layer structure
5202 * @host_set: Collection of hosts to which @ap belongs
5203 * @ent: Probe information provided by low-level driver
5204 * @port_no: Port number associated with this ata_port
5206 * Initialize a new ata_port structure, and its associated
5210 * Inherited from caller.
5212 static void ata_host_init(struct ata_port *ap, struct Scsi_Host *host,
5213 struct ata_host_set *host_set,
5214 const struct ata_probe_ent *ent, unsigned int port_no)
5220 host->max_channel = 1;
5221 host->unique_id = ata_unique_id++;
5222 host->max_cmd_len = 12;
5224 ap->lock = &host_set->lock;
5225 ap->flags = ATA_FLAG_DISABLED;
5226 ap->id = host->unique_id;
5228 ap->ctl = ATA_DEVCTL_OBS;
5229 ap->host_set = host_set;
5231 ap->port_no = port_no;
5233 ent->legacy_mode ? ent->hard_port_no : port_no;
5234 ap->pio_mask = ent->pio_mask;
5235 ap->mwdma_mask = ent->mwdma_mask;
5236 ap->udma_mask = ent->udma_mask;
5237 ap->flags |= ent->host_flags;
5238 ap->ops = ent->port_ops;
5239 ap->hw_sata_spd_limit = UINT_MAX;
5240 ap->active_tag = ATA_TAG_POISON;
5241 ap->last_ctl = 0xFF;
5243 #if defined(ATA_VERBOSE_DEBUG)
5244 /* turn on all debugging levels */
5245 ap->msg_enable = 0x00FF;
5246 #elif defined(ATA_DEBUG)
5247 ap->msg_enable = ATA_MSG_DRV | ATA_MSG_INFO | ATA_MSG_CTL | ATA_MSG_WARN | ATA_MSG_ERR;
5249 ap->msg_enable = ATA_MSG_DRV | ATA_MSG_ERR | ATA_MSG_WARN;
5252 INIT_WORK(&ap->port_task, NULL, NULL);
5253 INIT_WORK(&ap->hotplug_task, ata_scsi_hotplug, ap);
5254 INIT_WORK(&ap->scsi_rescan_task, ata_scsi_dev_rescan, ap);
5255 INIT_LIST_HEAD(&ap->eh_done_q);
5256 init_waitqueue_head(&ap->eh_wait_q);
5258 /* set cable type */
5259 ap->cbl = ATA_CBL_NONE;
5260 if (ap->flags & ATA_FLAG_SATA)
5261 ap->cbl = ATA_CBL_SATA;
5263 for (i = 0; i < ATA_MAX_DEVICES; i++) {
5264 struct ata_device *dev = &ap->device[i];
5271 ap->stats.unhandled_irq = 1;
5272 ap->stats.idle_irq = 1;
5275 memcpy(&ap->ioaddr, &ent->port[port_no], sizeof(struct ata_ioports));
5279 * ata_host_add - Attach low-level ATA driver to system
5280 * @ent: Information provided by low-level driver
5281 * @host_set: Collections of ports to which we add
5282 * @port_no: Port number associated with this host
5284 * Attach low-level ATA driver to system.
5287 * PCI/etc. bus probe sem.
5290 * New ata_port on success, or NULL on error.
5293 static struct ata_port *ata_host_add(const struct ata_probe_ent *ent,
5294 struct ata_host_set *host_set,
5295 unsigned int port_no)
5297 struct Scsi_Host *host;
5298 struct ata_port *ap;
5303 if (!ent->port_ops->error_handler &&
5304 !(ent->host_flags & (ATA_FLAG_SATA_RESET | ATA_FLAG_SRST))) {
5305 printk(KERN_ERR "ata%u: no reset mechanism available\n",
5310 host = scsi_host_alloc(ent->sht, sizeof(struct ata_port));
5314 host->transportt = &ata_scsi_transport_template;
5316 ap = ata_shost_to_port(host);
5318 ata_host_init(ap, host, host_set, ent, port_no);
5320 rc = ap->ops->port_start(ap);
5327 scsi_host_put(host);
5332 * ata_device_add - Register hardware device with ATA and SCSI layers
5333 * @ent: Probe information describing hardware device to be registered
5335 * This function processes the information provided in the probe
5336 * information struct @ent, allocates the necessary ATA and SCSI
5337 * host information structures, initializes them, and registers
5338 * everything with requisite kernel subsystems.
5340 * This function requests irqs, probes the ATA bus, and probes the SCSI bus.
5344 * PCI/etc. bus probe sem.
5347 * Number of ports registered. Zero on error (no ports registered).
5349 int ata_device_add(const struct ata_probe_ent *ent)
5351 unsigned int count = 0, i;
5352 struct device *dev = ent->dev;
5353 struct ata_host_set *host_set;
5357 /* alloc a container for our list of ATA ports (buses) */
5358 host_set = kzalloc(sizeof(struct ata_host_set) +
5359 (ent->n_ports * sizeof(void *)), GFP_KERNEL);
5362 spin_lock_init(&host_set->lock);
5364 host_set->dev = dev;
5365 host_set->n_ports = ent->n_ports;
5366 host_set->irq = ent->irq;
5367 host_set->mmio_base = ent->mmio_base;
5368 host_set->private_data = ent->private_data;
5369 host_set->ops = ent->port_ops;
5370 host_set->flags = ent->host_set_flags;
5372 /* register each port bound to this device */
5373 for (i = 0; i < ent->n_ports; i++) {
5374 struct ata_port *ap;
5375 unsigned long xfer_mode_mask;
5377 ap = ata_host_add(ent, host_set, i);
5381 host_set->ports[i] = ap;
5382 xfer_mode_mask = (ap->udma_mask << ATA_SHIFT_UDMA) |
5383 (ap->mwdma_mask << ATA_SHIFT_MWDMA) |
5384 (ap->pio_mask << ATA_SHIFT_PIO);
5386 /* print per-port info to dmesg */
5387 ata_port_printk(ap, KERN_INFO, "%cATA max %s cmd 0x%lX "
5388 "ctl 0x%lX bmdma 0x%lX irq %lu\n",
5389 ap->flags & ATA_FLAG_SATA ? 'S' : 'P',
5390 ata_mode_string(xfer_mode_mask),
5391 ap->ioaddr.cmd_addr,
5392 ap->ioaddr.ctl_addr,
5393 ap->ioaddr.bmdma_addr,
5397 host_set->ops->irq_clear(ap);
5398 ata_eh_freeze_port(ap); /* freeze port before requesting IRQ */
5405 /* obtain irq, that is shared between channels */
5406 rc = request_irq(ent->irq, ent->port_ops->irq_handler, ent->irq_flags,
5407 DRV_NAME, host_set);
5409 dev_printk(KERN_ERR, dev, "irq %lu request failed: %d\n",
5414 /* perform each probe synchronously */
5415 DPRINTK("probe begin\n");
5416 for (i = 0; i < count; i++) {
5417 struct ata_port *ap;
5421 ap = host_set->ports[i];
5423 /* init sata_spd_limit to the current value */
5424 if (sata_scr_read(ap, SCR_CONTROL, &scontrol) == 0) {
5425 int spd = (scontrol >> 4) & 0xf;
5426 ap->hw_sata_spd_limit &= (1 << spd) - 1;
5428 ap->sata_spd_limit = ap->hw_sata_spd_limit;
5430 rc = scsi_add_host(ap->host, dev);
5432 ata_port_printk(ap, KERN_ERR, "scsi_add_host failed\n");
5433 /* FIXME: do something useful here */
5434 /* FIXME: handle unconditional calls to
5435 * scsi_scan_host and ata_host_remove, below,
5440 if (ap->ops->error_handler) {
5441 unsigned long flags;
5445 /* kick EH for boot probing */
5446 spin_lock_irqsave(ap->lock, flags);
5448 ap->eh_info.probe_mask = (1 << ATA_MAX_DEVICES) - 1;
5449 ap->eh_info.action |= ATA_EH_SOFTRESET;
5451 ap->flags |= ATA_FLAG_LOADING;
5452 ata_port_schedule_eh(ap);
5454 spin_unlock_irqrestore(ap->lock, flags);
5456 /* wait for EH to finish */
5457 ata_port_wait_eh(ap);
5459 DPRINTK("ata%u: bus probe begin\n", ap->id);
5460 rc = ata_bus_probe(ap);
5461 DPRINTK("ata%u: bus probe end\n", ap->id);
5464 /* FIXME: do something useful here?
5465 * Current libata behavior will
5466 * tear down everything when
5467 * the module is removed
5468 * or the h/w is unplugged.
5474 /* probes are done, now scan each port's disk(s) */
5475 DPRINTK("host probe begin\n");
5476 for (i = 0; i < count; i++) {
5477 struct ata_port *ap = host_set->ports[i];
5479 ata_scsi_scan_host(ap);
5482 dev_set_drvdata(dev, host_set);
5484 VPRINTK("EXIT, returning %u\n", ent->n_ports);
5485 return ent->n_ports; /* success */
5488 for (i = 0; i < count; i++) {
5489 ata_host_remove(host_set->ports[i], 1);
5490 scsi_host_put(host_set->ports[i]->host);
5494 VPRINTK("EXIT, returning 0\n");
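/*
 * Illustrative sketch (hypothetical driver code): the minimal shape of a
 * probe entry handed to ata_device_add().  Only fields that this file
 * itself consumes are shown; my_sht, my_port_ops and my_init_one() are
 * assumptions standing in for a real low-level driver, and the masks and
 * flags are example values.
 */
static struct scsi_host_template my_sht;	/* assumed: usual libata SHT */
static struct ata_port_operations my_port_ops;	/* assumed: driver callbacks */

static int my_init_one(struct device *dev, void __iomem *mmio, int irq)
{
	struct ata_probe_ent *probe_ent;

	probe_ent = kzalloc(sizeof(*probe_ent), GFP_KERNEL);
	if (!probe_ent)
		return -ENOMEM;

	probe_ent->dev = dev;
	probe_ent->sht = &my_sht;
	probe_ent->port_ops = &my_port_ops;
	probe_ent->n_ports = 1;
	probe_ent->pio_mask = 0x1f;		/* PIO modes 0-4 */
	probe_ent->udma_mask = 0x7f;		/* UDMA modes 0-6 */
	probe_ent->irq = irq;
	probe_ent->irq_flags = SA_SHIRQ;	/* IRQ is shared between channels */
	probe_ent->mmio_base = mmio;
	probe_ent->host_flags = ATA_FLAG_SATA;
	/* probe_ent->port[0] taskfile addresses must also be filled in,
	 * e.g. via ata_std_ports() once cmd_addr/ctl_addr are known.
	 */

	if (!ata_device_add(probe_ent)) {	/* 0 means no ports registered */
		kfree(probe_ent);
		return -ENODEV;
	}

	kfree(probe_ent);	/* libata has copied what it needs */
	return 0;
}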
5499 * ata_port_detach - Detach ATA port in preparation for device removal
5500 * @ap: ATA port to be detached
5502 * Detach all ATA devices and the associated SCSI devices of @ap;
5503 * then, remove the associated SCSI host. @ap is guaranteed to
5504 * be quiescent on return from this function.
5507 * Kernel thread context (may sleep).
5509 void ata_port_detach(struct ata_port *ap)
5511 unsigned long flags;
5514 if (!ap->ops->error_handler)
5517 /* tell EH we're leaving & flush EH */
5518 spin_lock_irqsave(ap->lock, flags);
5519 ap->flags |= ATA_FLAG_UNLOADING;
5520 spin_unlock_irqrestore(ap->lock, flags);
5522 ata_port_wait_eh(ap);
5524 /* EH is now guaranteed to see UNLOADING, so no new device
5525 * will be attached. Disable all existing devices.
5527 spin_lock_irqsave(ap->lock, flags);
5529 for (i = 0; i < ATA_MAX_DEVICES; i++)
5530 ata_dev_disable(&ap->device[i]);
5532 spin_unlock_irqrestore(ap->lock, flags);
5534 /* Final freeze & EH. All in-flight commands are aborted. EH
5535 * will be skipped and retries will be terminated with a bad target.
5538 spin_lock_irqsave(ap->lock, flags);
5539 ata_port_freeze(ap); /* won't be thawed */
5540 spin_unlock_irqrestore(ap->lock, flags);
5542 ata_port_wait_eh(ap);
5544 /* Flush hotplug task. The sequence is similar to
5545 * ata_port_flush_task().
5547 flush_workqueue(ata_aux_wq);
5548 cancel_delayed_work(&ap->hotplug_task);
5549 flush_workqueue(ata_aux_wq);
5551 /* remove the associated SCSI host */
5552 scsi_remove_host(ap->host);
5556 * ata_host_set_remove - PCI layer callback for device removal
5557 * @host_set: ATA host set that was removed
5559 * Unregister all objects associated with this host set. Free those objects.
5563 * Inherited from calling layer (may sleep).
5566 void ata_host_set_remove(struct ata_host_set *host_set)
5570 for (i = 0; i < host_set->n_ports; i++)
5571 ata_port_detach(host_set->ports[i]);
5573 free_irq(host_set->irq, host_set);
5575 for (i = 0; i < host_set->n_ports; i++) {
5576 struct ata_port *ap = host_set->ports[i];
5578 ata_scsi_release(ap->host);
5580 if ((ap->flags & ATA_FLAG_NO_LEGACY) == 0) {
5581 struct ata_ioports *ioaddr = &ap->ioaddr;
5583 if (ioaddr->cmd_addr == 0x1f0)
5584 release_region(0x1f0, 8);
5585 else if (ioaddr->cmd_addr == 0x170)
5586 release_region(0x170, 8);
5589 scsi_host_put(ap->host);
5592 if (host_set->ops->host_stop)
5593 host_set->ops->host_stop(host_set);
5599 * ata_scsi_release - SCSI layer callback hook for host unload
5600 * @host: libata host to be unloaded
5602 * Performs all duties necessary to shut down a libata port...
5603 * Kill port kthread, disable port, and release resources.
5606 * Inherited from SCSI layer.
5612 int ata_scsi_release(struct Scsi_Host *host)
5614 struct ata_port *ap = ata_shost_to_port(host);
5618 ap->ops->port_disable(ap);
5619 ata_host_remove(ap, 0);
5626 * ata_std_ports - initialize ioaddr with standard port offsets.
5627 * @ioaddr: IO address structure to be initialized
5629 * Utility function which initializes data_addr, error_addr,
5630 * feature_addr, nsect_addr, lbal_addr, lbam_addr, lbah_addr,
5631 * device_addr, status_addr, and command_addr to standard offsets
5632 * relative to cmd_addr.
5634 * Does not set ctl_addr, altstatus_addr, bmdma_addr, or scr_addr.
5637 void ata_std_ports(struct ata_ioports *ioaddr)
5639 ioaddr->data_addr = ioaddr->cmd_addr + ATA_REG_DATA;
5640 ioaddr->error_addr = ioaddr->cmd_addr + ATA_REG_ERR;
5641 ioaddr->feature_addr = ioaddr->cmd_addr + ATA_REG_FEATURE;
5642 ioaddr->nsect_addr = ioaddr->cmd_addr + ATA_REG_NSECT;
5643 ioaddr->lbal_addr = ioaddr->cmd_addr + ATA_REG_LBAL;
5644 ioaddr->lbam_addr = ioaddr->cmd_addr + ATA_REG_LBAM;
5645 ioaddr->lbah_addr = ioaddr->cmd_addr + ATA_REG_LBAH;
5646 ioaddr->device_addr = ioaddr->cmd_addr + ATA_REG_DEVICE;
5647 ioaddr->status_addr = ioaddr->cmd_addr + ATA_REG_STATUS;
5648 ioaddr->command_addr = ioaddr->cmd_addr + ATA_REG_CMD;
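/*
 * Illustrative sketch (hypothetical helper and addresses): a driver fills
 * in cmd_addr and ctl_addr and lets ata_std_ports() derive the remaining
 * taskfile register addresses.
 */
static void example_setup_ioaddr(struct ata_ioports *ioaddr,
				 unsigned long cmd, unsigned long ctl)
{
	ioaddr->cmd_addr = cmd;		/* e.g. 0x1f0 for the legacy primary channel */
	ioaddr->ctl_addr = ctl;		/* e.g. 0x3f6 */
	ioaddr->altstatus_addr = ctl;	/* altstatus shares the ctl address */
	ata_std_ports(ioaddr);		/* fills data/error/.../command offsets */
}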
5654 void ata_pci_host_stop(struct ata_host_set *host_set)
5656 struct pci_dev *pdev = to_pci_dev(host_set->dev);
5658 pci_iounmap(pdev, host_set->mmio_base);
5662 * ata_pci_remove_one - PCI layer callback for device removal
5663 * @pdev: PCI device that was removed
5665 * The PCI layer indicates to libata via this hook that a
5666 * hot-unplug or module unload event has occurred.
5667 * Handle this by unregistering all objects associated
5668 * with this PCI device. Free those objects. Then finally
5669 * release PCI resources and disable the device.
5672 * Inherited from PCI layer (may sleep).
5675 void ata_pci_remove_one(struct pci_dev *pdev)
5677 struct device *dev = pci_dev_to_dev(pdev);
5678 struct ata_host_set *host_set = dev_get_drvdata(dev);
5679 struct ata_host_set *host_set2 = host_set->next;
5681 ata_host_set_remove(host_set);
5683 ata_host_set_remove(host_set2);
5685 pci_release_regions(pdev);
5686 pci_disable_device(pdev);
5687 dev_set_drvdata(dev, NULL);
5690 /* move to PCI subsystem */
5691 int pci_test_config_bits(struct pci_dev *pdev, const struct pci_bits *bits)
5693 unsigned long tmp = 0;
5695 switch (bits->width) {
5698 pci_read_config_byte(pdev, bits->reg, &tmp8);
5704 pci_read_config_word(pdev, bits->reg, &tmp16);
5710 pci_read_config_dword(pdev, bits->reg, &tmp32);
5721 return (tmp == bits->val) ? 1 : 0;
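/*
 * Illustrative sketch (assumption): using pci_test_config_bits() to check
 * an IDE-style "port enable" bit in PCI config space.  The register
 * offset and bit values are made up, and struct pci_bits is assumed to
 * carry { reg, width, mask, val } as declared in <linux/libata.h>.
 */
static int example_channel_enabled(struct pci_dev *pdev)
{
	static const struct pci_bits enable_bits = {
		.reg	= 0x41,	/* config register offset (hypothetical) */
		.width	= 1,	/* width in bytes */
		.mask	= 0x80,
		.val	= 0x80,	/* value that means "enabled" */
	};

	return pci_test_config_bits(pdev, &enable_bits);	/* 1 if enabled */
}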
5724 int ata_pci_device_suspend(struct pci_dev *pdev, pm_message_t state)
5726 pci_save_state(pdev);
5727 pci_disable_device(pdev);
5728 pci_set_power_state(pdev, PCI_D3hot);
5732 int ata_pci_device_resume(struct pci_dev *pdev)
5734 pci_set_power_state(pdev, PCI_D0);
5735 pci_restore_state(pdev);
5736 pci_enable_device(pdev);
5737 pci_set_master(pdev);
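/*
 * Illustrative sketch (hypothetical driver): ata_pci_device_suspend(),
 * ata_pci_device_resume() and ata_pci_remove_one() have the right
 * signatures to be used directly as pci_driver hooks by controllers that
 * need no extra work of their own.
 */
static struct pci_driver example_pci_driver = {
	.name		= "example_sata",	/* hypothetical driver name */
	/* .id_table, .probe etc. omitted from this sketch */
	.remove		= ata_pci_remove_one,
	.suspend	= ata_pci_device_suspend,
	.resume		= ata_pci_device_resume,
};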
5740 #endif /* CONFIG_PCI */
5743 static int __init ata_init(void)
5745 ata_probe_timeout *= HZ;
5746 ata_wq = create_workqueue("ata");
5750 ata_aux_wq = create_singlethread_workqueue("ata_aux");
5752 destroy_workqueue(ata_wq);
5756 printk(KERN_DEBUG "libata version " DRV_VERSION " loaded.\n");
5760 static void __exit ata_exit(void)
5762 destroy_workqueue(ata_wq);
5763 destroy_workqueue(ata_aux_wq);
5766 module_init(ata_init);
5767 module_exit(ata_exit);
5769 static unsigned long ratelimit_time;
5770 static spinlock_t ata_ratelimit_lock = SPIN_LOCK_UNLOCKED;
5772 int ata_ratelimit(void)
5775 unsigned long flags;
5777 spin_lock_irqsave(&ata_ratelimit_lock, flags);
5779 if (time_after(jiffies, ratelimit_time)) {
5781 ratelimit_time = jiffies + (HZ/5);
5785 spin_unlock_irqrestore(&ata_ratelimit_lock, flags);
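/*
 * Illustrative usage: ata_ratelimit() throttles messages emitted from hot
 * paths such as interrupt handlers.  The helper name is hypothetical.
 */
static inline void example_warn_spurious(struct ata_port *ap)
{
	if (ata_ratelimit())
		ata_port_printk(ap, KERN_WARNING, "spurious interrupt\n");
}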
5791 * ata_wait_register - wait until register value changes
5792 * @reg: IO-mapped register
5793 * @mask: Mask to apply to read register value
5794 * @val: Wait condition
5795 * @interval_msec: polling interval in milliseconds
5796 * @timeout_msec: timeout in milliseconds
5798 * Waiting for some bits of register to change is a common
5799 * operation for ATA controllers. This function reads 32bit LE
5800 * IO-mapped register @reg and tests for the following condition.
5802 * (*@reg & mask) != val
5804 * If the condition is met, it returns; otherwise, the process is
5805 * repeated after @interval_msec until timeout.
5808 * Kernel thread context (may sleep)
5811 * The final register value.
5813 u32 ata_wait_register(void __iomem *reg, u32 mask, u32 val,
5814 unsigned long interval_msec,
5815 unsigned long timeout_msec)
5817 unsigned long timeout;
5820 tmp = ioread32(reg);
5822 /* Calculate timeout _after_ the first read to make sure
5823 * preceding writes reach the controller before starting to
5824 * eat away the timeout.
5826 timeout = jiffies + (timeout_msec * HZ) / 1000;
5828 while ((tmp & mask) == val && time_before(jiffies, timeout)) {
5829 msleep(interval_msec);
5830 tmp = ioread32(reg);
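/*
 * Illustrative usage (hypothetical register and bit): poll a controller
 * status register with ata_wait_register() until its busy bit clears,
 * sampling every 10 ms for up to 500 ms.
 */
static int example_wait_not_busy(void __iomem *status_reg, u32 busy_bit)
{
	u32 tmp;

	/* waits while (*status_reg & busy_bit) == busy_bit, i.e. until it clears */
	tmp = ata_wait_register(status_reg, busy_bit, busy_bit, 10, 500);

	return (tmp & busy_bit) ? -EBUSY : 0;	/* still busy => timed out */
}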
5837 * libata is essentially a library of internal helper functions for
5838 * low-level ATA host controller drivers. As such, the API/ABI is
5839 * likely to change as new drivers are added and updated.
5840 * Do not depend on ABI/API stability.
5843 EXPORT_SYMBOL_GPL(sata_deb_timing_boot);
5844 EXPORT_SYMBOL_GPL(sata_deb_timing_eh);
5845 EXPORT_SYMBOL_GPL(sata_deb_timing_before_fsrst);
5846 EXPORT_SYMBOL_GPL(ata_std_bios_param);
5847 EXPORT_SYMBOL_GPL(ata_std_ports);
5848 EXPORT_SYMBOL_GPL(ata_device_add);
5849 EXPORT_SYMBOL_GPL(ata_port_detach);
5850 EXPORT_SYMBOL_GPL(ata_host_set_remove);
5851 EXPORT_SYMBOL_GPL(ata_sg_init);
5852 EXPORT_SYMBOL_GPL(ata_sg_init_one);
5853 EXPORT_SYMBOL_GPL(ata_hsm_move);
5854 EXPORT_SYMBOL_GPL(ata_qc_complete);
5855 EXPORT_SYMBOL_GPL(ata_qc_complete_multiple);
5856 EXPORT_SYMBOL_GPL(ata_qc_issue_prot);
5857 EXPORT_SYMBOL_GPL(ata_tf_load);
5858 EXPORT_SYMBOL_GPL(ata_tf_read);
5859 EXPORT_SYMBOL_GPL(ata_noop_dev_select);
5860 EXPORT_SYMBOL_GPL(ata_std_dev_select);
5861 EXPORT_SYMBOL_GPL(ata_tf_to_fis);
5862 EXPORT_SYMBOL_GPL(ata_tf_from_fis);
5863 EXPORT_SYMBOL_GPL(ata_check_status);
5864 EXPORT_SYMBOL_GPL(ata_altstatus);
5865 EXPORT_SYMBOL_GPL(ata_exec_command);
5866 EXPORT_SYMBOL_GPL(ata_port_start);
5867 EXPORT_SYMBOL_GPL(ata_port_stop);
5868 EXPORT_SYMBOL_GPL(ata_host_stop);
5869 EXPORT_SYMBOL_GPL(ata_interrupt);
5870 EXPORT_SYMBOL_GPL(ata_mmio_data_xfer);
5871 EXPORT_SYMBOL_GPL(ata_pio_data_xfer);
5872 EXPORT_SYMBOL_GPL(ata_pio_data_xfer_noirq);
5873 EXPORT_SYMBOL_GPL(ata_qc_prep);
5874 EXPORT_SYMBOL_GPL(ata_noop_qc_prep);
5875 EXPORT_SYMBOL_GPL(ata_bmdma_setup);
5876 EXPORT_SYMBOL_GPL(ata_bmdma_start);
5877 EXPORT_SYMBOL_GPL(ata_bmdma_irq_clear);
5878 EXPORT_SYMBOL_GPL(ata_bmdma_status);
5879 EXPORT_SYMBOL_GPL(ata_bmdma_stop);
5880 EXPORT_SYMBOL_GPL(ata_bmdma_freeze);
5881 EXPORT_SYMBOL_GPL(ata_bmdma_thaw);
5882 EXPORT_SYMBOL_GPL(ata_bmdma_drive_eh);
5883 EXPORT_SYMBOL_GPL(ata_bmdma_error_handler);
5884 EXPORT_SYMBOL_GPL(ata_bmdma_post_internal_cmd);
5885 EXPORT_SYMBOL_GPL(ata_port_probe);
5886 EXPORT_SYMBOL_GPL(sata_set_spd);
5887 EXPORT_SYMBOL_GPL(sata_phy_debounce);
5888 EXPORT_SYMBOL_GPL(sata_phy_resume);
5889 EXPORT_SYMBOL_GPL(sata_phy_reset);
5890 EXPORT_SYMBOL_GPL(__sata_phy_reset);
5891 EXPORT_SYMBOL_GPL(ata_bus_reset);
5892 EXPORT_SYMBOL_GPL(ata_std_prereset);
5893 EXPORT_SYMBOL_GPL(ata_std_softreset);
5894 EXPORT_SYMBOL_GPL(sata_std_hardreset);
5895 EXPORT_SYMBOL_GPL(ata_std_postreset);
5896 EXPORT_SYMBOL_GPL(ata_dev_revalidate);
5897 EXPORT_SYMBOL_GPL(ata_dev_classify);
5898 EXPORT_SYMBOL_GPL(ata_dev_pair);
5899 EXPORT_SYMBOL_GPL(ata_port_disable);
5900 EXPORT_SYMBOL_GPL(ata_ratelimit);
5901 EXPORT_SYMBOL_GPL(ata_wait_register);
5902 EXPORT_SYMBOL_GPL(ata_busy_sleep);
5903 EXPORT_SYMBOL_GPL(ata_port_queue_task);
5904 EXPORT_SYMBOL_GPL(ata_scsi_ioctl);
5905 EXPORT_SYMBOL_GPL(ata_scsi_queuecmd);
5906 EXPORT_SYMBOL_GPL(ata_scsi_slave_config);
5907 EXPORT_SYMBOL_GPL(ata_scsi_slave_destroy);
5908 EXPORT_SYMBOL_GPL(ata_scsi_change_queue_depth);
5909 EXPORT_SYMBOL_GPL(ata_scsi_release);
5910 EXPORT_SYMBOL_GPL(ata_host_intr);
5911 EXPORT_SYMBOL_GPL(sata_scr_valid);
5912 EXPORT_SYMBOL_GPL(sata_scr_read);
5913 EXPORT_SYMBOL_GPL(sata_scr_write);
5914 EXPORT_SYMBOL_GPL(sata_scr_write_flush);
5915 EXPORT_SYMBOL_GPL(ata_port_online);
5916 EXPORT_SYMBOL_GPL(ata_port_offline);
5917 EXPORT_SYMBOL_GPL(ata_id_string);
5918 EXPORT_SYMBOL_GPL(ata_id_c_string);
5919 EXPORT_SYMBOL_GPL(ata_scsi_simulate);
5921 EXPORT_SYMBOL_GPL(ata_pio_need_iordy);
5922 EXPORT_SYMBOL_GPL(ata_timing_compute);
5923 EXPORT_SYMBOL_GPL(ata_timing_merge);
5926 EXPORT_SYMBOL_GPL(pci_test_config_bits);
5927 EXPORT_SYMBOL_GPL(ata_pci_host_stop);
5928 EXPORT_SYMBOL_GPL(ata_pci_init_native_mode);
5929 EXPORT_SYMBOL_GPL(ata_pci_init_one);
5930 EXPORT_SYMBOL_GPL(ata_pci_remove_one);
5931 EXPORT_SYMBOL_GPL(ata_pci_device_suspend);
5932 EXPORT_SYMBOL_GPL(ata_pci_device_resume);
5933 EXPORT_SYMBOL_GPL(ata_pci_default_filter);
5934 EXPORT_SYMBOL_GPL(ata_pci_clear_simplex);
5935 #endif /* CONFIG_PCI */
5937 EXPORT_SYMBOL_GPL(ata_device_suspend);
5938 EXPORT_SYMBOL_GPL(ata_device_resume);
5939 EXPORT_SYMBOL_GPL(ata_scsi_device_suspend);
5940 EXPORT_SYMBOL_GPL(ata_scsi_device_resume);
5942 EXPORT_SYMBOL_GPL(ata_eng_timeout);
5943 EXPORT_SYMBOL_GPL(ata_port_schedule_eh);
5944 EXPORT_SYMBOL_GPL(ata_port_abort);
5945 EXPORT_SYMBOL_GPL(ata_port_freeze);
5946 EXPORT_SYMBOL_GPL(ata_eh_freeze_port);
5947 EXPORT_SYMBOL_GPL(ata_eh_thaw_port);
5948 EXPORT_SYMBOL_GPL(ata_eh_qc_complete);
5949 EXPORT_SYMBOL_GPL(ata_eh_qc_retry);
5950 EXPORT_SYMBOL_GPL(ata_do_eh);