2 * libata-core.c - helper library for ATA
4 * Maintained by: Jeff Garzik <jgarzik@pobox.com>
5 * Please ALWAYS copy linux-ide@vger.kernel.org
8 * Copyright 2003-2004 Red Hat, Inc. All rights reserved.
9 * Copyright 2003-2004 Jeff Garzik
12 * This program is free software; you can redistribute it and/or modify
13 * it under the terms of the GNU General Public License as published by
14 * the Free Software Foundation; either version 2, or (at your option)
17 * This program is distributed in the hope that it will be useful,
18 * but WITHOUT ANY WARRANTY; without even the implied warranty of
19 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
20 * GNU General Public License for more details.
22 * You should have received a copy of the GNU General Public License
23 * along with this program; see the file COPYING. If not, write to
24 * the Free Software Foundation, 675 Mass Ave, Cambridge, MA 02139, USA.
27 * libata documentation is available via 'make {ps|pdf}docs',
28 * as Documentation/DocBook/libata.*
30 * Hardware documentation available from http://www.t13.org/ and
31 * http://www.sata-io.org/
35 #include <linux/kernel.h>
36 #include <linux/module.h>
37 #include <linux/pci.h>
38 #include <linux/init.h>
39 #include <linux/list.h>
41 #include <linux/highmem.h>
42 #include <linux/spinlock.h>
43 #include <linux/blkdev.h>
44 #include <linux/delay.h>
45 #include <linux/timer.h>
46 #include <linux/interrupt.h>
47 #include <linux/completion.h>
48 #include <linux/suspend.h>
49 #include <linux/workqueue.h>
50 #include <linux/jiffies.h>
51 #include <linux/scatterlist.h>
52 #include <scsi/scsi.h>
53 #include <scsi/scsi_cmnd.h>
54 #include <scsi/scsi_host.h>
55 #include <linux/libata.h>
57 #include <asm/semaphore.h>
58 #include <asm/byteorder.h>
62 /* debounce timing parameters in msecs { interval, duration, timeout } */
63 const unsigned long sata_deb_timing_normal[] = { 5, 100, 2000 };
64 const unsigned long sata_deb_timing_hotplug[] = { 25, 500, 2000 };
65 const unsigned long sata_deb_timing_long[] = { 100, 2000, 5000 };
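/*
 * Rough intent of the three values (a sketch of how the SATA PHY
 * debounce/resume helpers consume them): SStatus is sampled roughly every
 * @interval ms, the link is treated as settled once the DET value has
 * stayed unchanged for @duration ms, and the wait is abandoned after
 * @timeout ms.
 */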
67 static unsigned int ata_dev_init_params(struct ata_device *dev,
68 u16 heads, u16 sectors);
69 static unsigned int ata_dev_set_xfermode(struct ata_device *dev);
70 static void ata_dev_xfermask(struct ata_device *dev);
72 static unsigned int ata_unique_id = 1;
73 static struct workqueue_struct *ata_wq;
75 struct workqueue_struct *ata_aux_wq;
77 int atapi_enabled = 1;
78 module_param(atapi_enabled, int, 0444);
79 MODULE_PARM_DESC(atapi_enabled, "Enable discovery of ATAPI devices (0=off, 1=on)");
82 module_param(atapi_dmadir, int, 0444);
83 MODULE_PARM_DESC(atapi_dmadir, "Enable ATAPI DMADIR bridge support (0=off, 1=on)");
86 module_param_named(fua, libata_fua, int, 0444);
87 MODULE_PARM_DESC(fua, "FUA support (0=off, 1=on)");
89 static int ata_probe_timeout = ATA_TMOUT_INTERNAL / HZ;
90 module_param(ata_probe_timeout, int, 0444);
91 MODULE_PARM_DESC(ata_probe_timeout, "Set ATA probing timeout (seconds)");
93 MODULE_AUTHOR("Jeff Garzik");
94 MODULE_DESCRIPTION("Library module for ATA devices");
95 MODULE_LICENSE("GPL");
96 MODULE_VERSION(DRV_VERSION);
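/*
 * All of the parameters above are declared with 0444 permissions, so they
 * are read-only via sysfs and must be set at load time.  Usage sketch
 * (values are only examples): with libata built as a module,
 * "modprobe libata atapi_enabled=0 fua=1"; when built in, the same knobs
 * go on the kernel command line as "libata.atapi_enabled=0 libata.fua=1
 * libata.ata_probe_timeout=30".
 */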
100 * ata_tf_to_fis - Convert ATA taskfile to SATA FIS structure
101 * @tf: Taskfile to convert
102 * @fis: Buffer into which data will be output
102 * @fis: Buffer into which data will be output
103 * @pmp: Port multiplier port
105 * Converts a standard ATA taskfile to a Serial ATA
106 * FIS structure (Register - Host to Device).
109 * Inherited from caller.
112 void ata_tf_to_fis(const struct ata_taskfile *tf, u8 *fis, u8 pmp)
114 fis[0] = 0x27; /* Register - Host to Device FIS */
115 fis[1] = (pmp & 0xf) | (1 << 7); /* Port multiplier number,
116 bit 7 indicates Command FIS */
117 fis[2] = tf->command;
118 fis[3] = tf->feature;
125 fis[8] = tf->hob_lbal;
126 fis[9] = tf->hob_lbam;
127 fis[10] = tf->hob_lbah;
128 fis[11] = tf->hob_feature;
131 fis[13] = tf->hob_nsect;
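/*
 * For reference, the assignments above follow the Register - Host to
 * Device FIS layout from the Serial ATA spec: byte 0 is the FIS type
 * (0x27), byte 1 carries the PM port in bits 3:0 and the C bit in bit 7,
 * bytes 2-3 are command/features, bytes 4-7 are LBA low/mid/high and the
 * device register, bytes 8-11 are the HOB LBA and features bytes, and
 * bytes 12-13 are the sector count and its HOB half.  Illustrative
 * example: a READ DMA EXT of 16 sectors at LBA 0x12345678 ends up as
 * fis[2] = 0x25, fis[4..6] = 78 56 34, fis[7] = 0x40 (ATA_LBA),
 * fis[8..10] = 12 00 00, fis[12] = 0x10, fis[13] = 0x00.
 */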
142 * ata_tf_from_fis - Convert SATA FIS to ATA taskfile
143 * @fis: Buffer from which data will be input
144 * @tf: Taskfile to output
146 * Converts a serial ATA FIS structure to a standard ATA taskfile.
149 * Inherited from caller.
152 void ata_tf_from_fis(const u8 *fis, struct ata_taskfile *tf)
154 tf->command = fis[2]; /* status */
155 tf->feature = fis[3]; /* error */
162 tf->hob_lbal = fis[8];
163 tf->hob_lbam = fis[9];
164 tf->hob_lbah = fis[10];
167 tf->hob_nsect = fis[13];
170 static const u8 ata_rw_cmds[] = {
174 ATA_CMD_READ_MULTI_EXT,
175 ATA_CMD_WRITE_MULTI_EXT,
179 ATA_CMD_WRITE_MULTI_FUA_EXT,
183 ATA_CMD_PIO_READ_EXT,
184 ATA_CMD_PIO_WRITE_EXT,
197 ATA_CMD_WRITE_FUA_EXT
201 * ata_rwcmd_protocol - set taskfile r/w commands and protocol
202 * @tf: command to examine and configure
203 * @dev: device tf belongs to
205 * Examine the device configuration and tf->flags to calculate
206 * the proper read/write commands and protocol to use.
211 static int ata_rwcmd_protocol(struct ata_taskfile *tf, struct ata_device *dev)
215 int index, fua, lba48, write;
217 fua = (tf->flags & ATA_TFLAG_FUA) ? 4 : 0;
218 lba48 = (tf->flags & ATA_TFLAG_LBA48) ? 2 : 0;
219 write = (tf->flags & ATA_TFLAG_WRITE) ? 1 : 0;
221 if (dev->flags & ATA_DFLAG_PIO) {
222 tf->protocol = ATA_PROT_PIO;
223 index = dev->multi_count ? 0 : 8;
224 } else if (lba48 && (dev->ap->flags & ATA_FLAG_PIO_LBA48)) {
225 /* Unable to use DMA due to host limitation */
226 tf->protocol = ATA_PROT_PIO;
227 index = dev->multi_count ? 0 : 8;
229 tf->protocol = ATA_PROT_DMA;
233 cmd = ata_rw_cmds[index + fua + lba48 + write];
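/*
 * Worked example of the lookup above (the table is organized as three
 * groups of eight entries: multi-sector PIO first, plain PIO next, DMA
 * last, with fua adding 4, lba48 adding 2 and write adding 1 within a
 * group): a non-multi PIO LBA48 write resolves to index 8 + 2 + 1 = 11,
 * i.e. ATA_CMD_PIO_WRITE_EXT, and a DMA LBA48 FUA write resolves to
 * index 16 + 4 + 2 + 1 = 23, i.e. ATA_CMD_WRITE_FUA_EXT, the last entry.
 */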
242 * ata_tf_read_block - Read block address from ATA taskfile
243 * @tf: ATA taskfile of interest
244 * @dev: ATA device @tf belongs to
249 * Read block address from @tf. This function can handle all
250 * three address formats - LBA, LBA48 and CHS. tf->protocol and
251 * flags select the address format to use.
254 * Block address read from @tf.
256 u64 ata_tf_read_block(struct ata_taskfile *tf, struct ata_device *dev)
260 if (tf->flags & ATA_TFLAG_LBA) {
261 if (tf->flags & ATA_TFLAG_LBA48) {
262 block |= (u64)tf->hob_lbah << 40;
263 block |= (u64)tf->hob_lbam << 32;
264 block |= tf->hob_lbal << 24;
266 block |= (tf->device & 0xf) << 24;
268 block |= tf->lbah << 16;
269 block |= tf->lbam << 8;
274 cyl = tf->lbam | (tf->lbah << 8);
275 head = tf->device & 0xf;
278 block = (cyl * dev->heads + head) * dev->sectors + sect;
285 * ata_build_rw_tf - Build ATA taskfile for given read/write request
286 * @tf: Target ATA taskfile
287 * @dev: ATA device @tf belongs to
288 * @block: Block address
289 * @n_block: Number of blocks
290 * @tf_flags: RW/FUA etc...
296 * Build ATA taskfile @tf for read/write request described by
297 * @block, @n_block, @tf_flags and @tag on @dev.
301 * 0 on success, -ERANGE if the request is too large for @dev,
302 * -EINVAL if the request is invalid.
304 int ata_build_rw_tf(struct ata_taskfile *tf, struct ata_device *dev,
305 u64 block, u32 n_block, unsigned int tf_flags,
308 tf->flags |= ATA_TFLAG_ISADDR | ATA_TFLAG_DEVICE;
309 tf->flags |= tf_flags;
311 if ((dev->flags & (ATA_DFLAG_PIO | ATA_DFLAG_NCQ_OFF |
312 ATA_DFLAG_NCQ)) == ATA_DFLAG_NCQ) {
314 if (!lba_48_ok(block, n_block))
317 tf->protocol = ATA_PROT_NCQ;
318 tf->flags |= ATA_TFLAG_LBA | ATA_TFLAG_LBA48;
320 if (tf->flags & ATA_TFLAG_WRITE)
321 tf->command = ATA_CMD_FPDMA_WRITE;
323 tf->command = ATA_CMD_FPDMA_READ;
325 tf->nsect = tag << 3;
326 tf->hob_feature = (n_block >> 8) & 0xff;
327 tf->feature = n_block & 0xff;
329 tf->hob_lbah = (block >> 40) & 0xff;
330 tf->hob_lbam = (block >> 32) & 0xff;
331 tf->hob_lbal = (block >> 24) & 0xff;
332 tf->lbah = (block >> 16) & 0xff;
333 tf->lbam = (block >> 8) & 0xff;
334 tf->lbal = block & 0xff;
337 if (tf->flags & ATA_TFLAG_FUA)
338 tf->device |= 1 << 7;
339 } else if (dev->flags & ATA_DFLAG_LBA) {
340 tf->flags |= ATA_TFLAG_LBA;
342 if (lba_28_ok(block, n_block)) {
344 tf->device |= (block >> 24) & 0xf;
345 } else if (lba_48_ok(block, n_block)) {
346 if (!(dev->flags & ATA_DFLAG_LBA48))
350 tf->flags |= ATA_TFLAG_LBA48;
352 tf->hob_nsect = (n_block >> 8) & 0xff;
354 tf->hob_lbah = (block >> 40) & 0xff;
355 tf->hob_lbam = (block >> 32) & 0xff;
356 tf->hob_lbal = (block >> 24) & 0xff;
358 /* request too large even for LBA48 */
361 if (unlikely(ata_rwcmd_protocol(tf, dev) < 0))
364 tf->nsect = n_block & 0xff;
366 tf->lbah = (block >> 16) & 0xff;
367 tf->lbam = (block >> 8) & 0xff;
368 tf->lbal = block & 0xff;
370 tf->device |= ATA_LBA;
373 u32 sect, head, cyl, track;
375 /* The request -may- be too large for CHS addressing. */
376 if (!lba_28_ok(block, n_block))
379 if (unlikely(ata_rwcmd_protocol(tf, dev) < 0))
382 /* Convert LBA to CHS */
383 track = (u32)block / dev->sectors;
384 cyl = track / dev->heads;
385 head = track % dev->heads;
386 sect = (u32)block % dev->sectors + 1;
388 DPRINTK("block %u track %u cyl %u head %u sect %u\n",
389 (u32)block, track, cyl, head, sect);
391 /* Check whether the converted CHS can fit.
395 if ((cyl >> 16) || (head >> 4) || (sect >> 8) || (!sect))
398 tf->nsect = n_block & 0xff; /* Sector count 0 means 256 sectors */
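/*
 * Two small worked examples for the paths above (numbers illustrative):
 *
 * NCQ: an FPDMA READ with tag 5 for 8 sectors puts the tag in bits 7:3 of
 * the sector count (tf->nsect = 5 << 3 = 0x28) and the transfer length in
 * the feature pair (tf->feature = 0x08, tf->hob_feature = 0x00).
 *
 * CHS: with dev->heads = 16 and dev->sectors = 63, block 10000 converts
 * to track = 10000 / 63 = 158, cyl = 158 / 16 = 9, head = 158 % 16 = 14,
 * sect = 10000 % 63 + 1 = 47, which fits in the 16/4/8 bit
 * cylinder/head/sector fields checked above.
 */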
409 * ata_pack_xfermask - Pack pio, mwdma and udma masks into xfer_mask
410 * @pio_mask: pio_mask
411 * @mwdma_mask: mwdma_mask
412 * @udma_mask: udma_mask
414 * Pack @pio_mask, @mwdma_mask and @udma_mask into a single
415 * unsigned int xfer_mask.
423 static unsigned int ata_pack_xfermask(unsigned int pio_mask,
424 unsigned int mwdma_mask,
425 unsigned int udma_mask)
427 return ((pio_mask << ATA_SHIFT_PIO) & ATA_MASK_PIO) |
428 ((mwdma_mask << ATA_SHIFT_MWDMA) & ATA_MASK_MWDMA) |
429 ((udma_mask << ATA_SHIFT_UDMA) & ATA_MASK_UDMA);
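/*
 * The packed mask simply places the three per-type bitmaps in disjoint
 * bit ranges of one word.  For example, packing pio_mask 0x1f (PIO0-4),
 * mwdma_mask 0x07 (MWDMA0-2) and udma_mask 0x3f (UDMA0-5) yields
 * (0x1f << ATA_SHIFT_PIO) | (0x07 << ATA_SHIFT_MWDMA) |
 * (0x3f << ATA_SHIFT_UDMA), and ata_unpack_xfermask() below recovers the
 * original three masks from that word.
 */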
433 * ata_unpack_xfermask - Unpack xfer_mask into pio, mwdma and udma masks
434 * @xfer_mask: xfer_mask to unpack
435 * @pio_mask: resulting pio_mask
436 * @mwdma_mask: resulting mwdma_mask
437 * @udma_mask: resulting udma_mask
439 * Unpack @xfer_mask into @pio_mask, @mwdma_mask and @udma_mask.
440 * Any NULL destination masks will be ignored.
442 static void ata_unpack_xfermask(unsigned int xfer_mask,
443 unsigned int *pio_mask,
444 unsigned int *mwdma_mask,
445 unsigned int *udma_mask)
448 *pio_mask = (xfer_mask & ATA_MASK_PIO) >> ATA_SHIFT_PIO;
450 *mwdma_mask = (xfer_mask & ATA_MASK_MWDMA) >> ATA_SHIFT_MWDMA;
452 *udma_mask = (xfer_mask & ATA_MASK_UDMA) >> ATA_SHIFT_UDMA;
455 static const struct ata_xfer_ent {
459 { ATA_SHIFT_PIO, ATA_BITS_PIO, XFER_PIO_0 },
460 { ATA_SHIFT_MWDMA, ATA_BITS_MWDMA, XFER_MW_DMA_0 },
461 { ATA_SHIFT_UDMA, ATA_BITS_UDMA, XFER_UDMA_0 },
466 * ata_xfer_mask2mode - Find matching XFER_* for the given xfer_mask
467 * @xfer_mask: xfer_mask of interest
469 * Return matching XFER_* value for @xfer_mask. Only the highest
470 * bit of @xfer_mask is considered.
476 * Matching XFER_* value, 0 if no match found.
478 static u8 ata_xfer_mask2mode(unsigned int xfer_mask)
480 int highbit = fls(xfer_mask) - 1;
481 const struct ata_xfer_ent *ent;
483 for (ent = ata_xfer_tbl; ent->shift >= 0; ent++)
484 if (highbit >= ent->shift && highbit < ent->shift + ent->bits)
485 return ent->base + highbit - ent->shift;
490 * ata_xfer_mode2mask - Find matching xfer_mask for XFER_*
491 * @xfer_mode: XFER_* of interest
493 * Return matching xfer_mask for @xfer_mode.
499 * Matching xfer_mask, 0 if no match found.
501 static unsigned int ata_xfer_mode2mask(u8 xfer_mode)
503 const struct ata_xfer_ent *ent;
505 for (ent = ata_xfer_tbl; ent->shift >= 0; ent++)
506 if (xfer_mode >= ent->base && xfer_mode < ent->base + ent->bits)
507 return 1 << (ent->shift + xfer_mode - ent->base);
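/*
 * ata_xfer_mask2mode() and ata_xfer_mode2mask() are near-inverses: for a
 * single-bit mask the round trip is exact, e.g.
 * ata_xfer_mask2mode(ata_xfer_mode2mask(XFER_UDMA_5)) == XFER_UDMA_5.
 * For a multi-bit mask only the highest bit survives, so a mask covering
 * UDMA0-5 maps to XFER_UDMA_5 (XFER_UDMA_0 + 5).
 */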
512 * ata_xfer_mode2shift - Find matching xfer_shift for XFER_*
513 * @xfer_mode: XFER_* of interest
515 * Return matching xfer_shift for @xfer_mode.
521 * Matching xfer_shift, -1 if no match found.
523 static int ata_xfer_mode2shift(unsigned int xfer_mode)
525 const struct ata_xfer_ent *ent;
527 for (ent = ata_xfer_tbl; ent->shift >= 0; ent++)
528 if (xfer_mode >= ent->base && xfer_mode < ent->base + ent->bits)
534 * ata_mode_string - convert xfer_mask to string
535 * @xfer_mask: mask of bits supported; only highest bit counts.
537 * Determine string which represents the highest speed
538 * (highest bit in @xfer_mask).
544 * Constant C string representing highest speed listed in
545 * @xfer_mask, or the constant C string "<n/a>".
547 static const char *ata_mode_string(unsigned int xfer_mask)
549 static const char * const xfer_mode_str[] = {
573 highbit = fls(xfer_mask) - 1;
574 if (highbit >= 0 && highbit < ARRAY_SIZE(xfer_mode_str))
575 return xfer_mode_str[highbit];
579 static const char *sata_spd_string(unsigned int spd)
581 static const char * const spd_str[] = {
586 if (spd == 0 || (spd - 1) >= ARRAY_SIZE(spd_str))
588 return spd_str[spd - 1];
591 void ata_dev_disable(struct ata_device *dev)
593 if (ata_dev_enabled(dev) && ata_msg_drv(dev->ap)) {
594 ata_dev_printk(dev, KERN_WARNING, "disabled\n");
600 * ata_pio_devchk - PATA device presence detection
601 * @ap: ATA channel to examine
602 * @device: Device to examine (starting at zero)
604 * This technique was originally described in
605 * Hale Landis's ATADRVR (www.ata-atapi.com), and
606 * later found its way into the ATA/ATAPI spec.
608 * Write a pattern to the ATA shadow registers,
609 * and if a device is present, it will respond by
610 * correctly storing and echoing back the
611 * ATA shadow register contents.
617 static unsigned int ata_pio_devchk(struct ata_port *ap,
620 struct ata_ioports *ioaddr = &ap->ioaddr;
623 ap->ops->dev_select(ap, device);
625 outb(0x55, ioaddr->nsect_addr);
626 outb(0xaa, ioaddr->lbal_addr);
628 outb(0xaa, ioaddr->nsect_addr);
629 outb(0x55, ioaddr->lbal_addr);
631 outb(0x55, ioaddr->nsect_addr);
632 outb(0xaa, ioaddr->lbal_addr);
634 nsect = inb(ioaddr->nsect_addr);
635 lbal = inb(ioaddr->lbal_addr);
637 if ((nsect == 0x55) && (lbal == 0xaa))
638 return 1; /* we found a device */
640 return 0; /* nothing found */
644 * ata_mmio_devchk - PATA device presence detection
645 * @ap: ATA channel to examine
646 * @device: Device to examine (starting at zero)
648 * This technique was originally described in
649 * Hale Landis's ATADRVR (www.ata-atapi.com), and
650 * later found its way into the ATA/ATAPI spec.
652 * Write a pattern to the ATA shadow registers,
653 * and if a device is present, it will respond by
654 * correctly storing and echoing back the
655 * ATA shadow register contents.
661 static unsigned int ata_mmio_devchk(struct ata_port *ap,
664 struct ata_ioports *ioaddr = &ap->ioaddr;
667 ap->ops->dev_select(ap, device);
669 writeb(0x55, (void __iomem *) ioaddr->nsect_addr);
670 writeb(0xaa, (void __iomem *) ioaddr->lbal_addr);
672 writeb(0xaa, (void __iomem *) ioaddr->nsect_addr);
673 writeb(0x55, (void __iomem *) ioaddr->lbal_addr);
675 writeb(0x55, (void __iomem *) ioaddr->nsect_addr);
676 writeb(0xaa, (void __iomem *) ioaddr->lbal_addr);
678 nsect = readb((void __iomem *) ioaddr->nsect_addr);
679 lbal = readb((void __iomem *) ioaddr->lbal_addr);
681 if ((nsect == 0x55) && (lbal == 0xaa))
682 return 1; /* we found a device */
684 return 0; /* nothing found */
688 * ata_devchk - PATA device presence detection
689 * @ap: ATA channel to examine
690 * @device: Device to examine (starting at zero)
692 * Dispatch ATA device presence detection, depending
693 * on whether we are using PIO or MMIO to talk to the
694 * ATA shadow registers.
700 static unsigned int ata_devchk(struct ata_port *ap,
703 if (ap->flags & ATA_FLAG_MMIO)
704 return ata_mmio_devchk(ap, device);
705 return ata_pio_devchk(ap, device);
709 * ata_dev_classify - determine device type based on ATA-spec signature
710 * @tf: ATA taskfile register set for device to be identified
712 * Determine from taskfile register contents whether a device is
713 * ATA or ATAPI, as per "Signature and persistence" section
714 * of ATA/PI spec (volume 1, sect 5.14).
720 * Device type, %ATA_DEV_ATA, %ATA_DEV_ATAPI, or %ATA_DEV_UNKNOWN
721 * in the event of failure.
724 unsigned int ata_dev_classify(const struct ata_taskfile *tf)
726 /* Apple's open source Darwin code hints that some devices only
727 * put a proper signature into the LBA mid/high registers,
728 * so we only check those; it's sufficient for uniqueness.
731 if (((tf->lbam == 0) && (tf->lbah == 0)) ||
732 ((tf->lbam == 0x3c) && (tf->lbah == 0xc3))) {
733 DPRINTK("found ATA device by sig\n");
737 if (((tf->lbam == 0x14) && (tf->lbah == 0xeb)) ||
738 ((tf->lbam == 0x69) && (tf->lbah == 0x96))) {
739 DPRINTK("found ATAPI device by sig\n");
740 return ATA_DEV_ATAPI;
743 DPRINTK("unknown device\n");
744 return ATA_DEV_UNKNOWN;
748 * ata_dev_try_classify - Parse returned ATA device signature
749 * @ap: ATA channel to examine
750 * @device: Device to examine (starting at zero)
751 * @r_err: Value of error register on completion
753 * After an event -- SRST, E.D.D., or SATA COMRESET -- occurs,
754 * an ATA/ATAPI-defined set of values is placed in the ATA
755 * shadow registers, indicating the results of device detection
758 * Select the ATA device, and read the values from the ATA shadow
759 * registers. Then parse according to the Error register value,
760 * and the spec-defined values examined by ata_dev_classify().
766 * Device type - %ATA_DEV_ATA, %ATA_DEV_ATAPI or %ATA_DEV_NONE.
770 ata_dev_try_classify(struct ata_port *ap, unsigned int device, u8 *r_err)
772 struct ata_taskfile tf;
776 ap->ops->dev_select(ap, device);
778 memset(&tf, 0, sizeof(tf));
780 ap->ops->tf_read(ap, &tf);
785 /* see if device passed diags: if master then continue and warn later */
786 if (err == 0 && device == 0)
787 /* diagnostic fail : do nothing _YET_ */
788 ap->device[device].horkage |= ATA_HORKAGE_DIAGNOSTIC;
791 else if ((device == 0) && (err == 0x81))
796 /* determine if device is ATA or ATAPI */
797 class = ata_dev_classify(&tf);
799 if (class == ATA_DEV_UNKNOWN)
801 if ((class == ATA_DEV_ATA) && (ata_chk_status(ap) == 0))
807 * ata_id_string - Convert IDENTIFY DEVICE page into string
808 * @id: IDENTIFY DEVICE results we will examine
809 * @s: string into which data is output
810 * @ofs: offset into identify device page
811 * @len: length of string to return. must be an even number.
813 * The strings in the IDENTIFY DEVICE page are broken up into
814 * 16-bit chunks. Run through the string, and output each
815 * 8-bit chunk linearly, regardless of platform.
821 void ata_id_string(const u16 *id, unsigned char *s,
822 unsigned int ofs, unsigned int len)
841 * ata_id_c_string - Convert IDENTIFY DEVICE page into C string
842 * @id: IDENTIFY DEVICE results we will examine
843 * @s: string into which data is output
844 * @ofs: offset into identify device page
845 * @len: length of string to return. must be an odd number.
847 * This function is identical to ata_id_string except that it
848 * trims trailing spaces and terminates the resulting string with
849 * null. @len must be actual maximum length (even number) + 1.
854 void ata_id_c_string(const u16 *id, unsigned char *s,
855 unsigned int ofs, unsigned int len)
861 ata_id_string(id, s, ofs, len - 1);
863 p = s + strnlen(s, len - 1);
864 while (p > s && p[-1] == ' ')
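/*
 * IDENTIFY string fields pack two ASCII characters per 16-bit word with
 * the earlier character in the high byte, so a model string beginning
 * with "WD" shows up as a word of value 0x5744 ('W' = 0x57, 'D' = 0x44).
 * The helpers above unpack the words high byte first and, for the
 * C-string variant, trim trailing spaces as shown.
 */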
869 static u64 ata_id_n_sectors(const u16 *id)
871 if (ata_id_has_lba(id)) {
872 if (ata_id_has_lba48(id))
873 return ata_id_u64(id, 100);
875 return ata_id_u32(id, 60);
877 if (ata_id_current_chs_valid(id))
878 return ata_id_u32(id, 57);
880 return id[1] * id[3] * id[6];
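/*
 * For reference, the IDENTIFY words used above are: 100-103 = maximum
 * LBA48 sector count, 60-61 = total LBA28 addressable sectors, 57-58 =
 * current capacity in sectors, and 1/3/6 = default cylinders, heads and
 * sectors per track.  E.g. a legacy drive reporting 16383/16/63 falls
 * through to the last line and yields 16383 * 16 * 63 = 16,514,064
 * sectors.
 */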
885 * ata_noop_dev_select - Select device 0/1 on ATA bus
886 * @ap: ATA channel to manipulate
887 * @device: ATA device (numbered from zero) to select
889 * This function performs no actual device selection; it is a no-op.
891 * May be used as the dev_select() entry in ata_port_operations.
896 void ata_noop_dev_select (struct ata_port *ap, unsigned int device)
902 * ata_std_dev_select - Select device 0/1 on ATA bus
903 * @ap: ATA channel to manipulate
904 * @device: ATA device (numbered from zero) to select
906 * Use the method defined in the ATA specification to
907 * make either device 0, or device 1, active on the
908 * ATA channel. Works with both PIO and MMIO.
910 * May be used as the dev_select() entry in ata_port_operations.
916 void ata_std_dev_select (struct ata_port *ap, unsigned int device)
921 tmp = ATA_DEVICE_OBS;
923 tmp = ATA_DEVICE_OBS | ATA_DEV1;
925 if (ap->flags & ATA_FLAG_MMIO) {
926 writeb(tmp, (void __iomem *) ap->ioaddr.device_addr);
928 outb(tmp, ap->ioaddr.device_addr);
930 ata_pause(ap); /* needed; also flushes, for mmio */
934 * ata_dev_select - Select device 0/1 on ATA bus
935 * @ap: ATA channel to manipulate
936 * @device: ATA device (numbered from zero) to select
937 * @wait: non-zero to wait for Status register BSY bit to clear
938 * @can_sleep: non-zero if context allows sleeping
940 * Use the method defined in the ATA specification to
941 * make either device 0, or device 1, active on the
944 * This is a high-level version of ata_std_dev_select(),
945 * which additionally provides the services of inserting
946 * the proper pauses and status polling, where needed.
952 void ata_dev_select(struct ata_port *ap, unsigned int device,
953 unsigned int wait, unsigned int can_sleep)
955 if (ata_msg_probe(ap))
956 ata_port_printk(ap, KERN_INFO, "ata_dev_select: ENTER, ata%u: "
957 "device %u, wait %u\n", ap->id, device, wait);
962 ap->ops->dev_select(ap, device);
965 if (can_sleep && ap->device[device].class == ATA_DEV_ATAPI)
972 * ata_dump_id - IDENTIFY DEVICE info debugging output
973 * @id: IDENTIFY DEVICE page to dump
975 * Dump selected 16-bit words from the given IDENTIFY DEVICE
982 static inline void ata_dump_id(const u16 *id)
984 DPRINTK("49==0x%04x "
994 DPRINTK("80==0x%04x "
1004 DPRINTK("88==0x%04x "
1011 * ata_id_xfermask - Compute xfermask from the given IDENTIFY data
1012 * @id: IDENTIFY data to compute xfer mask from
1014 * Compute the xfermask for this device. This is not as trivial
1015 * as it seems if we must consider early devices correctly.
1017 * FIXME: pre IDE drive timing (do we care ?).
1025 static unsigned int ata_id_xfermask(const u16 *id)
1027 unsigned int pio_mask, mwdma_mask, udma_mask;
1029 /* Usual case. Word 53 indicates word 64 is valid */
1030 if (id[ATA_ID_FIELD_VALID] & (1 << 1)) {
1031 pio_mask = id[ATA_ID_PIO_MODES] & 0x03;
1035 /* If word 64 isn't valid then Word 51 high byte holds
1036 * the PIO timing number for the maximum. Turn it into
1039 u8 mode = id[ATA_ID_OLD_PIO_MODES] & 0xFF;
1040 if (mode < 5) /* Valid PIO range */
1041 pio_mask = (2 << mode) - 1;
1045 /* But wait.. there's more. Design your standards by
1046 * committee and you too can get a free iordy field to
1047 * process. However it's the speeds, not the modes, that
1048 * are supported... Note drivers using the timing API
1049 * will get this right anyway
1053 mwdma_mask = id[ATA_ID_MWDMA_MODES] & 0x07;
1055 if (ata_id_is_cfa(id)) {
1057 * Process compact flash extended modes
1059 int pio = id[163] & 0x7;
1060 int dma = (id[163] >> 3) & 7;
1063 pio_mask |= (1 << 5);
1065 pio_mask |= (1 << 6);
1067 mwdma_mask |= (1 << 3);
1069 mwdma_mask |= (1 << 4);
1073 if (id[ATA_ID_FIELD_VALID] & (1 << 2))
1074 udma_mask = id[ATA_ID_UDMA_MODES] & 0xff;
1076 return ata_pack_xfermask(pio_mask, mwdma_mask, udma_mask);
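/*
 * Decoding example for the words consulted above (illustrative values):
 * word 63 = 0x0007 advertises MWDMA0-2, word 64 = 0x0003 advertises
 * PIO3/PIO4 on top of PIO0-2 (which every device is assumed to support),
 * and word 88 low byte = 0x3f advertises UDMA0-5, so the packed result
 * describes PIO0-4, MWDMA0-2 and UDMA0-5.
 */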
1080 * ata_port_queue_task - Queue port_task
1081 * @ap: The ata_port to queue port_task for
1082 * @fn: workqueue function to be scheduled
1083 * @data: data value to pass to workqueue function
1084 * @delay: delay time for workqueue function
1086 * Schedule @fn(@data) for execution after @delay jiffies using
1087 * port_task. There is one port_task per port and it's the
1088 * user's (low level driver's) responsibility to make sure that only
1089 * one task is active at any given time.
1091 * libata core layer takes care of synchronization between
1092 * port_task and EH. ata_port_queue_task() may be ignored for EH
1096 * Inherited from caller.
1098 void ata_port_queue_task(struct ata_port *ap, void (*fn)(void *), void *data,
1099 unsigned long delay)
1103 if (ap->pflags & ATA_PFLAG_FLUSH_PORT_TASK)
1106 PREPARE_WORK(&ap->port_task, fn, data);
1109 rc = queue_work(ata_wq, &ap->port_task);
1111 rc = queue_delayed_work(ata_wq, &ap->port_task, delay);
1113 /* rc == 0 means that another user is using port task */
1118 * ata_port_flush_task - Flush port_task
1119 * @ap: The ata_port to flush port_task for
1121 * After this function completes, port_task is guaranteed not to
1122 * be running or scheduled.
1125 * Kernel thread context (may sleep)
1127 void ata_port_flush_task(struct ata_port *ap)
1129 unsigned long flags;
1133 spin_lock_irqsave(ap->lock, flags);
1134 ap->pflags |= ATA_PFLAG_FLUSH_PORT_TASK;
1135 spin_unlock_irqrestore(ap->lock, flags);
1137 DPRINTK("flush #1\n");
1138 flush_workqueue(ata_wq);
1141 * At this point, if a task is running, it's guaranteed to see
1142 * the FLUSH flag; thus, it will never queue pio tasks again.
1145 if (!cancel_delayed_work(&ap->port_task)) {
1146 if (ata_msg_ctl(ap))
1147 ata_port_printk(ap, KERN_DEBUG, "%s: flush #2\n",
1149 flush_workqueue(ata_wq);
1152 spin_lock_irqsave(ap->lock, flags);
1153 ap->pflags &= ~ATA_PFLAG_FLUSH_PORT_TASK;
1154 spin_unlock_irqrestore(ap->lock, flags);
1156 if (ata_msg_ctl(ap))
1157 ata_port_printk(ap, KERN_DEBUG, "%s: EXIT\n", __FUNCTION__);
1160 void ata_qc_complete_internal(struct ata_queued_cmd *qc)
1162 struct completion *waiting = qc->private_data;
1168 * ata_exec_internal_sg - execute libata internal command
1169 * @dev: Device to which the command is sent
1170 * @tf: Taskfile registers for the command and the result
1171 * @cdb: CDB for packet command
1172 * @dma_dir: Data transfer direction of the command
1173 * @sg: sg list for the data buffer of the command
1174 * @n_elem: Number of sg entries
1176 * Executes libata internal command with timeout. @tf contains
1177 * command on entry and result on return. Timeout and error
1178 * conditions are reported via return value. No recovery action
1179 * is taken after a command times out. It's the caller's duty to
1180 * clean up after timeout.
1183 * None. Should be called with kernel context, might sleep.
1186 * Zero on success, AC_ERR_* mask on failure
1188 unsigned ata_exec_internal_sg(struct ata_device *dev,
1189 struct ata_taskfile *tf, const u8 *cdb,
1190 int dma_dir, struct scatterlist *sg,
1191 unsigned int n_elem)
1193 struct ata_port *ap = dev->ap;
1194 u8 command = tf->command;
1195 struct ata_queued_cmd *qc;
1196 unsigned int tag, preempted_tag;
1197 u32 preempted_sactive, preempted_qc_active;
1198 DECLARE_COMPLETION_ONSTACK(wait);
1199 unsigned long flags;
1200 unsigned int err_mask;
1203 spin_lock_irqsave(ap->lock, flags);
1205 /* no internal command while frozen */
1206 if (ap->pflags & ATA_PFLAG_FROZEN) {
1207 spin_unlock_irqrestore(ap->lock, flags);
1208 return AC_ERR_SYSTEM;
1211 /* initialize internal qc */
1213 /* XXX: Tag 0 is used for drivers with legacy EH as some
1214 * drivers choke if any other tag is given. This breaks
1215 * ata_tag_internal() test for those drivers. Don't use new
1216 * EH stuff without converting to it.
1218 if (ap->ops->error_handler)
1219 tag = ATA_TAG_INTERNAL;
1223 if (test_and_set_bit(tag, &ap->qc_allocated))
1225 qc = __ata_qc_from_tag(ap, tag);
1233 preempted_tag = ap->active_tag;
1234 preempted_sactive = ap->sactive;
1235 preempted_qc_active = ap->qc_active;
1236 ap->active_tag = ATA_TAG_POISON;
1240 /* prepare & issue qc */
1243 memcpy(qc->cdb, cdb, ATAPI_CDB_LEN);
1244 qc->flags |= ATA_QCFLAG_RESULT_TF;
1245 qc->dma_dir = dma_dir;
1246 if (dma_dir != DMA_NONE) {
1247 unsigned int i, buflen = 0;
1249 for (i = 0; i < n_elem; i++)
1250 buflen += sg[i].length;
1252 ata_sg_init(qc, sg, n_elem);
1253 qc->nsect = buflen / ATA_SECT_SIZE;
1256 qc->private_data = &wait;
1257 qc->complete_fn = ata_qc_complete_internal;
1261 spin_unlock_irqrestore(ap->lock, flags);
1263 rc = wait_for_completion_timeout(&wait, ata_probe_timeout);
1265 ata_port_flush_task(ap);
1268 spin_lock_irqsave(ap->lock, flags);
1270 /* We're racing with irq here. If we lose, the
1271 * following test prevents us from completing the qc
1272 * twice. If we win, the port is frozen and will be
1273 * cleaned up by ->post_internal_cmd().
1275 if (qc->flags & ATA_QCFLAG_ACTIVE) {
1276 qc->err_mask |= AC_ERR_TIMEOUT;
1278 if (ap->ops->error_handler)
1279 ata_port_freeze(ap);
1281 ata_qc_complete(qc);
1283 if (ata_msg_warn(ap))
1284 ata_dev_printk(dev, KERN_WARNING,
1285 "qc timeout (cmd 0x%x)\n", command);
1288 spin_unlock_irqrestore(ap->lock, flags);
1291 /* do post_internal_cmd */
1292 if (ap->ops->post_internal_cmd)
1293 ap->ops->post_internal_cmd(qc);
1295 if (qc->flags & ATA_QCFLAG_FAILED && !qc->err_mask) {
1296 if (ata_msg_warn(ap))
1297 ata_dev_printk(dev, KERN_WARNING,
1298 "zero err_mask for failed "
1299 "internal command, assuming AC_ERR_OTHER\n");
1300 qc->err_mask |= AC_ERR_OTHER;
1304 spin_lock_irqsave(ap->lock, flags);
1306 *tf = qc->result_tf;
1307 err_mask = qc->err_mask;
1310 ap->active_tag = preempted_tag;
1311 ap->sactive = preempted_sactive;
1312 ap->qc_active = preempted_qc_active;
1314 /* XXX - Some LLDDs (sata_mv) disable port on command failure.
1315 * Until those drivers are fixed, we detect the condition
1316 * here, fail the command with AC_ERR_SYSTEM and reenable the
1319 * Note that this doesn't change any behavior as internal
1320 * command failure results in disabling the device in the
1321 * higher layer for LLDDs without new reset/EH callbacks.
1323 * Kill the following code as soon as those drivers are fixed.
1325 if (ap->flags & ATA_FLAG_DISABLED) {
1326 err_mask |= AC_ERR_SYSTEM;
1330 spin_unlock_irqrestore(ap->lock, flags);
1336 * ata_exec_internal - execute libata internal command
1337 * @dev: Device to which the command is sent
1338 * @tf: Taskfile registers for the command and the result
1339 * @cdb: CDB for packet command
1340 * @dma_dir: Data transfer direction of the command
1341 * @buf: Data buffer of the command
1342 * @buflen: Length of data buffer
1344 * Wrapper around ata_exec_internal_sg() which takes simple
1345 * buffer instead of sg list.
1348 * None. Should be called with kernel context, might sleep.
1351 * Zero on success, AC_ERR_* mask on failure
1353 unsigned ata_exec_internal(struct ata_device *dev,
1354 struct ata_taskfile *tf, const u8 *cdb,
1355 int dma_dir, void *buf, unsigned int buflen)
1357 struct scatterlist sg;
1359 sg_init_one(&sg, buf, buflen);
1361 return ata_exec_internal_sg(dev, tf, cdb, dma_dir, &sg, 1);
1365 * ata_do_simple_cmd - execute simple internal command
1366 * @dev: Device to which the command is sent
1367 * @cmd: Opcode to execute
1369 * Execute a 'simple' command, that only consists of the opcode
1370 * 'cmd' itself, without filling any other registers
1373 * Kernel thread context (may sleep).
1376 * Zero on success, AC_ERR_* mask on failure
1378 unsigned int ata_do_simple_cmd(struct ata_device *dev, u8 cmd)
1380 struct ata_taskfile tf;
1382 ata_tf_init(dev, &tf);
1385 tf.flags |= ATA_TFLAG_DEVICE;
1386 tf.protocol = ATA_PROT_NODATA;
1388 return ata_exec_internal(dev, &tf, NULL, DMA_NONE, NULL, 0);
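/*
 * Typical use is a one-liner (sketch only), e.g. issuing STANDBY
 * IMMEDIATE from a power-management style path:
 *
 *	err_mask = ata_do_simple_cmd(dev, ATA_CMD_STANDBYNOW1);
 *
 * where ATA_CMD_STANDBYNOW1 is the opcode constant from <linux/ata.h>.
 */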
1392 * ata_pio_need_iordy - check if iordy needed
1395 * Check if the current speed of the device requires IORDY. Used
1396 * by various controllers for chip configuration.
1399 unsigned int ata_pio_need_iordy(const struct ata_device *adev)
1402 int speed = adev->pio_mode - XFER_PIO_0;
1409 /* If we have no drive specific rule, then PIO 2 is non IORDY */
1411 if (adev->id[ATA_ID_FIELD_VALID] & 2) { /* EIDE */
1412 pio = adev->id[ATA_ID_EIDE_PIO];
1413 /* Is the speed faster than the drive allows without IORDY? */
1415 /* This is cycle times not frequency - watch the logic! */
1416 if (pio > 240) /* PIO2 is 240 ns per cycle */
1425 * ata_dev_read_id - Read ID data from the specified device
1426 * @dev: target device
1427 * @p_class: pointer to class of the target device (may be changed)
1428 * @flags: ATA_READID_* flags
1429 * @id: buffer to read IDENTIFY data into
1431 * Read ID data from the specified device. ATA_CMD_ID_ATA is
1432 * performed on ATA devices and ATA_CMD_ID_ATAPI on ATAPI
1433 * devices. This function also issues ATA_CMD_INIT_DEV_PARAMS
1434 * for pre-ATA4 drives.
1437 * Kernel thread context (may sleep)
1440 * 0 on success, -errno otherwise.
1442 int ata_dev_read_id(struct ata_device *dev, unsigned int *p_class,
1443 unsigned int flags, u16 *id)
1445 struct ata_port *ap = dev->ap;
1446 unsigned int class = *p_class;
1447 struct ata_taskfile tf;
1448 unsigned int err_mask = 0;
1452 if (ata_msg_ctl(ap))
1453 ata_dev_printk(dev, KERN_DEBUG, "%s: ENTER, host %u, dev %u\n",
1454 __FUNCTION__, ap->id, dev->devno);
1456 ata_dev_select(ap, dev->devno, 1, 1); /* select device 0/1 */
1459 ata_tf_init(dev, &tf);
1463 tf.command = ATA_CMD_ID_ATA;
1466 tf.command = ATA_CMD_ID_ATAPI;
1470 reason = "unsupported class";
1474 tf.protocol = ATA_PROT_PIO;
1476 /* presence detection using polling IDENTIFY? */
1477 if (flags & ATA_READID_DETECT)
1478 tf.flags |= ATA_TFLAG_POLLING;
1480 err_mask = ata_exec_internal(dev, &tf, NULL, DMA_FROM_DEVICE,
1481 id, sizeof(id[0]) * ATA_ID_WORDS);
1483 if ((flags & ATA_READID_DETECT) &&
1484 (err_mask & AC_ERR_NODEV_HINT)) {
1485 DPRINTK("ata%u.%d: NODEV after polling detection\n",
1486 ap->id, dev->devno);
1491 reason = "I/O error";
1495 swap_buf_le16(id, ATA_ID_WORDS);
1499 reason = "device reports illegal type";
1501 if (class == ATA_DEV_ATA) {
1502 if (!ata_id_is_ata(id) && !ata_id_is_cfa(id))
1505 if (ata_id_is_ata(id))
1509 if ((flags & ATA_READID_POSTRESET) && class == ATA_DEV_ATA) {
1511 * The exact sequence expected by certain pre-ATA4 drives is:
1514 * INITIALIZE DEVICE PARAMETERS
1516 * Some drives were very specific about that exact sequence.
1518 if (ata_id_major_version(id) < 4 || !ata_id_has_lba(id)) {
1519 err_mask = ata_dev_init_params(dev, id[3], id[6]);
1522 reason = "INIT_DEV_PARAMS failed";
1526 /* current CHS translation info (id[53-58]) might be
1527 * changed. Reread the identify device info.
1529 flags &= ~ATA_READID_POSTRESET;
1539 if (ata_msg_warn(ap))
1540 ata_dev_printk(dev, KERN_WARNING, "failed to IDENTIFY "
1541 "(%s, err_mask=0x%x)\n", reason, err_mask);
1545 static inline u8 ata_dev_knobble(struct ata_device *dev)
1547 return ((dev->ap->cbl == ATA_CBL_SATA) && (!ata_id_is_sata(dev->id)));
1550 static void ata_dev_config_ncq(struct ata_device *dev,
1551 char *desc, size_t desc_sz)
1553 struct ata_port *ap = dev->ap;
1554 int hdepth = 0, ddepth = ata_id_queue_depth(dev->id);
1556 if (!ata_id_has_ncq(dev->id)) {
1560 if (ata_device_blacklisted(dev) & ATA_HORKAGE_NONCQ) {
1561 snprintf(desc, desc_sz, "NCQ (not used)");
1564 if (ap->flags & ATA_FLAG_NCQ) {
1565 hdepth = min(ap->scsi_host->can_queue, ATA_MAX_QUEUE - 1);
1566 dev->flags |= ATA_DFLAG_NCQ;
1569 if (hdepth >= ddepth)
1570 snprintf(desc, desc_sz, "NCQ (depth %d)", ddepth);
1572 snprintf(desc, desc_sz, "NCQ (depth %d/%d)", hdepth, ddepth);
1575 static void ata_set_port_max_cmd_len(struct ata_port *ap)
1579 if (ap->scsi_host) {
1580 unsigned int len = 0;
1582 for (i = 0; i < ATA_MAX_DEVICES; i++)
1583 len = max(len, ap->device[i].cdb_len);
1585 ap->scsi_host->max_cmd_len = len;
1590 * ata_dev_configure - Configure the specified ATA/ATAPI device
1591 * @dev: Target device to configure
1593 * Configure @dev according to @dev->id. Generic and low-level
1594 * driver specific fixups are also applied.
1597 * Kernel thread context (may sleep)
1600 * 0 on success, -errno otherwise
1602 int ata_dev_configure(struct ata_device *dev)
1604 struct ata_port *ap = dev->ap;
1605 int print_info = ap->eh_context.i.flags & ATA_EHI_PRINTINFO;
1606 const u16 *id = dev->id;
1607 unsigned int xfer_mask;
1608 char revbuf[7]; /* XYZ-99\0 */
1611 if (!ata_dev_enabled(dev) && ata_msg_info(ap)) {
1612 ata_dev_printk(dev, KERN_INFO,
1613 "%s: ENTER/EXIT (host %u, dev %u) -- nodev\n",
1614 __FUNCTION__, ap->id, dev->devno);
1618 if (ata_msg_probe(ap))
1619 ata_dev_printk(dev, KERN_DEBUG, "%s: ENTER, host %u, dev %u\n",
1620 __FUNCTION__, ap->id, dev->devno);
1622 /* print device capabilities */
1623 if (ata_msg_probe(ap))
1624 ata_dev_printk(dev, KERN_DEBUG,
1625 "%s: cfg 49:%04x 82:%04x 83:%04x 84:%04x "
1626 "85:%04x 86:%04x 87:%04x 88:%04x\n",
1628 id[49], id[82], id[83], id[84],
1629 id[85], id[86], id[87], id[88]);
1631 /* initialize to-be-configured parameters */
1632 dev->flags &= ~ATA_DFLAG_CFG_MASK;
1633 dev->max_sectors = 0;
1641 * common ATA, ATAPI feature tests
1644 /* find max transfer mode; for printk only */
1645 xfer_mask = ata_id_xfermask(id);
1647 if (ata_msg_probe(ap))
1650 /* ATA-specific feature tests */
1651 if (dev->class == ATA_DEV_ATA) {
1652 if (ata_id_is_cfa(id)) {
1653 if (id[162] & 1) /* CPRM may make this media unusable */
1654 ata_dev_printk(dev, KERN_WARNING, "ata%u: device %u supports DRM functions and may not be fully accessible.\n",
1655 ap->id, dev->devno);
1656 snprintf(revbuf, 7, "CFA");
1659 snprintf(revbuf, 7, "ATA-%d", ata_id_major_version(id));
1661 dev->n_sectors = ata_id_n_sectors(id);
1663 if (ata_id_has_lba(id)) {
1664 const char *lba_desc;
1668 dev->flags |= ATA_DFLAG_LBA;
1669 if (ata_id_has_lba48(id)) {
1670 dev->flags |= ATA_DFLAG_LBA48;
1673 if (dev->n_sectors >= (1UL << 28) &&
1674 ata_id_has_flush_ext(id))
1675 dev->flags |= ATA_DFLAG_FLUSH_EXT;
1679 ata_dev_config_ncq(dev, ncq_desc, sizeof(ncq_desc));
1681 /* print device info to dmesg */
1682 if (ata_msg_drv(ap) && print_info)
1683 ata_dev_printk(dev, KERN_INFO, "%s, "
1684 "max %s, %Lu sectors: %s %s\n",
1686 ata_mode_string(xfer_mask),
1687 (unsigned long long)dev->n_sectors,
1688 lba_desc, ncq_desc);
1692 /* Default translation */
1693 dev->cylinders = id[1];
1695 dev->sectors = id[6];
1697 if (ata_id_current_chs_valid(id)) {
1698 /* Current CHS translation is valid. */
1699 dev->cylinders = id[54];
1700 dev->heads = id[55];
1701 dev->sectors = id[56];
1704 /* print device info to dmesg */
1705 if (ata_msg_drv(ap) && print_info)
1706 ata_dev_printk(dev, KERN_INFO, "%s, "
1707 "max %s, %Lu sectors: CHS %u/%u/%u\n",
1709 ata_mode_string(xfer_mask),
1710 (unsigned long long)dev->n_sectors,
1711 dev->cylinders, dev->heads,
1715 if (dev->id[59] & 0x100) {
1716 dev->multi_count = dev->id[59] & 0xff;
1717 if (ata_msg_drv(ap) && print_info)
1718 ata_dev_printk(dev, KERN_INFO,
1719 "ata%u: dev %u multi count %u\n",
1720 ap->id, dev->devno, dev->multi_count);
1726 /* ATAPI-specific feature tests */
1727 else if (dev->class == ATA_DEV_ATAPI) {
1728 char *cdb_intr_string = "";
1730 rc = atapi_cdb_len(id);
1731 if ((rc < 12) || (rc > ATAPI_CDB_LEN)) {
1732 if (ata_msg_warn(ap))
1733 ata_dev_printk(dev, KERN_WARNING,
1734 "unsupported CDB len\n");
1738 dev->cdb_len = (unsigned int) rc;
1740 if (ata_id_cdb_intr(dev->id)) {
1741 dev->flags |= ATA_DFLAG_CDB_INTR;
1742 cdb_intr_string = ", CDB intr";
1745 /* print device info to dmesg */
1746 if (ata_msg_drv(ap) && print_info)
1747 ata_dev_printk(dev, KERN_INFO, "ATAPI, max %s%s\n",
1748 ata_mode_string(xfer_mask),
1752 /* determine max_sectors */
1753 dev->max_sectors = ATA_MAX_SECTORS;
1754 if (dev->flags & ATA_DFLAG_LBA48)
1755 dev->max_sectors = ATA_MAX_SECTORS_LBA48;
1757 if (dev->horkage & ATA_HORKAGE_DIAGNOSTIC) {
1758 /* Let the user know. We don't want to disallow opens for
1759 rescue purposes, or in case the vendor is just a blithering
1762 ata_dev_printk(dev, KERN_WARNING,
1763 "Drive reports diagnostics failure. This may indicate a drive\n");
1764 ata_dev_printk(dev, KERN_WARNING,
1765 "fault or invalid emulation. Contact drive vendor for information.\n");
1769 ata_set_port_max_cmd_len(ap);
1771 /* limit bridge transfers to udma5, 200 sectors */
1772 if (ata_dev_knobble(dev)) {
1773 if (ata_msg_drv(ap) && print_info)
1774 ata_dev_printk(dev, KERN_INFO,
1775 "applying bridge limits\n");
1776 dev->udma_mask &= ATA_UDMA5;
1777 dev->max_sectors = ATA_MAX_SECTORS;
1780 if (ap->ops->dev_config)
1781 ap->ops->dev_config(ap, dev);
1783 if (ata_msg_probe(ap))
1784 ata_dev_printk(dev, KERN_DEBUG, "%s: EXIT, drv_stat = 0x%x\n",
1785 __FUNCTION__, ata_chk_status(ap));
1789 if (ata_msg_probe(ap))
1790 ata_dev_printk(dev, KERN_DEBUG,
1791 "%s: EXIT, err\n", __FUNCTION__);
1796 * ata_bus_probe - Reset and probe ATA bus
1799 * Master ATA bus probing function. Initiates a hardware-dependent
1800 * bus reset, then attempts to identify any devices found on
1804 * PCI/etc. bus probe sem.
1807 * Zero on success, negative errno otherwise.
1810 int ata_bus_probe(struct ata_port *ap)
1812 unsigned int classes[ATA_MAX_DEVICES];
1813 int tries[ATA_MAX_DEVICES];
1814 int i, rc, down_xfermask;
1815 struct ata_device *dev;
1819 for (i = 0; i < ATA_MAX_DEVICES; i++)
1820 tries[i] = ATA_PROBE_MAX_TRIES;
1825 /* reset and determine device classes */
1826 ap->ops->phy_reset(ap);
1828 for (i = 0; i < ATA_MAX_DEVICES; i++) {
1829 dev = &ap->device[i];
1831 if (!(ap->flags & ATA_FLAG_DISABLED) &&
1832 dev->class != ATA_DEV_UNKNOWN)
1833 classes[dev->devno] = dev->class;
1835 classes[dev->devno] = ATA_DEV_NONE;
1837 dev->class = ATA_DEV_UNKNOWN;
1842 /* after the reset the device state is PIO 0 and the controller
1843 state is undefined. Record the mode */
1845 for (i = 0; i < ATA_MAX_DEVICES; i++)
1846 ap->device[i].pio_mode = XFER_PIO_0;
1848 /* read IDENTIFY page and configure devices */
1849 for (i = 0; i < ATA_MAX_DEVICES; i++) {
1850 dev = &ap->device[i];
1853 dev->class = classes[i];
1855 if (!ata_dev_enabled(dev))
1858 rc = ata_dev_read_id(dev, &dev->class, ATA_READID_POSTRESET,
1863 ap->eh_context.i.flags |= ATA_EHI_PRINTINFO;
1864 rc = ata_dev_configure(dev);
1865 ap->eh_context.i.flags &= ~ATA_EHI_PRINTINFO;
1870 /* configure transfer mode */
1871 rc = ata_set_mode(ap, &dev);
1877 for (i = 0; i < ATA_MAX_DEVICES; i++)
1878 if (ata_dev_enabled(&ap->device[i]))
1881 /* no device present, disable port */
1882 ata_port_disable(ap);
1883 ap->ops->port_disable(ap);
1890 tries[dev->devno] = 0;
1893 sata_down_spd_limit(ap);
1896 tries[dev->devno]--;
1897 if (down_xfermask &&
1898 ata_down_xfermask_limit(dev, tries[dev->devno] == 1))
1899 tries[dev->devno] = 0;
1902 if (!tries[dev->devno]) {
1903 ata_down_xfermask_limit(dev, 1);
1904 ata_dev_disable(dev);
1911 * ata_port_probe - Mark port as enabled
1912 * @ap: Port for which we indicate enablement
1914 * Modify @ap data structure such that the system
1915 * thinks that the entire port is enabled.
1917 * LOCKING: host lock, or some other form of
1921 void ata_port_probe(struct ata_port *ap)
1923 ap->flags &= ~ATA_FLAG_DISABLED;
1927 * sata_print_link_status - Print SATA link status
1928 * @ap: SATA port to printk link status about
1930 * This function prints link speed and status of a SATA link.
1935 static void sata_print_link_status(struct ata_port *ap)
1937 u32 sstatus, scontrol, tmp;
1939 if (sata_scr_read(ap, SCR_STATUS, &sstatus))
1941 sata_scr_read(ap, SCR_CONTROL, &scontrol);
1943 if (ata_port_online(ap)) {
1944 tmp = (sstatus >> 4) & 0xf;
1945 ata_port_printk(ap, KERN_INFO,
1946 "SATA link up %s (SStatus %X SControl %X)\n",
1947 sata_spd_string(tmp), sstatus, scontrol);
1949 ata_port_printk(ap, KERN_INFO,
1950 "SATA link down (SStatus %X SControl %X)\n",
1956 * __sata_phy_reset - Wake/reset a low-level SATA PHY
1957 * @ap: SATA port associated with target SATA PHY.
1959 * This function issues commands to standard SATA Sxxx
1960 * PHY registers, to wake up the phy (and device), and
1961 * clear any reset condition.
1964 * PCI/etc. bus probe sem.
1967 void __sata_phy_reset(struct ata_port *ap)
1970 unsigned long timeout = jiffies + (HZ * 5);
1972 if (ap->flags & ATA_FLAG_SATA_RESET) {
1973 /* issue phy wake/reset */
1974 sata_scr_write_flush(ap, SCR_CONTROL, 0x301);
1975 /* Couldn't find anything in SATA I/II specs, but
1976 * AHCI-1.1 10.4.2 says at least 1 ms. */
1979 /* phy wake/clear reset */
1980 sata_scr_write_flush(ap, SCR_CONTROL, 0x300);
1982 /* wait for phy to become ready, if necessary */
1985 sata_scr_read(ap, SCR_STATUS, &sstatus);
1986 if ((sstatus & 0xf) != 1)
1988 } while (time_before(jiffies, timeout));
1990 /* print link status */
1991 sata_print_link_status(ap);
1993 /* TODO: phy layer with polling, timeouts, etc. */
1994 if (!ata_port_offline(ap))
1997 ata_port_disable(ap);
1999 if (ap->flags & ATA_FLAG_DISABLED)
2002 if (ata_busy_sleep(ap, ATA_TMOUT_BOOT_QUICK, ATA_TMOUT_BOOT)) {
2003 ata_port_disable(ap);
2007 ap->cbl = ATA_CBL_SATA;
2011 * sata_phy_reset - Reset SATA bus.
2012 * @ap: SATA port associated with target SATA PHY.
2014 * This function resets the SATA bus, and then probes
2015 * the bus for devices.
2018 * PCI/etc. bus probe sem.
2021 void sata_phy_reset(struct ata_port *ap)
2023 __sata_phy_reset(ap);
2024 if (ap->flags & ATA_FLAG_DISABLED)
2030 * ata_dev_pair - return other device on cable
2033 * Obtain the other device on the same cable; if none is
2034 * present, NULL is returned.
2037 struct ata_device *ata_dev_pair(struct ata_device *adev)
2039 struct ata_port *ap = adev->ap;
2040 struct ata_device *pair = &ap->device[1 - adev->devno];
2041 if (!ata_dev_enabled(pair))
2047 * ata_port_disable - Disable port.
2048 * @ap: Port to be disabled.
2050 * Modify @ap data structure such that the system
2051 * thinks that the entire port is disabled, and should
2052 * never attempt to probe or communicate with devices
2055 * LOCKING: host lock, or some other form of
2059 void ata_port_disable(struct ata_port *ap)
2061 ap->device[0].class = ATA_DEV_NONE;
2062 ap->device[1].class = ATA_DEV_NONE;
2063 ap->flags |= ATA_FLAG_DISABLED;
2067 * sata_down_spd_limit - adjust SATA spd limit downward
2068 * @ap: Port to adjust SATA spd limit for
2070 * Adjust SATA spd limit of @ap downward. Note that this
2071 * function only adjusts the limit. The change must be applied
2072 * using sata_set_spd().
2075 * Inherited from caller.
2078 * 0 on success, negative errno on failure
2080 int sata_down_spd_limit(struct ata_port *ap)
2082 u32 sstatus, spd, mask;
2085 rc = sata_scr_read(ap, SCR_STATUS, &sstatus);
2089 mask = ap->sata_spd_limit;
2092 highbit = fls(mask) - 1;
2093 mask &= ~(1 << highbit);
2095 spd = (sstatus >> 4) & 0xf;
2099 mask &= (1 << spd) - 1;
2103 ap->sata_spd_limit = mask;
2105 ata_port_printk(ap, KERN_WARNING, "limiting SATA link speed to %s\n",
2106 sata_spd_string(fls(mask)));
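/*
 * Example (assuming the usual SStatus SPD encoding, 1 = 1.5 Gbps and
 * 2 = 3.0 Gbps): with sata_spd_limit = 0x3 and the link currently
 * negotiated at 3.0 Gbps, the highest remaining bit is cleared and the
 * mask is clipped below the current speed, leaving 0x1, so the next
 * hardreset will be limited to 1.5 Gbps.
 */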
2111 static int __sata_set_spd_needed(struct ata_port *ap, u32 *scontrol)
2115 if (ap->sata_spd_limit == UINT_MAX)
2118 limit = fls(ap->sata_spd_limit);
2120 spd = (*scontrol >> 4) & 0xf;
2121 *scontrol = (*scontrol & ~0xf0) | ((limit & 0xf) << 4);
2123 return spd != limit;
2127 * sata_set_spd_needed - is SATA spd configuration needed
2128 * @ap: Port in question
2130 * Test whether the spd limit in SControl matches
2131 * @ap->sata_spd_limit. This function is used to determine
2132 * whether hardreset is necessary to apply SATA spd
2136 * Inherited from caller.
2139 * 1 if SATA spd configuration is needed, 0 otherwise.
2141 int sata_set_spd_needed(struct ata_port *ap)
2145 if (sata_scr_read(ap, SCR_CONTROL, &scontrol))
2148 return __sata_set_spd_needed(ap, &scontrol);
2152 * sata_set_spd - set SATA spd according to spd limit
2153 * @ap: Port to set SATA spd for
2155 * Set SATA spd of @ap according to sata_spd_limit.
2158 * Inherited from caller.
2161 * 0 if spd doesn't need to be changed, 1 if spd has been
2162 * changed. Negative errno if SCR registers are inaccessible.
2164 int sata_set_spd(struct ata_port *ap)
2169 if ((rc = sata_scr_read(ap, SCR_CONTROL, &scontrol)))
2172 if (!__sata_set_spd_needed(ap, &scontrol))
2175 if ((rc = sata_scr_write(ap, SCR_CONTROL, scontrol)))
2182 * This mode timing computation functionality is ported over from
2183 * drivers/ide/ide-timing.h and was originally written by Vojtech Pavlik
2186 * PIO 0-4, MWDMA 0-2 and UDMA 0-6 timings (in nanoseconds).
2187 * These were taken from ATA/ATAPI-6 standard, rev 0a, except
2188 * for UDMA6, which is currently supported only by Maxtor drives.
2190 * For PIO 5/6 MWDMA 3/4 see the CFA specification 3.0.
2193 static const struct ata_timing ata_timing[] = {
2195 { XFER_UDMA_6, 0, 0, 0, 0, 0, 0, 0, 15 },
2196 { XFER_UDMA_5, 0, 0, 0, 0, 0, 0, 0, 20 },
2197 { XFER_UDMA_4, 0, 0, 0, 0, 0, 0, 0, 30 },
2198 { XFER_UDMA_3, 0, 0, 0, 0, 0, 0, 0, 45 },
2200 { XFER_MW_DMA_4, 25, 0, 0, 0, 55, 20, 80, 0 },
2201 { XFER_MW_DMA_3, 25, 0, 0, 0, 65, 25, 100, 0 },
2202 { XFER_UDMA_2, 0, 0, 0, 0, 0, 0, 0, 60 },
2203 { XFER_UDMA_1, 0, 0, 0, 0, 0, 0, 0, 80 },
2204 { XFER_UDMA_0, 0, 0, 0, 0, 0, 0, 0, 120 },
2206 /* { XFER_UDMA_SLOW, 0, 0, 0, 0, 0, 0, 0, 150 }, */
2208 { XFER_MW_DMA_2, 25, 0, 0, 0, 70, 25, 120, 0 },
2209 { XFER_MW_DMA_1, 45, 0, 0, 0, 80, 50, 150, 0 },
2210 { XFER_MW_DMA_0, 60, 0, 0, 0, 215, 215, 480, 0 },
2212 { XFER_SW_DMA_2, 60, 0, 0, 0, 120, 120, 240, 0 },
2213 { XFER_SW_DMA_1, 90, 0, 0, 0, 240, 240, 480, 0 },
2214 { XFER_SW_DMA_0, 120, 0, 0, 0, 480, 480, 960, 0 },
2216 { XFER_PIO_6, 10, 55, 20, 80, 55, 20, 80, 0 },
2217 { XFER_PIO_5, 15, 65, 25, 100, 65, 25, 100, 0 },
2218 { XFER_PIO_4, 25, 70, 25, 120, 70, 25, 120, 0 },
2219 { XFER_PIO_3, 30, 80, 70, 180, 80, 70, 180, 0 },
2221 { XFER_PIO_2, 30, 290, 40, 330, 100, 90, 240, 0 },
2222 { XFER_PIO_1, 50, 290, 93, 383, 125, 100, 383, 0 },
2223 { XFER_PIO_0, 70, 290, 240, 600, 165, 150, 600, 0 },
2225 /* { XFER_PIO_SLOW, 120, 290, 240, 960, 290, 240, 960, 0 }, */
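/*
 * The columns in the table above follow struct ata_timing as consumed by
 * ata_timing_quantize() below: { mode, setup, act8b, rec8b, cyc8b,
 * active, recover, cycle, udma }, all in nanoseconds except mode.
 */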
2230 #define ENOUGH(v,unit) (((v)-1)/(unit)+1)
2231 #define EZ(v,unit) ((v)?ENOUGH(v,unit):0)
2233 static void ata_timing_quantize(const struct ata_timing *t, struct ata_timing *q, int T, int UT)
2235 q->setup = EZ(t->setup * 1000, T);
2236 q->act8b = EZ(t->act8b * 1000, T);
2237 q->rec8b = EZ(t->rec8b * 1000, T);
2238 q->cyc8b = EZ(t->cyc8b * 1000, T);
2239 q->active = EZ(t->active * 1000, T);
2240 q->recover = EZ(t->recover * 1000, T);
2241 q->cycle = EZ(t->cycle * 1000, T);
2242 q->udma = EZ(t->udma * 1000, UT);
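/*
 * The * 1000 above converts the nanosecond table entries to picoseconds.
 * Assuming the caller passes T as the bus clock period in picoseconds
 * (roughly 30000 for 33 MHz PCI), a 70 ns setup time quantizes to
 * ENOUGH(70000, 30000) = 3 clock cycles; EZ() additionally keeps unused
 * (zero) entries at zero.
 */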
2245 void ata_timing_merge(const struct ata_timing *a, const struct ata_timing *b,
2246 struct ata_timing *m, unsigned int what)
2248 if (what & ATA_TIMING_SETUP ) m->setup = max(a->setup, b->setup);
2249 if (what & ATA_TIMING_ACT8B ) m->act8b = max(a->act8b, b->act8b);
2250 if (what & ATA_TIMING_REC8B ) m->rec8b = max(a->rec8b, b->rec8b);
2251 if (what & ATA_TIMING_CYC8B ) m->cyc8b = max(a->cyc8b, b->cyc8b);
2252 if (what & ATA_TIMING_ACTIVE ) m->active = max(a->active, b->active);
2253 if (what & ATA_TIMING_RECOVER) m->recover = max(a->recover, b->recover);
2254 if (what & ATA_TIMING_CYCLE ) m->cycle = max(a->cycle, b->cycle);
2255 if (what & ATA_TIMING_UDMA ) m->udma = max(a->udma, b->udma);
2258 static const struct ata_timing* ata_timing_find_mode(unsigned short speed)
2260 const struct ata_timing *t;
2262 for (t = ata_timing; t->mode != speed; t++)
2263 if (t->mode == 0xFF)
2268 int ata_timing_compute(struct ata_device *adev, unsigned short speed,
2269 struct ata_timing *t, int T, int UT)
2271 const struct ata_timing *s;
2272 struct ata_timing p;
2278 if (!(s = ata_timing_find_mode(speed)))
2281 memcpy(t, s, sizeof(*s));
2284 * If the drive is an EIDE drive, it can tell us it needs extended
2285 * PIO/MW_DMA cycle timing.
2288 if (adev->id[ATA_ID_FIELD_VALID] & 2) { /* EIDE drive */
2289 memset(&p, 0, sizeof(p));
2290 if(speed >= XFER_PIO_0 && speed <= XFER_SW_DMA_0) {
2291 if (speed <= XFER_PIO_2) p.cycle = p.cyc8b = adev->id[ATA_ID_EIDE_PIO];
2292 else p.cycle = p.cyc8b = adev->id[ATA_ID_EIDE_PIO_IORDY];
2293 } else if(speed >= XFER_MW_DMA_0 && speed <= XFER_MW_DMA_2) {
2294 p.cycle = adev->id[ATA_ID_EIDE_DMA_MIN];
2296 ata_timing_merge(&p, t, t, ATA_TIMING_CYCLE | ATA_TIMING_CYC8B);
2300 * Convert the timing to bus clock counts.
2303 ata_timing_quantize(t, t, T, UT);
2306 * Even in DMA/UDMA modes we still use PIO access for IDENTIFY,
2307 * S.M.A.R.T. and some other commands. We have to ensure that the
2308 * DMA cycle timing is slower than or equal to the fastest PIO timing.
2311 if (speed > XFER_PIO_4) {
2312 ata_timing_compute(adev, adev->pio_mode, &p, T, UT);
2313 ata_timing_merge(&p, t, t, ATA_TIMING_ALL);
2317 * Lengthen active & recovery time so that cycle time is correct.
2320 if (t->act8b + t->rec8b < t->cyc8b) {
2321 t->act8b += (t->cyc8b - (t->act8b + t->rec8b)) / 2;
2322 t->rec8b = t->cyc8b - t->act8b;
2325 if (t->active + t->recover < t->cycle) {
2326 t->active += (t->cycle - (t->active + t->recover)) / 2;
2327 t->recover = t->cycle - t->active;
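/*
 * Worked example of the lengthening above, using the raw PIO4 numbers
 * for illustration (in practice it runs on the quantized clock counts):
 * act8b = 70, rec8b = 25, cyc8b = 120 gives act8b += (120 - 95) / 2 = 12,
 * so act8b becomes 82 and rec8b becomes 120 - 82 = 38, restoring
 * act8b + rec8b == cyc8b.
 */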
2334 * ata_down_xfermask_limit - adjust dev xfer masks downward
2335 * @dev: Device to adjust xfer masks
2336 * @force_pio0: Force PIO0
2338 * Adjust xfer masks of @dev downward. Note that this function
2339 * does not apply the change. Invoking ata_set_mode() afterwards
2340 * will apply the limit.
2343 * Inherited from caller.
2346 * 0 on success, negative errno on failure
2348 int ata_down_xfermask_limit(struct ata_device *dev, int force_pio0)
2350 unsigned long xfer_mask;
2353 xfer_mask = ata_pack_xfermask(dev->pio_mask, dev->mwdma_mask,
2358 /* don't gear down to MWDMA from UDMA, go directly to PIO */
2359 if (xfer_mask & ATA_MASK_UDMA)
2360 xfer_mask &= ~ATA_MASK_MWDMA;
2362 highbit = fls(xfer_mask) - 1;
2363 xfer_mask &= ~(1 << highbit);
2365 xfer_mask &= 1 << ATA_SHIFT_PIO;
2369 ata_unpack_xfermask(xfer_mask, &dev->pio_mask, &dev->mwdma_mask,
2372 ata_dev_printk(dev, KERN_WARNING, "limiting speed to %s\n",
2373 ata_mode_string(xfer_mask));
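/*
 * Example: a device currently capable of up to UDMA5 first loses its
 * MWDMA bits (no gearing down from UDMA to MWDMA) and then its highest
 * remaining bit, so the new fastest mode is UDMA4; with @force_pio0 set,
 * everything except the PIO0 bit is cleared.
 */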
2381 static int ata_dev_set_mode(struct ata_device *dev)
2383 struct ata_eh_context *ehc = &dev->ap->eh_context;
2384 unsigned int err_mask;
2387 dev->flags &= ~ATA_DFLAG_PIO;
2388 if (dev->xfer_shift == ATA_SHIFT_PIO)
2389 dev->flags |= ATA_DFLAG_PIO;
2391 err_mask = ata_dev_set_xfermode(dev);
2393 ata_dev_printk(dev, KERN_ERR, "failed to set xfermode "
2394 "(err_mask=0x%x)\n", err_mask);
2398 ehc->i.flags |= ATA_EHI_POST_SETMODE;
2399 rc = ata_dev_revalidate(dev, 0);
2400 ehc->i.flags &= ~ATA_EHI_POST_SETMODE;
2404 DPRINTK("xfer_shift=%u, xfer_mode=0x%x\n",
2405 dev->xfer_shift, (int)dev->xfer_mode);
2407 ata_dev_printk(dev, KERN_INFO, "configured for %s\n",
2408 ata_mode_string(ata_xfer_mode2mask(dev->xfer_mode)));
2413 * ata_set_mode - Program timings and issue SET FEATURES - XFER
2414 * @ap: port on which timings will be programmed
2415 * @r_failed_dev: out parameter for failed device
2417 * Set ATA device disk transfer mode (PIO3, UDMA6, etc.). If
2418 * ata_set_mode() fails, pointer to the failing device is
2419 * returned in @r_failed_dev.
2422 * PCI/etc. bus probe sem.
2425 * 0 on success, negative errno otherwise
2427 int ata_set_mode(struct ata_port *ap, struct ata_device **r_failed_dev)
2429 struct ata_device *dev;
2430 int i, rc = 0, used_dma = 0, found = 0;
2432 /* has private set_mode? */
2433 if (ap->ops->set_mode) {
2434 /* FIXME: make ->set_mode handle no device case and
2435 * return error code and failing device on failure.
2437 for (i = 0; i < ATA_MAX_DEVICES; i++) {
2438 if (ata_dev_ready(&ap->device[i])) {
2439 ap->ops->set_mode(ap);
2446 /* step 1: calculate xfer_mask */
2447 for (i = 0; i < ATA_MAX_DEVICES; i++) {
2448 unsigned int pio_mask, dma_mask;
2450 dev = &ap->device[i];
2452 if (!ata_dev_enabled(dev))
2455 ata_dev_xfermask(dev);
2457 pio_mask = ata_pack_xfermask(dev->pio_mask, 0, 0);
2458 dma_mask = ata_pack_xfermask(0, dev->mwdma_mask, dev->udma_mask);
2459 dev->pio_mode = ata_xfer_mask2mode(pio_mask);
2460 dev->dma_mode = ata_xfer_mask2mode(dma_mask);
2469 /* step 2: always set host PIO timings */
2470 for (i = 0; i < ATA_MAX_DEVICES; i++) {
2471 dev = &ap->device[i];
2472 if (!ata_dev_enabled(dev))
2475 if (!dev->pio_mode) {
2476 ata_dev_printk(dev, KERN_WARNING, "no PIO support\n");
2481 dev->xfer_mode = dev->pio_mode;
2482 dev->xfer_shift = ATA_SHIFT_PIO;
2483 if (ap->ops->set_piomode)
2484 ap->ops->set_piomode(ap, dev);
2487 /* step 3: set host DMA timings */
2488 for (i = 0; i < ATA_MAX_DEVICES; i++) {
2489 dev = &ap->device[i];
2491 if (!ata_dev_enabled(dev) || !dev->dma_mode)
2494 dev->xfer_mode = dev->dma_mode;
2495 dev->xfer_shift = ata_xfer_mode2shift(dev->dma_mode);
2496 if (ap->ops->set_dmamode)
2497 ap->ops->set_dmamode(ap, dev);
2500 /* step 4: update devices' xfer mode */
2501 for (i = 0; i < ATA_MAX_DEVICES; i++) {
2502 dev = &ap->device[i];
2504 /* don't update suspended devices' xfer mode */
2505 if (!ata_dev_ready(dev))
2508 rc = ata_dev_set_mode(dev);
2513 /* Record simplex status. If we selected DMA then the other
2514 * host channels are not permitted to do so.
2516 if (used_dma && (ap->host->flags & ATA_HOST_SIMPLEX))
2517 ap->host->simplex_claimed = 1;
2519 /* step 5: chip-specific finalisation */
2520 if (ap->ops->post_set_mode)
2521 ap->ops->post_set_mode(ap);
2525 *r_failed_dev = dev;
2530 * ata_tf_to_host - issue ATA taskfile to host controller
2531 * @ap: port to which command is being issued
2532 * @tf: ATA taskfile register set
2534 * Issues ATA taskfile register set to ATA host controller,
2535 * with proper synchronization with interrupt handler and
2539 * spin_lock_irqsave(host lock)
2542 static inline void ata_tf_to_host(struct ata_port *ap,
2543 const struct ata_taskfile *tf)
2545 ap->ops->tf_load(ap, tf);
2546 ap->ops->exec_command(ap, tf);
2550 * ata_busy_sleep - sleep until BSY clears, or timeout
2551 * @ap: port containing status register to be polled
2552 * @tmout_pat: impatience timeout
2553 * @tmout: overall timeout
2555 * Sleep until ATA Status register bit BSY clears,
2556 * or a timeout occurs.
2559 * Kernel thread context (may sleep).
2562 * 0 on success, -errno otherwise.
2564 int ata_busy_sleep(struct ata_port *ap,
2565 unsigned long tmout_pat, unsigned long tmout)
2567 unsigned long timer_start, timeout;
2570 status = ata_busy_wait(ap, ATA_BUSY, 300);
2571 timer_start = jiffies;
2572 timeout = timer_start + tmout_pat;
2573 while (status != 0xff && (status & ATA_BUSY) &&
2574 time_before(jiffies, timeout)) {
2576 status = ata_busy_wait(ap, ATA_BUSY, 3);
2579 if (status != 0xff && (status & ATA_BUSY))
2580 ata_port_printk(ap, KERN_WARNING,
2581 "port is slow to respond, please be patient "
2582 "(Status 0x%x)\n", status);
2584 timeout = timer_start + tmout;
2585 while (status != 0xff && (status & ATA_BUSY) &&
2586 time_before(jiffies, timeout)) {
2588 status = ata_chk_status(ap);
2594 if (status & ATA_BUSY) {
2595 ata_port_printk(ap, KERN_ERR, "port failed to respond "
2596 "(%lu secs, Status 0x%x)\n",
2597 tmout / HZ, status);
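/*
 * Usage example (illustrative): reset paths in this file wait for BSY
 * with the boot timeouts, e.g.
 *
 *	if (ata_busy_sleep(ap, ATA_TMOUT_BOOT_QUICK, ATA_TMOUT_BOOT))
 *		ata_port_printk(ap, KERN_WARNING,
 *				"device not ready after reset\n");
 *
 * The warning text is an example only; callers decide how to react.
 */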
2604 static void ata_bus_post_reset(struct ata_port *ap, unsigned int devmask)
2606 struct ata_ioports *ioaddr = &ap->ioaddr;
2607 unsigned int dev0 = devmask & (1 << 0);
2608 unsigned int dev1 = devmask & (1 << 1);
2609 unsigned long timeout;
2611 /* if device 0 was found in ata_devchk, wait for its
2615 ata_busy_sleep(ap, ATA_TMOUT_BOOT_QUICK, ATA_TMOUT_BOOT);
2617 /* if device 1 was found in ata_devchk, wait for
2618 * register access, then wait for BSY to clear
2620 timeout = jiffies + ATA_TMOUT_BOOT;
2624 ap->ops->dev_select(ap, 1);
2625 if (ap->flags & ATA_FLAG_MMIO) {
2626 nsect = readb((void __iomem *) ioaddr->nsect_addr);
2627 lbal = readb((void __iomem *) ioaddr->lbal_addr);
2629 nsect = inb(ioaddr->nsect_addr);
2630 lbal = inb(ioaddr->lbal_addr);
2632 if ((nsect == 1) && (lbal == 1))
2634 if (time_after(jiffies, timeout)) {
2638 msleep(50); /* give drive a breather */
2641 ata_busy_sleep(ap, ATA_TMOUT_BOOT_QUICK, ATA_TMOUT_BOOT);
2643 /* is all this really necessary? */
2644 ap->ops->dev_select(ap, 0);
2646 ap->ops->dev_select(ap, 1);
2648 ap->ops->dev_select(ap, 0);
2651 static unsigned int ata_bus_softreset(struct ata_port *ap,
2652 unsigned int devmask)
2654 struct ata_ioports *ioaddr = &ap->ioaddr;
2656 DPRINTK("ata%u: bus reset via SRST\n", ap->id);
2658 /* software reset. causes dev0 to be selected */
2659 if (ap->flags & ATA_FLAG_MMIO) {
2660 writeb(ap->ctl, (void __iomem *) ioaddr->ctl_addr);
2661 udelay(20); /* FIXME: flush */
2662 writeb(ap->ctl | ATA_SRST, (void __iomem *) ioaddr->ctl_addr);
2663 udelay(20); /* FIXME: flush */
2664 writeb(ap->ctl, (void __iomem *) ioaddr->ctl_addr);
2666 outb(ap->ctl, ioaddr->ctl_addr);
2668 outb(ap->ctl | ATA_SRST, ioaddr->ctl_addr);
2670 outb(ap->ctl, ioaddr->ctl_addr);
2673 /* spec mandates ">= 2ms" before checking status.
2674 * We wait 150ms, because that was the magic delay used for
2675 * ATAPI devices in Hale Landis's ATADRVR, for the period of time
2676 * between when the ATA command register is written and when
2677 * status is checked. Because waiting for "a while" before
2678 * checking status is fine, post SRST, we perform this magic
2679 * delay here as well.
2681 * Old drivers/ide uses the 2mS rule and then waits for ready
2685 /* Before we perform post reset processing we want to see if
2686 * the bus shows 0xFF because the odd clown forgets the D7
2687 * pulldown resistor.
2689 if (ata_check_status(ap) == 0xFF)
2692 ata_bus_post_reset(ap, devmask);
2698 * ata_bus_reset - reset host port and associated ATA channel
2699 * @ap: port to reset
2701 * This is typically the first time we actually start issuing
2702 * commands to the ATA channel. We wait for BSY to clear, then
2703 * issue EXECUTE DEVICE DIAGNOSTIC command, polling for its
2704 * result. Determine what devices, if any, are on the channel
2705 * by looking at the device 0/1 error register. Look at the signature
2706 * stored in each device's taskfile registers, to determine if
2707 * the device is ATA or ATAPI.
2710 * PCI/etc. bus probe sem.
2711 * Obtains host lock.
2714 * Sets ATA_FLAG_DISABLED if bus reset fails.
2717 void ata_bus_reset(struct ata_port *ap)
2719 struct ata_ioports *ioaddr = &ap->ioaddr;
2720 unsigned int slave_possible = ap->flags & ATA_FLAG_SLAVE_POSS;
2722 unsigned int dev0, dev1 = 0, devmask = 0;
2724 DPRINTK("ENTER, host %u, port %u\n", ap->id, ap->port_no);
2726 /* determine if device 0/1 are present */
2727 if (ap->flags & ATA_FLAG_SATA_RESET)
2730 dev0 = ata_devchk(ap, 0);
2732 dev1 = ata_devchk(ap, 1);
2736 devmask |= (1 << 0);
2738 devmask |= (1 << 1);
2740 /* select device 0 again */
2741 ap->ops->dev_select(ap, 0);
2743 /* issue bus reset */
2744 if (ap->flags & ATA_FLAG_SRST)
2745 if (ata_bus_softreset(ap, devmask))
2749 * determine by signature whether we have ATA or ATAPI devices
2751 ap->device[0].class = ata_dev_try_classify(ap, 0, &err);
2752 if ((slave_possible) && (err != 0x81))
2753 ap->device[1].class = ata_dev_try_classify(ap, 1, &err);
2755 /* re-enable interrupts */
2756 if (ap->ioaddr.ctl_addr) /* FIXME: hack. create a hook instead */
2759 /* is double-select really necessary? */
2760 if (ap->device[1].class != ATA_DEV_NONE)
2761 ap->ops->dev_select(ap, 1);
2762 if (ap->device[0].class != ATA_DEV_NONE)
2763 ap->ops->dev_select(ap, 0);
2765 /* if no devices were detected, disable this port */
2766 if ((ap->device[0].class == ATA_DEV_NONE) &&
2767 (ap->device[1].class == ATA_DEV_NONE))
2770 if (ap->flags & (ATA_FLAG_SATA_RESET | ATA_FLAG_SRST)) {
2771 /* set up device control for ATA_FLAG_SATA_RESET */
2772 if (ap->flags & ATA_FLAG_MMIO)
2773 writeb(ap->ctl, (void __iomem *) ioaddr->ctl_addr);
2775 outb(ap->ctl, ioaddr->ctl_addr);
2782 ata_port_printk(ap, KERN_ERR, "disabling port\n");
2783 ap->ops->port_disable(ap);
2789 * sata_phy_debounce - debounce SATA phy status
2790 * @ap: ATA port to debounce SATA phy status for
2791 @params: timing parameters { interval, duration, timeout } in msec
2793 * Make sure SStatus of @ap reaches stable state, determined by
2794 * holding the same value where DET is not 1 for @duration polled
2795 every @interval, before @timeout. Timeout constrains the
2796 beginning of the stable state. Because, after hot unplugging,
2797 DET gets stuck at 1 on some controllers, this function waits
2798 until the timeout and then returns 0 if DET is stable at 1.
2801 * Kernel thread context (may sleep)
2804 * 0 on success, -errno on failure.
2806 int sata_phy_debounce(struct ata_port *ap, const unsigned long *params)
2808 unsigned long interval_msec = params[0];
2809 unsigned long duration = params[1] * HZ / 1000;
2810 unsigned long timeout = jiffies + params[2] * HZ / 1000;
2811 unsigned long last_jiffies;
2815 if ((rc = sata_scr_read(ap, SCR_STATUS, &cur)))
2820 last_jiffies = jiffies;
2823 msleep(interval_msec);
2824 if ((rc = sata_scr_read(ap, SCR_STATUS, &cur)))
2830 if (cur == 1 && time_before(jiffies, timeout))
2832 if (time_after(jiffies, last_jiffies + duration))
2837 /* unstable, start over */
2839 last_jiffies = jiffies;
2842 if (time_after(jiffies, timeout))
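/*
 * Illustrative sketch: callers pick one of the debounce timing tables
 * declared at the top of this file, e.g. the hotplug table used by
 * ata_wait_spinup() below.
 *
 *	rc = sata_phy_debounce(ap, sata_deb_timing_hotplug);
 *	if (rc && rc != -EOPNOTSUPP)
 *		ata_port_printk(ap, KERN_WARNING,
 *				"phy did not stabilize (errno=%d)\n", rc);
 */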
2848 * sata_phy_resume - resume SATA phy
2849 * @ap: ATA port to resume SATA phy for
2850 @params: timing parameters { interval, duration, timeout } in msec
2852 * Resume SATA phy of @ap and debounce it.
2855 * Kernel thread context (may sleep)
2858 * 0 on success, -errno on failure.
2860 int sata_phy_resume(struct ata_port *ap, const unsigned long *params)
2865 if ((rc = sata_scr_read(ap, SCR_CONTROL, &scontrol)))
2868 scontrol = (scontrol & 0x0f0) | 0x300;
2870 if ((rc = sata_scr_write(ap, SCR_CONTROL, scontrol)))
2873 /* Some PHYs react badly if SStatus is pounded immediately
2874 * after resuming. Delay 200ms before debouncing.
2878 return sata_phy_debounce(ap, params);
2881 static void ata_wait_spinup(struct ata_port *ap)
2883 struct ata_eh_context *ehc = &ap->eh_context;
2884 unsigned long end, secs;
2887 /* first, debounce phy if SATA */
2888 if (ap->cbl == ATA_CBL_SATA) {
2889 rc = sata_phy_debounce(ap, sata_deb_timing_hotplug);
2891 /* if debounced successfully and offline, no need to wait */
2892 if ((rc == 0 || rc == -EOPNOTSUPP) && ata_port_offline(ap))
2896 /* okay, let's give the drive time to spin up */
2897 end = ehc->i.hotplug_timestamp + ATA_SPINUP_WAIT * HZ / 1000;
2898 secs = ((end - jiffies) + HZ - 1) / HZ;
2900 if (time_after(jiffies, end))
2904 ata_port_printk(ap, KERN_INFO, "waiting for device to spin up "
2905 "(%lu secs)\n", secs);
2907 schedule_timeout_uninterruptible(end - jiffies);
2911 * ata_std_prereset - prepare for reset
2912 * @ap: ATA port to be reset
2914 * @ap is about to be reset. Initialize it.
2917 * Kernel thread context (may sleep)
2920 * 0 on success, -errno otherwise.
2922 int ata_std_prereset(struct ata_port *ap)
2924 struct ata_eh_context *ehc = &ap->eh_context;
2925 const unsigned long *timing = sata_ehc_deb_timing(ehc);
2928 /* handle link resume & hotplug spinup */
2929 if ((ehc->i.flags & ATA_EHI_RESUME_LINK) &&
2930 (ap->flags & ATA_FLAG_HRST_TO_RESUME))
2931 ehc->i.action |= ATA_EH_HARDRESET;
2933 if ((ehc->i.flags & ATA_EHI_HOTPLUGGED) &&
2934 (ap->flags & ATA_FLAG_SKIP_D2H_BSY))
2935 ata_wait_spinup(ap);
2937 /* if we're about to do hardreset, nothing more to do */
2938 if (ehc->i.action & ATA_EH_HARDRESET)
2941 /* if SATA, resume phy */
2942 if (ap->cbl == ATA_CBL_SATA) {
2943 rc = sata_phy_resume(ap, timing);
2944 if (rc && rc != -EOPNOTSUPP) {
2945 /* phy resume failed */
2946 ata_port_printk(ap, KERN_WARNING, "failed to resume "
2947 "link for reset (errno=%d)\n", rc);
2952 /* Wait for !BSY if the controller can wait for the first D2H
2953 * Reg FIS and we don't know that no device is attached.
2955 if (!(ap->flags & ATA_FLAG_SKIP_D2H_BSY) && !ata_port_offline(ap))
2956 ata_busy_sleep(ap, ATA_TMOUT_BOOT_QUICK, ATA_TMOUT_BOOT);
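/*
 * Example (hypothetical LLDD hook): a driver that needs controller
 * specific preparation can do its own work first and then fall
 * through to ata_std_prereset().
 *
 *	static int my_prereset(struct ata_port *ap)
 *	{
 *		return ata_std_prereset(ap);
 *	}
 *
 * Controller specific quiescing would go before the call above; the
 * hook name is made up for illustration.
 */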
2962 * ata_std_softreset - reset host port via ATA SRST
2963 * @ap: port to reset
2964 * @classes: resulting classes of attached devices
2966 * Reset host port using ATA SRST.
2969 * Kernel thread context (may sleep)
2972 * 0 on success, -errno otherwise.
2974 int ata_std_softreset(struct ata_port *ap, unsigned int *classes)
2976 unsigned int slave_possible = ap->flags & ATA_FLAG_SLAVE_POSS;
2977 unsigned int devmask = 0, err_mask;
2982 if (ata_port_offline(ap)) {
2983 classes[0] = ATA_DEV_NONE;
2987 /* determine if device 0/1 are present */
2988 if (ata_devchk(ap, 0))
2989 devmask |= (1 << 0);
2990 if (slave_possible && ata_devchk(ap, 1))
2991 devmask |= (1 << 1);
2993 /* select device 0 again */
2994 ap->ops->dev_select(ap, 0);
2996 /* issue bus reset */
2997 DPRINTK("about to softreset, devmask=%x\n", devmask);
2998 err_mask = ata_bus_softreset(ap, devmask);
3000 ata_port_printk(ap, KERN_ERR, "SRST failed (err_mask=0x%x)\n",
3005 /* determine by signature whether we have ATA or ATAPI devices */
3006 classes[0] = ata_dev_try_classify(ap, 0, &err);
3007 if (slave_possible && err != 0x81)
3008 classes[1] = ata_dev_try_classify(ap, 1, &err);
3011 DPRINTK("EXIT, classes[0]=%u [1]=%u\n", classes[0], classes[1]);
3016 * sata_port_hardreset - reset port via SATA phy reset
3017 * @ap: port to reset
3018 @timing: timing parameters { interval, duration, timeout } in msec
3020 * SATA phy-reset host port using DET bits of SControl register.
3023 * Kernel thread context (may sleep)
3026 * 0 on success, -errno otherwise.
3028 int sata_port_hardreset(struct ata_port *ap, const unsigned long *timing)
3035 if (sata_set_spd_needed(ap)) {
3036 /* SATA spec says nothing about how to reconfigure
3037 * spd. To be on the safe side, turn off phy during
3038 * reconfiguration. This works for at least ICH7 AHCI
3041 if ((rc = sata_scr_read(ap, SCR_CONTROL, &scontrol)))
3044 scontrol = (scontrol & 0x0f0) | 0x304;
3046 if ((rc = sata_scr_write(ap, SCR_CONTROL, scontrol)))
3052 /* issue phy wake/reset */
3053 if ((rc = sata_scr_read(ap, SCR_CONTROL, &scontrol)))
3056 scontrol = (scontrol & 0x0f0) | 0x301;
3058 if ((rc = sata_scr_write_flush(ap, SCR_CONTROL, scontrol)))
3061 /* Couldn't find anything in SATA I/II specs, but AHCI-1.1
3062 * 10.4.2 says at least 1 ms.
3066 /* bring phy back */
3067 rc = sata_phy_resume(ap, timing);
3069 DPRINTK("EXIT, rc=%d\n", rc);
3074 * sata_std_hardreset - reset host port via SATA phy reset
3075 * @ap: port to reset
3076 * @class: resulting class of attached device
3078 * SATA phy-reset host port using DET bits of SControl register,
3079 * wait for !BSY and classify the attached device.
3082 * Kernel thread context (may sleep)
3085 * 0 on success, -errno otherwise.
3087 int sata_std_hardreset(struct ata_port *ap, unsigned int *class)
3089 const unsigned long *timing = sata_ehc_deb_timing(&ap->eh_context);
3095 rc = sata_port_hardreset(ap, timing);
3097 ata_port_printk(ap, KERN_ERR,
3098 "COMRESET failed (errno=%d)\n", rc);
3102 /* TODO: phy layer with polling, timeouts, etc. */
3103 if (ata_port_offline(ap)) {
3104 *class = ATA_DEV_NONE;
3105 DPRINTK("EXIT, link offline\n");
3109 if (ata_busy_sleep(ap, ATA_TMOUT_BOOT_QUICK, ATA_TMOUT_BOOT)) {
3110 ata_port_printk(ap, KERN_ERR,
3111 "COMRESET failed (device not ready)\n");
3115 ap->ops->dev_select(ap, 0); /* probably unnecessary */
3117 *class = ata_dev_try_classify(ap, 0, NULL);
3119 DPRINTK("EXIT, class=%u\n", *class);
3124 * ata_std_postreset - standard postreset callback
3125 * @ap: the target ata_port
3126 * @classes: classes of attached devices
3128 * This function is invoked after a successful reset. Note that
3129 * the device might have been reset more than once using
3130 * different reset methods before postreset is invoked.
3133 * Kernel thread context (may sleep)
3135 void ata_std_postreset(struct ata_port *ap, unsigned int *classes)
3141 /* print link status */
3142 sata_print_link_status(ap);
3145 if (sata_scr_read(ap, SCR_ERROR, &serror) == 0)
3146 sata_scr_write(ap, SCR_ERROR, serror);
3148 /* re-enable interrupts */
3149 if (!ap->ops->error_handler) {
3150 /* FIXME: hack. create a hook instead */
3151 if (ap->ioaddr.ctl_addr)
3155 /* is double-select really necessary? */
3156 if (classes[0] != ATA_DEV_NONE)
3157 ap->ops->dev_select(ap, 1);
3158 if (classes[1] != ATA_DEV_NONE)
3159 ap->ops->dev_select(ap, 0);
3161 /* bail out if no device is present */
3162 if (classes[0] == ATA_DEV_NONE && classes[1] == ATA_DEV_NONE) {
3163 DPRINTK("EXIT, no device\n");
3167 /* set up device control */
3168 if (ap->ioaddr.ctl_addr) {
3169 if (ap->flags & ATA_FLAG_MMIO)
3170 writeb(ap->ctl, (void __iomem *) ap->ioaddr.ctl_addr);
3172 outb(ap->ctl, ap->ioaddr.ctl_addr);
3179 * ata_dev_same_device - Determine whether new ID matches configured device
3180 * @dev: device to compare against
3181 * @new_class: class of the new device
3182 * @new_id: IDENTIFY page of the new device
3184 * Compare @new_class and @new_id against @dev and determine
3185 * whether @dev is the device indicated by @new_class and
3192 * 1 if @dev matches @new_class and @new_id, 0 otherwise.
3194 static int ata_dev_same_device(struct ata_device *dev, unsigned int new_class,
3197 const u16 *old_id = dev->id;
3198 unsigned char model[2][41], serial[2][21];
3201 if (dev->class != new_class) {
3202 ata_dev_printk(dev, KERN_INFO, "class mismatch %d != %d\n",
3203 dev->class, new_class);
3207 ata_id_c_string(old_id, model[0], ATA_ID_PROD_OFS, sizeof(model[0]));
3208 ata_id_c_string(new_id, model[1], ATA_ID_PROD_OFS, sizeof(model[1]));
3209 ata_id_c_string(old_id, serial[0], ATA_ID_SERNO_OFS, sizeof(serial[0]));
3210 ata_id_c_string(new_id, serial[1], ATA_ID_SERNO_OFS, sizeof(serial[1]));
3211 new_n_sectors = ata_id_n_sectors(new_id);
3213 if (strcmp(model[0], model[1])) {
3214 ata_dev_printk(dev, KERN_INFO, "model number mismatch "
3215 "'%s' != '%s'\n", model[0], model[1]);
3219 if (strcmp(serial[0], serial[1])) {
3220 ata_dev_printk(dev, KERN_INFO, "serial number mismatch "
3221 "'%s' != '%s'\n", serial[0], serial[1]);
3225 if (dev->class == ATA_DEV_ATA && dev->n_sectors != new_n_sectors) {
3226 ata_dev_printk(dev, KERN_INFO, "n_sectors mismatch "
3228 (unsigned long long)dev->n_sectors,
3229 (unsigned long long)new_n_sectors);
3237 * ata_dev_revalidate - Revalidate ATA device
3238 * @dev: device to revalidate
3239 * @readid_flags: read ID flags
3241 * Re-read IDENTIFY page and make sure @dev is still attached to
3245 * Kernel thread context (may sleep)
3248 * 0 on success, negative errno otherwise
3250 int ata_dev_revalidate(struct ata_device *dev, unsigned int readid_flags)
3252 unsigned int class = dev->class;
3253 u16 *id = (void *)dev->ap->sector_buf;
3256 if (!ata_dev_enabled(dev)) {
3262 rc = ata_dev_read_id(dev, &class, readid_flags, id);
3266 /* is the device still there? */
3267 if (!ata_dev_same_device(dev, class, id)) {
3272 memcpy(dev->id, id, sizeof(id[0]) * ATA_ID_WORDS);
3274 /* configure device according to the new ID */
3275 rc = ata_dev_configure(dev);
3280 ata_dev_printk(dev, KERN_ERR, "revalidation failed (errno=%d)\n", rc);
3284 struct ata_blacklist_entry {
3285 const char *model_num;
3286 const char *model_rev;
3287 unsigned long horkage;
3290 static const struct ata_blacklist_entry ata_device_blacklist [] = {
3291 /* Devices with DMA related problems under Linux */
3292 { "WDC AC11000H", NULL, ATA_HORKAGE_NODMA },
3293 { "WDC AC22100H", NULL, ATA_HORKAGE_NODMA },
3294 { "WDC AC32500H", NULL, ATA_HORKAGE_NODMA },
3295 { "WDC AC33100H", NULL, ATA_HORKAGE_NODMA },
3296 { "WDC AC31600H", NULL, ATA_HORKAGE_NODMA },
3297 { "WDC AC32100H", "24.09P07", ATA_HORKAGE_NODMA },
3298 { "WDC AC23200L", "21.10N21", ATA_HORKAGE_NODMA },
3299 { "Compaq CRD-8241B", NULL, ATA_HORKAGE_NODMA },
3300 { "CRD-8400B", NULL, ATA_HORKAGE_NODMA },
3301 { "CRD-8480B", NULL, ATA_HORKAGE_NODMA },
3302 { "CRD-8482B", NULL, ATA_HORKAGE_NODMA },
3303 { "CRD-84", NULL, ATA_HORKAGE_NODMA },
3304 { "SanDisk SDP3B", NULL, ATA_HORKAGE_NODMA },
3305 { "SanDisk SDP3B-64", NULL, ATA_HORKAGE_NODMA },
3306 { "SANYO CD-ROM CRD", NULL, ATA_HORKAGE_NODMA },
3307 { "HITACHI CDR-8", NULL, ATA_HORKAGE_NODMA },
3308 { "HITACHI CDR-8335", NULL, ATA_HORKAGE_NODMA },
3309 { "HITACHI CDR-8435", NULL, ATA_HORKAGE_NODMA },
3310 { "Toshiba CD-ROM XM-6202B", NULL, ATA_HORKAGE_NODMA },
3311 { "TOSHIBA CD-ROM XM-1702BC", NULL, ATA_HORKAGE_NODMA },
3312 { "CD-532E-A", NULL, ATA_HORKAGE_NODMA },
3313 { "E-IDE CD-ROM CR-840",NULL, ATA_HORKAGE_NODMA },
3314 { "CD-ROM Drive/F5A", NULL, ATA_HORKAGE_NODMA },
3315 { "WPI CDD-820", NULL, ATA_HORKAGE_NODMA },
3316 { "SAMSUNG CD-ROM SC-148C", NULL, ATA_HORKAGE_NODMA },
3317 { "SAMSUNG CD-ROM SC", NULL, ATA_HORKAGE_NODMA },
3318 { "SanDisk SDP3B-64", NULL, ATA_HORKAGE_NODMA },
3319 { "ATAPI CD-ROM DRIVE 40X MAXIMUM",NULL,ATA_HORKAGE_NODMA },
3320 { "_NEC DV5800A", NULL, ATA_HORKAGE_NODMA },
3321 { "SAMSUNG CD-ROM SN-124","N001", ATA_HORKAGE_NODMA },
3323 /* Devices we expect to fail diagnostics */
3325 /* Devices where NCQ should be avoided */
3327 { "WDC WD740ADFD-00", NULL, ATA_HORKAGE_NONCQ },
3329 /* Devices with NCQ limits */
3335 static int ata_strim(char *s, size_t len)
3337 len = strnlen(s, len);
3339 /* ATAPI specifies that empty space is blank-filled; remove blanks */
3340 while ((len > 0) && (s[len - 1] == ' ')) {
3347 unsigned long ata_device_blacklisted(const struct ata_device *dev)
3349 unsigned char model_num[40];
3350 unsigned char model_rev[16];
3351 unsigned int nlen, rlen;
3352 const struct ata_blacklist_entry *ad = ata_device_blacklist;
3354 ata_id_string(dev->id, model_num, ATA_ID_PROD_OFS,
3356 ata_id_string(dev->id, model_rev, ATA_ID_FW_REV_OFS,
3358 nlen = ata_strim(model_num, sizeof(model_num));
3359 rlen = ata_strim(model_rev, sizeof(model_rev));
3361 while (ad->model_num) {
3362 if (!strncmp(ad->model_num, model_num, nlen)) {
3363 if (ad->model_rev == NULL)
3365 if (!strncmp(ad->model_rev, model_rev, rlen))
3373 static int ata_dma_blacklisted(const struct ata_device *dev)
3375 /* We don't support polling DMA.
3376 * Blacklist DMA for ATAPI devices with CDB-intr (use PIO instead)
3377 * if the LLDD handles only interrupts in the HSM_ST_LAST state.
3379 if ((dev->ap->flags & ATA_FLAG_PIO_POLLING) &&
3380 (dev->flags & ATA_DFLAG_CDB_INTR))
3382 return (ata_device_blacklisted(dev) & ATA_HORKAGE_NODMA) ? 1 : 0;
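/*
 * Illustrative sketch: other horkage bits from the blacklist table are
 * tested the same way, e.g. a hypothetical NCQ check mirroring the
 * helper above.
 *
 *	static int ata_ncq_blacklisted(const struct ata_device *dev)
 *	{
 *		return (ata_device_blacklisted(dev) & ATA_HORKAGE_NONCQ) ? 1 : 0;
 *	}
 *
 * The helper name is made up for illustration; only the NODMA variant
 * exists in this file.
 */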
3386 * ata_dev_xfermask - Compute supported xfermask of the given device
3387 * @dev: Device to compute xfermask for
3389 * Compute supported xfermask of @dev and store it in
3390 * dev->*_mask. This function is responsible for applying all
3391 * known limits including host controller limits, device
3397 static void ata_dev_xfermask(struct ata_device *dev)
3399 struct ata_port *ap = dev->ap;
3400 struct ata_host *host = ap->host;
3401 unsigned long xfer_mask;
3403 /* controller modes available */
3404 xfer_mask = ata_pack_xfermask(ap->pio_mask,
3405 ap->mwdma_mask, ap->udma_mask);
3407 /* Apply cable rule here. Don't apply it early because when
3408 * we handle hot plug the cable type can itself change.
3410 if (ap->cbl == ATA_CBL_PATA40)
3411 xfer_mask &= ~(0xF8 << ATA_SHIFT_UDMA);
3412 /* Apply drive side cable rule. Unknown or 80-pin cables reported
3413 * on the host side are checked on the drive side as well. Cases where
3414 * we know a 40-wire cable is used safely for 80-wire modes are not checked here.
3416 if (ata_drive_40wire(dev->id) && (ap->cbl == ATA_CBL_PATA_UNK || ap->cbl == ATA_CBL_PATA80))
3417 xfer_mask &= ~(0xF8 << ATA_SHIFT_UDMA);
3420 xfer_mask &= ata_pack_xfermask(dev->pio_mask,
3421 dev->mwdma_mask, dev->udma_mask);
3422 xfer_mask &= ata_id_xfermask(dev->id);
3425 * CFA Advanced TrueIDE timings are not allowed on a shared
3428 if (ata_dev_pair(dev)) {
3429 /* No PIO5 or PIO6 */
3430 xfer_mask &= ~(0x03 << (ATA_SHIFT_PIO + 5));
3431 /* No MWDMA3 or MWDMA4 */
3432 xfer_mask &= ~(0x03 << (ATA_SHIFT_MWDMA + 3));
3435 if (ata_dma_blacklisted(dev)) {
3436 xfer_mask &= ~(ATA_MASK_MWDMA | ATA_MASK_UDMA);
3437 ata_dev_printk(dev, KERN_WARNING,
3438 "device is on DMA blacklist, disabling DMA\n");
3441 if ((host->flags & ATA_HOST_SIMPLEX) && host->simplex_claimed) {
3442 xfer_mask &= ~(ATA_MASK_MWDMA | ATA_MASK_UDMA);
3443 ata_dev_printk(dev, KERN_WARNING, "simplex DMA is claimed by "
3444 "other device, disabling DMA\n");
3447 if (ap->ops->mode_filter)
3448 xfer_mask = ap->ops->mode_filter(ap, dev, xfer_mask);
3450 ata_unpack_xfermask(xfer_mask, &dev->pio_mask,
3451 &dev->mwdma_mask, &dev->udma_mask);
3455 * ata_dev_set_xfermode - Issue SET FEATURES - XFER MODE command
3456 * @dev: Device to which command will be sent
3458 * Issue SET FEATURES - XFER MODE command to device @dev
3462 * PCI/etc. bus probe sem.
3465 * 0 on success, AC_ERR_* mask otherwise.
3468 static unsigned int ata_dev_set_xfermode(struct ata_device *dev)
3470 struct ata_taskfile tf;
3471 unsigned int err_mask;
3473 /* set up set-features taskfile */
3474 DPRINTK("set features - xfer mode\n");
3476 ata_tf_init(dev, &tf);
3477 tf.command = ATA_CMD_SET_FEATURES;
3478 tf.feature = SETFEATURES_XFER;
3479 tf.flags |= ATA_TFLAG_ISADDR | ATA_TFLAG_DEVICE;
3480 tf.protocol = ATA_PROT_NODATA;
3481 tf.nsect = dev->xfer_mode;
3483 err_mask = ata_exec_internal(dev, &tf, NULL, DMA_NONE, NULL, 0);
3485 DPRINTK("EXIT, err_mask=%x\n", err_mask);
3490 * ata_dev_init_params - Issue INIT DEV PARAMS command
3491 * @dev: Device to which command will be sent
3492 * @heads: Number of heads (taskfile parameter)
3493 * @sectors: Number of sectors (taskfile parameter)
3496 * Kernel thread context (may sleep)
3499 * 0 on success, AC_ERR_* mask otherwise.
3501 static unsigned int ata_dev_init_params(struct ata_device *dev,
3502 u16 heads, u16 sectors)
3504 struct ata_taskfile tf;
3505 unsigned int err_mask;
3507 /* Number of sectors per track 1-255. Number of heads 1-16 */
3508 if (sectors < 1 || sectors > 255 || heads < 1 || heads > 16)
3509 return AC_ERR_INVALID;
3511 /* set up init dev params taskfile */
3512 DPRINTK("init dev params \n");
3514 ata_tf_init(dev, &tf);
3515 tf.command = ATA_CMD_INIT_DEV_PARAMS;
3516 tf.flags |= ATA_TFLAG_ISADDR | ATA_TFLAG_DEVICE;
3517 tf.protocol = ATA_PROT_NODATA;
3519 tf.device |= (heads - 1) & 0x0f; /* max head = num. of heads - 1 */
3521 err_mask = ata_exec_internal(dev, &tf, NULL, DMA_NONE, NULL, 0);
3523 DPRINTK("EXIT, err_mask=%x\n", err_mask);
3528 * ata_sg_clean - Unmap DMA memory associated with command
3529 * @qc: Command containing DMA memory to be released
3531 * Unmap all mapped DMA memory associated with this command.
3534 * spin_lock_irqsave(host lock)
3537 static void ata_sg_clean(struct ata_queued_cmd *qc)
3539 struct ata_port *ap = qc->ap;
3540 struct scatterlist *sg = qc->__sg;
3541 int dir = qc->dma_dir;
3542 void *pad_buf = NULL;
3544 WARN_ON(!(qc->flags & ATA_QCFLAG_DMAMAP));
3545 WARN_ON(sg == NULL);
3547 if (qc->flags & ATA_QCFLAG_SINGLE)
3548 WARN_ON(qc->n_elem > 1);
3550 VPRINTK("unmapping %u sg elements\n", qc->n_elem);
3552 /* if we padded the buffer out to a 32-bit boundary, and data
3553 * xfer direction is from-device, we must copy from the
3554 * pad buffer back into the supplied buffer
3556 if (qc->pad_len && !(qc->tf.flags & ATA_TFLAG_WRITE))
3557 pad_buf = ap->pad + (qc->tag * ATA_DMA_PAD_SZ);
3559 if (qc->flags & ATA_QCFLAG_SG) {
3561 dma_unmap_sg(ap->dev, sg, qc->n_elem, dir);
3562 /* restore last sg */
3563 sg[qc->orig_n_elem - 1].length += qc->pad_len;
3565 struct scatterlist *psg = &qc->pad_sgent;
3566 void *addr = kmap_atomic(psg->page, KM_IRQ0);
3567 memcpy(addr + psg->offset, pad_buf, qc->pad_len);
3568 kunmap_atomic(addr, KM_IRQ0);
3572 dma_unmap_single(ap->dev,
3573 sg_dma_address(&sg[0]), sg_dma_len(&sg[0]),
3576 sg->length += qc->pad_len;
3578 memcpy(qc->buf_virt + sg->length - qc->pad_len,
3579 pad_buf, qc->pad_len);
3582 qc->flags &= ~ATA_QCFLAG_DMAMAP;
3587 * ata_fill_sg - Fill PCI IDE PRD table
3588 * @qc: Metadata associated with taskfile to be transferred
3590 * Fill PCI IDE PRD (scatter-gather) table with segments
3591 * associated with the current disk command.
3594 * spin_lock_irqsave(host lock)
3597 static void ata_fill_sg(struct ata_queued_cmd *qc)
3599 struct ata_port *ap = qc->ap;
3600 struct scatterlist *sg;
3603 WARN_ON(qc->__sg == NULL);
3604 WARN_ON(qc->n_elem == 0 && qc->pad_len == 0);
3607 ata_for_each_sg(sg, qc) {
3611 /* determine if physical DMA addr spans 64K boundary.
3612 * Note h/w doesn't support 64-bit, so we unconditionally
3613 * truncate dma_addr_t to u32.
3615 addr = (u32) sg_dma_address(sg);
3616 sg_len = sg_dma_len(sg);
3619 offset = addr & 0xffff;
3621 if ((offset + sg_len) > 0x10000)
3622 len = 0x10000 - offset;
3624 ap->prd[idx].addr = cpu_to_le32(addr);
3625 ap->prd[idx].flags_len = cpu_to_le32(len & 0xffff);
3626 VPRINTK("PRD[%u] = (0x%X, 0x%X)\n", idx, addr, len);
3635 ap->prd[idx - 1].flags_len |= cpu_to_le32(ATA_PRD_EOT);
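/*
 * Worked example for the 64K rule above: a 12KB segment at bus address
 * 0x1F000 has offset 0xF000 within its 64K block, so the first PRD
 * entry carries len = 0x10000 - 0xF000 = 0x1000 bytes and the
 * remaining 8KB are emitted as a second entry starting at 0x20000.
 */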
3638 * ata_check_atapi_dma - Check whether ATAPI DMA can be supported
3639 * @qc: Metadata associated with taskfile to check
3641 * Allow low-level driver to filter ATA PACKET commands, returning
3642 * a status indicating whether or not it is OK to use DMA for the
3643 * supplied PACKET command.
3646 * spin_lock_irqsave(host lock)
3648 * RETURNS: 0 when ATAPI DMA can be used
3651 int ata_check_atapi_dma(struct ata_queued_cmd *qc)
3653 struct ata_port *ap = qc->ap;
3654 int rc = 0; /* Assume ATAPI DMA is OK by default */
3656 if (ap->ops->check_atapi_dma)
3657 rc = ap->ops->check_atapi_dma(qc);
3662 * ata_qc_prep - Prepare taskfile for submission
3663 * @qc: Metadata associated with taskfile to be prepared
3665 * Prepare ATA taskfile for submission.
3668 * spin_lock_irqsave(host lock)
3670 void ata_qc_prep(struct ata_queued_cmd *qc)
3672 if (!(qc->flags & ATA_QCFLAG_DMAMAP))
3678 void ata_noop_qc_prep(struct ata_queued_cmd *qc) { }
3681 * ata_sg_init_one - Associate command with memory buffer
3682 * @qc: Command to be associated
3683 * @buf: Memory buffer
3684 * @buflen: Length of memory buffer, in bytes.
3686 * Initialize the data-related elements of queued_cmd @qc
3687 * to point to a single memory buffer, @buf of byte length @buflen.
3690 * spin_lock_irqsave(host lock)
3693 void ata_sg_init_one(struct ata_queued_cmd *qc, void *buf, unsigned int buflen)
3695 qc->flags |= ATA_QCFLAG_SINGLE;
3697 qc->__sg = &qc->sgent;
3699 qc->orig_n_elem = 1;
3701 qc->nbytes = buflen;
3703 sg_init_one(&qc->sgent, buf, buflen);
3707 * ata_sg_init - Associate command with scatter-gather table.
3708 * @qc: Command to be associated
3709 * @sg: Scatter-gather table.
3710 * @n_elem: Number of elements in s/g table.
3712 * Initialize the data-related elements of queued_cmd @qc
3713 * to point to a scatter-gather table @sg, containing @n_elem
3717 * spin_lock_irqsave(host lock)
3720 void ata_sg_init(struct ata_queued_cmd *qc, struct scatterlist *sg,
3721 unsigned int n_elem)
3723 qc->flags |= ATA_QCFLAG_SG;
3725 qc->n_elem = n_elem;
3726 qc->orig_n_elem = n_elem;
3730 * ata_sg_setup_one - DMA-map the memory buffer associated with a command.
3731 * @qc: Command with memory buffer to be mapped.
3733 * DMA-map the memory buffer associated with queued_cmd @qc.
3736 * spin_lock_irqsave(host lock)
3739 * Zero on success, negative on error.
3742 static int ata_sg_setup_one(struct ata_queued_cmd *qc)
3744 struct ata_port *ap = qc->ap;
3745 int dir = qc->dma_dir;
3746 struct scatterlist *sg = qc->__sg;
3747 dma_addr_t dma_address;
3750 /* we must lengthen transfers to end on a 32-bit boundary */
3751 qc->pad_len = sg->length & 3;
3753 void *pad_buf = ap->pad + (qc->tag * ATA_DMA_PAD_SZ);
3754 struct scatterlist *psg = &qc->pad_sgent;
3756 WARN_ON(qc->dev->class != ATA_DEV_ATAPI);
3758 memset(pad_buf, 0, ATA_DMA_PAD_SZ);
3760 if (qc->tf.flags & ATA_TFLAG_WRITE)
3761 memcpy(pad_buf, qc->buf_virt + sg->length - qc->pad_len,
3764 sg_dma_address(psg) = ap->pad_dma + (qc->tag * ATA_DMA_PAD_SZ);
3765 sg_dma_len(psg) = ATA_DMA_PAD_SZ;
3767 sg->length -= qc->pad_len;
3768 if (sg->length == 0)
3771 DPRINTK("padding done, sg->length=%u pad_len=%u\n",
3772 sg->length, qc->pad_len);
3780 dma_address = dma_map_single(ap->dev, qc->buf_virt,
3782 if (dma_mapping_error(dma_address)) {
3784 sg->length += qc->pad_len;
3788 sg_dma_address(sg) = dma_address;
3789 sg_dma_len(sg) = sg->length;
3792 DPRINTK("mapped buffer of %d bytes for %s\n", sg_dma_len(sg),
3793 qc->tf.flags & ATA_TFLAG_WRITE ? "write" : "read");
3799 * ata_sg_setup - DMA-map the scatter-gather table associated with a command.
3800 * @qc: Command with scatter-gather table to be mapped.
3802 * DMA-map the scatter-gather table associated with queued_cmd @qc.
3805 * spin_lock_irqsave(host lock)
3808 * Zero on success, negative on error.
3812 static int ata_sg_setup(struct ata_queued_cmd *qc)
3814 struct ata_port *ap = qc->ap;
3815 struct scatterlist *sg = qc->__sg;
3816 struct scatterlist *lsg = &sg[qc->n_elem - 1];
3817 int n_elem, pre_n_elem, dir, trim_sg = 0;
3819 VPRINTK("ENTER, ata%u\n", ap->id);
3820 WARN_ON(!(qc->flags & ATA_QCFLAG_SG));
3822 /* we must lengthen transfers to end on a 32-bit boundary */
3823 qc->pad_len = lsg->length & 3;
3825 void *pad_buf = ap->pad + (qc->tag * ATA_DMA_PAD_SZ);
3826 struct scatterlist *psg = &qc->pad_sgent;
3827 unsigned int offset;
3829 WARN_ON(qc->dev->class != ATA_DEV_ATAPI);
3831 memset(pad_buf, 0, ATA_DMA_PAD_SZ);
3834 * psg->page/offset are used to copy to-be-written
3835 * data in this function or read data in ata_sg_clean.
3837 offset = lsg->offset + lsg->length - qc->pad_len;
3838 psg->page = nth_page(lsg->page, offset >> PAGE_SHIFT);
3839 psg->offset = offset_in_page(offset);
3841 if (qc->tf.flags & ATA_TFLAG_WRITE) {
3842 void *addr = kmap_atomic(psg->page, KM_IRQ0);
3843 memcpy(pad_buf, addr + psg->offset, qc->pad_len);
3844 kunmap_atomic(addr, KM_IRQ0);
3847 sg_dma_address(psg) = ap->pad_dma + (qc->tag * ATA_DMA_PAD_SZ);
3848 sg_dma_len(psg) = ATA_DMA_PAD_SZ;
3850 lsg->length -= qc->pad_len;
3851 if (lsg->length == 0)
3854 DPRINTK("padding done, sg[%d].length=%u pad_len=%u\n",
3855 qc->n_elem - 1, lsg->length, qc->pad_len);
3858 pre_n_elem = qc->n_elem;
3859 if (trim_sg && pre_n_elem)
3868 n_elem = dma_map_sg(ap->dev, sg, pre_n_elem, dir);
3870 /* restore last sg */
3871 lsg->length += qc->pad_len;
3875 DPRINTK("%d sg elements mapped\n", n_elem);
3878 qc->n_elem = n_elem;
3884 * swap_buf_le16 - swap halves of 16-bit words in place
3885 * @buf: Buffer to swap
3886 * @buf_words: Number of 16-bit words in buffer.
3888 * Swap halves of 16-bit words if needed to convert from
3889 * little-endian byte order to native cpu byte order, or
3893 * Inherited from caller.
3895 void swap_buf_le16(u16 *buf, unsigned int buf_words)
3900 for (i = 0; i < buf_words; i++)
3901 buf[i] = le16_to_cpu(buf[i]);
3902 #endif /* __BIG_ENDIAN */
3906 * ata_mmio_data_xfer - Transfer data by MMIO
3907 * @adev: device for this I/O
3909 * @buflen: buffer length
3910 * @write_data: read/write
3912 * Transfer data from/to the device data register by MMIO.
3915 * Inherited from caller.
3918 void ata_mmio_data_xfer(struct ata_device *adev, unsigned char *buf,
3919 unsigned int buflen, int write_data)
3921 struct ata_port *ap = adev->ap;
3923 unsigned int words = buflen >> 1;
3924 u16 *buf16 = (u16 *) buf;
3925 void __iomem *mmio = (void __iomem *)ap->ioaddr.data_addr;
3927 /* Transfer multiple of 2 bytes */
3929 for (i = 0; i < words; i++)
3930 writew(le16_to_cpu(buf16[i]), mmio);
3932 for (i = 0; i < words; i++)
3933 buf16[i] = cpu_to_le16(readw(mmio));
3936 /* Transfer trailing 1 byte, if any. */
3937 if (unlikely(buflen & 0x01)) {
3938 u16 align_buf[1] = { 0 };
3939 unsigned char *trailing_buf = buf + buflen - 1;
3942 memcpy(align_buf, trailing_buf, 1);
3943 writew(le16_to_cpu(align_buf[0]), mmio);
3945 align_buf[0] = cpu_to_le16(readw(mmio));
3946 memcpy(trailing_buf, align_buf, 1);
3952 * ata_pio_data_xfer - Transfer data by PIO
3953 * @adev: device to target
3955 * @buflen: buffer length
3956 * @write_data: read/write
3958 * Transfer data from/to the device data register by PIO.
3961 * Inherited from caller.
3964 void ata_pio_data_xfer(struct ata_device *adev, unsigned char *buf,
3965 unsigned int buflen, int write_data)
3967 struct ata_port *ap = adev->ap;
3968 unsigned int words = buflen >> 1;
3970 /* Transfer multiple of 2 bytes */
3972 outsw(ap->ioaddr.data_addr, buf, words);
3974 insw(ap->ioaddr.data_addr, buf, words);
3976 /* Transfer trailing 1 byte, if any. */
3977 if (unlikely(buflen & 0x01)) {
3978 u16 align_buf[1] = { 0 };
3979 unsigned char *trailing_buf = buf + buflen - 1;
3982 memcpy(align_buf, trailing_buf, 1);
3983 outw(le16_to_cpu(align_buf[0]), ap->ioaddr.data_addr);
3985 align_buf[0] = cpu_to_le16(inw(ap->ioaddr.data_addr));
3986 memcpy(trailing_buf, align_buf, 1);
3992 * ata_pio_data_xfer_noirq - Transfer data by PIO
3993 * @adev: device to target
3995 * @buflen: buffer length
3996 * @write_data: read/write
3998 * Transfer data from/to the device data register by PIO. Do the
3999 * transfer with interrupts disabled.
4002 * Inherited from caller.
4005 void ata_pio_data_xfer_noirq(struct ata_device *adev, unsigned char *buf,
4006 unsigned int buflen, int write_data)
4008 unsigned long flags;
4009 local_irq_save(flags);
4010 ata_pio_data_xfer(adev, buf, buflen, write_data);
4011 local_irq_restore(flags);
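/*
 * Illustrative sketch: an LLDD selects one of the three helpers above
 * as its ->data_xfer hook, which ata_pio_sector() below calls through
 * ap->ops->data_xfer.  The ops struct name is hypothetical.
 *
 *	static const struct ata_port_operations my_port_ops = {
 *		...
 *		.data_xfer	= ata_mmio_data_xfer,
 *		...
 *	};
 */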
4016 * ata_pio_sector - Transfer ATA_SECT_SIZE (512 bytes) of data.
4017 * @qc: Command on going
4019 * Transfer ATA_SECT_SIZE of data from/to the ATA device.
4022 * Inherited from caller.
4025 static void ata_pio_sector(struct ata_queued_cmd *qc)
4027 int do_write = (qc->tf.flags & ATA_TFLAG_WRITE);
4028 struct scatterlist *sg = qc->__sg;
4029 struct ata_port *ap = qc->ap;
4031 unsigned int offset;
4034 if (qc->cursect == (qc->nsect - 1))
4035 ap->hsm_task_state = HSM_ST_LAST;
4037 page = sg[qc->cursg].page;
4038 offset = sg[qc->cursg].offset + qc->cursg_ofs * ATA_SECT_SIZE;
4040 /* get the current page and offset */
4041 page = nth_page(page, (offset >> PAGE_SHIFT));
4042 offset %= PAGE_SIZE;
4044 DPRINTK("data %s\n", qc->tf.flags & ATA_TFLAG_WRITE ? "write" : "read");
4046 if (PageHighMem(page)) {
4047 unsigned long flags;
4049 /* FIXME: use a bounce buffer */
4050 local_irq_save(flags);
4051 buf = kmap_atomic(page, KM_IRQ0);
4053 /* do the actual data transfer */
4054 ap->ops->data_xfer(qc->dev, buf + offset, ATA_SECT_SIZE, do_write);
4056 kunmap_atomic(buf, KM_IRQ0);
4057 local_irq_restore(flags);
4059 buf = page_address(page);
4060 ap->ops->data_xfer(qc->dev, buf + offset, ATA_SECT_SIZE, do_write);
4066 if ((qc->cursg_ofs * ATA_SECT_SIZE) == (&sg[qc->cursg])->length) {
4073 * ata_pio_sectors - Transfer one or many 512-byte sectors.
4074 * @qc: Command on going
4076 * Transfer one or many ATA_SECT_SIZE of data from/to the
4077 * ATA device for the DRQ request.
4080 * Inherited from caller.
4083 static void ata_pio_sectors(struct ata_queued_cmd *qc)
4085 if (is_multi_taskfile(&qc->tf)) {
4086 /* READ/WRITE MULTIPLE */
4089 WARN_ON(qc->dev->multi_count == 0);
4091 nsect = min(qc->nsect - qc->cursect, qc->dev->multi_count);
4099 * atapi_send_cdb - Write CDB bytes to hardware
4100 * @ap: Port to which ATAPI device is attached.
4101 * @qc: Taskfile currently active
4103 * When the device has indicated its readiness to accept
4104 * a CDB, this function is called. Send the CDB.
4110 static void atapi_send_cdb(struct ata_port *ap, struct ata_queued_cmd *qc)
4113 DPRINTK("send cdb\n");
4114 WARN_ON(qc->dev->cdb_len < 12);
4116 ap->ops->data_xfer(qc->dev, qc->cdb, qc->dev->cdb_len, 1);
4117 ata_altstatus(ap); /* flush */
4119 switch (qc->tf.protocol) {
4120 case ATA_PROT_ATAPI:
4121 ap->hsm_task_state = HSM_ST;
4123 case ATA_PROT_ATAPI_NODATA:
4124 ap->hsm_task_state = HSM_ST_LAST;
4126 case ATA_PROT_ATAPI_DMA:
4127 ap->hsm_task_state = HSM_ST_LAST;
4128 /* initiate bmdma */
4129 ap->ops->bmdma_start(qc);
4135 * __atapi_pio_bytes - Transfer data from/to the ATAPI device.
4136 * @qc: Command on going
4137 * @bytes: number of bytes
4139 Transfer data from/to the ATAPI device.
4142 * Inherited from caller.
4146 static void __atapi_pio_bytes(struct ata_queued_cmd *qc, unsigned int bytes)
4148 int do_write = (qc->tf.flags & ATA_TFLAG_WRITE);
4149 struct scatterlist *sg = qc->__sg;
4150 struct ata_port *ap = qc->ap;
4153 unsigned int offset, count;
4155 if (qc->curbytes + bytes >= qc->nbytes)
4156 ap->hsm_task_state = HSM_ST_LAST;
4159 if (unlikely(qc->cursg >= qc->n_elem)) {
4161 * The end of qc->sg is reached and the device expects
4162 * more data to transfer. To avoid overrunning qc->sg while still
4163 * fulfilling the length specified in the byte count register,
4164 * - for the read case, discard trailing data from the device
4165 * - for the write case, pad with zero data to the device
4167 u16 pad_buf[1] = { 0 };
4168 unsigned int words = bytes >> 1;
4171 if (words) /* warning if bytes > 1 */
4172 ata_dev_printk(qc->dev, KERN_WARNING,
4173 "%u bytes trailing data\n", bytes);
4175 for (i = 0; i < words; i++)
4176 ap->ops->data_xfer(qc->dev, (unsigned char*)pad_buf, 2, do_write);
4178 ap->hsm_task_state = HSM_ST_LAST;
4182 sg = &qc->__sg[qc->cursg];
4185 offset = sg->offset + qc->cursg_ofs;
4187 /* get the current page and offset */
4188 page = nth_page(page, (offset >> PAGE_SHIFT));
4189 offset %= PAGE_SIZE;
4191 /* don't overrun current sg */
4192 count = min(sg->length - qc->cursg_ofs, bytes);
4194 /* don't cross page boundaries */
4195 count = min(count, (unsigned int)PAGE_SIZE - offset);
4197 DPRINTK("data %s\n", qc->tf.flags & ATA_TFLAG_WRITE ? "write" : "read");
4199 if (PageHighMem(page)) {
4200 unsigned long flags;
4202 /* FIXME: use bounce buffer */
4203 local_irq_save(flags);
4204 buf = kmap_atomic(page, KM_IRQ0);
4206 /* do the actual data transfer */
4207 ap->ops->data_xfer(qc->dev, buf + offset, count, do_write);
4209 kunmap_atomic(buf, KM_IRQ0);
4210 local_irq_restore(flags);
4212 buf = page_address(page);
4213 ap->ops->data_xfer(qc->dev, buf + offset, count, do_write);
4217 qc->curbytes += count;
4218 qc->cursg_ofs += count;
4220 if (qc->cursg_ofs == sg->length) {
4230 * atapi_pio_bytes - Transfer data from/to the ATAPI device.
4231 * @qc: Command on going
4233 Transfer data from/to the ATAPI device.
4236 * Inherited from caller.
4239 static void atapi_pio_bytes(struct ata_queued_cmd *qc)
4241 struct ata_port *ap = qc->ap;
4242 struct ata_device *dev = qc->dev;
4243 unsigned int ireason, bc_lo, bc_hi, bytes;
4244 int i_write, do_write = (qc->tf.flags & ATA_TFLAG_WRITE) ? 1 : 0;
4246 /* Abuse qc->result_tf for temp storage of intermediate TF
4247 * here to save some kernel stack usage.
4248 * For normal completion, qc->result_tf is not relevant. For
4249 * error, qc->result_tf is later overwritten by ata_qc_complete().
4250 * So, the correctness of qc->result_tf is not affected.
4252 ap->ops->tf_read(ap, &qc->result_tf);
4253 ireason = qc->result_tf.nsect;
4254 bc_lo = qc->result_tf.lbam;
4255 bc_hi = qc->result_tf.lbah;
4256 bytes = (bc_hi << 8) | bc_lo;
4258 /* shall be cleared to zero, indicating xfer of data */
4259 if (ireason & (1 << 0))
4262 /* make sure transfer direction matches expected */
4263 i_write = ((ireason & (1 << 1)) == 0) ? 1 : 0;
4264 if (do_write != i_write)
4267 VPRINTK("ata%u: xfering %d bytes\n", ap->id, bytes);
4269 __atapi_pio_bytes(qc, bytes);
4274 ata_dev_printk(dev, KERN_INFO, "ATAPI check failed\n");
4275 qc->err_mask |= AC_ERR_HSM;
4276 ap->hsm_task_state = HSM_ST_ERR;
4280 * ata_hsm_ok_in_wq - Check if the qc can be handled in the workqueue.
4281 * @ap: the target ata_port
4285 * 1 if ok in workqueue, 0 otherwise.
4288 static inline int ata_hsm_ok_in_wq(struct ata_port *ap, struct ata_queued_cmd *qc)
4290 if (qc->tf.flags & ATA_TFLAG_POLLING)
4293 if (ap->hsm_task_state == HSM_ST_FIRST) {
4294 if (qc->tf.protocol == ATA_PROT_PIO &&
4295 (qc->tf.flags & ATA_TFLAG_WRITE))
4298 if (is_atapi_taskfile(&qc->tf) &&
4299 !(qc->dev->flags & ATA_DFLAG_CDB_INTR))
4307 * ata_hsm_qc_complete - finish a qc running on standard HSM
4308 * @qc: Command to complete
4309 * @in_wq: 1 if called from workqueue, 0 otherwise
4311 * Finish @qc which is running on standard HSM.
4314 * If @in_wq is zero, spin_lock_irqsave(host lock).
4315 * Otherwise, none on entry and grabs host lock.
4317 static void ata_hsm_qc_complete(struct ata_queued_cmd *qc, int in_wq)
4319 struct ata_port *ap = qc->ap;
4320 unsigned long flags;
4322 if (ap->ops->error_handler) {
4324 spin_lock_irqsave(ap->lock, flags);
4326 /* EH might have kicked in while host lock is
4329 qc = ata_qc_from_tag(ap, qc->tag);
4331 if (likely(!(qc->err_mask & AC_ERR_HSM))) {
4333 ata_qc_complete(qc);
4335 ata_port_freeze(ap);
4338 spin_unlock_irqrestore(ap->lock, flags);
4340 if (likely(!(qc->err_mask & AC_ERR_HSM)))
4341 ata_qc_complete(qc);
4343 ata_port_freeze(ap);
4347 spin_lock_irqsave(ap->lock, flags);
4349 ata_qc_complete(qc);
4350 spin_unlock_irqrestore(ap->lock, flags);
4352 ata_qc_complete(qc);
4355 ata_altstatus(ap); /* flush */
4359 * ata_hsm_move - move the HSM to the next state.
4360 * @ap: the target ata_port
4362 * @status: current device status
4363 * @in_wq: 1 if called from workqueue, 0 otherwise
4366 * 1 when poll next status needed, 0 otherwise.
4368 int ata_hsm_move(struct ata_port *ap, struct ata_queued_cmd *qc,
4369 u8 status, int in_wq)
4371 unsigned long flags = 0;
4374 WARN_ON((qc->flags & ATA_QCFLAG_ACTIVE) == 0);
4376 /* Make sure ata_qc_issue_prot() does not throw things
4377 * like DMA polling into the workqueue. Notice that
4378 * in_wq is not equivalent to (qc->tf.flags & ATA_TFLAG_POLLING).
4380 WARN_ON(in_wq != ata_hsm_ok_in_wq(ap, qc));
4383 DPRINTK("ata%u: protocol %d task_state %d (dev_stat 0x%X)\n",
4384 ap->id, qc->tf.protocol, ap->hsm_task_state, status);
4386 switch (ap->hsm_task_state) {
4388 /* Send first data block or PACKET CDB */
4390 /* If polling, we will stay in the work queue after
4391 * sending the data. Otherwise, interrupt handler
4392 * takes over after sending the data.
4394 poll_next = (qc->tf.flags & ATA_TFLAG_POLLING);
4396 /* check device status */
4397 if (unlikely((status & ATA_DRQ) == 0)) {
4398 /* handle BSY=0, DRQ=0 as error */
4399 if (likely(status & (ATA_ERR | ATA_DF)))
4400 /* device stops HSM for abort/error */
4401 qc->err_mask |= AC_ERR_DEV;
4403 /* HSM violation. Let EH handle this */
4404 qc->err_mask |= AC_ERR_HSM;
4406 ap->hsm_task_state = HSM_ST_ERR;
4410 /* Device should not ask for data transfer (DRQ=1)
4411 * when it finds something wrong.
4412 * We ignore DRQ here and stop the HSM by
4413 * changing hsm_task_state to HSM_ST_ERR and
4414 * let the EH abort the command or reset the device.
4416 if (unlikely(status & (ATA_ERR | ATA_DF))) {
4417 printk(KERN_WARNING "ata%d: DRQ=1 with device error, dev_stat 0x%X\n",
4419 qc->err_mask |= AC_ERR_HSM;
4420 ap->hsm_task_state = HSM_ST_ERR;
4424 /* Send the CDB (atapi) or the first data block (ata pio out).
4425 * During the state transition, interrupt handler shouldn't
4426 * be invoked before the data transfer is complete and
4427 * hsm_task_state is changed. Hence, the following locking.
4430 spin_lock_irqsave(ap->lock, flags);
4432 if (qc->tf.protocol == ATA_PROT_PIO) {
4433 /* PIO data out protocol.
4434 * send first data block.
4437 /* ata_pio_sectors() might change the state
4438 * to HSM_ST_LAST. so, the state is changed here
4439 * before ata_pio_sectors().
4441 ap->hsm_task_state = HSM_ST;
4442 ata_pio_sectors(qc);
4443 ata_altstatus(ap); /* flush */
4446 atapi_send_cdb(ap, qc);
4449 spin_unlock_irqrestore(ap->lock, flags);
4451 /* if polling, ata_pio_task() handles the rest.
4452 * otherwise, interrupt handler takes over from here.
4457 /* complete command or read/write the data register */
4458 if (qc->tf.protocol == ATA_PROT_ATAPI) {
4459 /* ATAPI PIO protocol */
4460 if ((status & ATA_DRQ) == 0) {
4461 /* No more data to transfer or device error.
4462 * Device error will be tagged in HSM_ST_LAST.
4464 ap->hsm_task_state = HSM_ST_LAST;
4468 /* Device should not ask for data transfer (DRQ=1)
4469 * when it finds something wrong.
4470 * We ignore DRQ here and stop the HSM by
4471 * changing hsm_task_state to HSM_ST_ERR and
4472 * let the EH abort the command or reset the device.
4474 if (unlikely(status & (ATA_ERR | ATA_DF))) {
4475 printk(KERN_WARNING "ata%d: DRQ=1 with device error, dev_stat 0x%X\n",
4477 qc->err_mask |= AC_ERR_HSM;
4478 ap->hsm_task_state = HSM_ST_ERR;
4482 atapi_pio_bytes(qc);
4484 if (unlikely(ap->hsm_task_state == HSM_ST_ERR))
4485 /* bad ireason reported by device */
4489 /* ATA PIO protocol */
4490 if (unlikely((status & ATA_DRQ) == 0)) {
4491 /* handle BSY=0, DRQ=0 as error */
4492 if (likely(status & (ATA_ERR | ATA_DF)))
4493 /* device stops HSM for abort/error */
4494 qc->err_mask |= AC_ERR_DEV;
4496 /* HSM violation. Let EH handle this.
4497 * Phantom devices also trigger this
4498 * condition. Mark hint.
4500 qc->err_mask |= AC_ERR_HSM |
4503 ap->hsm_task_state = HSM_ST_ERR;
4507 /* For PIO reads, some devices may ask for
4508 * data transfer (DRQ=1) along with ERR=1.
4509 * We respect DRQ here and transfer one
4510 * block of junk data before changing the
4511 * hsm_task_state to HSM_ST_ERR.
4513 * For PIO writes, ERR=1 DRQ=1 doesn't make
4514 * sense since the data block has been
4515 * transferred to the device.
4517 if (unlikely(status & (ATA_ERR | ATA_DF))) {
4518 /* data might be corrupted */
4519 qc->err_mask |= AC_ERR_DEV;
4521 if (!(qc->tf.flags & ATA_TFLAG_WRITE)) {
4522 ata_pio_sectors(qc);
4524 status = ata_wait_idle(ap);
4527 if (status & (ATA_BUSY | ATA_DRQ))
4528 qc->err_mask |= AC_ERR_HSM;
4530 /* ata_pio_sectors() might change the
4531 * state to HSM_ST_LAST. so, the state
4532 * is changed after ata_pio_sectors().
4534 ap->hsm_task_state = HSM_ST_ERR;
4538 ata_pio_sectors(qc);
4540 if (ap->hsm_task_state == HSM_ST_LAST &&
4541 (!(qc->tf.flags & ATA_TFLAG_WRITE))) {
4544 status = ata_wait_idle(ap);
4549 ata_altstatus(ap); /* flush */
4554 if (unlikely(!ata_ok(status))) {
4555 qc->err_mask |= __ac_err_mask(status);
4556 ap->hsm_task_state = HSM_ST_ERR;
4560 /* no more data to transfer */
4561 DPRINTK("ata%u: dev %u command complete, drv_stat 0x%x\n",
4562 ap->id, qc->dev->devno, status);
4564 WARN_ON(qc->err_mask);
4566 ap->hsm_task_state = HSM_ST_IDLE;
4568 /* complete taskfile transaction */
4569 ata_hsm_qc_complete(qc, in_wq);
4575 /* make sure qc->err_mask is available to
4576 * know what's wrong and recover
4578 WARN_ON(qc->err_mask == 0);
4580 ap->hsm_task_state = HSM_ST_IDLE;
4582 /* complete taskfile transaction */
4583 ata_hsm_qc_complete(qc, in_wq);
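/*
 * Illustrative sketch: the interrupt path drives the HSM with in_wq=0,
 * roughly
 *
 *	status = ata_chk_status(ap);
 *	ata_hsm_move(ap, qc, status, 0);
 *
 * while ata_pio_task() below passes in_wq=1 and re-queues itself when
 * another poll is needed.
 */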
4595 static void ata_pio_task(void *_data)
4597 struct ata_queued_cmd *qc = _data;
4598 struct ata_port *ap = qc->ap;
4603 WARN_ON(ap->hsm_task_state == HSM_ST_IDLE);
4606 * This is purely heuristic. This is a fast path.
4607 * Sometimes when we enter, BSY will be cleared in
4608 * a chk-status or two. If not, the drive is probably seeking
4609 * or something. Snooze for a couple msecs, then
4610 * chk-status again. If still busy, queue delayed work.
4612 status = ata_busy_wait(ap, ATA_BUSY, 5);
4613 if (status & ATA_BUSY) {
4615 status = ata_busy_wait(ap, ATA_BUSY, 10);
4616 if (status & ATA_BUSY) {
4617 ata_port_queue_task(ap, ata_pio_task, qc, ATA_SHORT_PAUSE);
4623 poll_next = ata_hsm_move(ap, qc, status, 1);
4625 /* another command or interrupt handler
4626 * may be running at this point.
4633 * ata_qc_new - Request an available ATA command, for queueing
4634 * @ap: Port associated with device @dev
4635 * @dev: Device from whom we request an available command structure
4641 static struct ata_queued_cmd *ata_qc_new(struct ata_port *ap)
4643 struct ata_queued_cmd *qc = NULL;
4646 /* no command while frozen */
4647 if (unlikely(ap->pflags & ATA_PFLAG_FROZEN))
4650 /* the last tag is reserved for internal command. */
4651 for (i = 0; i < ATA_MAX_QUEUE - 1; i++)
4652 if (!test_and_set_bit(i, &ap->qc_allocated)) {
4653 qc = __ata_qc_from_tag(ap, i);
4664 * ata_qc_new_init - Request an available ATA command, and initialize it
4665 * @dev: Device from whom we request an available command structure
4671 struct ata_queued_cmd *ata_qc_new_init(struct ata_device *dev)
4673 struct ata_port *ap = dev->ap;
4674 struct ata_queued_cmd *qc;
4676 qc = ata_qc_new(ap);
4689 * ata_qc_free - free unused ata_queued_cmd
4690 * @qc: Command to complete
4692 * Designed to free unused ata_queued_cmd object
4693 * in case something prevents using it.
4696 * spin_lock_irqsave(host lock)
4698 void ata_qc_free(struct ata_queued_cmd *qc)
4700 struct ata_port *ap = qc->ap;
4703 WARN_ON(qc == NULL); /* ata_qc_from_tag _might_ return NULL */
4707 if (likely(ata_tag_valid(tag))) {
4708 qc->tag = ATA_TAG_POISON;
4709 clear_bit(tag, &ap->qc_allocated);
4713 void __ata_qc_complete(struct ata_queued_cmd *qc)
4715 struct ata_port *ap = qc->ap;
4717 WARN_ON(qc == NULL); /* ata_qc_from_tag _might_ return NULL */
4718 WARN_ON(!(qc->flags & ATA_QCFLAG_ACTIVE));
4720 if (likely(qc->flags & ATA_QCFLAG_DMAMAP))
4723 /* command should be marked inactive atomically with qc completion */
4724 if (qc->tf.protocol == ATA_PROT_NCQ)
4725 ap->sactive &= ~(1 << qc->tag);
4727 ap->active_tag = ATA_TAG_POISON;
4729 /* atapi: mark qc as inactive to prevent the interrupt handler
4730 * from completing the command twice later, before the error handler
4731 * is called. (when rc != 0 and atapi request sense is needed)
4733 qc->flags &= ~ATA_QCFLAG_ACTIVE;
4734 ap->qc_active &= ~(1 << qc->tag);
4736 /* call completion callback */
4737 qc->complete_fn(qc);
4740 static void fill_result_tf(struct ata_queued_cmd *qc)
4742 struct ata_port *ap = qc->ap;
4744 ap->ops->tf_read(ap, &qc->result_tf);
4745 qc->result_tf.flags = qc->tf.flags;
4749 * ata_qc_complete - Complete an active ATA command
4750 * @qc: Command to complete
4753 * Indicate to the mid and upper layers that an ATA
4754 * command has completed, with either an ok or not-ok status.
4757 * spin_lock_irqsave(host lock)
4759 void ata_qc_complete(struct ata_queued_cmd *qc)
4761 struct ata_port *ap = qc->ap;
4763 /* XXX: New EH and old EH use different mechanisms to
4764 * synchronize EH with regular execution path.
4766 * In new EH, a failed qc is marked with ATA_QCFLAG_FAILED.
4767 * Normal execution path is responsible for not accessing a
4768 * failed qc. libata core enforces the rule by returning NULL
4769 * from ata_qc_from_tag() for failed qcs.
4771 * Old EH depends on ata_qc_complete() nullifying completion
4772 * requests if ATA_QCFLAG_EH_SCHEDULED is set. Old EH does
4773 * not synchronize with interrupt handler. Only PIO task is
4776 if (ap->ops->error_handler) {
4777 WARN_ON(ap->pflags & ATA_PFLAG_FROZEN);
4779 if (unlikely(qc->err_mask))
4780 qc->flags |= ATA_QCFLAG_FAILED;
4782 if (unlikely(qc->flags & ATA_QCFLAG_FAILED)) {
4783 if (!ata_tag_internal(qc->tag)) {
4784 /* always fill result TF for failed qc */
4786 ata_qc_schedule_eh(qc);
4791 /* read result TF if requested */
4792 if (qc->flags & ATA_QCFLAG_RESULT_TF)
4795 __ata_qc_complete(qc);
4797 if (qc->flags & ATA_QCFLAG_EH_SCHEDULED)
4800 /* read result TF if failed or requested */
4801 if (qc->err_mask || qc->flags & ATA_QCFLAG_RESULT_TF)
4804 __ata_qc_complete(qc);
4809 * ata_qc_complete_multiple - Complete multiple qcs successfully
4810 * @ap: port in question
4811 * @qc_active: new qc_active mask
4812 * @finish_qc: LLDD callback invoked before completing a qc
4814 * Complete in-flight commands. This function is meant to be
4815 * called from a low-level driver's interrupt routine to complete
4816 * requests normally. ap->qc_active and @qc_active are compared
4817 * and commands are completed accordingly.
4820 * spin_lock_irqsave(host lock)
4823 * Number of completed commands on success, -errno otherwise.
4825 int ata_qc_complete_multiple(struct ata_port *ap, u32 qc_active,
4826 void (*finish_qc)(struct ata_queued_cmd *))
4832 done_mask = ap->qc_active ^ qc_active;
4834 if (unlikely(done_mask & qc_active)) {
4835 ata_port_printk(ap, KERN_ERR, "illegal qc_active transition "
4836 "(%08x->%08x)\n", ap->qc_active, qc_active);
4840 for (i = 0; i < ATA_MAX_QUEUE; i++) {
4841 struct ata_queued_cmd *qc;
4843 if (!(done_mask & (1 << i)))
4846 if ((qc = ata_qc_from_tag(ap, i))) {
4849 ata_qc_complete(qc);
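/*
 * Illustrative sketch (hypothetical register and pointer names): an
 * NCQ capable LLDD reads its hardware's active-tag bitmap and hands it
 * over; @finish_qc may be NULL when no per-command fixup is needed.
 *
 *	u32 qc_active = readl(port_mmio + MY_SACTIVE_REG);
 *
 *	ata_qc_complete_multiple(ap, qc_active, NULL);
 */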
4857 static inline int ata_should_dma_map(struct ata_queued_cmd *qc)
4859 struct ata_port *ap = qc->ap;
4861 switch (qc->tf.protocol) {
4864 case ATA_PROT_ATAPI_DMA:
4867 case ATA_PROT_ATAPI:
4869 if (ap->flags & ATA_FLAG_PIO_DMA)
4882 * ata_qc_issue - issue taskfile to device
4883 * @qc: command to issue to device
4885 * Prepare an ATA command for submission to the device.
4886 * This includes mapping the data into a DMA-able
4887 * area, filling in the S/G table, and finally
4888 * writing the taskfile to hardware, starting the command.
4891 * spin_lock_irqsave(host lock)
4893 void ata_qc_issue(struct ata_queued_cmd *qc)
4895 struct ata_port *ap = qc->ap;
4897 /* Make sure only one non-NCQ command is outstanding. The
4898 * check is skipped for old EH because it reuses active qc to
4899 * request ATAPI sense.
4901 WARN_ON(ap->ops->error_handler && ata_tag_valid(ap->active_tag));
4903 if (qc->tf.protocol == ATA_PROT_NCQ) {
4904 WARN_ON(ap->sactive & (1 << qc->tag));
4905 ap->sactive |= 1 << qc->tag;
4907 WARN_ON(ap->sactive);
4908 ap->active_tag = qc->tag;
4911 qc->flags |= ATA_QCFLAG_ACTIVE;
4912 ap->qc_active |= 1 << qc->tag;
4914 if (ata_should_dma_map(qc)) {
4915 if (qc->flags & ATA_QCFLAG_SG) {
4916 if (ata_sg_setup(qc))
4918 } else if (qc->flags & ATA_QCFLAG_SINGLE) {
4919 if (ata_sg_setup_one(qc))
4923 qc->flags &= ~ATA_QCFLAG_DMAMAP;
4926 ap->ops->qc_prep(qc);
4928 qc->err_mask |= ap->ops->qc_issue(qc);
4929 if (unlikely(qc->err_mask))
4934 qc->flags &= ~ATA_QCFLAG_DMAMAP;
4935 qc->err_mask |= AC_ERR_SYSTEM;
4937 ata_qc_complete(qc);
4941 * ata_qc_issue_prot - issue taskfile to device in proto-dependent manner
4942 * @qc: command to issue to device
4944 * Using various libata functions and hooks, this function
4945 * starts an ATA command. ATA commands are grouped into
4946 * classes called "protocols", and issuing each type of protocol
4947 * is slightly different.
4949 * May be used as the qc_issue() entry in ata_port_operations.
4952 * spin_lock_irqsave(host lock)
4955 * Zero on success, AC_ERR_* mask on failure
4958 unsigned int ata_qc_issue_prot(struct ata_queued_cmd *qc)
4960 struct ata_port *ap = qc->ap;
4962 /* Use polling PIO if the LLD doesn't handle
4963 * interrupt-driven PIO and ATAPI CDB interrupts.
4965 if (ap->flags & ATA_FLAG_PIO_POLLING) {
4966 switch (qc->tf.protocol) {
4968 case ATA_PROT_ATAPI:
4969 case ATA_PROT_ATAPI_NODATA:
4970 qc->tf.flags |= ATA_TFLAG_POLLING;
4972 case ATA_PROT_ATAPI_DMA:
4973 if (qc->dev->flags & ATA_DFLAG_CDB_INTR)
4974 /* see ata_dma_blacklisted() */
4982 /* Some controllers show flaky interrupt behavior after
4983 * setting xfer mode. Use polling instead.
4985 if (unlikely(qc->tf.command == ATA_CMD_SET_FEATURES &&
4986 qc->tf.feature == SETFEATURES_XFER) &&
4987 (ap->flags & ATA_FLAG_SETXFER_POLLING))
4988 qc->tf.flags |= ATA_TFLAG_POLLING;
4990 /* select the device */
4991 ata_dev_select(ap, qc->dev->devno, 1, 0);
4993 /* start the command */
4994 switch (qc->tf.protocol) {
4995 case ATA_PROT_NODATA:
4996 if (qc->tf.flags & ATA_TFLAG_POLLING)
4997 ata_qc_set_polling(qc);
4999 ata_tf_to_host(ap, &qc->tf);
5000 ap->hsm_task_state = HSM_ST_LAST;
5002 if (qc->tf.flags & ATA_TFLAG_POLLING)
5003 ata_port_queue_task(ap, ata_pio_task, qc, 0);
5008 WARN_ON(qc->tf.flags & ATA_TFLAG_POLLING);
5010 ap->ops->tf_load(ap, &qc->tf); /* load tf registers */
5011 ap->ops->bmdma_setup(qc); /* set up bmdma */
5012 ap->ops->bmdma_start(qc); /* initiate bmdma */
5013 ap->hsm_task_state = HSM_ST_LAST;
5017 if (qc->tf.flags & ATA_TFLAG_POLLING)
5018 ata_qc_set_polling(qc);
5020 ata_tf_to_host(ap, &qc->tf);
5022 if (qc->tf.flags & ATA_TFLAG_WRITE) {
5023 /* PIO data out protocol */
5024 ap->hsm_task_state = HSM_ST_FIRST;
5025 ata_port_queue_task(ap, ata_pio_task, qc, 0);
5027 /* always send the first data block using
5028 * the ata_pio_task() codepath.
5031 /* PIO data in protocol */
5032 ap->hsm_task_state = HSM_ST;
5034 if (qc->tf.flags & ATA_TFLAG_POLLING)
5035 ata_port_queue_task(ap, ata_pio_task, qc, 0);
5037 /* if polling, ata_pio_task() handles the rest.
5038 * otherwise, interrupt handler takes over from here.
5044 case ATA_PROT_ATAPI:
5045 case ATA_PROT_ATAPI_NODATA:
5046 if (qc->tf.flags & ATA_TFLAG_POLLING)
5047 ata_qc_set_polling(qc);
5049 ata_tf_to_host(ap, &qc->tf);
5051 ap->hsm_task_state = HSM_ST_FIRST;
5053 /* send cdb by polling if no cdb interrupt */
5054 if ((!(qc->dev->flags & ATA_DFLAG_CDB_INTR)) ||
5055 (qc->tf.flags & ATA_TFLAG_POLLING))
5056 ata_port_queue_task(ap, ata_pio_task, qc, 0);
5059 case ATA_PROT_ATAPI_DMA:
5060 WARN_ON(qc->tf.flags & ATA_TFLAG_POLLING);
5062 ap->ops->tf_load(ap, &qc->tf); /* load tf registers */
5063 ap->ops->bmdma_setup(qc); /* set up bmdma */
5064 ap->hsm_task_state = HSM_ST_FIRST;
5066 /* send cdb by polling if no cdb interrupt */
5067 if (!(qc->dev->flags & ATA_DFLAG_CDB_INTR))
5068 ata_port_queue_task(ap, ata_pio_task, qc, 0);
5073 return AC_ERR_SYSTEM;
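/*
 * Illustrative sketch only (not built): ata_qc_issue_prot() is normally
 * plugged into a low-level driver's ata_port_operations alongside the other
 * generic taskfile/BMDMA helpers exported from this file.  The ops table
 * below only shows that wiring; it is not a real driver.
 */
#if 0
static const struct ata_port_operations my_bmdma_ops = {
	.port_disable		= ata_port_disable,
	.tf_load		= ata_tf_load,
	.tf_read		= ata_tf_read,
	.check_status		= ata_check_status,
	.exec_command		= ata_exec_command,
	.dev_select		= ata_std_dev_select,
	.bmdma_setup		= ata_bmdma_setup,
	.bmdma_start		= ata_bmdma_start,
	.bmdma_stop		= ata_bmdma_stop,
	.bmdma_status		= ata_bmdma_status,
	.qc_prep		= ata_qc_prep,
	.qc_issue		= ata_qc_issue_prot,
	.freeze			= ata_bmdma_freeze,
	.thaw			= ata_bmdma_thaw,
	.error_handler		= ata_bmdma_error_handler,
	.post_internal_cmd	= ata_bmdma_post_internal_cmd,
	.irq_handler		= ata_interrupt,
	.irq_clear		= ata_bmdma_irq_clear,
	.port_start		= ata_port_start,
	.port_stop		= ata_port_stop,
	.host_stop		= ata_host_stop,
};
#endif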
5080 * ata_host_intr - Handle host interrupt for given (port, task)
5081 * @ap: Port on which interrupt arrived (possibly...)
5082 * @qc: Taskfile currently active in engine
5084 * Handle host interrupt for given queued command. Currently,
5085 * only DMA interrupts are handled. All other commands are
5086 * handled via polling with interrupts disabled (nIEN bit).
5089 * spin_lock_irqsave(host lock)
5092 * One if interrupt was handled, zero if not (shared irq).
5095 inline unsigned int ata_host_intr (struct ata_port *ap,
5096 struct ata_queued_cmd *qc)
5098 struct ata_eh_info *ehi = &ap->eh_info;
5099 u8 status, host_stat = 0;
5101 VPRINTK("ata%u: protocol %d task_state %d\n",
5102 ap->id, qc->tf.protocol, ap->hsm_task_state);
5104 /* Check whether we are expecting an interrupt in this state */
5105 switch (ap->hsm_task_state) {
5107 /* Some pre-ATAPI-4 devices assert INTRQ
5108 * in this state when ready to receive the CDB.
5111 /* Checking the ATA_DFLAG_CDB_INTR flag is enough here.
5112 * The flag is set only for ATAPI devices, so there is
5113 * no need to check is_atapi_taskfile(&qc->tf) again.
5115 if (!(qc->dev->flags & ATA_DFLAG_CDB_INTR))
5119 if (qc->tf.protocol == ATA_PROT_DMA ||
5120 qc->tf.protocol == ATA_PROT_ATAPI_DMA) {
5121 /* check status of DMA engine */
5122 host_stat = ap->ops->bmdma_status(ap);
5123 VPRINTK("ata%u: host_stat 0x%X\n", ap->id, host_stat);
5125 /* if it's not our irq... */
5126 if (!(host_stat & ATA_DMA_INTR))
5129 /* before we do anything else, clear DMA-Start bit */
5130 ap->ops->bmdma_stop(qc);
5132 if (unlikely(host_stat & ATA_DMA_ERR)) {
5133 /* error when transferring data to/from memory */
5134 qc->err_mask |= AC_ERR_HOST_BUS;
5135 ap->hsm_task_state = HSM_ST_ERR;
5145 /* check altstatus */
5146 status = ata_altstatus(ap);
5147 if (status & ATA_BUSY)
5150 /* check main status, clearing INTRQ */
5151 status = ata_chk_status(ap);
5152 if (unlikely(status & ATA_BUSY))
5155 /* ack bmdma irq events */
5156 ap->ops->irq_clear(ap);
5158 ata_hsm_move(ap, qc, status, 0);
5160 if (unlikely(qc->err_mask) && (qc->tf.protocol == ATA_PROT_DMA ||
5161 qc->tf.protocol == ATA_PROT_ATAPI_DMA))
5162 ata_ehi_push_desc(ehi, "BMDMA stat 0x%x", host_stat);
5164 return 1; /* irq handled */
5167 ap->stats.idle_irq++;
5170 if ((ap->stats.idle_irq % 1000) == 0) {
5171 ata_irq_ack(ap, 0); /* debug trap */
5172 ata_port_printk(ap, KERN_WARNING, "irq trap\n");
5176 return 0; /* irq not handled */
5180 * ata_interrupt - Default ATA host interrupt handler
5181 * @irq: irq line (unused)
5182 * @dev_instance: pointer to our ata_host information structure
5184 * Default interrupt handler for PCI IDE devices. Calls
5185 * ata_host_intr() for each port that is not disabled.
5188 * Obtains host lock during operation.
5191 * IRQ_NONE or IRQ_HANDLED.
5194 irqreturn_t ata_interrupt (int irq, void *dev_instance)
5196 struct ata_host *host = dev_instance;
5198 unsigned int handled = 0;
5199 unsigned long flags;
5201 /* TODO: make _irqsave conditional on x86 PCI IDE legacy mode */
5202 spin_lock_irqsave(&host->lock, flags);
5204 for (i = 0; i < host->n_ports; i++) {
5205 struct ata_port *ap;
5207 ap = host->ports[i];
5209 !(ap->flags & ATA_FLAG_DISABLED)) {
5210 struct ata_queued_cmd *qc;
5212 qc = ata_qc_from_tag(ap, ap->active_tag);
5213 if (qc && (!(qc->tf.flags & ATA_TFLAG_POLLING)) &&
5214 (qc->flags & ATA_QCFLAG_ACTIVE))
5215 handled |= ata_host_intr(ap, qc);
5219 spin_unlock_irqrestore(&host->lock, flags);
5221 return IRQ_RETVAL(handled);
5225 * sata_scr_valid - test whether SCRs are accessible
5226 * @ap: ATA port to test SCR accessibility for
5228 * Test whether SCRs are accessible for @ap.
5234 * 1 if SCRs are accessible, 0 otherwise.
5236 int sata_scr_valid(struct ata_port *ap)
5238 return ap->cbl == ATA_CBL_SATA && ap->ops->scr_read;
5242 * sata_scr_read - read SCR register of the specified port
5243 * @ap: ATA port to read SCR for
5245 * @val: Place to store read value
5247 * Read SCR register @reg of @ap into *@val. This function is
5248 * guaranteed to succeed if the cable type of the port is SATA
5249 * and the port implements ->scr_read.
5255 * 0 on success, negative errno on failure.
5257 int sata_scr_read(struct ata_port *ap, int reg, u32 *val)
5259 if (sata_scr_valid(ap)) {
5260 *val = ap->ops->scr_read(ap, reg);
5267 * sata_scr_write - write SCR register of the specified port
5268 * @ap: ATA port to write SCR for
5269 * @reg: SCR to write
5270 * @val: value to write
5272 * Write @val to SCR register @reg of @ap. This function is
5273 * guaranteed to succeed if the cable type of the port is SATA
5274 * and the port implements ->scr_read.
5280 * 0 on success, negative errno on failure.
5282 int sata_scr_write(struct ata_port *ap, int reg, u32 val)
5284 if (sata_scr_valid(ap)) {
5285 ap->ops->scr_write(ap, reg, val);
5292 * sata_scr_write_flush - write SCR register of the specified port and flush
5293 * @ap: ATA port to write SCR for
5294 * @reg: SCR to write
5295 * @val: value to write
5297 * This function is identical to sata_scr_write() except that this
5298 * function performs flush after writing to the register.
5304 * 0 on success, negative errno on failure.
5306 int sata_scr_write_flush(struct ata_port *ap, int reg, u32 val)
5308 if (sata_scr_valid(ap)) {
5309 ap->ops->scr_write(ap, reg, val);
5310 ap->ops->scr_read(ap, reg);
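/*
 * Illustrative sketch only (not built): the sata_scr_*() helpers above are
 * the intended way to manipulate SStatus/SControl.  Limiting a link to
 * 1.5 Gbps, for instance, could look like this (SPD is bits 7:4 of SControl
 * per the SATA spec; error handling kept minimal):
 */
#if 0
static int my_limit_to_gen1(struct ata_port *ap)
{
	u32 scontrol;
	int rc;

	rc = sata_scr_read(ap, SCR_CONTROL, &scontrol);
	if (rc)
		return rc;

	scontrol = (scontrol & ~0xf0) | 0x10;	/* SPD limit = Gen 1 */
	return sata_scr_write_flush(ap, SCR_CONTROL, scontrol);
}
#endif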
5317 * ata_port_online - test whether the given port is online
5318 * @ap: ATA port to test
5320 * Test whether @ap is online. Note that this function returns 0
5321 * if the online status of @ap cannot be obtained, so
5322 * ata_port_online(ap) != !ata_port_offline(ap).
5328 * 1 if the port online status is available and online.
5330 int ata_port_online(struct ata_port *ap)
5334 if (!sata_scr_read(ap, SCR_STATUS, &sstatus) && (sstatus & 0xf) == 0x3)
5340 * ata_port_offline - test whether the given port is offline
5341 * @ap: ATA port to test
5343 * Test whether @ap is offline. Note that this function returns
5344 * 0 if the offline status of @ap cannot be obtained, so
5345 * ata_port_online(ap) != !ata_port_offline(ap).
5351 * 1 if the port offline status is available and offline.
5353 int ata_port_offline(struct ata_port *ap)
5357 if (!sata_scr_read(ap, SCR_STATUS, &sstatus) && (sstatus & 0xf) != 0x3)
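/*
 * Illustrative sketch only (not built): because both helpers return 0 when
 * the SCRs cannot be read, "not online" is not the same as "offline".  A
 * caller that cares about the distinction checks both:
 */
#if 0
static void my_report_link(struct ata_port *ap)
{
	if (ata_port_online(ap))
		ata_port_printk(ap, KERN_INFO, "link up\n");
	else if (ata_port_offline(ap))
		ata_port_printk(ap, KERN_INFO, "link down\n");
	else
		ata_port_printk(ap, KERN_INFO, "link state unknown\n");
}
#endif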
5362 int ata_flush_cache(struct ata_device *dev)
5364 unsigned int err_mask;
5367 if (!ata_try_flush_cache(dev))
5370 if (dev->flags & ATA_DFLAG_FLUSH_EXT)
5371 cmd = ATA_CMD_FLUSH_EXT;
5373 cmd = ATA_CMD_FLUSH;
5375 err_mask = ata_do_simple_cmd(dev, cmd);
5377 ata_dev_printk(dev, KERN_ERR, "failed to flush cache\n");
5384 static int ata_host_request_pm(struct ata_host *host, pm_message_t mesg,
5385 unsigned int action, unsigned int ehi_flags,
5388 unsigned long flags;
5391 for (i = 0; i < host->n_ports; i++) {
5392 struct ata_port *ap = host->ports[i];
5394 /* Previous resume operation might still be in
5395 * progress. Wait for PM_PENDING to clear.
5397 if (ap->pflags & ATA_PFLAG_PM_PENDING) {
5398 ata_port_wait_eh(ap);
5399 WARN_ON(ap->pflags & ATA_PFLAG_PM_PENDING);
5402 /* request PM ops to EH */
5403 spin_lock_irqsave(ap->lock, flags);
5408 ap->pm_result = &rc;
5411 ap->pflags |= ATA_PFLAG_PM_PENDING;
5412 ap->eh_info.action |= action;
5413 ap->eh_info.flags |= ehi_flags;
5415 ata_port_schedule_eh(ap);
5417 spin_unlock_irqrestore(ap->lock, flags);
5419 /* wait and check result */
5421 ata_port_wait_eh(ap);
5422 WARN_ON(ap->pflags & ATA_PFLAG_PM_PENDING);
5432 * ata_host_suspend - suspend host
5433 * @host: host to suspend
5436 * Suspend @host. Actual operation is performed by EH. This
5437 * function requests EH to perform PM operations and waits for EH to finish.
5441 * Kernel thread context (may sleep).
5444 * 0 on success, -errno on failure.
5446 int ata_host_suspend(struct ata_host *host, pm_message_t mesg)
5450 rc = ata_host_request_pm(host, mesg, 0, ATA_EHI_QUIET, 1);
5454 /* EH is quiescent now. Fail if we have any ready device.
5455 * This happens if hotplug occurs between completion of device
5456 * suspension and here.
5458 for (i = 0; i < host->n_ports; i++) {
5459 struct ata_port *ap = host->ports[i];
5461 for (j = 0; j < ATA_MAX_DEVICES; j++) {
5462 struct ata_device *dev = &ap->device[j];
5464 if (ata_dev_ready(dev)) {
5465 ata_port_printk(ap, KERN_WARNING,
5466 "suspend failed, device %d "
5467 "still active\n", dev->devno);
5474 host->dev->power.power_state = mesg;
5478 ata_host_resume(host);
5483 * ata_host_resume - resume host
5484 * @host: host to resume
5486 * Resume @host. Actual operation is performed by EH. This
5487 * function requests EH to perform PM operations and returns.
5488 * Note that all resume operations are performed in parallel.
5491 * Kernel thread context (may sleep).
5493 void ata_host_resume(struct ata_host *host)
5495 ata_host_request_pm(host, PMSG_ON, ATA_EH_SOFTRESET,
5496 ATA_EHI_NO_AUTOPSY | ATA_EHI_QUIET, 0);
5497 host->dev->power.power_state = PMSG_ON;
5501 * ata_port_start - Set port up for dma.
5502 * @ap: Port to initialize
5504 * Called just after data structures for each port are
5505 * initialized. Allocates space for PRD table.
5507 * May be used as the port_start() entry in ata_port_operations.
5510 * Inherited from caller.
5513 int ata_port_start (struct ata_port *ap)
5515 struct device *dev = ap->dev;
5518 ap->prd = dma_alloc_coherent(dev, ATA_PRD_TBL_SZ, &ap->prd_dma, GFP_KERNEL);
5522 rc = ata_pad_alloc(ap, dev);
5524 dma_free_coherent(dev, ATA_PRD_TBL_SZ, ap->prd, ap->prd_dma);
5528 DPRINTK("prd alloc, virt %p, dma %llx\n", ap->prd, (unsigned long long) ap->prd_dma);
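/*
 * Illustrative sketch only (not built): drivers that need per-port private
 * data typically layer their own allocation on top of ata_port_start().
 * struct my_port_priv and my_port_start() are hypothetical names.
 */
#if 0
static int my_port_start(struct ata_port *ap)
{
	struct my_port_priv *pp;
	int rc;

	pp = kzalloc(sizeof(*pp), GFP_KERNEL);
	if (!pp)
		return -ENOMEM;

	rc = ata_port_start(ap);	/* allocates the PRD table */
	if (rc) {
		kfree(pp);
		return rc;
	}

	ap->private_data = pp;
	return 0;
}
#endif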
5535 * ata_port_stop - Undo ata_port_start()
5536 * @ap: Port to shut down
5538 * Frees the PRD table.
5540 * May be used as the port_stop() entry in ata_port_operations.
5543 * Inherited from caller.
5546 void ata_port_stop (struct ata_port *ap)
5548 struct device *dev = ap->dev;
5550 dma_free_coherent(dev, ATA_PRD_TBL_SZ, ap->prd, ap->prd_dma);
5551 ata_pad_free(ap, dev);
5554 void ata_host_stop (struct ata_host *host)
5556 if (host->mmio_base)
5557 iounmap(host->mmio_base);
5561 * ata_dev_init - Initialize an ata_device structure
5562 * @dev: Device structure to initialize
5564 * Initialize @dev in preparation for probing.
5567 * Inherited from caller.
5569 void ata_dev_init(struct ata_device *dev)
5571 struct ata_port *ap = dev->ap;
5572 unsigned long flags;
5574 /* SATA spd limit is bound to the first device */
5575 ap->sata_spd_limit = ap->hw_sata_spd_limit;
5577 /* High bits of dev->flags are used to record warm plug
5578 * requests which occur asynchronously. Synchronize using the host lock.
5581 spin_lock_irqsave(ap->lock, flags);
5582 dev->flags &= ~ATA_DFLAG_INIT_MASK;
5583 spin_unlock_irqrestore(ap->lock, flags);
5585 memset((void *)dev + ATA_DEVICE_CLEAR_OFFSET, 0,
5586 sizeof(*dev) - ATA_DEVICE_CLEAR_OFFSET);
5587 dev->pio_mask = UINT_MAX;
5588 dev->mwdma_mask = UINT_MAX;
5589 dev->udma_mask = UINT_MAX;
5593 * ata_port_init - Initialize an ata_port structure
5594 * @ap: Structure to initialize
5595 * @host: Collection of hosts to which @ap belongs
5596 * @ent: Probe information provided by low-level driver
5597 * @port_no: Port number associated with this ata_port
5599 * Initialize a new ata_port structure.
5602 * Inherited from caller.
5604 void ata_port_init(struct ata_port *ap, struct ata_host *host,
5605 const struct ata_probe_ent *ent, unsigned int port_no)
5609 ap->lock = &host->lock;
5610 ap->flags = ATA_FLAG_DISABLED;
5611 ap->id = ata_unique_id++;
5612 ap->ctl = ATA_DEVCTL_OBS;
5615 ap->port_no = port_no;
5616 if (port_no == 1 && ent->pinfo2) {
5617 ap->pio_mask = ent->pinfo2->pio_mask;
5618 ap->mwdma_mask = ent->pinfo2->mwdma_mask;
5619 ap->udma_mask = ent->pinfo2->udma_mask;
5620 ap->flags |= ent->pinfo2->flags;
5621 ap->ops = ent->pinfo2->port_ops;
5623 ap->pio_mask = ent->pio_mask;
5624 ap->mwdma_mask = ent->mwdma_mask;
5625 ap->udma_mask = ent->udma_mask;
5626 ap->flags |= ent->port_flags;
5627 ap->ops = ent->port_ops;
5629 ap->hw_sata_spd_limit = UINT_MAX;
5630 ap->active_tag = ATA_TAG_POISON;
5631 ap->last_ctl = 0xFF;
5633 #if defined(ATA_VERBOSE_DEBUG)
5634 /* turn on all debugging levels */
5635 ap->msg_enable = 0x00FF;
5636 #elif defined(ATA_DEBUG)
5637 ap->msg_enable = ATA_MSG_DRV | ATA_MSG_INFO | ATA_MSG_CTL | ATA_MSG_WARN | ATA_MSG_ERR;
5639 ap->msg_enable = ATA_MSG_DRV | ATA_MSG_ERR | ATA_MSG_WARN;
5642 INIT_WORK(&ap->port_task, NULL, NULL);
5643 INIT_WORK(&ap->hotplug_task, ata_scsi_hotplug, ap);
5644 INIT_WORK(&ap->scsi_rescan_task, ata_scsi_dev_rescan, ap);
5645 INIT_LIST_HEAD(&ap->eh_done_q);
5646 init_waitqueue_head(&ap->eh_wait_q);
5648 /* set cable type */
5649 ap->cbl = ATA_CBL_NONE;
5650 if (ap->flags & ATA_FLAG_SATA)
5651 ap->cbl = ATA_CBL_SATA;
5653 for (i = 0; i < ATA_MAX_DEVICES; i++) {
5654 struct ata_device *dev = &ap->device[i];
5661 ap->stats.unhandled_irq = 1;
5662 ap->stats.idle_irq = 1;
5665 memcpy(&ap->ioaddr, &ent->port[port_no], sizeof(struct ata_ioports));
5669 * ata_port_init_shost - Initialize SCSI host associated with ATA port
5670 * @ap: ATA port to initialize SCSI host for
5671 * @shost: SCSI host associated with @ap
5673 * Initialize SCSI host @shost associated with ATA port @ap.
5676 * Inherited from caller.
5678 static void ata_port_init_shost(struct ata_port *ap, struct Scsi_Host *shost)
5680 ap->scsi_host = shost;
5682 shost->unique_id = ap->id;
5685 shost->max_channel = 1;
5686 shost->max_cmd_len = 12;
5690 * ata_port_add - Attach low-level ATA driver to system
5691 * @ent: Information provided by low-level driver
5692 * @host: Collections of ports to which we add
5693 * @port_no: Port number associated with this host
5695 * Attach low-level ATA driver to system.
5698 * PCI/etc. bus probe sem.
5701 * New ata_port on success, or NULL on error.
5703 static struct ata_port * ata_port_add(const struct ata_probe_ent *ent,
5704 struct ata_host *host,
5705 unsigned int port_no)
5707 struct Scsi_Host *shost;
5708 struct ata_port *ap;
5712 if (!ent->port_ops->error_handler &&
5713 !(ent->port_flags & (ATA_FLAG_SATA_RESET | ATA_FLAG_SRST))) {
5714 printk(KERN_ERR "ata%u: no reset mechanism available\n",
5719 shost = scsi_host_alloc(ent->sht, sizeof(struct ata_port));
5723 shost->transportt = &ata_scsi_transport_template;
5725 ap = ata_shost_to_port(shost);
5727 ata_port_init(ap, host, ent, port_no);
5728 ata_port_init_shost(ap, shost);
5734 * ata_sas_host_init - Initialize a host struct
5735 * @host: host to initialize
5736 * @dev: device host is attached to
5737 * @flags: host flags
5741 * PCI/etc. bus probe sem.
5745 void ata_host_init(struct ata_host *host, struct device *dev,
5746 unsigned long flags, const struct ata_port_operations *ops)
5748 spin_lock_init(&host->lock);
5750 host->flags = flags;
5755 * ata_device_add - Register hardware device with ATA and SCSI layers
5756 * @ent: Probe information describing hardware device to be registered
5758 * This function processes the information provided in the probe
5759 * information struct @ent, allocates the necessary ATA and SCSI
5760 * host information structures, initializes them, and registers
5761 * everything with requisite kernel subsystems.
5763 * This function requests irqs, probes the ATA bus, and probes the SCSI bus.
5767 * PCI/etc. bus probe sem.
5770 * Number of ports registered. Zero on error (no ports registered).
5772 int ata_device_add(const struct ata_probe_ent *ent)
5775 struct device *dev = ent->dev;
5776 struct ata_host *host;
5781 if (ent->irq == 0) {
5782 dev_printk(KERN_ERR, dev, "is not available: No interrupt assigned.\n");
5785 /* alloc a container for our list of ATA ports (buses) */
5786 host = kzalloc(sizeof(struct ata_host) +
5787 (ent->n_ports * sizeof(void *)), GFP_KERNEL);
5791 ata_host_init(host, dev, ent->_host_flags, ent->port_ops);
5792 host->n_ports = ent->n_ports;
5793 host->irq = ent->irq;
5794 host->irq2 = ent->irq2;
5795 host->mmio_base = ent->mmio_base;
5796 host->private_data = ent->private_data;
5798 /* register each port bound to this device */
5799 for (i = 0; i < host->n_ports; i++) {
5800 struct ata_port *ap;
5801 unsigned long xfer_mode_mask;
5802 int irq_line = ent->irq;
5804 ap = ata_port_add(ent, host, i);
5805 host->ports[i] = ap;
5810 if (ent->dummy_port_mask & (1 << i)) {
5811 ata_port_printk(ap, KERN_INFO, "DUMMY\n");
5812 ap->ops = &ata_dummy_port_ops;
5817 rc = ap->ops->port_start(ap);
5819 host->ports[i] = NULL;
5820 scsi_host_put(ap->scsi_host);
5824 /* Report the secondary IRQ for second channel legacy */
5825 if (i == 1 && ent->irq2)
5826 irq_line = ent->irq2;
5828 xfer_mode_mask =(ap->udma_mask << ATA_SHIFT_UDMA) |
5829 (ap->mwdma_mask << ATA_SHIFT_MWDMA) |
5830 (ap->pio_mask << ATA_SHIFT_PIO);
5832 /* print per-port info to dmesg */
5833 ata_port_printk(ap, KERN_INFO, "%cATA max %s cmd 0x%lX "
5834 "ctl 0x%lX bmdma 0x%lX irq %d\n",
5835 ap->flags & ATA_FLAG_SATA ? 'S' : 'P',
5836 ata_mode_string(xfer_mode_mask),
5837 ap->ioaddr.cmd_addr,
5838 ap->ioaddr.ctl_addr,
5839 ap->ioaddr.bmdma_addr,
5842 /* freeze port before requesting IRQ */
5843 ata_eh_freeze_port(ap);
5846 /* obtain irq, that may be shared between channels */
5847 rc = request_irq(ent->irq, ent->port_ops->irq_handler, ent->irq_flags,
5850 dev_printk(KERN_ERR, dev, "irq %lu request failed: %d\n",
5855 /* do we have a second IRQ for the other channel, eg legacy mode */
5857 /* We will get weird core code crashes later if this is true, so trap it now */
5859 BUG_ON(ent->irq == ent->irq2);
5861 rc = request_irq(ent->irq2, ent->port_ops->irq_handler, ent->irq_flags,
5864 dev_printk(KERN_ERR, dev, "irq %lu request failed: %d\n",
5866 goto err_out_free_irq;
5870 /* perform each probe synchronously */
5871 DPRINTK("probe begin\n");
5872 for (i = 0; i < host->n_ports; i++) {
5873 struct ata_port *ap = host->ports[i];
5877 /* init sata_spd_limit to the current value */
5878 if (sata_scr_read(ap, SCR_CONTROL, &scontrol) == 0) {
5879 int spd = (scontrol >> 4) & 0xf;
5880 ap->hw_sata_spd_limit &= (1 << spd) - 1;
5882 ap->sata_spd_limit = ap->hw_sata_spd_limit;
5884 rc = scsi_add_host(ap->scsi_host, dev);
5886 ata_port_printk(ap, KERN_ERR, "scsi_add_host failed\n");
5887 /* FIXME: do something useful here */
5888 /* FIXME: handle unconditional calls to
5889 * scsi_scan_host and ata_host_remove, below,
5894 if (ap->ops->error_handler) {
5895 struct ata_eh_info *ehi = &ap->eh_info;
5896 unsigned long flags;
5900 /* kick EH for boot probing */
5901 spin_lock_irqsave(ap->lock, flags);
5903 ehi->probe_mask = (1 << ATA_MAX_DEVICES) - 1;
5904 ehi->action |= ATA_EH_SOFTRESET;
5905 ehi->flags |= ATA_EHI_NO_AUTOPSY | ATA_EHI_QUIET;
5907 ap->pflags |= ATA_PFLAG_LOADING;
5908 ata_port_schedule_eh(ap);
5910 spin_unlock_irqrestore(ap->lock, flags);
5912 /* wait for EH to finish */
5913 ata_port_wait_eh(ap);
5915 DPRINTK("ata%u: bus probe begin\n", ap->id);
5916 rc = ata_bus_probe(ap);
5917 DPRINTK("ata%u: bus probe end\n", ap->id);
5920 /* FIXME: do something useful here?
5921 * Current libata behavior will
5922 * tear down everything when
5923 * the module is removed
5924 * or the h/w is unplugged.
5930 /* probes are done, now scan each port's disk(s) */
5931 DPRINTK("host probe begin\n");
5932 for (i = 0; i < host->n_ports; i++) {
5933 struct ata_port *ap = host->ports[i];
5935 ata_scsi_scan_host(ap);
5938 dev_set_drvdata(dev, host);
5940 VPRINTK("EXIT, returning %u\n", ent->n_ports);
5941 return ent->n_ports; /* success */
5944 free_irq(ent->irq, host);
5946 for (i = 0; i < host->n_ports; i++) {
5947 struct ata_port *ap = host->ports[i];
5949 ap->ops->port_stop(ap);
5950 scsi_host_put(ap->scsi_host);
5955 VPRINTK("EXIT, returning 0\n");
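/*
 * Illustrative sketch only (not built): a minimal, hypothetical probe path
 * showing how a low-level driver fills a struct ata_probe_ent and hands it
 * to ata_device_add().  Real drivers usually go through ata_pci_init_one()
 * or a bus-specific helper instead; my_sht and my_bmdma_ops (see the ops
 * sketch earlier) are placeholders.
 */
#if 0
static int my_probe(struct device *dev, unsigned long cmd_addr, int irq)
{
	struct ata_probe_ent *probe_ent;
	int n;

	probe_ent = kzalloc(sizeof(*probe_ent), GFP_KERNEL);
	if (!probe_ent)
		return -ENOMEM;

	INIT_LIST_HEAD(&probe_ent->node);
	probe_ent->dev = dev;
	probe_ent->sht = &my_sht;		/* hypothetical scsi_host_template */
	probe_ent->port_flags = ATA_FLAG_SATA;
	probe_ent->pio_mask = 0x1f;		/* PIO 0-4 */
	probe_ent->udma_mask = 0x7f;		/* UDMA 0-6 */
	probe_ent->port_ops = &my_bmdma_ops;
	probe_ent->n_ports = 1;
	probe_ent->irq = irq;
	probe_ent->irq_flags = IRQF_SHARED;
	probe_ent->port[0].cmd_addr = cmd_addr;
	/* ctl_addr, bmdma_addr etc. omitted for brevity */
	ata_std_ports(&probe_ent->port[0]);

	n = ata_device_add(probe_ent);
	kfree(probe_ent);		/* no longer needed after registration */

	return n ? 0 : -ENODEV;
}
#endif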
5960 * ata_port_detach - Detach ATA port in preparation for device removal
5961 * @ap: ATA port to be detached
5963 * Detach all ATA devices and the associated SCSI devices of @ap;
5964 * then, remove the associated SCSI host. @ap is guaranteed to
5965 * be quiescent on return from this function.
5968 * Kernel thread context (may sleep).
5970 void ata_port_detach(struct ata_port *ap)
5972 unsigned long flags;
5975 if (!ap->ops->error_handler)
5978 /* tell EH we're leaving & flush EH */
5979 spin_lock_irqsave(ap->lock, flags);
5980 ap->pflags |= ATA_PFLAG_UNLOADING;
5981 spin_unlock_irqrestore(ap->lock, flags);
5983 ata_port_wait_eh(ap);
5985 /* EH is now guaranteed to see UNLOADING, so no new device
5986 * will be attached. Disable all existing devices.
5988 spin_lock_irqsave(ap->lock, flags);
5990 for (i = 0; i < ATA_MAX_DEVICES; i++)
5991 ata_dev_disable(&ap->device[i]);
5993 spin_unlock_irqrestore(ap->lock, flags);
5995 /* Final freeze & EH. All in-flight commands are aborted. EH
5996 * will be skipped and retries will be terminated with bad target.
5999 spin_lock_irqsave(ap->lock, flags);
6000 ata_port_freeze(ap); /* won't be thawed */
6001 spin_unlock_irqrestore(ap->lock, flags);
6003 ata_port_wait_eh(ap);
6005 /* Flush hotplug task. The sequence is similar to
6006 * ata_port_flush_task().
6008 flush_workqueue(ata_aux_wq);
6009 cancel_delayed_work(&ap->hotplug_task);
6010 flush_workqueue(ata_aux_wq);
6013 /* remove the associated SCSI host */
6014 scsi_remove_host(ap->scsi_host);
6018 * ata_host_remove - PCI layer callback for device removal
6019 * @host: ATA host set that was removed
6021 * Unregister all objects associated with this host set. Free those objects.
6025 * Inherited from calling layer (may sleep).
6028 void ata_host_remove(struct ata_host *host)
6032 for (i = 0; i < host->n_ports; i++)
6033 ata_port_detach(host->ports[i]);
6035 free_irq(host->irq, host);
6037 free_irq(host->irq2, host);
6039 for (i = 0; i < host->n_ports; i++) {
6040 struct ata_port *ap = host->ports[i];
6042 ata_scsi_release(ap->scsi_host);
6044 if ((ap->flags & ATA_FLAG_NO_LEGACY) == 0) {
6045 struct ata_ioports *ioaddr = &ap->ioaddr;
6047 /* FIXME: Add -ac IDE pci mods to remove these special cases */
6048 if (ioaddr->cmd_addr == ATA_PRIMARY_CMD)
6049 release_region(ATA_PRIMARY_CMD, 8);
6050 else if (ioaddr->cmd_addr == ATA_SECONDARY_CMD)
6051 release_region(ATA_SECONDARY_CMD, 8);
6054 scsi_host_put(ap->scsi_host);
6057 if (host->ops->host_stop)
6058 host->ops->host_stop(host);
6064 * ata_scsi_release - SCSI layer callback hook for host unload
6065 * @shost: libata host to be unloaded
6067 * Performs all duties necessary to shut down a libata port...
6068 * Kill port kthread, disable port, and release resources.
6071 * Inherited from SCSI layer.
6077 int ata_scsi_release(struct Scsi_Host *shost)
6079 struct ata_port *ap = ata_shost_to_port(shost);
6083 ap->ops->port_disable(ap);
6084 ap->ops->port_stop(ap);
6090 struct ata_probe_ent *
6091 ata_probe_ent_alloc(struct device *dev, const struct ata_port_info *port)
6093 struct ata_probe_ent *probe_ent;
6095 probe_ent = kzalloc(sizeof(*probe_ent), GFP_KERNEL);
6097 printk(KERN_ERR DRV_NAME "(%s): out of memory\n",
6098 kobject_name(&(dev->kobj)));
6102 INIT_LIST_HEAD(&probe_ent->node);
6103 probe_ent->dev = dev;
6105 probe_ent->sht = port->sht;
6106 probe_ent->port_flags = port->flags;
6107 probe_ent->pio_mask = port->pio_mask;
6108 probe_ent->mwdma_mask = port->mwdma_mask;
6109 probe_ent->udma_mask = port->udma_mask;
6110 probe_ent->port_ops = port->port_ops;
6111 probe_ent->private_data = port->private_data;
6117 * ata_std_ports - initialize ioaddr with standard port offsets.
6118 * @ioaddr: IO address structure to be initialized
6120 * Utility function which initializes data_addr, error_addr,
6121 * feature_addr, nsect_addr, lbal_addr, lbam_addr, lbah_addr,
6122 * device_addr, status_addr, and command_addr to standard offsets
6123 * relative to cmd_addr.
6125 * Does not set ctl_addr, altstatus_addr, bmdma_addr, or scr_addr.
6128 void ata_std_ports(struct ata_ioports *ioaddr)
6130 ioaddr->data_addr = ioaddr->cmd_addr + ATA_REG_DATA;
6131 ioaddr->error_addr = ioaddr->cmd_addr + ATA_REG_ERR;
6132 ioaddr->feature_addr = ioaddr->cmd_addr + ATA_REG_FEATURE;
6133 ioaddr->nsect_addr = ioaddr->cmd_addr + ATA_REG_NSECT;
6134 ioaddr->lbal_addr = ioaddr->cmd_addr + ATA_REG_LBAL;
6135 ioaddr->lbam_addr = ioaddr->cmd_addr + ATA_REG_LBAM;
6136 ioaddr->lbah_addr = ioaddr->cmd_addr + ATA_REG_LBAH;
6137 ioaddr->device_addr = ioaddr->cmd_addr + ATA_REG_DEVICE;
6138 ioaddr->status_addr = ioaddr->cmd_addr + ATA_REG_STATUS;
6139 ioaddr->command_addr = ioaddr->cmd_addr + ATA_REG_CMD;
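/*
 * Illustrative sketch only (not built): since ata_std_ports() derives only
 * the taskfile registers from cmd_addr, callers still fill in the control
 * and BMDMA addresses themselves.  For a legacy primary channel that might
 * look roughly like this (my_fill_legacy_ioaddr is hypothetical):
 */
#if 0
static void my_fill_legacy_ioaddr(struct ata_ioports *ioaddr, unsigned long bmdma)
{
	ioaddr->cmd_addr = ATA_PRIMARY_CMD;	/* 0x1f0 */
	ioaddr->ctl_addr = 0x3f6;		/* legacy control/altstatus */
	ioaddr->altstatus_addr = ioaddr->ctl_addr;
	ioaddr->bmdma_addr = bmdma;
	ata_std_ports(ioaddr);
}
#endif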
6145 void ata_pci_host_stop (struct ata_host *host)
6147 struct pci_dev *pdev = to_pci_dev(host->dev);
6149 pci_iounmap(pdev, host->mmio_base);
6153 * ata_pci_remove_one - PCI layer callback for device removal
6154 * @pdev: PCI device that was removed
6156 * The PCI layer indicates to libata via this hook that a
6157 * hot-unplug or module unload event has occurred.
6158 * Handle this by unregistering all objects associated
6159 * with this PCI device. Free those objects. Then finally
6160 * release PCI resources and disable device.
6163 * Inherited from PCI layer (may sleep).
6166 void ata_pci_remove_one (struct pci_dev *pdev)
6168 struct device *dev = pci_dev_to_dev(pdev);
6169 struct ata_host *host = dev_get_drvdata(dev);
6171 ata_host_remove(host);
6173 pci_release_regions(pdev);
6174 pci_disable_device(pdev);
6175 dev_set_drvdata(dev, NULL);
6178 /* move to PCI subsystem */
6179 int pci_test_config_bits(struct pci_dev *pdev, const struct pci_bits *bits)
6181 unsigned long tmp = 0;
6183 switch (bits->width) {
6186 pci_read_config_byte(pdev, bits->reg, &tmp8);
6192 pci_read_config_word(pdev, bits->reg, &tmp16);
6198 pci_read_config_dword(pdev, bits->reg, &tmp32);
6209 return (tmp == bits->val) ? 1 : 0;
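/*
 * Illustrative sketch only (not built): pci_test_config_bits() is typically
 * used to check whether a legacy IDE channel is actually enabled before
 * registering it.  The config offsets and bit values are hypothetical, and
 * struct pci_bits is assumed to carry { reg, width, mask, val } in that
 * order, with mask applied before the comparison above.
 */
#if 0
static const struct pci_bits my_port_enable_bits[] = {
	{ 0x41, 1, 0x80, 0x80 },	/* primary channel enable */
	{ 0x43, 1, 0x80, 0x80 },	/* secondary channel enable */
};

static int my_port_enabled(struct pci_dev *pdev, int port_no)
{
	return pci_test_config_bits(pdev, &my_port_enable_bits[port_no]);
}
#endif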
6212 void ata_pci_device_do_suspend(struct pci_dev *pdev, pm_message_t mesg)
6214 pci_save_state(pdev);
6216 if (mesg.event == PM_EVENT_SUSPEND) {
6217 pci_disable_device(pdev);
6218 pci_set_power_state(pdev, PCI_D3hot);
6222 void ata_pci_device_do_resume(struct pci_dev *pdev)
6224 pci_set_power_state(pdev, PCI_D0);
6225 pci_restore_state(pdev);
6226 pci_enable_device(pdev);
6227 pci_set_master(pdev);
6230 int ata_pci_device_suspend(struct pci_dev *pdev, pm_message_t mesg)
6232 struct ata_host *host = dev_get_drvdata(&pdev->dev);
6235 rc = ata_host_suspend(host, mesg);
6239 ata_pci_device_do_suspend(pdev, mesg);
6244 int ata_pci_device_resume(struct pci_dev *pdev)
6246 struct ata_host *host = dev_get_drvdata(&pdev->dev);
6248 ata_pci_device_do_resume(pdev);
6249 ata_host_resume(host);
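/*
 * Illustrative sketch only (not built): the PCI helpers above are normally
 * wired straight into a driver's struct pci_driver.  my_pci_tbl and
 * my_init_one (usually a thin wrapper around ata_pci_init_one()) are
 * hypothetical.
 */
#if 0
static struct pci_driver my_pci_driver = {
	.name		= "my_sata",
	.id_table	= my_pci_tbl,
	.probe		= my_init_one,
	.remove		= ata_pci_remove_one,
	.suspend	= ata_pci_device_suspend,
	.resume		= ata_pci_device_resume,
};
#endif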
6252 #endif /* CONFIG_PCI */
6255 static int __init ata_init(void)
6257 ata_probe_timeout *= HZ;
6258 ata_wq = create_workqueue("ata");
6262 ata_aux_wq = create_singlethread_workqueue("ata_aux");
6264 destroy_workqueue(ata_wq);
6268 printk(KERN_DEBUG "libata version " DRV_VERSION " loaded.\n");
6272 static void __exit ata_exit(void)
6274 destroy_workqueue(ata_wq);
6275 destroy_workqueue(ata_aux_wq);
6278 subsys_initcall(ata_init);
6279 module_exit(ata_exit);
6281 static unsigned long ratelimit_time;
6282 static DEFINE_SPINLOCK(ata_ratelimit_lock);
6284 int ata_ratelimit(void)
6287 unsigned long flags;
6289 spin_lock_irqsave(&ata_ratelimit_lock, flags);
6291 if (time_after(jiffies, ratelimit_time)) {
6293 ratelimit_time = jiffies + (HZ/5);
6297 spin_unlock_irqrestore(&ata_ratelimit_lock, flags);
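/*
 * Illustrative sketch only (not built): ata_ratelimit() exists to throttle
 * messages that hot paths could otherwise emit on every interrupt, e.g.:
 */
#if 0
static void my_report_spurious_irq(struct ata_port *ap)
{
	if (ata_ratelimit())
		ata_port_printk(ap, KERN_WARNING,
				"spurious interrupt ignored\n");
}
#endif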
6303 * ata_wait_register - wait until register value changes
6304 * @reg: IO-mapped register
6305 * @mask: Mask to apply to read register value
6306 * @val: Wait condition
6307 * @interval_msec: polling interval in milliseconds
6308 * @timeout_msec: timeout in milliseconds
6310 * Waiting for some bits of a register to change is a common
6311 * operation for ATA controllers. This function reads the 32-bit LE
6312 * IO-mapped register @reg and tests for the following condition.
6314 * (*@reg & mask) != val
6316 * If the condition is met, it returns; otherwise, the process is
6317 * repeated after @interval_msec until timeout.
6320 * Kernel thread context (may sleep)
6323 * The final register value.
6325 u32 ata_wait_register(void __iomem *reg, u32 mask, u32 val,
6326 unsigned long interval_msec,
6327 unsigned long timeout_msec)
6329 unsigned long timeout;
6332 tmp = ioread32(reg);
6334 /* Calculate timeout _after_ the first read to make sure
6335 * preceding writes reach the controller before starting to
6336 * eat away the timeout.
6338 timeout = jiffies + (timeout_msec * HZ) / 1000;
6340 while ((tmp & mask) == val && time_before(jiffies, timeout)) {
6341 msleep(interval_msec);
6342 tmp = ioread32(reg);
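/*
 * Illustrative sketch only (not built): to wait for a busy bit to clear, a
 * caller passes the bit in both @mask and @val so the loop runs until
 * (reg & mask) != val.  MY_PORT_CMD and MY_CMD_BUSY are hypothetical.
 */
#if 0
static int my_wait_idle(void __iomem *port_mmio)
{
	u32 tmp;

	/* poll every 10 ms, give up after 500 ms */
	tmp = ata_wait_register(port_mmio + MY_PORT_CMD,
				MY_CMD_BUSY, MY_CMD_BUSY, 10, 500);
	return (tmp & MY_CMD_BUSY) ? -EBUSY : 0;
}
#endif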
6351 static void ata_dummy_noret(struct ata_port *ap) { }
6352 static int ata_dummy_ret0(struct ata_port *ap) { return 0; }
6353 static void ata_dummy_qc_noret(struct ata_queued_cmd *qc) { }
6355 static u8 ata_dummy_check_status(struct ata_port *ap)
6360 static unsigned int ata_dummy_qc_issue(struct ata_queued_cmd *qc)
6362 return AC_ERR_SYSTEM;
6365 const struct ata_port_operations ata_dummy_port_ops = {
6366 .port_disable = ata_port_disable,
6367 .check_status = ata_dummy_check_status,
6368 .check_altstatus = ata_dummy_check_status,
6369 .dev_select = ata_noop_dev_select,
6370 .qc_prep = ata_noop_qc_prep,
6371 .qc_issue = ata_dummy_qc_issue,
6372 .freeze = ata_dummy_noret,
6373 .thaw = ata_dummy_noret,
6374 .error_handler = ata_dummy_noret,
6375 .post_internal_cmd = ata_dummy_qc_noret,
6376 .irq_clear = ata_dummy_noret,
6377 .port_start = ata_dummy_ret0,
6378 .port_stop = ata_dummy_noret,
6382 * libata is essentially a library of internal helper functions for
6383 * low-level ATA host controller drivers. As such, the API/ABI is
6384 * likely to change as new drivers are added and updated.
6385 * Do not depend on ABI/API stability.
6388 EXPORT_SYMBOL_GPL(sata_deb_timing_normal);
6389 EXPORT_SYMBOL_GPL(sata_deb_timing_hotplug);
6390 EXPORT_SYMBOL_GPL(sata_deb_timing_long);
6391 EXPORT_SYMBOL_GPL(ata_dummy_port_ops);
6392 EXPORT_SYMBOL_GPL(ata_std_bios_param);
6393 EXPORT_SYMBOL_GPL(ata_std_ports);
6394 EXPORT_SYMBOL_GPL(ata_host_init);
6395 EXPORT_SYMBOL_GPL(ata_device_add);
6396 EXPORT_SYMBOL_GPL(ata_port_detach);
6397 EXPORT_SYMBOL_GPL(ata_host_remove);
6398 EXPORT_SYMBOL_GPL(ata_sg_init);
6399 EXPORT_SYMBOL_GPL(ata_sg_init_one);
6400 EXPORT_SYMBOL_GPL(ata_hsm_move);
6401 EXPORT_SYMBOL_GPL(ata_qc_complete);
6402 EXPORT_SYMBOL_GPL(ata_qc_complete_multiple);
6403 EXPORT_SYMBOL_GPL(ata_qc_issue_prot);
6404 EXPORT_SYMBOL_GPL(ata_tf_load);
6405 EXPORT_SYMBOL_GPL(ata_tf_read);
6406 EXPORT_SYMBOL_GPL(ata_noop_dev_select);
6407 EXPORT_SYMBOL_GPL(ata_std_dev_select);
6408 EXPORT_SYMBOL_GPL(ata_tf_to_fis);
6409 EXPORT_SYMBOL_GPL(ata_tf_from_fis);
6410 EXPORT_SYMBOL_GPL(ata_check_status);
6411 EXPORT_SYMBOL_GPL(ata_altstatus);
6412 EXPORT_SYMBOL_GPL(ata_exec_command);
6413 EXPORT_SYMBOL_GPL(ata_port_start);
6414 EXPORT_SYMBOL_GPL(ata_port_stop);
6415 EXPORT_SYMBOL_GPL(ata_host_stop);
6416 EXPORT_SYMBOL_GPL(ata_interrupt);
6417 EXPORT_SYMBOL_GPL(ata_mmio_data_xfer);
6418 EXPORT_SYMBOL_GPL(ata_pio_data_xfer);
6419 EXPORT_SYMBOL_GPL(ata_pio_data_xfer_noirq);
6420 EXPORT_SYMBOL_GPL(ata_qc_prep);
6421 EXPORT_SYMBOL_GPL(ata_noop_qc_prep);
6422 EXPORT_SYMBOL_GPL(ata_bmdma_setup);
6423 EXPORT_SYMBOL_GPL(ata_bmdma_start);
6424 EXPORT_SYMBOL_GPL(ata_bmdma_irq_clear);
6425 EXPORT_SYMBOL_GPL(ata_bmdma_status);
6426 EXPORT_SYMBOL_GPL(ata_bmdma_stop);
6427 EXPORT_SYMBOL_GPL(ata_bmdma_freeze);
6428 EXPORT_SYMBOL_GPL(ata_bmdma_thaw);
6429 EXPORT_SYMBOL_GPL(ata_bmdma_drive_eh);
6430 EXPORT_SYMBOL_GPL(ata_bmdma_error_handler);
6431 EXPORT_SYMBOL_GPL(ata_bmdma_post_internal_cmd);
6432 EXPORT_SYMBOL_GPL(ata_port_probe);
6433 EXPORT_SYMBOL_GPL(sata_set_spd);
6434 EXPORT_SYMBOL_GPL(sata_phy_debounce);
6435 EXPORT_SYMBOL_GPL(sata_phy_resume);
6436 EXPORT_SYMBOL_GPL(sata_phy_reset);
6437 EXPORT_SYMBOL_GPL(__sata_phy_reset);
6438 EXPORT_SYMBOL_GPL(ata_bus_reset);
6439 EXPORT_SYMBOL_GPL(ata_std_prereset);
6440 EXPORT_SYMBOL_GPL(ata_std_softreset);
6441 EXPORT_SYMBOL_GPL(sata_port_hardreset);
6442 EXPORT_SYMBOL_GPL(sata_std_hardreset);
6443 EXPORT_SYMBOL_GPL(ata_std_postreset);
6444 EXPORT_SYMBOL_GPL(ata_dev_classify);
6445 EXPORT_SYMBOL_GPL(ata_dev_pair);
6446 EXPORT_SYMBOL_GPL(ata_port_disable);
6447 EXPORT_SYMBOL_GPL(ata_ratelimit);
6448 EXPORT_SYMBOL_GPL(ata_wait_register);
6449 EXPORT_SYMBOL_GPL(ata_busy_sleep);
6450 EXPORT_SYMBOL_GPL(ata_port_queue_task);
6451 EXPORT_SYMBOL_GPL(ata_scsi_ioctl);
6452 EXPORT_SYMBOL_GPL(ata_scsi_queuecmd);
6453 EXPORT_SYMBOL_GPL(ata_scsi_slave_config);
6454 EXPORT_SYMBOL_GPL(ata_scsi_slave_destroy);
6455 EXPORT_SYMBOL_GPL(ata_scsi_change_queue_depth);
6456 EXPORT_SYMBOL_GPL(ata_scsi_release);
6457 EXPORT_SYMBOL_GPL(ata_host_intr);
6458 EXPORT_SYMBOL_GPL(sata_scr_valid);
6459 EXPORT_SYMBOL_GPL(sata_scr_read);
6460 EXPORT_SYMBOL_GPL(sata_scr_write);
6461 EXPORT_SYMBOL_GPL(sata_scr_write_flush);
6462 EXPORT_SYMBOL_GPL(ata_port_online);
6463 EXPORT_SYMBOL_GPL(ata_port_offline);
6464 EXPORT_SYMBOL_GPL(ata_host_suspend);
6465 EXPORT_SYMBOL_GPL(ata_host_resume);
6466 EXPORT_SYMBOL_GPL(ata_id_string);
6467 EXPORT_SYMBOL_GPL(ata_id_c_string);
6468 EXPORT_SYMBOL_GPL(ata_device_blacklisted);
6469 EXPORT_SYMBOL_GPL(ata_scsi_simulate);
6471 EXPORT_SYMBOL_GPL(ata_pio_need_iordy);
6472 EXPORT_SYMBOL_GPL(ata_timing_compute);
6473 EXPORT_SYMBOL_GPL(ata_timing_merge);
6476 EXPORT_SYMBOL_GPL(pci_test_config_bits);
6477 EXPORT_SYMBOL_GPL(ata_pci_host_stop);
6478 EXPORT_SYMBOL_GPL(ata_pci_init_native_mode);
6479 EXPORT_SYMBOL_GPL(ata_pci_init_one);
6480 EXPORT_SYMBOL_GPL(ata_pci_remove_one);
6481 EXPORT_SYMBOL_GPL(ata_pci_device_do_suspend);
6482 EXPORT_SYMBOL_GPL(ata_pci_device_do_resume);
6483 EXPORT_SYMBOL_GPL(ata_pci_device_suspend);
6484 EXPORT_SYMBOL_GPL(ata_pci_device_resume);
6485 EXPORT_SYMBOL_GPL(ata_pci_default_filter);
6486 EXPORT_SYMBOL_GPL(ata_pci_clear_simplex);
6487 #endif /* CONFIG_PCI */
6489 EXPORT_SYMBOL_GPL(ata_scsi_device_suspend);
6490 EXPORT_SYMBOL_GPL(ata_scsi_device_resume);
6492 EXPORT_SYMBOL_GPL(ata_eng_timeout);
6493 EXPORT_SYMBOL_GPL(ata_port_schedule_eh);
6494 EXPORT_SYMBOL_GPL(ata_port_abort);
6495 EXPORT_SYMBOL_GPL(ata_port_freeze);
6496 EXPORT_SYMBOL_GPL(ata_eh_freeze_port);
6497 EXPORT_SYMBOL_GPL(ata_eh_thaw_port);
6498 EXPORT_SYMBOL_GPL(ata_eh_qc_complete);
6499 EXPORT_SYMBOL_GPL(ata_eh_qc_retry);
6500 EXPORT_SYMBOL_GPL(ata_do_eh);