 * libata-core.c - helper library for ATA
 * Maintained by: Jeff Garzik <jgarzik@pobox.com>
 * Please ALWAYS copy linux-ide@vger.kernel.org
 * Copyright 2003-2004 Red Hat, Inc. All rights reserved.
 * Copyright 2003-2004 Jeff Garzik
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License as published by
 * the Free Software Foundation; either version 2, or (at your option)
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 * You should have received a copy of the GNU General Public License
 * along with this program; see the file COPYING.  If not, write to
 * the Free Software Foundation, 675 Mass Ave, Cambridge, MA 02139, USA.
 * libata documentation is available via 'make {ps|pdf}docs',
 * as Documentation/DocBook/libata.*
 * Hardware documentation available from http://www.t13.org/ and
 * http://www.sata-io.org/
 * Standards documents from:
 *	http://www.t13.org (ATA standards, PCI DMA IDE spec)
 *	http://www.t10.org (SCSI MMC - for ATAPI MMC)
 *	http://www.sata-io.org (SATA)
 *	http://www.compactflash.org (CF)
 *	http://www.qic.org (QIC157 - Tape and DSC)
 *	http://www.ce-ata.org (CE-ATA: not supported)
#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/pci.h>
#include <linux/init.h>
#include <linux/list.h>
#include <linux/highmem.h>
#include <linux/spinlock.h>
#include <linux/blkdev.h>
#include <linux/delay.h>
#include <linux/timer.h>
#include <linux/interrupt.h>
#include <linux/completion.h>
#include <linux/suspend.h>
#include <linux/workqueue.h>
#include <linux/jiffies.h>
#include <linux/scatterlist.h>
#include <scsi/scsi.h>
#include <scsi/scsi_cmnd.h>
#include <scsi/scsi_host.h>
#include <linux/libata.h>
#include <asm/semaphore.h>
#include <asm/byteorder.h>
#include <linux/cdrom.h>
/* debounce timing parameters in msecs { interval, duration, timeout } */
const unsigned long sata_deb_timing_normal[]	= {   5,  100, 2000 };
const unsigned long sata_deb_timing_hotplug[]	= {  25,  500, 2000 };
const unsigned long sata_deb_timing_long[]	= { 100, 2000, 5000 };
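/*
 * A usage sketch (not part of this file): these tables are normally handed
 * to the link debounce/resume helpers such as sata_link_debounce().  The
 * triplet means: sample SStatus every @interval ms, require it to stay
 * stable for @duration ms, and give up after @timeout ms, e.g.
 *
 *	rc = sata_link_resume(link, sata_deb_timing_normal, deadline);
 */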
static unsigned int ata_dev_init_params(struct ata_device *dev,
					u16 heads, u16 sectors);
static unsigned int ata_dev_set_xfermode(struct ata_device *dev);
static unsigned int ata_dev_set_feature(struct ata_device *dev,
					u8 enable, u8 feature);
static void ata_dev_xfermask(struct ata_device *dev);
static unsigned long ata_dev_blacklisted(const struct ata_device *dev);

unsigned int ata_print_id = 1;
static struct workqueue_struct *ata_wq;

struct workqueue_struct *ata_aux_wq;

struct ata_force_param {
	unsigned long	xfer_mask;
	unsigned int	horkage_on;
	unsigned int	horkage_off;

struct ata_force_ent {
	struct ata_force_param	param;

static struct ata_force_ent *ata_force_tbl;
static int ata_force_tbl_size;

static char ata_force_param_buf[PAGE_SIZE] __initdata;
/* param_buf is thrown away after initialization, disallow read */
module_param_string(force, ata_force_param_buf, sizeof(ata_force_param_buf), 0);
MODULE_PARM_DESC(force, "Force ATA configurations including cable type, link speed and transfer mode (see Documentation/kernel-parameters.txt for details)");
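/*
 * Illustrative kernel command line usage (syntax per
 * Documentation/kernel-parameters.txt; the values here are made up):
 *
 *	libata.force=1:40c,1.00:udma4
 *
 * forces a 40-wire cable on port 1 and limits device 1.00 to UDMA4.
 * Horkages such as "noncq" can be forced the same way.
 */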
int atapi_enabled = 1;
module_param(atapi_enabled, int, 0444);
MODULE_PARM_DESC(atapi_enabled, "Enable discovery of ATAPI devices (0=off, 1=on)");

static int atapi_dmadir = 0;
module_param(atapi_dmadir, int, 0444);
MODULE_PARM_DESC(atapi_dmadir, "Enable ATAPI DMADIR bridge support (0=off, 1=on)");

int atapi_passthru16 = 1;
module_param(atapi_passthru16, int, 0444);
MODULE_PARM_DESC(atapi_passthru16, "Enable ATA_16 passthru for ATAPI devices; on by default (0=off, 1=on)");

module_param_named(fua, libata_fua, int, 0444);
MODULE_PARM_DESC(fua, "FUA support (0=off, 1=on)");

static int ata_ignore_hpa;
module_param_named(ignore_hpa, ata_ignore_hpa, int, 0644);
MODULE_PARM_DESC(ignore_hpa, "Ignore HPA limit (0=keep BIOS limits, 1=ignore limits, using full disk)");

static int libata_dma_mask = ATA_DMA_MASK_ATA|ATA_DMA_MASK_ATAPI|ATA_DMA_MASK_CFA;
module_param_named(dma, libata_dma_mask, int, 0444);
MODULE_PARM_DESC(dma, "DMA enable/disable (0x1==ATA, 0x2==ATAPI, 0x4==CF)");
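/*
 * For example, booting with libata.dma=0x3 leaves DMA enabled for ATA
 * disks and ATAPI devices but forces CF cards to PIO (a sketch of the
 * mask semantics above, not a recommendation).
 */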
static int ata_probe_timeout = ATA_TMOUT_INTERNAL / HZ;
module_param(ata_probe_timeout, int, 0444);
MODULE_PARM_DESC(ata_probe_timeout, "Set ATA probing timeout (seconds)");

int libata_noacpi = 0;
module_param_named(noacpi, libata_noacpi, int, 0444);
MODULE_PARM_DESC(noacpi, "Disables the use of ACPI in probe/suspend/resume when set");

int libata_allow_tpm = 0;
module_param_named(allow_tpm, libata_allow_tpm, int, 0444);
MODULE_PARM_DESC(allow_tpm, "Permit the use of TPM commands");

MODULE_AUTHOR("Jeff Garzik");
MODULE_DESCRIPTION("Library module for ATA devices");
MODULE_LICENSE("GPL");
MODULE_VERSION(DRV_VERSION);
 * ata_force_cbl - force cable type according to libata.force
 * @ap: ATA port of interest
 *
 * Force cable type according to libata.force and whine about it.
 * The last entry which has a matching port number is used, so it
 * can be specified as part of device force parameters.  For
 * example, both "a:40c,1.00:udma4" and "1.00:40c,udma4" have the
 * same effect.
void ata_force_cbl(struct ata_port *ap)
	for (i = ata_force_tbl_size - 1; i >= 0; i--) {
		const struct ata_force_ent *fe = &ata_force_tbl[i];

		if (fe->port != -1 && fe->port != ap->print_id)
			continue;

		if (fe->param.cbl == ATA_CBL_NONE)
			continue;

		ap->cbl = fe->param.cbl;
		ata_port_printk(ap, KERN_NOTICE,
				"FORCE: cable set to %s\n", fe->param.name);
 * ata_force_spd_limit - force SATA spd limit according to libata.force
 * @link: ATA link of interest
 *
 * Force SATA spd limit according to libata.force and whine about
 * it.  When only the port part is specified (e.g. 1:), the limit
 * applies to all links connected to both the host link and all
 * fan-out ports connected via PMP.  If the device part is
 * specified as 0 (e.g. 1.00:), it specifies the first fan-out
 * link, not the host link.  Device number 15 always points to the
 * host link whether PMP is attached or not.
static void ata_force_spd_limit(struct ata_link *link)
	if (ata_is_host_link(link))

	for (i = ata_force_tbl_size - 1; i >= 0; i--) {
		const struct ata_force_ent *fe = &ata_force_tbl[i];

		if (fe->port != -1 && fe->port != link->ap->print_id)
			continue;

		if (fe->device != -1 && fe->device != linkno)
			continue;

		if (!fe->param.spd_limit)
			continue;

		link->hw_sata_spd_limit = (1 << fe->param.spd_limit) - 1;
		ata_link_printk(link, KERN_NOTICE,
				"FORCE: PHY spd limit set to %s\n", fe->param.name);
 * ata_force_xfermask - force xfermask according to libata.force
 * @dev: ATA device of interest
 *
 * Force xfer_mask according to libata.force and whine about it.
 * For consistency with link selection, device number 15 selects
 * the first device connected to the host link.
static void ata_force_xfermask(struct ata_device *dev)
	int devno = dev->link->pmp + dev->devno;
	int alt_devno = devno;

	/* allow n.15 for the first device attached to host port */
	if (ata_is_host_link(dev->link) && devno == 0)

	for (i = ata_force_tbl_size - 1; i >= 0; i--) {
		const struct ata_force_ent *fe = &ata_force_tbl[i];
		unsigned long pio_mask, mwdma_mask, udma_mask;

		if (fe->port != -1 && fe->port != dev->link->ap->print_id)
			continue;

		if (fe->device != -1 && fe->device != devno &&
		    fe->device != alt_devno)
			continue;

		if (!fe->param.xfer_mask)
			continue;

		ata_unpack_xfermask(fe->param.xfer_mask,
				    &pio_mask, &mwdma_mask, &udma_mask);
		if (udma_mask)
			dev->udma_mask = udma_mask;
		else if (mwdma_mask) {
			dev->mwdma_mask = mwdma_mask;
			dev->pio_mask = pio_mask;

		ata_dev_printk(dev, KERN_NOTICE,
			       "FORCE: xfer_mask set to %s\n", fe->param.name);
 * ata_force_horkage - force horkage according to libata.force
 * @dev: ATA device of interest
 *
 * Force horkage according to libata.force and whine about it.
 * For consistency with link selection, device number 15 selects
 * the first device connected to the host link.
static void ata_force_horkage(struct ata_device *dev)
	int devno = dev->link->pmp + dev->devno;
	int alt_devno = devno;

	/* allow n.15 for the first device attached to host port */
	if (ata_is_host_link(dev->link) && devno == 0)

	for (i = 0; i < ata_force_tbl_size; i++) {
		const struct ata_force_ent *fe = &ata_force_tbl[i];

		if (fe->port != -1 && fe->port != dev->link->ap->print_id)
			continue;

		if (fe->device != -1 && fe->device != devno &&
		    fe->device != alt_devno)
			continue;

		if (!(~dev->horkage & fe->param.horkage_on) &&
		    !(dev->horkage & fe->param.horkage_off))
			continue;

		dev->horkage |= fe->param.horkage_on;
		dev->horkage &= ~fe->param.horkage_off;

		ata_dev_printk(dev, KERN_NOTICE,
			       "FORCE: horkage modified (%s)\n", fe->param.name);
 * atapi_cmd_type - Determine ATAPI command type from SCSI opcode
 * @opcode: SCSI opcode
 *
 * Determine ATAPI command type from @opcode.
 *
 * ATAPI_{READ|WRITE|READ_CD|PASS_THRU|MISC}
int atapi_cmd_type(u8 opcode)
	case GPCMD_WRITE_AND_VERIFY_10:

	case GPCMD_READ_CD_MSF:
		return ATAPI_READ_CD;

		if (atapi_passthru16)
			return ATAPI_PASS_THRU;
 * ata_tf_to_fis - Convert ATA taskfile to SATA FIS structure
 * @tf: Taskfile to convert
 * @pmp: Port multiplier port
 * @is_cmd: This FIS is for command
 * @fis: Buffer into which data will be output
 *
 * Converts a standard ATA taskfile to a Serial ATA
 * FIS structure (Register - Host to Device).
 *
 * Inherited from caller.
void ata_tf_to_fis(const struct ata_taskfile *tf, u8 pmp, int is_cmd, u8 *fis)
	fis[0] = 0x27;			/* Register - Host to Device FIS */
	fis[1] = pmp & 0xf;		/* Port multiplier number */
		fis[1] |= (1 << 7);	/* bit 7 indicates Command FIS */

	fis[2] = tf->command;
	fis[3] = tf->feature;

	fis[8] = tf->hob_lbal;
	fis[9] = tf->hob_lbam;
	fis[10] = tf->hob_lbah;
	fis[11] = tf->hob_feature;

	fis[13] = tf->hob_nsect;
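/*
 * For reference, the Register - Host to Device FIS bytes set above are:
 * fis[0] FIS type (0x27), fis[1] PMP field plus the C bit, fis[2] command,
 * fis[3] features, fis[8]-fis[10] LBA 47:24 (hob_lbal/lbam/lbah), fis[11]
 * features(exp) and fis[13] sector count(exp).  Bytes 4-7 and 12, elided
 * from this excerpt, carry LBA 23:0, the device register and the low
 * sector count in the full function.
 */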
 * ata_tf_from_fis - Convert SATA FIS to ATA taskfile
 * @fis: Buffer from which data will be input
 * @tf: Taskfile to output
 *
 * Converts a serial ATA FIS structure to a standard ATA taskfile.
 *
 * Inherited from caller.
void ata_tf_from_fis(const u8 *fis, struct ata_taskfile *tf)
	tf->command = fis[2];	/* status */
	tf->feature = fis[3];	/* error */

	tf->hob_lbal = fis[8];
	tf->hob_lbam = fis[9];
	tf->hob_lbah = fis[10];

	tf->hob_nsect = fis[13];

static const u8 ata_rw_cmds[] = {
	ATA_CMD_READ_MULTI_EXT,
	ATA_CMD_WRITE_MULTI_EXT,
	ATA_CMD_WRITE_MULTI_FUA_EXT,
	ATA_CMD_PIO_READ_EXT,
	ATA_CMD_PIO_WRITE_EXT,
	ATA_CMD_WRITE_FUA_EXT
 * ata_rwcmd_protocol - set taskfile r/w commands and protocol
 * @tf: command to examine and configure
 * @dev: device tf belongs to
 *
 * Examine the device configuration and tf->flags to calculate
 * the proper read/write commands and protocol to use.
static int ata_rwcmd_protocol(struct ata_taskfile *tf, struct ata_device *dev)
	int index, fua, lba48, write;

	fua = (tf->flags & ATA_TFLAG_FUA) ? 4 : 0;
	lba48 = (tf->flags & ATA_TFLAG_LBA48) ? 2 : 0;
	write = (tf->flags & ATA_TFLAG_WRITE) ? 1 : 0;

	if (dev->flags & ATA_DFLAG_PIO) {
		tf->protocol = ATA_PROT_PIO;
		index = dev->multi_count ? 0 : 8;
	} else if (lba48 && (dev->link->ap->flags & ATA_FLAG_PIO_LBA48)) {
		/* Unable to use DMA due to host limitation */
		tf->protocol = ATA_PROT_PIO;
		index = dev->multi_count ? 0 : 8;
	} else {
		tf->protocol = ATA_PROT_DMA;
		index = 16;
	}

	cmd = ata_rw_cmds[index + fua + lba48 + write];
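	/*
	 * Worked example: a DMA LBA48 FUA write picks index 16 + fua 4 +
	 * lba48 2 + write 1 = 23, i.e. ATA_CMD_WRITE_FUA_EXT in the full
	 * 24-entry table; without FUA it picks 19, ATA_CMD_WRITE_EXT.
	 */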
 * ata_tf_read_block - Read block address from ATA taskfile
 * @tf: ATA taskfile of interest
 * @dev: ATA device @tf belongs to
 *
 * Read block address from @tf.  This function can handle all
 * three address formats - LBA, LBA48 and CHS.  tf->protocol and
 * flags select the address format to use.
 *
 * Block address read from @tf.
u64 ata_tf_read_block(struct ata_taskfile *tf, struct ata_device *dev)
	if (tf->flags & ATA_TFLAG_LBA) {
		if (tf->flags & ATA_TFLAG_LBA48) {
			block |= (u64)tf->hob_lbah << 40;
			block |= (u64)tf->hob_lbam << 32;
			block |= tf->hob_lbal << 24;
		} else
			block |= (tf->device & 0xf) << 24;

		block |= tf->lbah << 16;
		block |= tf->lbam << 8;

		cyl = tf->lbam | (tf->lbah << 8);
		head = tf->device & 0xf;

		block = (cyl * dev->heads + head) * dev->sectors + sect;
 * ata_build_rw_tf - Build ATA taskfile for given read/write request
 * @tf: Target ATA taskfile
 * @dev: ATA device @tf belongs to
 * @block: Block address
 * @n_block: Number of blocks
 * @tf_flags: RW/FUA etc...
 *
 * Build ATA taskfile @tf for read/write request described by
 * @block, @n_block, @tf_flags and @tag on @dev.
 *
 * 0 on success, -ERANGE if the request is too large for @dev,
 * -EINVAL if the request is invalid.
int ata_build_rw_tf(struct ata_taskfile *tf, struct ata_device *dev,
		    u64 block, u32 n_block, unsigned int tf_flags,
		    unsigned int tag)
	tf->flags |= ATA_TFLAG_ISADDR | ATA_TFLAG_DEVICE;
	tf->flags |= tf_flags;

	if (ata_ncq_enabled(dev) && likely(tag != ATA_TAG_INTERNAL)) {
		if (!lba_48_ok(block, n_block))
			return -ERANGE;

		tf->protocol = ATA_PROT_NCQ;
		tf->flags |= ATA_TFLAG_LBA | ATA_TFLAG_LBA48;

		if (tf->flags & ATA_TFLAG_WRITE)
			tf->command = ATA_CMD_FPDMA_WRITE;
		else
			tf->command = ATA_CMD_FPDMA_READ;

		tf->nsect = tag << 3;
		tf->hob_feature = (n_block >> 8) & 0xff;
		tf->feature = n_block & 0xff;

		tf->hob_lbah = (block >> 40) & 0xff;
		tf->hob_lbam = (block >> 32) & 0xff;
		tf->hob_lbal = (block >> 24) & 0xff;
		tf->lbah = (block >> 16) & 0xff;
		tf->lbam = (block >> 8) & 0xff;
		tf->lbal = block & 0xff;

		if (tf->flags & ATA_TFLAG_FUA)
			tf->device |= 1 << 7;
	} else if (dev->flags & ATA_DFLAG_LBA) {
		tf->flags |= ATA_TFLAG_LBA;

		if (lba_28_ok(block, n_block)) {
			tf->device |= (block >> 24) & 0xf;
		} else if (lba_48_ok(block, n_block)) {
			if (!(dev->flags & ATA_DFLAG_LBA48))
				return -ERANGE;

			tf->flags |= ATA_TFLAG_LBA48;

			tf->hob_nsect = (n_block >> 8) & 0xff;

			tf->hob_lbah = (block >> 40) & 0xff;
			tf->hob_lbam = (block >> 32) & 0xff;
			tf->hob_lbal = (block >> 24) & 0xff;
			/* request too large even for LBA48 */
			return -ERANGE;

		if (unlikely(ata_rwcmd_protocol(tf, dev) < 0))
			return -EINVAL;

		tf->nsect = n_block & 0xff;

		tf->lbah = (block >> 16) & 0xff;
		tf->lbam = (block >> 8) & 0xff;
		tf->lbal = block & 0xff;

		tf->device |= ATA_LBA;
		u32 sect, head, cyl, track;

		/* The request -may- be too large for CHS addressing. */
		if (!lba_28_ok(block, n_block))
			return -ERANGE;

		if (unlikely(ata_rwcmd_protocol(tf, dev) < 0))
			return -EINVAL;

		/* Convert LBA to CHS */
		track = (u32)block / dev->sectors;
		cyl   = track / dev->heads;
		head  = track % dev->heads;
		sect  = (u32)block % dev->sectors + 1;

		DPRINTK("block %u track %u cyl %u head %u sect %u\n",
			(u32)block, track, cyl, head, sect);
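		/*
		 * e.g. block 5000 on a 16-head, 63-sector translation:
		 * track = 5000 / 63 = 79, cyl = 79 / 16 = 4,
		 * head = 79 % 16 = 15, sect = 5000 % 63 + 1 = 24.
		 */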
		/* Check whether the converted CHS can fit. */
		if ((cyl >> 16) || (head >> 4) || (sect >> 8) || (!sect))
			return -ERANGE;

		tf->nsect = n_block & 0xff; /* Sector count 0 means 256 sectors */

 * ata_pack_xfermask - Pack pio, mwdma and udma masks into xfer_mask
 * @pio_mask: pio_mask
 * @mwdma_mask: mwdma_mask
 * @udma_mask: udma_mask
 *
 * Pack @pio_mask, @mwdma_mask and @udma_mask into a single
 * unsigned long xfer_mask.
unsigned long ata_pack_xfermask(unsigned long pio_mask,
				unsigned long mwdma_mask,
				unsigned long udma_mask)
	return ((pio_mask << ATA_SHIFT_PIO) & ATA_MASK_PIO) |
	       ((mwdma_mask << ATA_SHIFT_MWDMA) & ATA_MASK_MWDMA) |
	       ((udma_mask << ATA_SHIFT_UDMA) & ATA_MASK_UDMA);
 * ata_unpack_xfermask - Unpack xfer_mask into pio, mwdma and udma masks
 * @xfer_mask: xfer_mask to unpack
 * @pio_mask: resulting pio_mask
 * @mwdma_mask: resulting mwdma_mask
 * @udma_mask: resulting udma_mask
 *
 * Unpack @xfer_mask into @pio_mask, @mwdma_mask and @udma_mask.
 * Any NULL destination masks will be ignored.
void ata_unpack_xfermask(unsigned long xfer_mask, unsigned long *pio_mask,
			 unsigned long *mwdma_mask, unsigned long *udma_mask)
		*pio_mask = (xfer_mask & ATA_MASK_PIO) >> ATA_SHIFT_PIO;
		*mwdma_mask = (xfer_mask & ATA_MASK_MWDMA) >> ATA_SHIFT_MWDMA;
		*udma_mask = (xfer_mask & ATA_MASK_UDMA) >> ATA_SHIFT_UDMA;
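/*
 * A round-trip sketch: packing PIO0-4 (0x1f) and MWDMA0-2 (0x07) yields a
 * combined mask which ata_unpack_xfermask() splits back into the same
 * per-type masks:
 *
 *	xfer_mask = ata_pack_xfermask(0x1f, 0x07, 0);
 *	ata_unpack_xfermask(xfer_mask, &pio, &mwdma, &udma);
 */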
static const struct ata_xfer_ent {
	{ ATA_SHIFT_PIO, ATA_NR_PIO_MODES, XFER_PIO_0 },
	{ ATA_SHIFT_MWDMA, ATA_NR_MWDMA_MODES, XFER_MW_DMA_0 },
	{ ATA_SHIFT_UDMA, ATA_NR_UDMA_MODES, XFER_UDMA_0 },

 * ata_xfer_mask2mode - Find matching XFER_* for the given xfer_mask
 * @xfer_mask: xfer_mask of interest
 *
 * Return matching XFER_* value for @xfer_mask.  Only the highest
 * bit of @xfer_mask is considered.
 *
 * Matching XFER_* value, 0xff if no match found.
u8 ata_xfer_mask2mode(unsigned long xfer_mask)
	int highbit = fls(xfer_mask) - 1;
	const struct ata_xfer_ent *ent;

	for (ent = ata_xfer_tbl; ent->shift >= 0; ent++)
		if (highbit >= ent->shift && highbit < ent->shift + ent->bits)
			return ent->base + highbit - ent->shift;
 * ata_xfer_mode2mask - Find matching xfer_mask for XFER_*
 * @xfer_mode: XFER_* of interest
 *
 * Return matching xfer_mask for @xfer_mode.
 *
 * Matching xfer_mask, 0 if no match found.
unsigned long ata_xfer_mode2mask(u8 xfer_mode)
	const struct ata_xfer_ent *ent;

	for (ent = ata_xfer_tbl; ent->shift >= 0; ent++)
		if (xfer_mode >= ent->base && xfer_mode < ent->base + ent->bits)
			return ((2 << (ent->shift + xfer_mode - ent->base)) - 1)
				& ~((1 << ent->shift) - 1);
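/*
 * e.g. ata_xfer_mode2mask(XFER_UDMA_2) returns the UDMA0-2 bits of the
 * packed mask (all modes of that type up to and including the given one),
 * while ata_xfer_mask2mode() of that mask returns XFER_UDMA_2 again.
 */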
 * ata_xfer_mode2shift - Find matching xfer_shift for XFER_*
 * @xfer_mode: XFER_* of interest
 *
 * Return matching xfer_shift for @xfer_mode.
 *
 * Matching xfer_shift, -1 if no match found.
int ata_xfer_mode2shift(unsigned long xfer_mode)
	const struct ata_xfer_ent *ent;

	for (ent = ata_xfer_tbl; ent->shift >= 0; ent++)
		if (xfer_mode >= ent->base && xfer_mode < ent->base + ent->bits)
 * ata_mode_string - convert xfer_mask to string
 * @xfer_mask: mask of bits supported; only highest bit counts.
 *
 * Determine string which represents the highest speed
 * (highest bit in @xfer_mask).
 *
 * Constant C string representing highest speed listed in
 * @xfer_mask, or the constant C string "<n/a>".
const char *ata_mode_string(unsigned long xfer_mask)
	static const char * const xfer_mode_str[] = {

	highbit = fls(xfer_mask) - 1;
	if (highbit >= 0 && highbit < ARRAY_SIZE(xfer_mode_str))
		return xfer_mode_str[highbit];
static const char *sata_spd_string(unsigned int spd)
	static const char * const spd_str[] = {

	if (spd == 0 || (spd - 1) >= ARRAY_SIZE(spd_str))
		return "<unknown>";
	return spd_str[spd - 1];

void ata_dev_disable(struct ata_device *dev)
	if (ata_dev_enabled(dev)) {
		if (ata_msg_drv(dev->link->ap))
			ata_dev_printk(dev, KERN_WARNING, "disabled\n");
		ata_acpi_on_disable(dev);
		ata_down_xfermask_limit(dev, ATA_DNXFER_FORCE_PIO0 |
static int ata_dev_set_dipm(struct ata_device *dev, enum link_pm policy)
	struct ata_link *link = dev->link;
	struct ata_port *ap = link->ap;
	unsigned int err_mask;

	 * disallow DIPM for drivers which haven't set
	 * ATA_FLAG_IPM.  This is because when DIPM is enabled,
	 * phy ready will be set in the interrupt status on
	 * state changes, which will cause some drivers to
	 * think there are errors - additionally drivers will
	 * need to disable hot plug.
	if (!(ap->flags & ATA_FLAG_IPM) || !ata_dev_enabled(dev)) {
		ap->pm_policy = NOT_AVAILABLE;

		 * For DIPM, we will only enable it for the
		 * min_power setting.
		 * Why?  Because disks are too stupid to know that
		 * if the host rejects a request to go to SLUMBER
		 * they should retry at PARTIAL, and instead they
		 * just give up.  So, for medium_power to
		 * work at all, we need to only allow HIPM.
		rc = sata_scr_read(link, SCR_CONTROL, &scontrol);

		/* no restrictions on IPM transitions */
		scontrol &= ~(0x3 << 8);
		rc = sata_scr_write(link, SCR_CONTROL, scontrol);

		if (dev->flags & ATA_DFLAG_DIPM)
			err_mask = ata_dev_set_feature(dev,
					SETFEATURES_SATA_ENABLE, SATA_DIPM);

		/* allow IPM to PARTIAL */
		scontrol &= ~(0x1 << 8);
		scontrol |= (0x2 << 8);
		rc = sata_scr_write(link, SCR_CONTROL, scontrol);

		 * we don't have to disable DIPM since IPM flags
		 * disallow transitions to SLUMBER, which effectively
		 * disables DIPM if it does not support PARTIAL
	case MAX_PERFORMANCE:
		/* disable all IPM transitions */
		scontrol |= (0x3 << 8);
		rc = sata_scr_write(link, SCR_CONTROL, scontrol);

		 * we don't have to disable DIPM since IPM flags
		 * disallow all transitions, which effectively
		 * disables DIPM anyway.

	/* FIXME: handle SET FEATURES failure */
 * ata_dev_enable_pm - enable SATA interface power management
 * @dev: device to enable power management
 * @policy: the link power management policy
 *
 * Enable SATA Interface power management.  This will enable
 * Device Interface Power Management (DIPM) for min_power
 * policy, and then call driver specific callbacks for
 * enabling Host Initiated Power management.
 *
 * Returns: nothing.  On failure, the port's pm_policy falls back
 * to MAX_PERFORMANCE.
void ata_dev_enable_pm(struct ata_device *dev, enum link_pm policy)
	struct ata_port *ap = dev->link->ap;

	/* set HIPM first, then DIPM */
	if (ap->ops->enable_pm)
		rc = ap->ops->enable_pm(ap, policy);
		rc = ata_dev_set_dipm(dev, policy);
		ap->pm_policy = MAX_PERFORMANCE;
	ap->pm_policy = policy;
	return /* rc */;	/* hopefully we can use 'rc' eventually */
 * ata_dev_disable_pm - disable SATA interface power management
 * @dev: device to disable power management
 *
 * Disable SATA Interface power management.  This will disable
 * Device Interface Power Management (DIPM) without changing
 * policy, and call driver specific callbacks for disabling Host
 * Initiated Power management.
static void ata_dev_disable_pm(struct ata_device *dev)
	struct ata_port *ap = dev->link->ap;

	ata_dev_set_dipm(dev, MAX_PERFORMANCE);
	if (ap->ops->disable_pm)
		ap->ops->disable_pm(ap);
#endif	/* CONFIG_PM */

void ata_lpm_schedule(struct ata_port *ap, enum link_pm policy)
	ap->pm_policy = policy;
	ap->link.eh_info.action |= ATA_EH_LPM;
	ap->link.eh_info.flags |= ATA_EHI_NO_AUTOPSY;
	ata_port_schedule_eh(ap);

static void ata_lpm_enable(struct ata_host *host)
	struct ata_link *link;
	struct ata_port *ap;
	struct ata_device *dev;

	for (i = 0; i < host->n_ports; i++) {
		ap = host->ports[i];
		ata_port_for_each_link(link, ap) {
			ata_link_for_each_dev(dev, link)
				ata_dev_disable_pm(dev);

static void ata_lpm_disable(struct ata_host *host)
	for (i = 0; i < host->n_ports; i++) {
		struct ata_port *ap = host->ports[i];
		ata_lpm_schedule(ap, ap->pm_policy);
#endif	/* CONFIG_PM */
 * ata_devchk - PATA device presence detection
 * @ap: ATA channel to examine
 * @device: Device to examine (starting at zero)
 *
 * This technique was originally described in
 * Hale Landis's ATADRVR (www.ata-atapi.com), and
 * later found its way into the ATA/ATAPI spec.
 *
 * Write a pattern to the ATA shadow registers,
 * and if a device is present, it will respond by
 * correctly storing and echoing back the
 * ATA shadow register contents.
static unsigned int ata_devchk(struct ata_port *ap, unsigned int device)
	struct ata_ioports *ioaddr = &ap->ioaddr;

	ap->ops->dev_select(ap, device);

	iowrite8(0x55, ioaddr->nsect_addr);
	iowrite8(0xaa, ioaddr->lbal_addr);

	iowrite8(0xaa, ioaddr->nsect_addr);
	iowrite8(0x55, ioaddr->lbal_addr);

	iowrite8(0x55, ioaddr->nsect_addr);
	iowrite8(0xaa, ioaddr->lbal_addr);

	nsect = ioread8(ioaddr->nsect_addr);
	lbal = ioread8(ioaddr->lbal_addr);

	if ((nsect == 0x55) && (lbal == 0xaa))
		return 1;	/* we found a device */

	return 0;		/* nothing found */
 * ata_dev_classify - determine device type based on ATA-spec signature
 * @tf: ATA taskfile register set for device to be identified
 *
 * Determine from taskfile register contents whether a device is
 * ATA or ATAPI, as per "Signature and persistence" section
 * of ATA/PI spec (volume 1, sect 5.14).
 *
 * Device type, %ATA_DEV_ATA, %ATA_DEV_ATAPI, %ATA_DEV_PMP or
 * %ATA_DEV_UNKNOWN in the event of failure.
unsigned int ata_dev_classify(const struct ata_taskfile *tf)
	/* Apple's open source Darwin code hints that some devices only
	 * put a proper signature into the LBA mid/high registers, so we
	 * check only those; it's sufficient for uniqueness.
	 *
	 * ATA/ATAPI-7 (d1532v1r1: Feb. 19, 2003) specified separate
	 * signatures for ATA and ATAPI devices attached on SerialATA,
	 * 0x3c/0xc3 and 0x69/0x96 respectively.  However, the SerialATA
	 * spec has never mentioned using different signatures
	 * for ATA/ATAPI devices.  Then, the Serial ATA II: Port
	 * Multiplier specification began to use 0x69/0x96 to identify
	 * port multipliers and 0x3c/0xc3 to identify SEMB devices.
	 * ATA/ATAPI-7 soon dropped the descriptions of 0x3c/0xc3 and
	 * 0x69/0x96 and described them as reserved for Serial ATA.
	 *
	 * We follow the current spec and consider that 0x69/0x96
	 * identifies a port multiplier and 0x3c/0xc3 a SEMB device.
	 */
	if ((tf->lbam == 0) && (tf->lbah == 0)) {
		DPRINTK("found ATA device by sig\n");

	if ((tf->lbam == 0x14) && (tf->lbah == 0xeb)) {
		DPRINTK("found ATAPI device by sig\n");
		return ATA_DEV_ATAPI;

	if ((tf->lbam == 0x69) && (tf->lbah == 0x96)) {
		DPRINTK("found PMP device by sig\n");

	if ((tf->lbam == 0x3c) && (tf->lbah == 0xc3)) {
		printk(KERN_INFO "ata: SEMB device ignored\n");
		return ATA_DEV_SEMB_UNSUP; /* not yet */

	DPRINTK("unknown device\n");
	return ATA_DEV_UNKNOWN;
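/*
 * Signature summary (lbam/lbah as tested above):
 *
 *	0x00/0x00  ATA		0x69/0x96  port multiplier
 *	0x14/0xeb  ATAPI	0x3c/0xc3  SEMB (not supported)
 */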
 * ata_dev_try_classify - Parse returned ATA device signature
 * @dev: ATA device to classify (starting at zero)
 * @present: device seems present
 * @r_err: Value of error register on completion
 *
 * After an event -- SRST, E.D.D., or SATA COMRESET -- occurs,
 * an ATA/ATAPI-defined set of values is placed in the ATA
 * shadow registers, indicating the results of device detection
 * and diagnostics.
 *
 * Select the ATA device, and read the values from the ATA shadow
 * registers.  Then parse according to the Error register value,
 * and the spec-defined values examined by ata_dev_classify().
 *
 * Device type - %ATA_DEV_ATA, %ATA_DEV_ATAPI or %ATA_DEV_NONE.
unsigned int ata_dev_try_classify(struct ata_device *dev, int present,
	struct ata_port *ap = dev->link->ap;
	struct ata_taskfile tf;

	ap->ops->dev_select(ap, dev->devno);

	memset(&tf, 0, sizeof(tf));

	ap->ops->tf_read(ap, &tf);

	/* see if device passed diags: continue and warn later */
		/* diagnostic fail : do nothing _YET_ */
		dev->horkage |= ATA_HORKAGE_DIAGNOSTIC;

	else if ((dev->devno == 0) && (err == 0x81))

		return ATA_DEV_NONE;
	/* determine if device is ATA or ATAPI */
	class = ata_dev_classify(&tf);

	if (class == ATA_DEV_UNKNOWN) {
		/* If the device failed diagnostic, it's likely to
		 * have reported incorrect device signature too.
		 * Assume ATA device if the device seems present but
		 * device signature is invalid with diagnostic
		 * failure.
		 */
		if (present && (dev->horkage & ATA_HORKAGE_DIAGNOSTIC))
			class = ATA_DEV_ATA;
		else
			class = ATA_DEV_NONE;
	} else if ((class == ATA_DEV_ATA) && (ata_chk_status(ap) == 0))
		class = ATA_DEV_NONE;
 * ata_id_string - Convert IDENTIFY DEVICE page into string
 * @id: IDENTIFY DEVICE results we will examine
 * @s: string into which data is output
 * @ofs: offset into identify device page
 * @len: length of string to return. must be an even number.
 *
 * The strings in the IDENTIFY DEVICE page are broken up into
 * 16-bit chunks.  Run through the string, and output each
 * 8-bit chunk linearly, regardless of platform.
void ata_id_string(const u16 *id, unsigned char *s,
		   unsigned int ofs, unsigned int len)
 * ata_id_c_string - Convert IDENTIFY DEVICE page into C string
 * @id: IDENTIFY DEVICE results we will examine
 * @s: string into which data is output
 * @ofs: offset into identify device page
 * @len: length of string to return. must be an odd number.
 *
 * This function is identical to ata_id_string except that it
 * trims trailing spaces and terminates the resulting string with
 * null.  @len must be actual maximum length (even number) + 1.
void ata_id_c_string(const u16 *id, unsigned char *s,
		     unsigned int ofs, unsigned int len)
	WARN_ON(!(len & 1));

	ata_id_string(id, s, ofs, len - 1);

	p = s + strnlen(s, len - 1);
	while (p > s && p[-1] == ' ')
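/*
 * Typical use, as seen later in this file: pulling the model string out
 * of IDENTIFY data into a NUL-terminated buffer:
 *
 *	char modelbuf[ATA_ID_PROD_LEN + 1];
 *	ata_id_c_string(dev->id, modelbuf, ATA_ID_PROD, sizeof(modelbuf));
 */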
static u64 ata_id_n_sectors(const u16 *id)
	if (ata_id_has_lba(id)) {
		if (ata_id_has_lba48(id))
			return ata_id_u64(id, 100);
		else
			return ata_id_u32(id, 60);
	if (ata_id_current_chs_valid(id))
		return ata_id_u32(id, 57);
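	/* no LBA: words 1/3/6 are the default cylinders/heads/sectors */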
	return id[1] * id[3] * id[6];

static u64 ata_tf_to_lba48(struct ata_taskfile *tf)
	sectors |= ((u64)(tf->hob_lbah & 0xff)) << 40;
	sectors |= ((u64)(tf->hob_lbam & 0xff)) << 32;
	sectors |= (tf->hob_lbal & 0xff) << 24;
	sectors |= (tf->lbah & 0xff) << 16;
	sectors |= (tf->lbam & 0xff) << 8;
	sectors |= (tf->lbal & 0xff);

static u64 ata_tf_to_lba(struct ata_taskfile *tf)
	sectors |= (tf->device & 0x0f) << 24;
	sectors |= (tf->lbah & 0xff) << 16;
	sectors |= (tf->lbam & 0xff) << 8;
	sectors |= (tf->lbal & 0xff);
 * ata_read_native_max_address - Read native max address
 * @dev: target device
 * @max_sectors: out parameter for the result native max address
 *
 * Perform an LBA48 or LBA28 native size query upon the device in
 * question.
 *
 * 0 on success, -EACCES if command is aborted by the drive.
 * -EIO on other errors.
static int ata_read_native_max_address(struct ata_device *dev, u64 *max_sectors)
	unsigned int err_mask;
	struct ata_taskfile tf;
	int lba48 = ata_id_has_lba48(dev->id);

	ata_tf_init(dev, &tf);

	/* always clear all address registers */
	tf.flags |= ATA_TFLAG_DEVICE | ATA_TFLAG_ISADDR;

	if (lba48) {
		tf.command = ATA_CMD_READ_NATIVE_MAX_EXT;
		tf.flags |= ATA_TFLAG_LBA48;
	} else
		tf.command = ATA_CMD_READ_NATIVE_MAX;

	tf.protocol |= ATA_PROT_NODATA;
	tf.device |= ATA_LBA;

	err_mask = ata_exec_internal(dev, &tf, NULL, DMA_NONE, NULL, 0, 0);
	if (err_mask) {
		ata_dev_printk(dev, KERN_WARNING, "failed to read native "
			       "max address (err_mask=0x%x)\n", err_mask);
		if (err_mask == AC_ERR_DEV && (tf.feature & ATA_ABORTED))

	if (lba48)
		*max_sectors = ata_tf_to_lba48(&tf);
	else
		*max_sectors = ata_tf_to_lba(&tf);
	if (dev->horkage & ATA_HORKAGE_HPA_SIZE)
 * ata_set_max_sectors - Set max sectors
 * @dev: target device
 * @new_sectors: new max sectors value to set for the device
 *
 * Set max sectors of @dev to @new_sectors.
 *
 * 0 on success, -EACCES if command is aborted or denied (due to
 * previous non-volatile SET_MAX) by the drive.  -EIO on other
 * errors.
static int ata_set_max_sectors(struct ata_device *dev, u64 new_sectors)
	unsigned int err_mask;
	struct ata_taskfile tf;
	int lba48 = ata_id_has_lba48(dev->id);

	ata_tf_init(dev, &tf);

	tf.flags |= ATA_TFLAG_DEVICE | ATA_TFLAG_ISADDR;

	if (lba48) {
		tf.command = ATA_CMD_SET_MAX_EXT;
		tf.flags |= ATA_TFLAG_LBA48;

		tf.hob_lbal = (new_sectors >> 24) & 0xff;
		tf.hob_lbam = (new_sectors >> 32) & 0xff;
		tf.hob_lbah = (new_sectors >> 40) & 0xff;
	} else {
		tf.command = ATA_CMD_SET_MAX;

		tf.device |= (new_sectors >> 24) & 0xf;
	}

	tf.protocol |= ATA_PROT_NODATA;
	tf.device |= ATA_LBA;

	tf.lbal = (new_sectors >> 0) & 0xff;
	tf.lbam = (new_sectors >> 8) & 0xff;
	tf.lbah = (new_sectors >> 16) & 0xff;

	err_mask = ata_exec_internal(dev, &tf, NULL, DMA_NONE, NULL, 0, 0);
	if (err_mask) {
		ata_dev_printk(dev, KERN_WARNING, "failed to set "
			       "max address (err_mask=0x%x)\n", err_mask);
		if (err_mask == AC_ERR_DEV &&
		    (tf.feature & (ATA_ABORTED | ATA_IDNF)))
 * ata_hpa_resize - Resize a device with an HPA set
 * @dev: Device to resize
 *
 * Read the size of an LBA28 or LBA48 disk with HPA features and resize
 * it if required to the full size of the media.  The caller must check
 * the drive has the HPA feature set enabled.
 *
 * 0 on success, -errno on failure.
static int ata_hpa_resize(struct ata_device *dev)
	struct ata_eh_context *ehc = &dev->link->eh_context;
	int print_info = ehc->i.flags & ATA_EHI_PRINTINFO;
	u64 sectors = ata_id_n_sectors(dev->id);

	/* do we need to do it? */
	if (dev->class != ATA_DEV_ATA ||
	    !ata_id_has_lba(dev->id) || !ata_id_hpa_enabled(dev->id) ||
	    (dev->horkage & ATA_HORKAGE_BROKEN_HPA))

	/* read native max address */
	rc = ata_read_native_max_address(dev, &native_sectors);
	if (rc) {
		/* If device aborted the command or HPA isn't going to
		 * be unlocked, skip HPA resizing.
		 */
		if (rc == -EACCES || !ata_ignore_hpa) {
			ata_dev_printk(dev, KERN_WARNING, "HPA support seems "
				       "broken, skipping HPA handling\n");
			dev->horkage |= ATA_HORKAGE_BROKEN_HPA;

			/* we can continue if device aborted the command */

	/* nothing to do? */
	if (native_sectors <= sectors || !ata_ignore_hpa) {
		if (!print_info || native_sectors == sectors)

		if (native_sectors > sectors)
			ata_dev_printk(dev, KERN_INFO,
				"HPA detected: current %llu, native %llu\n",
				(unsigned long long)sectors,
				(unsigned long long)native_sectors);
		else if (native_sectors < sectors)
			ata_dev_printk(dev, KERN_WARNING,
				"native sectors (%llu) is smaller than "
				(unsigned long long)native_sectors,
				(unsigned long long)sectors);

	/* let's unlock HPA */
	rc = ata_set_max_sectors(dev, native_sectors);
	if (rc == -EACCES) {
		/* if device aborted the command, skip HPA resizing */
		ata_dev_printk(dev, KERN_WARNING, "device aborted resize "
			       "(%llu -> %llu), skipping HPA handling\n",
			       (unsigned long long)sectors,
			       (unsigned long long)native_sectors);
		dev->horkage |= ATA_HORKAGE_BROKEN_HPA;

	/* re-read IDENTIFY data */
	rc = ata_dev_reread_id(dev, 0);
		ata_dev_printk(dev, KERN_ERR, "failed to re-read IDENTIFY "
			       "data after HPA resizing\n");

		u64 new_sectors = ata_id_n_sectors(dev->id);
		ata_dev_printk(dev, KERN_INFO,
			"HPA unlocked: %llu -> %llu, native %llu\n",
			(unsigned long long)sectors,
			(unsigned long long)new_sectors,
			(unsigned long long)native_sectors);
 * ata_noop_dev_select - Select device 0/1 on ATA bus
 * @ap: ATA channel to manipulate
 * @device: ATA device (numbered from zero) to select
 *
 * This function performs no action.
 *
 * May be used as the dev_select() entry in ata_port_operations.
void ata_noop_dev_select(struct ata_port *ap, unsigned int device)

 * ata_std_dev_select - Select device 0/1 on ATA bus
 * @ap: ATA channel to manipulate
 * @device: ATA device (numbered from zero) to select
 *
 * Use the method defined in the ATA specification to
 * make either device 0, or device 1, active on the
 * ATA channel.  Works with both PIO and MMIO.
 *
 * May be used as the dev_select() entry in ata_port_operations.
void ata_std_dev_select(struct ata_port *ap, unsigned int device)
		tmp = ATA_DEVICE_OBS;
		tmp = ATA_DEVICE_OBS | ATA_DEV1;

	iowrite8(tmp, ap->ioaddr.device_addr);
	ata_pause(ap);	/* needed; also flushes, for mmio */
 * ata_dev_select - Select device 0/1 on ATA bus
 * @ap: ATA channel to manipulate
 * @device: ATA device (numbered from zero) to select
 * @wait: non-zero to wait for Status register BSY bit to clear
 * @can_sleep: non-zero if context allows sleeping
 *
 * Use the method defined in the ATA specification to
 * make either device 0, or device 1, active on the
 * ATA channel.
 *
 * This is a high-level version of ata_std_dev_select(),
 * which additionally provides the services of inserting
 * the proper pauses and status polling, where needed.
void ata_dev_select(struct ata_port *ap, unsigned int device,
		    unsigned int wait, unsigned int can_sleep)
	if (ata_msg_probe(ap))
		ata_port_printk(ap, KERN_INFO, "ata_dev_select: ENTER, "
				"device %u, wait %u\n", device, wait);

	ap->ops->dev_select(ap, device);

	if (can_sleep && ap->link.device[device].class == ATA_DEV_ATAPI)
 * ata_dump_id - IDENTIFY DEVICE info debugging output
 * @id: IDENTIFY DEVICE page to dump
 *
 * Dump selected 16-bit words from the given IDENTIFY DEVICE
 * page.
static inline void ata_dump_id(const u16 *id)
	DPRINTK("49==0x%04x "
	DPRINTK("80==0x%04x "
	DPRINTK("88==0x%04x "
 * ata_id_xfermask - Compute xfermask from the given IDENTIFY data
 * @id: IDENTIFY data to compute xfer mask from
 *
 * Compute the xfermask for this device.  This is not as trivial
 * as it seems if we must consider early devices correctly.
 *
 * FIXME: pre IDE drive timing (do we care?).
unsigned long ata_id_xfermask(const u16 *id)
	unsigned long pio_mask, mwdma_mask, udma_mask;

	/* Usual case.  Word 53 indicates word 64 is valid */
	if (id[ATA_ID_FIELD_VALID] & (1 << 1)) {
		pio_mask = id[ATA_ID_PIO_MODES] & 0x03;

		/* If word 64 isn't valid then Word 51 high byte holds
		 * the PIO timing number for the maximum.  Turn it into
		 * a mask.
		 */
		u8 mode = (id[ATA_ID_OLD_PIO_MODES] >> 8) & 0xFF;
		if (mode < 5)	/* Valid PIO range */
			pio_mask = (2 << mode) - 1;
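			/* e.g. mode 4 -> (2 << 4) - 1 = 0x1f, i.e. PIO0-4 */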
	/* But wait.. there's more.  Design your standards by
	 * committee and you too can get a free iordy field to
	 * process.  However it's the speeds, not the modes, that
	 * are supported...  Note drivers using the timing API
	 * will get this right anyway.
	 */

	mwdma_mask = id[ATA_ID_MWDMA_MODES] & 0x07;

	if (ata_id_is_cfa(id)) {
		 * Process compact flash extended modes
		int pio = id[163] & 0x7;
		int dma = (id[163] >> 3) & 7;

			pio_mask |= (1 << 5);
			pio_mask |= (1 << 6);
			mwdma_mask |= (1 << 3);
			mwdma_mask |= (1 << 4);

	if (id[ATA_ID_FIELD_VALID] & (1 << 2))
		udma_mask = id[ATA_ID_UDMA_MODES] & 0xff;

	return ata_pack_xfermask(pio_mask, mwdma_mask, udma_mask);
 * ata_pio_queue_task - Queue port_task
 * @ap: The ata_port to queue port_task for
 * @fn: workqueue function to be scheduled
 * @data: data for @fn to use
 * @delay: delay time for workqueue function
 *
 * Schedule @fn(@data) for execution after @delay jiffies using
 * port_task.  There is one port_task per port and it's the
 * user's (low level driver's) responsibility to make sure that
 * only one task is active at any given time.
 *
 * libata core layer takes care of synchronization between
 * port_task and EH.  ata_pio_queue_task() may be ignored for EH
 * synchronization.
 *
 * Inherited from caller.
static void ata_pio_queue_task(struct ata_port *ap, void *data,
			       unsigned long delay)
	ap->port_task_data = data;

	/* may fail if ata_port_flush_task() in progress */
	queue_delayed_work(ata_wq, &ap->port_task, delay);
 * ata_port_flush_task - Flush port_task
 * @ap: The ata_port to flush port_task for
 *
 * After this function completes, port_task is guaranteed not to
 * be running or scheduled.
 *
 * Kernel thread context (may sleep)
void ata_port_flush_task(struct ata_port *ap)
	cancel_rearming_delayed_work(&ap->port_task);

	if (ata_msg_ctl(ap))
		ata_port_printk(ap, KERN_DEBUG, "%s: EXIT\n", __func__);
static void ata_qc_complete_internal(struct ata_queued_cmd *qc)
	struct completion *waiting = qc->private_data;
 * ata_exec_internal_sg - execute libata internal command
 * @dev: Device to which the command is sent
 * @tf: Taskfile registers for the command and the result
 * @cdb: CDB for packet command
 * @dma_dir: Data transfer direction of the command
 * @sgl: sg list for the data buffer of the command
 * @n_elem: Number of sg entries
 * @timeout: Timeout in msecs (0 for default)
 *
 * Executes libata internal command with timeout.  @tf contains
 * command on entry and result on return.  Timeout and error
 * conditions are reported via return value.  No recovery action
 * is taken after a command times out.  It's the caller's duty to
 * clean up after timeout.
 *
 * None.  Should be called with kernel context, might sleep.
 *
 * Zero on success, AC_ERR_* mask on failure
unsigned ata_exec_internal_sg(struct ata_device *dev,
			      struct ata_taskfile *tf, const u8 *cdb,
			      int dma_dir, struct scatterlist *sgl,
			      unsigned int n_elem, unsigned long timeout)
	struct ata_link *link = dev->link;
	struct ata_port *ap = link->ap;
	u8 command = tf->command;
	struct ata_queued_cmd *qc;
	unsigned int tag, preempted_tag;
	u32 preempted_sactive, preempted_qc_active;
	int preempted_nr_active_links;
	DECLARE_COMPLETION_ONSTACK(wait);
	unsigned long flags;
	unsigned int err_mask;

	spin_lock_irqsave(ap->lock, flags);

	/* no internal command while frozen */
	if (ap->pflags & ATA_PFLAG_FROZEN) {
		spin_unlock_irqrestore(ap->lock, flags);
		return AC_ERR_SYSTEM;

	/* initialize internal qc */

	/* XXX: Tag 0 is used for drivers with legacy EH as some
	 * drivers choke if any other tag is given.  This breaks
	 * ata_tag_internal() test for those drivers.  Don't use new
	 * EH stuff without converting to it.
	 */
	if (ap->ops->error_handler)
		tag = ATA_TAG_INTERNAL;

	if (test_and_set_bit(tag, &ap->qc_allocated))
		BUG();
	qc = __ata_qc_from_tag(ap, tag);

	preempted_tag = link->active_tag;
	preempted_sactive = link->sactive;
	preempted_qc_active = ap->qc_active;
	preempted_nr_active_links = ap->nr_active_links;
	link->active_tag = ATA_TAG_POISON;
	ap->nr_active_links = 0;

	/* prepare & issue qc */
	memcpy(qc->cdb, cdb, ATAPI_CDB_LEN);
	qc->flags |= ATA_QCFLAG_RESULT_TF;
	qc->dma_dir = dma_dir;
	if (dma_dir != DMA_NONE) {
		unsigned int i, buflen = 0;
		struct scatterlist *sg;

		for_each_sg(sgl, sg, n_elem, i)
			buflen += sg->length;

		ata_sg_init(qc, sgl, n_elem);
		qc->nbytes = buflen;

	qc->private_data = &wait;
	qc->complete_fn = ata_qc_complete_internal;

	spin_unlock_irqrestore(ap->lock, flags);

	if (!timeout)
		timeout = ata_probe_timeout * 1000 / HZ;

	rc = wait_for_completion_timeout(&wait, msecs_to_jiffies(timeout));

	ata_port_flush_task(ap);

	if (!rc) {
		spin_lock_irqsave(ap->lock, flags);

		/* We're racing with irq here.  If we lose, the
		 * following test prevents us from completing the qc
		 * twice.  If we win, the port is frozen and will be
		 * cleaned up by ->post_internal_cmd().
		 */
		if (qc->flags & ATA_QCFLAG_ACTIVE) {
			qc->err_mask |= AC_ERR_TIMEOUT;

			if (ap->ops->error_handler)
				ata_port_freeze(ap);
			else
				ata_qc_complete(qc);

			if (ata_msg_warn(ap))
				ata_dev_printk(dev, KERN_WARNING,
					       "qc timeout (cmd 0x%x)\n", command);

		spin_unlock_irqrestore(ap->lock, flags);

	/* do post_internal_cmd */
	if (ap->ops->post_internal_cmd)
		ap->ops->post_internal_cmd(qc);

	/* perform minimal error analysis */
	if (qc->flags & ATA_QCFLAG_FAILED) {
		if (qc->result_tf.command & (ATA_ERR | ATA_DF))
			qc->err_mask |= AC_ERR_DEV;

			qc->err_mask |= AC_ERR_OTHER;

		if (qc->err_mask & ~AC_ERR_OTHER)
			qc->err_mask &= ~AC_ERR_OTHER;

	spin_lock_irqsave(ap->lock, flags);

	*tf = qc->result_tf;
	err_mask = qc->err_mask;

	link->active_tag = preempted_tag;
	link->sactive = preempted_sactive;
	ap->qc_active = preempted_qc_active;
	ap->nr_active_links = preempted_nr_active_links;

	/* XXX - Some LLDDs (sata_mv) disable port on command failure.
	 * Until those drivers are fixed, we detect the condition
	 * here, fail the command with AC_ERR_SYSTEM and reenable the
	 * port.
	 *
	 * Note that this doesn't change any behavior as internal
	 * command failure results in disabling the device in the
	 * higher layer for LLDDs without new reset/EH callbacks.
	 *
	 * Kill the following code as soon as those drivers are fixed.
	 */
	if (ap->flags & ATA_FLAG_DISABLED) {
		err_mask |= AC_ERR_SYSTEM;

	spin_unlock_irqrestore(ap->lock, flags);
 * ata_exec_internal - execute libata internal command
 * @dev: Device to which the command is sent
 * @tf: Taskfile registers for the command and the result
 * @cdb: CDB for packet command
 * @dma_dir: Data transfer direction of the command
 * @buf: Data buffer of the command
 * @buflen: Length of data buffer
 * @timeout: Timeout in msecs (0 for default)
 *
 * Wrapper around ata_exec_internal_sg() which takes simple
 * buffer instead of sg list.
 *
 * None.  Should be called with kernel context, might sleep.
 *
 * Zero on success, AC_ERR_* mask on failure
unsigned ata_exec_internal(struct ata_device *dev,
			   struct ata_taskfile *tf, const u8 *cdb,
			   int dma_dir, void *buf, unsigned int buflen,
			   unsigned long timeout)
	struct scatterlist *psg = NULL, sg;
	unsigned int n_elem = 0;

	if (dma_dir != DMA_NONE) {
		sg_init_one(&sg, buf, buflen);

	return ata_exec_internal_sg(dev, tf, cdb, dma_dir, psg, n_elem,
 * ata_do_simple_cmd - execute simple internal command
 * @dev: Device to which the command is sent
 * @cmd: Opcode to execute
 *
 * Execute a 'simple' command, that only consists of the opcode
 * 'cmd' itself, without filling any other registers
 *
 * Kernel thread context (may sleep).
 *
 * Zero on success, AC_ERR_* mask on failure
unsigned int ata_do_simple_cmd(struct ata_device *dev, u8 cmd)
	struct ata_taskfile tf;

	ata_tf_init(dev, &tf);

	tf.flags |= ATA_TFLAG_DEVICE;
	tf.protocol = ATA_PROT_NODATA;

	return ata_exec_internal(dev, &tf, NULL, DMA_NONE, NULL, 0, 0);
 * ata_pio_need_iordy - check if iordy needed
 *
 * Check if the current speed of the device requires IORDY.  Used
 * by various controllers for chip configuration.
unsigned int ata_pio_need_iordy(const struct ata_device *adev)
	/* Controller doesn't support IORDY.  Probably a pointless check
	   as the caller should know this */
	if (adev->link->ap->flags & ATA_FLAG_NO_IORDY)
		return 0;
	/* PIO3 and higher it is mandatory */
	if (adev->pio_mode > XFER_PIO_2)
		return 1;
	/* We turn it on when possible */
	if (ata_id_has_iordy(adev->id))
		return 1;
 * ata_pio_mask_no_iordy - Return the non IORDY mask
 *
 * Compute the highest mode possible if we are not using iordy.  Return
 * -1 if no iordy mode is available.
static u32 ata_pio_mask_no_iordy(const struct ata_device *adev)
	/* If we have no drive specific rule, then PIO 2 is non IORDY */
	if (adev->id[ATA_ID_FIELD_VALID] & 2) {	/* EIDE */
		u16 pio = adev->id[ATA_ID_EIDE_PIO];
		/* Is the speed faster than the drive allows non IORDY ? */
			/* This is cycle times not frequency - watch the logic! */
			if (pio > 240)	/* PIO2 is 240nS per cycle */
				return 3 << ATA_SHIFT_PIO;
			return 7 << ATA_SHIFT_PIO;
	return 3 << ATA_SHIFT_PIO;
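/*
 * The returned values are mode bitmaps: 3 << ATA_SHIFT_PIO covers
 * PIO0-1, 7 << ATA_SHIFT_PIO covers PIO0-2.
 */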
 * ata_dev_read_id - Read ID data from the specified device
 * @dev: target device
 * @p_class: pointer to class of the target device (may be changed)
 * @flags: ATA_READID_* flags
 * @id: buffer to read IDENTIFY data into
 *
 * Read ID data from the specified device.  ATA_CMD_ID_ATA is
 * performed on ATA devices and ATA_CMD_ID_ATAPI on ATAPI
 * devices.  This function also issues ATA_CMD_INIT_DEV_PARAMS
 * for pre-ATA4 drives.
 *
 * FIXME: ATA_CMD_ID_ATA is optional for early drives and right
 * now we abort if we hit that case.
 *
 * Kernel thread context (may sleep)
 *
 * 0 on success, -errno otherwise.
int ata_dev_read_id(struct ata_device *dev, unsigned int *p_class,
		    unsigned int flags, u16 *id)
	struct ata_port *ap = dev->link->ap;
	unsigned int class = *p_class;
	struct ata_taskfile tf;
	unsigned int err_mask = 0;
	int may_fallback = 1, tried_spinup = 0;

	if (ata_msg_ctl(ap))
		ata_dev_printk(dev, KERN_DEBUG, "%s: ENTER\n", __func__);

	ata_dev_select(ap, dev->devno, 1, 1); /* select device 0/1 */

	ata_tf_init(dev, &tf);

		tf.command = ATA_CMD_ID_ATA;
		tf.command = ATA_CMD_ID_ATAPI;
		reason = "unsupported class";

	tf.protocol = ATA_PROT_PIO;

	/* Some devices choke if TF registers contain garbage.  Make
	 * sure those are properly initialized.
	 */
	tf.flags |= ATA_TFLAG_ISADDR | ATA_TFLAG_DEVICE;

	/* Device presence detection is unreliable on some
	 * controllers.  Always poll IDENTIFY if available.
	 */
	tf.flags |= ATA_TFLAG_POLLING;

	err_mask = ata_exec_internal(dev, &tf, NULL, DMA_FROM_DEVICE,
				     id, sizeof(id[0]) * ATA_ID_WORDS, 0);
	if (err_mask & AC_ERR_NODEV_HINT) {
		ata_dev_printk(dev, KERN_DEBUG,
			       "NODEV after polling detection\n");

	if ((err_mask == AC_ERR_DEV) && (tf.feature & ATA_ABORTED)) {
		/* Device or controller might have reported
		 * the wrong device class.  Give a shot at the
		 * other IDENTIFY if the current one is
		 * aborted by the device.
		 */
		if (class == ATA_DEV_ATA)
			class = ATA_DEV_ATAPI;
		else
			class = ATA_DEV_ATA;

		/* Control reaches here iff the device aborted
		 * both flavors of IDENTIFYs which happens
		 * sometimes with phantom devices.
		 */
		ata_dev_printk(dev, KERN_DEBUG,
			       "both IDENTIFYs aborted, assuming NODEV\n");

		reason = "I/O error";

	/* Falling back doesn't make sense if ID data was read
	 * successfully at least once.
	 */

	swap_buf_le16(id, ATA_ID_WORDS);

	reason = "device reports invalid type";

	if (class == ATA_DEV_ATA) {
		if (!ata_id_is_ata(id) && !ata_id_is_cfa(id))
		if (ata_id_is_ata(id))

	if (!tried_spinup && (id[2] == 0x37c8 || id[2] == 0x738c)) {
		/*
		 * Drive powered-up in standby mode, and requires a specific
		 * SET_FEATURES spin-up subcommand before it will accept
		 * anything other than the original IDENTIFY command.
		 */
		err_mask = ata_dev_set_feature(dev, SETFEATURES_SPINUP, 0);
		if (err_mask && id[2] != 0x738c) {
			reason = "SPINUP failed";
		/*
		 * If the drive initially returned incomplete IDENTIFY info,
		 * we now must reissue the IDENTIFY command.
		 */
		if (id[2] == 0x37c8)
	if ((flags & ATA_READID_POSTRESET) && class == ATA_DEV_ATA) {
		 * The exact sequence expected by certain pre-ATA4 drives is:
		 * IDENTIFY (optional in early ATA)
		 * INITIALIZE DEVICE PARAMETERS (later IDE and ATA)
		 * Some drives were very specific about that exact sequence.
		 *
		 * Note that ATA4 says lba is mandatory so the second check
		 * should never trigger.
		if (ata_id_major_version(id) < 4 || !ata_id_has_lba(id)) {
			err_mask = ata_dev_init_params(dev, id[3], id[6]);
			if (err_mask) {
				reason = "INIT_DEV_PARAMS failed";

			/* current CHS translation info (id[53-58]) might be
			 * changed.  reread the identify device info.
			 */
			flags &= ~ATA_READID_POSTRESET;

	if (ata_msg_warn(ap))
		ata_dev_printk(dev, KERN_WARNING, "failed to IDENTIFY "
			       "(%s, err_mask=0x%x)\n", reason, err_mask);
static inline u8 ata_dev_knobble(struct ata_device *dev)
	struct ata_port *ap = dev->link->ap;
	return ((ap->cbl == ATA_CBL_SATA) && (!ata_id_is_sata(dev->id)));

static void ata_dev_config_ncq(struct ata_device *dev,
			       char *desc, size_t desc_sz)
	struct ata_port *ap = dev->link->ap;
	int hdepth = 0, ddepth = ata_id_queue_depth(dev->id);

	if (!ata_id_has_ncq(dev->id)) {
	if (dev->horkage & ATA_HORKAGE_NONCQ) {
		snprintf(desc, desc_sz, "NCQ (not used)");
	if (ap->flags & ATA_FLAG_NCQ) {
		hdepth = min(ap->scsi_host->can_queue, ATA_MAX_QUEUE - 1);
		dev->flags |= ATA_DFLAG_NCQ;

	if (hdepth >= ddepth)
		snprintf(desc, desc_sz, "NCQ (depth %d)", ddepth);
	else
		snprintf(desc, desc_sz, "NCQ (depth %d/%d)", hdepth, ddepth);
 * ata_dev_configure - Configure the specified ATA/ATAPI device
 * @dev: Target device to configure
 *
 * Configure @dev according to @dev->id.  Generic and low-level
 * driver specific fixups are also applied.
 *
 * Kernel thread context (may sleep)
 *
 * 0 on success, -errno otherwise
int ata_dev_configure(struct ata_device *dev)
	struct ata_port *ap = dev->link->ap;
	struct ata_eh_context *ehc = &dev->link->eh_context;
	int print_info = ehc->i.flags & ATA_EHI_PRINTINFO;
	const u16 *id = dev->id;
	unsigned long xfer_mask;
	char revbuf[7];		/* XYZ-99\0 */
	char fwrevbuf[ATA_ID_FW_REV_LEN+1];
	char modelbuf[ATA_ID_PROD_LEN+1];

	if (!ata_dev_enabled(dev) && ata_msg_info(ap)) {
		ata_dev_printk(dev, KERN_INFO, "%s: ENTER/EXIT -- nodev\n",

	if (ata_msg_probe(ap))
		ata_dev_printk(dev, KERN_DEBUG, "%s: ENTER\n", __func__);

	dev->horkage |= ata_dev_blacklisted(dev);
	ata_force_horkage(dev);

	/* let ACPI work its magic */
	rc = ata_acpi_on_devcfg(dev);

	/* massage HPA, do it early as it might change IDENTIFY data */
	rc = ata_hpa_resize(dev);

	/* print device capabilities */
	if (ata_msg_probe(ap))
		ata_dev_printk(dev, KERN_DEBUG,
			       "%s: cfg 49:%04x 82:%04x 83:%04x 84:%04x "
			       "85:%04x 86:%04x 87:%04x 88:%04x\n",
			       id[49], id[82], id[83], id[84],
			       id[85], id[86], id[87], id[88]);

	/* initialize to-be-configured parameters */
	dev->flags &= ~ATA_DFLAG_CFG_MASK;
	dev->max_sectors = 0;

	 * common ATA, ATAPI feature tests

	/* find max transfer mode; for printk only */
	xfer_mask = ata_id_xfermask(id);

	if (ata_msg_probe(ap))

	/* SCSI only uses 4-char revisions, dump full 8 chars from ATA */
	ata_id_c_string(dev->id, fwrevbuf, ATA_ID_FW_REV,

	ata_id_c_string(dev->id, modelbuf, ATA_ID_PROD,
	/* ATA-specific feature tests */
	if (dev->class == ATA_DEV_ATA) {
		if (ata_id_is_cfa(id)) {
			if (id[162] & 1) /* CPRM may make this media unusable */
				ata_dev_printk(dev, KERN_WARNING,
					       "supports DRM functions and may "
					       "not be fully accessible.\n");
			snprintf(revbuf, 7, "CFA");
		} else {
			snprintf(revbuf, 7, "ATA-%d", ata_id_major_version(id));
			/* Warn the user if the device has TPM extensions */
			if (ata_id_has_tpm(id))
				ata_dev_printk(dev, KERN_WARNING,
					       "supports DRM functions and may "
					       "not be fully accessible.\n");
		dev->n_sectors = ata_id_n_sectors(id);

		if (dev->id[59] & 0x100)
			dev->multi_count = dev->id[59] & 0xff;

		if (ata_id_has_lba(id)) {
			const char *lba_desc;

			dev->flags |= ATA_DFLAG_LBA;
			if (ata_id_has_lba48(id)) {
				dev->flags |= ATA_DFLAG_LBA48;

				if (dev->n_sectors >= (1UL << 28) &&
				    ata_id_has_flush_ext(id))
					dev->flags |= ATA_DFLAG_FLUSH_EXT;

			ata_dev_config_ncq(dev, ncq_desc, sizeof(ncq_desc));

			/* print device info to dmesg */
			if (ata_msg_drv(ap) && print_info) {
				ata_dev_printk(dev, KERN_INFO,
					"%s: %s, %s, max %s\n",
					revbuf, modelbuf, fwrevbuf,
					ata_mode_string(xfer_mask));
				ata_dev_printk(dev, KERN_INFO,
					"%Lu sectors, multi %u: %s %s\n",
					(unsigned long long)dev->n_sectors,
					dev->multi_count, lba_desc, ncq_desc);

			/* Default translation */
			dev->cylinders = id[1];
			dev->heads     = id[3];
			dev->sectors   = id[6];

			if (ata_id_current_chs_valid(id)) {
				/* Current CHS translation is valid. */
				dev->cylinders = id[54];
				dev->heads     = id[55];
				dev->sectors   = id[56];

			/* print device info to dmesg */
			if (ata_msg_drv(ap) && print_info) {
				ata_dev_printk(dev, KERN_INFO,
					"%s: %s, %s, max %s\n",
					revbuf, modelbuf, fwrevbuf,
					ata_mode_string(xfer_mask));
				ata_dev_printk(dev, KERN_INFO,
					"%Lu sectors, multi %u, CHS %u/%u/%u\n",
					(unsigned long long)dev->n_sectors,
					dev->multi_count, dev->cylinders,
					dev->heads, dev->sectors);
	/* ATAPI-specific feature tests */
	else if (dev->class == ATA_DEV_ATAPI) {
		const char *cdb_intr_string = "";
		const char *atapi_an_string = "";
		const char *dma_dir_string = "";

		rc = atapi_cdb_len(id);
		if ((rc < 12) || (rc > ATAPI_CDB_LEN)) {
			if (ata_msg_warn(ap))
				ata_dev_printk(dev, KERN_WARNING,
					       "unsupported CDB len\n");
		dev->cdb_len = (unsigned int) rc;

		/* Enable ATAPI AN if both the host and device have
		 * the support.  If PMP is attached, SNTF is required
		 * to enable ATAPI AN to discern between PHY status
		 * changed notifications and ATAPI ANs.
		 */
		if ((ap->flags & ATA_FLAG_AN) && ata_id_has_atapi_AN(id) &&
		    (!ap->nr_pmp_links ||
		     sata_scr_read(&ap->link, SCR_NOTIFICATION, &sntf) == 0)) {
			unsigned int err_mask;

			/* issue SET feature command to turn this on */
			err_mask = ata_dev_set_feature(dev,
					SETFEATURES_SATA_ENABLE, SATA_AN);
			if (err_mask)
				ata_dev_printk(dev, KERN_ERR,
					"failed to enable ATAPI AN "
					"(err_mask=0x%x)\n", err_mask);
			else {
				dev->flags |= ATA_DFLAG_AN;
				atapi_an_string = ", ATAPI AN";
			}

		if (ata_id_cdb_intr(dev->id)) {
			dev->flags |= ATA_DFLAG_CDB_INTR;
			cdb_intr_string = ", CDB intr";

		if (atapi_dmadir || atapi_id_dmadir(dev->id)) {
			dev->flags |= ATA_DFLAG_DMADIR;
			dma_dir_string = ", DMADIR";

		/* print device info to dmesg */
		if (ata_msg_drv(ap) && print_info)
			ata_dev_printk(dev, KERN_INFO,
				       "ATAPI: %s, %s, max %s%s%s%s\n",
				       ata_mode_string(xfer_mask),
				       cdb_intr_string, atapi_an_string,
2504 /* determine max_sectors */
2505 dev->max_sectors = ATA_MAX_SECTORS;
2506 if (dev->flags & ATA_DFLAG_LBA48)
2507 dev->max_sectors = ATA_MAX_SECTORS_LBA48;
2509 if (!(dev->horkage & ATA_HORKAGE_IPM)) {
2510 if (ata_id_has_hipm(dev->id))
2511 dev->flags |= ATA_DFLAG_HIPM;
2512 if (ata_id_has_dipm(dev->id))
2513 dev->flags |= ATA_DFLAG_DIPM;
2516 /* Limit PATA drive on SATA cable bridge transfers to udma5, 200 sectors */
2518 if (ata_dev_knobble(dev)) {
2519 if (ata_msg_drv(ap) && print_info)
2520 ata_dev_printk(dev, KERN_INFO,
2521 "applying bridge limits\n");
2522 dev->udma_mask &= ATA_UDMA5;
2523 dev->max_sectors = ATA_MAX_SECTORS;
2526 if ((dev->class == ATA_DEV_ATAPI) &&
2527 (atapi_command_packet_set(id) == TYPE_TAPE)) {
2528 dev->max_sectors = ATA_MAX_SECTORS_TAPE;
2529 dev->horkage |= ATA_HORKAGE_STUCK_ERR;
2532 if (dev->horkage & ATA_HORKAGE_MAX_SEC_128)
2533 dev->max_sectors = min_t(unsigned int, ATA_MAX_SECTORS_128,
2536 if (ata_dev_blacklisted(dev) & ATA_HORKAGE_IPM) {
2537 dev->horkage |= ATA_HORKAGE_IPM;
2539 /* reset link pm_policy for this port to no pm */
2540 ap->pm_policy = MAX_PERFORMANCE;
2543 if (ap->ops->dev_config)
2544 ap->ops->dev_config(dev);
2546 if (dev->horkage & ATA_HORKAGE_DIAGNOSTIC) {
2547 /* Let the user know. We don't want to disallow opens for
2548 rescue purposes, or in case the vendor is just a blithering
2549 idiot. Do this after the dev_config call as some controllers
2550 with buggy firmware may want to avoid reporting false device bugs */
2554 ata_dev_printk(dev, KERN_WARNING,
2555 "Drive reports diagnostics failure. This may indicate a drive\n");
2556 ata_dev_printk(dev, KERN_WARNING,
2557 "fault or invalid emulation. Contact drive vendor for information.\n");
2561 if (ata_msg_probe(ap))
2562 ata_dev_printk(dev, KERN_DEBUG, "%s: EXIT, drv_stat = 0x%x\n",
2563 __func__, ata_chk_status(ap));
2567 if (ata_msg_probe(ap))
2568 ata_dev_printk(dev, KERN_DEBUG,
2569 "%s: EXIT, err\n", __func__);
2574 * ata_cable_40wire - return 40 wire cable type
2577 * Helper method for drivers which want to hardwire 40 wire cable detection.
2581 int ata_cable_40wire(struct ata_port *ap)
2583 return ATA_CBL_PATA40;
2587 * ata_cable_80wire - return 80 wire cable type
2590 * Helper method for drivers which want to hardwire 80 wire cable detection.
2594 int ata_cable_80wire(struct ata_port *ap)
2596 return ATA_CBL_PATA80;
2600 * ata_cable_unknown - return unknown PATA cable.
2603 * Helper method for drivers which have no PATA cable detection.
2606 int ata_cable_unknown(struct ata_port *ap)
2608 return ATA_CBL_PATA_UNK;
2612 * ata_cable_ignore - return ignored PATA cable.
2615 * Helper method for drivers which don't use cable type to limit transfer mode.
2618 int ata_cable_ignore(struct ata_port *ap)
2620 return ATA_CBL_PATA_IGN;
2624 * ata_cable_sata - return SATA cable type
2627 * Helper method for drivers which have SATA cables
2630 int ata_cable_sata(struct ata_port *ap)
2632 return ATA_CBL_SATA;
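/*
 * Illustrative sketch (not from libata itself): a driver with no cable
 * sensing logic of its own would typically wire one of the helpers above
 * into its ->cable_detect hook; "example_port_ops" is a hypothetical name.
 *
 *	static const struct ata_port_operations example_port_ops = {
 *		.cable_detect	= ata_cable_unknown,
 *	};
 *
 * ata_bus_probe() below then invokes ap->ops->cable_detect(ap) once
 * PDIAG- has been released and caches the result in ap->cbl.
 */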
2636 * ata_bus_probe - Reset and probe ATA bus
2639 * Master ATA bus probing function. Initiates a hardware-dependent
2640 * bus reset, then attempts to identify any devices found on
2644 * PCI/etc. bus probe sem.
2647 * Zero on success, negative errno otherwise.
2650 int ata_bus_probe(struct ata_port *ap)
2652 unsigned int classes[ATA_MAX_DEVICES];
2653 int tries[ATA_MAX_DEVICES];
2655 struct ata_device *dev;
2659 ata_link_for_each_dev(dev, &ap->link)
2660 tries[dev->devno] = ATA_PROBE_MAX_TRIES;
2663 ata_link_for_each_dev(dev, &ap->link) {
2664 /* If we issue an SRST then an ATA drive (not ATAPI)
2665 * may change configuration and be in PIO0 timing. If
2666 * we do a hard reset (or are coming from power on)
2667 * this is true for ATA or ATAPI. Until we've set a
2668 * suitable controller mode we should not touch the
2669 * bus as we may be talking too fast.
2671 dev->pio_mode = XFER_PIO_0;
2673 /* If the controller has a pio mode setup function
2674 * then use it to set the chipset to rights. Don't
2675 * touch the DMA setup as that will be dealt with when
2676 * configuring devices.
2678 if (ap->ops->set_piomode)
2679 ap->ops->set_piomode(ap, dev);
2682 /* reset and determine device classes */
2683 ap->ops->phy_reset(ap);
2685 ata_link_for_each_dev(dev, &ap->link) {
2686 if (!(ap->flags & ATA_FLAG_DISABLED) &&
2687 dev->class != ATA_DEV_UNKNOWN)
2688 classes[dev->devno] = dev->class;
2690 classes[dev->devno] = ATA_DEV_NONE;
2692 dev->class = ATA_DEV_UNKNOWN;
2697 /* read IDENTIFY page and configure devices. We have to do the identify
2698 specific sequence bass-ackwards so that PDIAG- is released by the slave device */
2701 ata_link_for_each_dev_reverse(dev, &ap->link) {
2702 if (tries[dev->devno])
2703 dev->class = classes[dev->devno];
2705 if (!ata_dev_enabled(dev))
2708 rc = ata_dev_read_id(dev, &dev->class, ATA_READID_POSTRESET,
2714 /* Now ask for the cable type as PDIAG- should have been released */
2715 if (ap->ops->cable_detect)
2716 ap->cbl = ap->ops->cable_detect(ap);
2718 /* We may have SATA bridge glue hiding here irrespective of the
2719 reported cable types and sensed types */
2720 ata_link_for_each_dev(dev, &ap->link) {
2721 if (!ata_dev_enabled(dev))
2723 /* SATA drives indicate we have a bridge. We don't know which
2724 end of the link the bridge is on, which is a problem */
2725 if (ata_id_is_sata(dev->id))
2726 ap->cbl = ATA_CBL_SATA;
2729 /* After the identify sequence we can now set up the devices. We do
2730 this in the normal order so that the user doesn't get confused */
2732 ata_link_for_each_dev(dev, &ap->link) {
2733 if (!ata_dev_enabled(dev))
2736 ap->link.eh_context.i.flags |= ATA_EHI_PRINTINFO;
2737 rc = ata_dev_configure(dev);
2738 ap->link.eh_context.i.flags &= ~ATA_EHI_PRINTINFO;
2743 /* configure transfer mode */
2744 rc = ata_set_mode(&ap->link, &dev);
2748 ata_link_for_each_dev(dev, &ap->link)
2749 if (ata_dev_enabled(dev))
2752 /* no device present, disable port */
2753 ata_port_disable(ap);
2757 tries[dev->devno]--;
2761 /* eeek, something went very wrong, give up */
2762 tries[dev->devno] = 0;
2766 /* give it just one more chance */
2767 tries[dev->devno] = min(tries[dev->devno], 1);
2769 if (tries[dev->devno] == 1) {
2770 /* This is the last chance, better to slow
2771 * down than lose it.
2773 sata_down_spd_limit(&ap->link);
2774 ata_down_xfermask_limit(dev, ATA_DNXFER_PIO);
2778 if (!tries[dev->devno])
2779 ata_dev_disable(dev);
2785 * ata_port_probe - Mark port as enabled
2786 * @ap: Port for which we indicate enablement
2788 * Modify @ap data structure such that the system
2789 * thinks that the entire port is enabled.
2791 * LOCKING: host lock, or some other form of serialization.
2795 void ata_port_probe(struct ata_port *ap)
2797 ap->flags &= ~ATA_FLAG_DISABLED;
2801 * sata_print_link_status - Print SATA link status
2802 * @link: SATA link to printk link status about
2804 * This function prints link speed and status of a SATA link.
2809 void sata_print_link_status(struct ata_link *link)
2811 u32 sstatus, scontrol, tmp;
2813 if (sata_scr_read(link, SCR_STATUS, &sstatus))
2815 sata_scr_read(link, SCR_CONTROL, &scontrol);
2817 if (ata_link_online(link)) {
2818 tmp = (sstatus >> 4) & 0xf;
2819 ata_link_printk(link, KERN_INFO,
2820 "SATA link up %s (SStatus %X SControl %X)\n",
2821 sata_spd_string(tmp), sstatus, scontrol);
2823 ata_link_printk(link, KERN_INFO,
2824 "SATA link down (SStatus %X SControl %X)\n",
2830 * ata_dev_pair - return other device on cable
2833 * Obtain the other device on the same cable, or NULL if no
2834 * other device is present.
2837 struct ata_device *ata_dev_pair(struct ata_device *adev)
2839 struct ata_link *link = adev->link;
2840 struct ata_device *pair = &link->device[1 - adev->devno];
2841 if (!ata_dev_enabled(pair))
2842 return NULL;
2843 return pair;
2847 * ata_port_disable - Disable port.
2848 * @ap: Port to be disabled.
2850 * Modify @ap data structure such that the system
2851 * thinks that the entire port is disabled, and should
2852 * never attempt to probe or communicate with devices
2855 * LOCKING: host lock, or some other form of serialization.
2859 void ata_port_disable(struct ata_port *ap)
2861 ap->link.device[0].class = ATA_DEV_NONE;
2862 ap->link.device[1].class = ATA_DEV_NONE;
2863 ap->flags |= ATA_FLAG_DISABLED;
2867 * sata_down_spd_limit - adjust SATA spd limit downward
2868 * @link: Link to adjust SATA spd limit for
2870 * Adjust SATA spd limit of @link downward. Note that this
2871 * function only adjusts the limit. The change must be applied
2872 * using sata_set_spd().
2875 * Inherited from caller.
2878 * 0 on success, negative errno on failure
2880 int sata_down_spd_limit(struct ata_link *link)
2882 u32 sstatus, spd, mask;
2885 if (!sata_scr_valid(link))
2886 return -EOPNOTSUPP;
2888 /* If SCR can be read, use it to determine the current SPD.
2889 * If not, use cached value in link->sata_spd.
2891 rc = sata_scr_read(link, SCR_STATUS, &sstatus);
2893 spd = (sstatus >> 4) & 0xf;
2895 spd = link->sata_spd;
2897 mask = link->sata_spd_limit;
2901 /* unconditionally mask off the highest bit */
2902 highbit = fls(mask) - 1;
2903 mask &= ~(1 << highbit);
2905 /* Mask off all speeds higher than or equal to the current
2906 * one. Force 1.5Gbps if current SPD is not available.
2909 mask &= (1 << (spd - 1)) - 1;
2913 /* were we already at the bottom? */
2917 link->sata_spd_limit = mask;
2919 ata_link_printk(link, KERN_WARNING, "limiting SATA link speed to %s\n",
2920 sata_spd_string(fls(mask)));
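/*
 * Worked example (assumed starting values): with sata_spd_limit == 0x3
 * (1.5 and 3.0 Gbps allowed) and the link currently at spd == 2
 * (3.0 Gbps), the highest bit is masked off first (mask becomes 0x1) and
 * the "below current spd" clamp then leaves mask &= (1 << (2 - 1)) - 1
 * == 0x1, so the new limit is 1.5 Gbps only.
 */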
2925 static int __sata_set_spd_needed(struct ata_link *link, u32 *scontrol)
2927 struct ata_link *host_link = &link->ap->link;
2928 u32 limit, target, spd;
2930 limit = link->sata_spd_limit;
2932 /* Don't configure downstream link faster than upstream link.
2933 * It doesn't speed up anything and some PMPs choke on such configuration.
2936 if (!ata_is_host_link(link) && host_link->sata_spd)
2937 limit &= (1 << host_link->sata_spd) - 1;
2939 if (limit == UINT_MAX)
2940 return 0;
2942 target = fls(limit);
2944 spd = (*scontrol >> 4) & 0xf;
2945 *scontrol = (*scontrol & ~0xf0) | ((target & 0xf) << 4);
2947 return spd != target;
2951 * sata_set_spd_needed - is SATA spd configuration needed
2952 * @link: Link in question
2954 * Test whether the spd limit in SControl matches
2955 * @link->sata_spd_limit. This function is used to determine
2956 * whether hardreset is necessary to apply SATA spd configuration.
2960 * Inherited from caller.
2963 * 1 if SATA spd configuration is needed, 0 otherwise.
2965 int sata_set_spd_needed(struct ata_link *link)
2969 if (sata_scr_read(link, SCR_CONTROL, &scontrol))
2970 return 1;
2972 return __sata_set_spd_needed(link, &scontrol);
2976 * sata_set_spd - set SATA spd according to spd limit
2977 * @link: Link to set SATA spd for
2979 * Set SATA spd of @link according to sata_spd_limit.
2982 * Inherited from caller.
2985 * 0 if spd doesn't need to be changed, 1 if spd has been
2986 * changed. Negative errno if SCR registers are inaccessible.
2988 int sata_set_spd(struct ata_link *link)
2993 if ((rc = sata_scr_read(link, SCR_CONTROL, &scontrol)))
2994 return rc;
2996 if (!__sata_set_spd_needed(link, &scontrol))
2997 return 0;
2999 if ((rc = sata_scr_write(link, SCR_CONTROL, scontrol)))
3000 return rc;
3002 return 1;
3006 * This mode timing computation functionality is ported over from
3007 * drivers/ide/ide-timing.h and was originally written by Vojtech Pavlik
3010 * PIO 0-4, MWDMA 0-2 and UDMA 0-6 timings (in nanoseconds).
3011 * These were taken from ATA/ATAPI-6 standard, rev 0a, except
3012 * for UDMA6, which is currently supported only by Maxtor drives.
3014 * For PIO 5/6 MWDMA 3/4 see the CFA specification 3.0.
3017 static const struct ata_timing ata_timing[] = {
3018 /* { XFER_PIO_SLOW, 120, 290, 240, 960, 290, 240, 960, 0 }, */
3019 { XFER_PIO_0, 70, 290, 240, 600, 165, 150, 600, 0 },
3020 { XFER_PIO_1, 50, 290, 93, 383, 125, 100, 383, 0 },
3021 { XFER_PIO_2, 30, 290, 40, 330, 100, 90, 240, 0 },
3022 { XFER_PIO_3, 30, 80, 70, 180, 80, 70, 180, 0 },
3023 { XFER_PIO_4, 25, 70, 25, 120, 70, 25, 120, 0 },
3024 { XFER_PIO_5, 15, 65, 25, 100, 65, 25, 100, 0 },
3025 { XFER_PIO_6, 10, 55, 20, 80, 55, 20, 80, 0 },
3027 { XFER_SW_DMA_0, 120, 0, 0, 0, 480, 480, 960, 0 },
3028 { XFER_SW_DMA_1, 90, 0, 0, 0, 240, 240, 480, 0 },
3029 { XFER_SW_DMA_2, 60, 0, 0, 0, 120, 120, 240, 0 },
3031 { XFER_MW_DMA_0, 60, 0, 0, 0, 215, 215, 480, 0 },
3032 { XFER_MW_DMA_1, 45, 0, 0, 0, 80, 50, 150, 0 },
3033 { XFER_MW_DMA_2, 25, 0, 0, 0, 70, 25, 120, 0 },
3034 { XFER_MW_DMA_3, 25, 0, 0, 0, 65, 25, 100, 0 },
3035 { XFER_MW_DMA_4, 25, 0, 0, 0, 55, 20, 80, 0 },
3037 /* { XFER_UDMA_SLOW, 0, 0, 0, 0, 0, 0, 0, 150 }, */
3038 { XFER_UDMA_0, 0, 0, 0, 0, 0, 0, 0, 120 },
3039 { XFER_UDMA_1, 0, 0, 0, 0, 0, 0, 0, 80 },
3040 { XFER_UDMA_2, 0, 0, 0, 0, 0, 0, 0, 60 },
3041 { XFER_UDMA_3, 0, 0, 0, 0, 0, 0, 0, 45 },
3042 { XFER_UDMA_4, 0, 0, 0, 0, 0, 0, 0, 30 },
3043 { XFER_UDMA_5, 0, 0, 0, 0, 0, 0, 0, 20 },
3044 { XFER_UDMA_6, 0, 0, 0, 0, 0, 0, 0, 15 },
3049 #define ENOUGH(v, unit) (((v)-1)/(unit)+1)
3050 #define EZ(v, unit) ((v)?ENOUGH(v, unit):0)
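/*
 * Worked example (illustrative values): ENOUGH() is a round-up division,
 * so quantizing a 70 ns setup time against a 33 MHz clock, with T passed
 * as the clock period in picoseconds (1000000 / 33 == 30303) the way a
 * PCI-clock-based driver might do it, gives
 * ENOUGH(70 * 1000, 30303) == 3 clock cycles.  EZ() additionally maps a
 * zero input to zero, so absent timing fields stay absent.
 */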
3052 static void ata_timing_quantize(const struct ata_timing *t, struct ata_timing *q, int T, int UT)
3054 q->setup = EZ(t->setup * 1000, T);
3055 q->act8b = EZ(t->act8b * 1000, T);
3056 q->rec8b = EZ(t->rec8b * 1000, T);
3057 q->cyc8b = EZ(t->cyc8b * 1000, T);
3058 q->active = EZ(t->active * 1000, T);
3059 q->recover = EZ(t->recover * 1000, T);
3060 q->cycle = EZ(t->cycle * 1000, T);
3061 q->udma = EZ(t->udma * 1000, UT);
3064 void ata_timing_merge(const struct ata_timing *a, const struct ata_timing *b,
3065 struct ata_timing *m, unsigned int what)
3067 if (what & ATA_TIMING_SETUP ) m->setup = max(a->setup, b->setup);
3068 if (what & ATA_TIMING_ACT8B ) m->act8b = max(a->act8b, b->act8b);
3069 if (what & ATA_TIMING_REC8B ) m->rec8b = max(a->rec8b, b->rec8b);
3070 if (what & ATA_TIMING_CYC8B ) m->cyc8b = max(a->cyc8b, b->cyc8b);
3071 if (what & ATA_TIMING_ACTIVE ) m->active = max(a->active, b->active);
3072 if (what & ATA_TIMING_RECOVER) m->recover = max(a->recover, b->recover);
3073 if (what & ATA_TIMING_CYCLE ) m->cycle = max(a->cycle, b->cycle);
3074 if (what & ATA_TIMING_UDMA ) m->udma = max(a->udma, b->udma);
3077 const struct ata_timing *ata_timing_find_mode(u8 xfer_mode)
3079 const struct ata_timing *t = ata_timing;
3081 while (xfer_mode > t->mode)
3084 if (xfer_mode == t->mode)
3089 int ata_timing_compute(struct ata_device *adev, unsigned short speed,
3090 struct ata_timing *t, int T, int UT)
3092 const struct ata_timing *s;
3093 struct ata_timing p;
3099 if (!(s = ata_timing_find_mode(speed)))
3102 memcpy(t, s, sizeof(*s));
3105 * If the drive is an EIDE drive, it can tell us it needs extended
3106 * PIO/MW_DMA cycle timing.
3109 if (adev->id[ATA_ID_FIELD_VALID] & 2) { /* EIDE drive */
3110 memset(&p, 0, sizeof(p));
3111 if (speed >= XFER_PIO_0 && speed <= XFER_SW_DMA_0) {
3112 if (speed <= XFER_PIO_2) p.cycle = p.cyc8b = adev->id[ATA_ID_EIDE_PIO];
3113 else p.cycle = p.cyc8b = adev->id[ATA_ID_EIDE_PIO_IORDY];
3114 } else if (speed >= XFER_MW_DMA_0 && speed <= XFER_MW_DMA_2) {
3115 p.cycle = adev->id[ATA_ID_EIDE_DMA_MIN];
3117 ata_timing_merge(&p, t, t, ATA_TIMING_CYCLE | ATA_TIMING_CYC8B);
3121 * Convert the timing to bus clock counts.
3124 ata_timing_quantize(t, t, T, UT);
3127 * Even in DMA/UDMA modes we still use PIO access for IDENTIFY,
3128 * S.M.A.R.T. and some other commands. We have to ensure that the
3129 * DMA cycle timing is slower than or equal to the fastest PIO timing.
3132 if (speed > XFER_PIO_6) {
3133 ata_timing_compute(adev, adev->pio_mode, &p, T, UT);
3134 ata_timing_merge(&p, t, t, ATA_TIMING_ALL);
3138 * Lengthen active & recovery time so that cycle time is correct.
3141 if (t->act8b + t->rec8b < t->cyc8b) {
3142 t->act8b += (t->cyc8b - (t->act8b + t->rec8b)) / 2;
3143 t->rec8b = t->cyc8b - t->act8b;
3146 if (t->active + t->recover < t->cycle) {
3147 t->active += (t->cycle - (t->active + t->recover)) / 2;
3148 t->recover = t->cycle - t->active;
3151 /* In a few cases quantisation may produce enough errors to
3152 leave t->cycle too low for the sum of active and recovery;
3153 if so, we must correct this */
3154 if (t->active + t->recover > t->cycle)
3155 t->cycle = t->active + t->recover;
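/*
 * Usage sketch (illustrative, with assumed clock values): a PATA chipset
 * driver running from a 33 MHz PCI clock might convert the selected mode
 * into register-ready clock counts roughly like this, where 30303 and
 * 15151 are the assumed PIO and UDMA clock periods in picoseconds:
 *
 *	struct ata_timing t;
 *
 *	if (!ata_timing_compute(adev, adev->pio_mode, &t, 30303, 15151))
 *		... program t.active and t.recover clocks into the chip ...
 */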
3161 * ata_timing_cycle2mode - find xfer mode for the specified cycle duration
3162 * @xfer_shift: ATA_SHIFT_* value for transfer type to examine.
3163 * @cycle: cycle duration in ns
3165 * Return matching xfer mode for @cycle. The returned mode is of
3166 * the transfer type specified by @xfer_shift. If @cycle is too
3167 * slow for @xfer_shift, 0xff is returned. If @cycle is faster
3168 * than the fastest known mode, the fastest mode is returned.
3174 * Matching xfer_mode, 0xff if no match found.
3176 u8 ata_timing_cycle2mode(unsigned int xfer_shift, int cycle)
3178 u8 base_mode = 0xff, last_mode = 0xff;
3179 const struct ata_xfer_ent *ent;
3180 const struct ata_timing *t;
3182 for (ent = ata_xfer_tbl; ent->shift >= 0; ent++)
3183 if (ent->shift == xfer_shift)
3184 base_mode = ent->base;
3186 for (t = ata_timing_find_mode(base_mode);
3187 t && ata_xfer_mode2shift(t->mode) == xfer_shift; t++) {
3188 unsigned short this_cycle;
3190 switch (xfer_shift) {
3192 case ATA_SHIFT_MWDMA:
3193 this_cycle = t->cycle;
3195 case ATA_SHIFT_UDMA:
3196 this_cycle = t->udma;
3202 if (cycle > this_cycle)
3205 last_mode = t->mode;
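/*
 * Worked example (from the ata_timing table above): asking for
 * ata_timing_cycle2mode(ATA_SHIFT_UDMA, 30) accepts XFER_UDMA_4
 * (udma cycle 30 ns) and stops at XFER_UDMA_5 (20 ns, faster than the
 * requested cycle), so XFER_UDMA_4 is returned as last_mode.
 */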
3212 * ata_down_xfermask_limit - adjust dev xfer masks downward
3213 * @dev: Device to adjust xfer masks
3214 * @sel: ATA_DNXFER_* selector
3216 * Adjust xfer masks of @dev downward. Note that this function
3217 * does not apply the change. Invoking ata_set_mode() afterwards
3218 * will apply the limit.
3221 * Inherited from caller.
3224 * 0 on success, negative errno on failure
3226 int ata_down_xfermask_limit(struct ata_device *dev, unsigned int sel)
3229 unsigned long orig_mask, xfer_mask;
3230 unsigned long pio_mask, mwdma_mask, udma_mask;
3233 quiet = !!(sel & ATA_DNXFER_QUIET);
3234 sel &= ~ATA_DNXFER_QUIET;
3236 xfer_mask = orig_mask = ata_pack_xfermask(dev->pio_mask,
3239 ata_unpack_xfermask(xfer_mask, &pio_mask, &mwdma_mask, &udma_mask);
3242 case ATA_DNXFER_PIO:
3243 highbit = fls(pio_mask) - 1;
3244 pio_mask &= ~(1 << highbit);
3247 case ATA_DNXFER_DMA:
3249 highbit = fls(udma_mask) - 1;
3250 udma_mask &= ~(1 << highbit);
3253 } else if (mwdma_mask) {
3254 highbit = fls(mwdma_mask) - 1;
3255 mwdma_mask &= ~(1 << highbit);
3261 case ATA_DNXFER_40C:
3262 udma_mask &= ATA_UDMA_MASK_40C;
3265 case ATA_DNXFER_FORCE_PIO0:
3267 case ATA_DNXFER_FORCE_PIO:
3276 xfer_mask &= ata_pack_xfermask(pio_mask, mwdma_mask, udma_mask);
3278 if (!(xfer_mask & ATA_MASK_PIO) || xfer_mask == orig_mask)
3282 if (xfer_mask & (ATA_MASK_MWDMA | ATA_MASK_UDMA))
3283 snprintf(buf, sizeof(buf), "%s:%s",
3284 ata_mode_string(xfer_mask),
3285 ata_mode_string(xfer_mask & ATA_MASK_PIO));
3287 snprintf(buf, sizeof(buf), "%s",
3288 ata_mode_string(xfer_mask));
3290 ata_dev_printk(dev, KERN_WARNING,
3291 "limiting speed to %s\n", buf);
3294 ata_unpack_xfermask(xfer_mask, &dev->pio_mask, &dev->mwdma_mask, &dev->udma_mask);
3300 static int ata_dev_set_mode(struct ata_device *dev)
3302 struct ata_eh_context *ehc = &dev->link->eh_context;
3303 const char *dev_err_whine = "";
3304 int ign_dev_err = 0;
3305 unsigned int err_mask;
3308 dev->flags &= ~ATA_DFLAG_PIO;
3309 if (dev->xfer_shift == ATA_SHIFT_PIO)
3310 dev->flags |= ATA_DFLAG_PIO;
3312 err_mask = ata_dev_set_xfermode(dev);
3314 if (err_mask & ~AC_ERR_DEV)
3318 ehc->i.flags |= ATA_EHI_POST_SETMODE;
3319 rc = ata_dev_revalidate(dev, ATA_DEV_UNKNOWN, 0);
3320 ehc->i.flags &= ~ATA_EHI_POST_SETMODE;
3324 /* Old CFA may refuse this command, which is just fine */
3325 if (dev->xfer_shift == ATA_SHIFT_PIO && ata_id_is_cfa(dev->id))
3328 /* Some very old devices and some bad newer ones fail any kind of
3329 SET_XFERMODE request but support PIO0-2 timings and no IORDY */
3330 if (dev->xfer_shift == ATA_SHIFT_PIO && !ata_id_has_iordy(dev->id) &&
3331 dev->pio_mode <= XFER_PIO_2)
3334 /* Early MWDMA devices do DMA but don't allow DMA mode setting.
3335 Don't fail an MWDMA0 set IFF the device indicates it is in MWDMA0 */
3336 if (dev->xfer_shift == ATA_SHIFT_MWDMA &&
3337 dev->dma_mode == XFER_MW_DMA_0 &&
3338 (dev->id[63] >> 8) & 1)
3341 /* if the device is actually configured correctly, ignore dev err */
3342 if (dev->xfer_mode == ata_xfer_mask2mode(ata_id_xfermask(dev->id)))
3345 if (err_mask & AC_ERR_DEV) {
3349 dev_err_whine = " (device error ignored)";
3352 DPRINTK("xfer_shift=%u, xfer_mode=0x%x\n",
3353 dev->xfer_shift, (int)dev->xfer_mode);
3355 ata_dev_printk(dev, KERN_INFO, "configured for %s%s\n",
3356 ata_mode_string(ata_xfer_mode2mask(dev->xfer_mode)),
3362 ata_dev_printk(dev, KERN_ERR, "failed to set xfermode "
3363 "(err_mask=0x%x)\n", err_mask);
3368 * ata_do_set_mode - Program timings and issue SET FEATURES - XFER
3369 * @link: link on which timings will be programmed
3370 * @r_failed_dev: out parameter for failed device
3372 * Standard implementation of the function used to tune and set
3373 * ATA device disk transfer mode (PIO3, UDMA6, etc.). If
3374 * ata_dev_set_mode() fails, pointer to the failing device is
3375 * returned in @r_failed_dev.
3378 * PCI/etc. bus probe sem.
3381 * 0 on success, negative errno otherwise
3384 int ata_do_set_mode(struct ata_link *link, struct ata_device **r_failed_dev)
3386 struct ata_port *ap = link->ap;
3387 struct ata_device *dev;
3388 int rc = 0, used_dma = 0, found = 0;
3390 /* step 1: calculate xfer_mask */
3391 ata_link_for_each_dev(dev, link) {
3392 unsigned long pio_mask, dma_mask;
3393 unsigned int mode_mask;
3395 if (!ata_dev_enabled(dev))
3398 mode_mask = ATA_DMA_MASK_ATA;
3399 if (dev->class == ATA_DEV_ATAPI)
3400 mode_mask = ATA_DMA_MASK_ATAPI;
3401 else if (ata_id_is_cfa(dev->id))
3402 mode_mask = ATA_DMA_MASK_CFA;
3404 ata_dev_xfermask(dev);
3405 ata_force_xfermask(dev);
3407 pio_mask = ata_pack_xfermask(dev->pio_mask, 0, 0);
3408 dma_mask = ata_pack_xfermask(0, dev->mwdma_mask, dev->udma_mask);
3410 if (libata_dma_mask & mode_mask)
3411 dma_mask = ata_pack_xfermask(0, dev->mwdma_mask, dev->udma_mask);
3412 else
3413 dma_mask = 0;
3415 dev->pio_mode = ata_xfer_mask2mode(pio_mask);
3416 dev->dma_mode = ata_xfer_mask2mode(dma_mask);
3419 if (dev->dma_mode != 0xff)
3420 used_dma = 1;
3425 /* step 2: always set host PIO timings */
3426 ata_link_for_each_dev(dev, link) {
3427 if (!ata_dev_enabled(dev))
3430 if (dev->pio_mode == 0xff) {
3431 ata_dev_printk(dev, KERN_WARNING, "no PIO support\n");
3436 dev->xfer_mode = dev->pio_mode;
3437 dev->xfer_shift = ATA_SHIFT_PIO;
3438 if (ap->ops->set_piomode)
3439 ap->ops->set_piomode(ap, dev);
3442 /* step 3: set host DMA timings */
3443 ata_link_for_each_dev(dev, link) {
3444 if (!ata_dev_enabled(dev) || dev->dma_mode == 0xff)
3447 dev->xfer_mode = dev->dma_mode;
3448 dev->xfer_shift = ata_xfer_mode2shift(dev->dma_mode);
3449 if (ap->ops->set_dmamode)
3450 ap->ops->set_dmamode(ap, dev);
3453 /* step 4: update devices' xfer mode */
3454 ata_link_for_each_dev(dev, link) {
3455 /* don't update suspended devices' xfer mode */
3456 if (!ata_dev_enabled(dev))
3459 rc = ata_dev_set_mode(dev);
3464 /* Record simplex status. If we selected DMA then the other
3465 * host channels are not permitted to do so.
3467 if (used_dma && (ap->host->flags & ATA_HOST_SIMPLEX))
3468 ap->host->simplex_claimed = ap;
3472 *r_failed_dev = dev;
3477 * ata_tf_to_host - issue ATA taskfile to host controller
3478 * @ap: port to which command is being issued
3479 * @tf: ATA taskfile register set
3481 * Issues ATA taskfile register set to ATA host controller,
3482 * with proper synchronization with interrupt handler and
3486 * spin_lock_irqsave(host lock)
3489 static inline void ata_tf_to_host(struct ata_port *ap,
3490 const struct ata_taskfile *tf)
3492 ap->ops->tf_load(ap, tf);
3493 ap->ops->exec_command(ap, tf);
3497 * ata_busy_sleep - sleep until BSY clears, or timeout
3498 * @ap: port containing status register to be polled
3499 * @tmout_pat: impatience timeout
3500 * @tmout: overall timeout
3502 * Sleep until ATA Status register bit BSY clears,
3503 * or a timeout occurs.
3506 * Kernel thread context (may sleep).
3509 * 0 on success, -errno otherwise.
3511 int ata_busy_sleep(struct ata_port *ap,
3512 unsigned long tmout_pat, unsigned long tmout)
3514 unsigned long timer_start, timeout;
3517 status = ata_busy_wait(ap, ATA_BUSY, 300);
3518 timer_start = jiffies;
3519 timeout = timer_start + tmout_pat;
3520 while (status != 0xff && (status & ATA_BUSY) &&
3521 time_before(jiffies, timeout)) {
3523 status = ata_busy_wait(ap, ATA_BUSY, 3);
3526 if (status != 0xff && (status & ATA_BUSY))
3527 ata_port_printk(ap, KERN_WARNING,
3528 "port is slow to respond, please be patient "
3529 "(Status 0x%x)\n", status);
3531 timeout = timer_start + tmout;
3532 while (status != 0xff && (status & ATA_BUSY) &&
3533 time_before(jiffies, timeout)) {
3535 status = ata_chk_status(ap);
3541 if (status & ATA_BUSY) {
3542 ata_port_printk(ap, KERN_ERR, "port failed to respond "
3543 "(%lu secs, Status 0x%x)\n",
3544 tmout / HZ, status);
3552 * ata_wait_after_reset - wait before checking status after reset
3553 * @ap: port containing status register to be polled
3554 * @deadline: deadline jiffies for the operation
3556 * After reset, we need to pause a while before reading status.
3557 * Also, certain combination of controller and device report 0xff
3558 * for some duration (e.g. until SATA PHY is up and running)
3559 * which is interpreted as empty port in ATA world. This
3560 * function also waits for such devices to get out of 0xff status.
3564 * Kernel thread context (may sleep).
3566 void ata_wait_after_reset(struct ata_port *ap, unsigned long deadline)
3568 unsigned long until = jiffies + ATA_TMOUT_FF_WAIT;
3570 if (time_before(until, deadline))
3573 /* Spec mandates ">= 2ms" before checking status. We wait
3574 * 150ms, because that was the magic delay used for ATAPI
3575 * devices in Hale Landis's ATADRVR, for the period of time
3576 * between when the ATA command register is written, and then
3577 * status is checked. Because waiting for "a while" before
3578 * checking status is fine, post SRST, we perform this magic
3579 * delay here as well.
3581 * Old drivers/ide uses the 2ms rule and then waits for ready.
3585 /* Wait for 0xff to clear. Some SATA devices take a long time
3586 * to clear 0xff after reset. For example, HHD424020F7SV00
3587 * iVDR needs >= 800ms while Quantum GoVault needs even more than that.
3590 * Note that some PATA controllers (pata_ali) explode if
3591 * status register is read more than once when there's no
3594 if (ap->flags & ATA_FLAG_SATA) {
3596 u8 status = ata_chk_status(ap);
3598 if (status != 0xff || time_after(jiffies, deadline))
3607 * ata_wait_ready - sleep until BSY clears, or timeout
3608 * @ap: port containing status register to be polled
3609 * @deadline: deadline jiffies for the operation
3611 * Sleep until ATA Status register bit BSY clears, or timeout
3615 * Kernel thread context (may sleep).
3618 * 0 on success, -errno otherwise.
3620 int ata_wait_ready(struct ata_port *ap, unsigned long deadline)
3622 unsigned long start = jiffies;
3626 u8 status = ata_chk_status(ap);
3627 unsigned long now = jiffies;
3629 if (!(status & ATA_BUSY))
3631 if (!ata_link_online(&ap->link) && status == 0xff)
3633 if (time_after(now, deadline))
3636 if (!warned && time_after(now, start + 5 * HZ) &&
3637 (deadline - now > 3 * HZ)) {
3638 ata_port_printk(ap, KERN_WARNING,
3639 "port is slow to respond, please be patient "
3640 "(Status 0x%x)\n", status);
3648 static int ata_bus_post_reset(struct ata_port *ap, unsigned int devmask,
3649 unsigned long deadline)
3651 struct ata_ioports *ioaddr = &ap->ioaddr;
3652 unsigned int dev0 = devmask & (1 << 0);
3653 unsigned int dev1 = devmask & (1 << 1);
3656 /* if device 0 was found in ata_devchk, wait for its
3660 rc = ata_wait_ready(ap, deadline);
3668 /* if device 1 was found in ata_devchk, wait for register
3669 * access briefly, then wait for BSY to clear.
3674 ap->ops->dev_select(ap, 1);
3676 /* Wait for register access. Some ATAPI devices fail
3677 * to set nsect/lbal after reset, so don't waste too
3678 * much time on it. We're gonna wait for !BSY anyway.
3680 for (i = 0; i < 2; i++) {
3683 nsect = ioread8(ioaddr->nsect_addr);
3684 lbal = ioread8(ioaddr->lbal_addr);
3685 if ((nsect == 1) && (lbal == 1))
3687 msleep(50); /* give drive a breather */
3690 rc = ata_wait_ready(ap, deadline);
3698 /* is all this really necessary? */
3699 ap->ops->dev_select(ap, 0);
3701 ap->ops->dev_select(ap, 1);
3703 ap->ops->dev_select(ap, 0);
3708 static int ata_bus_softreset(struct ata_port *ap, unsigned int devmask,
3709 unsigned long deadline)
3711 struct ata_ioports *ioaddr = &ap->ioaddr;
3713 DPRINTK("ata%u: bus reset via SRST\n", ap->print_id);
3715 /* software reset. causes dev0 to be selected */
3716 iowrite8(ap->ctl, ioaddr->ctl_addr);
3717 udelay(20); /* FIXME: flush */
3718 iowrite8(ap->ctl | ATA_SRST, ioaddr->ctl_addr);
3719 udelay(20); /* FIXME: flush */
3720 iowrite8(ap->ctl, ioaddr->ctl_addr);
3722 /* wait a while before checking status */
3723 ata_wait_after_reset(ap, deadline);
3725 /* Before we perform post reset processing we want to see if
3726 * the bus shows 0xFF because the odd clown forgets the D7
3727 * pulldown resistor.
3729 if (ata_chk_status(ap) == 0xFF)
3732 return ata_bus_post_reset(ap, devmask, deadline);
3736 * ata_bus_reset - reset host port and associated ATA channel
3737 * @ap: port to reset
3739 * This is typically the first time we actually start issuing
3740 * commands to the ATA channel. We wait for BSY to clear, then
3741 * issue EXECUTE DEVICE DIAGNOSTIC command, polling for its
3742 * result. Determine what devices, if any, are on the channel
3743 * by looking at the device 0/1 error register. Look at the signature
3744 * stored in each device's taskfile registers, to determine if
3745 * the device is ATA or ATAPI.
3748 * PCI/etc. bus probe sem.
3749 * Obtains host lock.
3752 * Sets ATA_FLAG_DISABLED if bus reset fails.
3755 void ata_bus_reset(struct ata_port *ap)
3757 struct ata_device *device = ap->link.device;
3758 struct ata_ioports *ioaddr = &ap->ioaddr;
3759 unsigned int slave_possible = ap->flags & ATA_FLAG_SLAVE_POSS;
3761 unsigned int dev0, dev1 = 0, devmask = 0;
3764 DPRINTK("ENTER, host %u, port %u\n", ap->print_id, ap->port_no);
3766 /* determine if device 0/1 are present */
3767 if (ap->flags & ATA_FLAG_SATA_RESET)
3770 dev0 = ata_devchk(ap, 0);
3772 dev1 = ata_devchk(ap, 1);
3776 devmask |= (1 << 0);
3778 devmask |= (1 << 1);
3780 /* select device 0 again */
3781 ap->ops->dev_select(ap, 0);
3783 /* issue bus reset */
3784 if (ap->flags & ATA_FLAG_SRST) {
3785 rc = ata_bus_softreset(ap, devmask, jiffies + 40 * HZ);
3786 if (rc && rc != -ENODEV)
3791 * determine by signature whether we have ATA or ATAPI devices
3793 device[0].class = ata_dev_try_classify(&device[0], dev0, &err);
3794 if ((slave_possible) && (err != 0x81))
3795 device[1].class = ata_dev_try_classify(&device[1], dev1, &err);
3797 /* is double-select really necessary? */
3798 if (device[1].class != ATA_DEV_NONE)
3799 ap->ops->dev_select(ap, 1);
3800 if (device[0].class != ATA_DEV_NONE)
3801 ap->ops->dev_select(ap, 0);
3803 /* if no devices were detected, disable this port */
3804 if ((device[0].class == ATA_DEV_NONE) &&
3805 (device[1].class == ATA_DEV_NONE))
3808 if (ap->flags & (ATA_FLAG_SATA_RESET | ATA_FLAG_SRST)) {
3809 /* set up device control for ATA_FLAG_SATA_RESET */
3810 iowrite8(ap->ctl, ioaddr->ctl_addr);
3817 ata_port_printk(ap, KERN_ERR, "disabling port\n");
3818 ata_port_disable(ap);
3824 * sata_link_debounce - debounce SATA phy status
3825 * @link: ATA link to debounce SATA phy status for
3826 * @params: timing parameters { interval, duration, timeout } in msec
3827 * @deadline: deadline jiffies for the operation
3829 * Make sure SStatus of @link reaches stable state, determined by
3830 * holding the same value where DET is not 1 for @duration polled
3831 * every @interval, before @timeout. Timeout constrains the
3832 * beginning of the stable state. Because DET gets stuck at 1 on
3833 * some controllers after hot unplugging, this function waits
3834 * until timeout then returns 0 if DET is stable at 1.
3836 * @timeout is further limited by @deadline. The sooner of the two is used.
3840 * Kernel thread context (may sleep)
3843 * 0 on success, -errno on failure.
3845 int sata_link_debounce(struct ata_link *link, const unsigned long *params,
3846 unsigned long deadline)
3848 unsigned long interval_msec = params[0];
3849 unsigned long duration = msecs_to_jiffies(params[1]);
3850 unsigned long last_jiffies, t;
3854 t = jiffies + msecs_to_jiffies(params[2]);
3855 if (time_before(t, deadline))
3858 if ((rc = sata_scr_read(link, SCR_STATUS, &cur)))
3863 last_jiffies = jiffies;
3866 msleep(interval_msec);
3867 if ((rc = sata_scr_read(link, SCR_STATUS, &cur)))
3873 if (cur == 1 && time_before(jiffies, deadline))
3875 if (time_after(jiffies, last_jiffies + duration))
3880 /* unstable, start over */
3882 last_jiffies = jiffies;
3884 /* Check deadline. If debouncing failed, return
3885 * -EPIPE to tell upper layer to lower link speed.
3887 if (time_after(jiffies, deadline))
3893 * sata_link_resume - resume SATA link
3894 * @link: ATA link to resume SATA
3895 * @params: timing parameters { interval, duration, timeout } in msec
3896 * @deadline: deadline jiffies for the operation
3898 * Resume SATA phy @link and debounce it.
3901 * Kernel thread context (may sleep)
3904 * 0 on success, -errno on failure.
3906 int sata_link_resume(struct ata_link *link, const unsigned long *params,
3907 unsigned long deadline)
3912 if ((rc = sata_scr_read(link, SCR_CONTROL, &scontrol)))
3915 scontrol = (scontrol & 0x0f0) | 0x300;
3917 if ((rc = sata_scr_write(link, SCR_CONTROL, scontrol)))
3920 /* Some PHYs react badly if SStatus is pounded immediately
3921 * after resuming. Delay 200ms before debouncing.
3925 return sata_link_debounce(link, params, deadline);
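/*
 * Usage sketch: reset paths call this before touching the link, usually
 * with the debounce parameters chosen for the current EH context, as
 * ata_std_prereset() below does:
 *
 *	const unsigned long *timing = sata_ehc_deb_timing(&link->eh_context);
 *
 *	rc = sata_link_resume(link, timing, deadline);
 */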
3929 * ata_std_prereset - prepare for reset
3930 * @link: ATA link to be reset
3931 * @deadline: deadline jiffies for the operation
3933 * @link is about to be reset. Initialize it. Failure from
3934 * prereset makes libata abort whole reset sequence and give up
3935 * that port, so prereset should be best-effort. It does its
3936 * best to prepare for reset sequence but if things go wrong, it
3937 * should just whine, not fail.
3940 * Kernel thread context (may sleep)
3943 * 0 on success, -errno otherwise.
3945 int ata_std_prereset(struct ata_link *link, unsigned long deadline)
3947 struct ata_port *ap = link->ap;
3948 struct ata_eh_context *ehc = &link->eh_context;
3949 const unsigned long *timing = sata_ehc_deb_timing(ehc);
3952 /* handle link resume */
3953 if ((ehc->i.flags & ATA_EHI_RESUME_LINK) &&
3954 (link->flags & ATA_LFLAG_HRST_TO_RESUME))
3955 ehc->i.action |= ATA_EH_HARDRESET;
3957 /* Some PMPs don't work with only SRST, force hardreset if PMP
3960 if (ap->flags & ATA_FLAG_PMP)
3961 ehc->i.action |= ATA_EH_HARDRESET;
3963 /* if we're about to do hardreset, nothing more to do */
3964 if (ehc->i.action & ATA_EH_HARDRESET)
3967 /* if SATA, resume link */
3968 if (ap->flags & ATA_FLAG_SATA) {
3969 rc = sata_link_resume(link, timing, deadline);
3970 /* whine about phy resume failure but proceed */
3971 if (rc && rc != -EOPNOTSUPP)
3972 ata_link_printk(link, KERN_WARNING, "failed to resume "
3973 "link for reset (errno=%d)\n", rc);
3976 /* Wait for !BSY if the controller can wait for the first D2H
3977 * Reg FIS and we don't know that no device is attached.
3979 if (!(link->flags & ATA_LFLAG_SKIP_D2H_BSY) && !ata_link_offline(link)) {
3980 rc = ata_wait_ready(ap, deadline);
3981 if (rc && rc != -ENODEV) {
3982 ata_link_printk(link, KERN_WARNING, "device not ready "
3983 "(errno=%d), forcing hardreset\n", rc);
3984 ehc->i.action |= ATA_EH_HARDRESET;
3992 * ata_std_softreset - reset host port via ATA SRST
3993 * @link: ATA link to reset
3994 * @classes: resulting classes of attached devices
3995 * @deadline: deadline jiffies for the operation
3997 * Reset host port using ATA SRST.
4000 * Kernel thread context (may sleep)
4003 * 0 on success, -errno otherwise.
4005 int ata_std_softreset(struct ata_link *link, unsigned int *classes,
4006 unsigned long deadline)
4008 struct ata_port *ap = link->ap;
4009 unsigned int slave_possible = ap->flags & ATA_FLAG_SLAVE_POSS;
4010 unsigned int devmask = 0;
4016 if (ata_link_offline(link)) {
4017 classes[0] = ATA_DEV_NONE;
4021 /* determine if device 0/1 are present */
4022 if (ata_devchk(ap, 0))
4023 devmask |= (1 << 0);
4024 if (slave_possible && ata_devchk(ap, 1))
4025 devmask |= (1 << 1);
4027 /* select device 0 again */
4028 ap->ops->dev_select(ap, 0);
4030 /* issue bus reset */
4031 DPRINTK("about to softreset, devmask=%x\n", devmask);
4032 rc = ata_bus_softreset(ap, devmask, deadline);
4033 /* if link is occupied, -ENODEV too is an error */
4034 if (rc && (rc != -ENODEV || sata_scr_valid(link))) {
4035 ata_link_printk(link, KERN_ERR, "SRST failed (errno=%d)\n", rc);
4039 /* determine by signature whether we have ATA or ATAPI devices */
4040 classes[0] = ata_dev_try_classify(&link->device[0],
4041 devmask & (1 << 0), &err);
4042 if (slave_possible && err != 0x81)
4043 classes[1] = ata_dev_try_classify(&link->device[1],
4044 devmask & (1 << 1), &err);
4047 DPRINTK("EXIT, classes[0]=%u [1]=%u\n", classes[0], classes[1]);
4052 * sata_link_hardreset - reset link via SATA phy reset
4053 * @link: link to reset
4054 * @timing: timing parameters { interval, duration, timeout } in msec
4055 * @deadline: deadline jiffies for the operation
4057 * SATA phy-reset @link using DET bits of SControl register.
4060 * Kernel thread context (may sleep)
4063 * 0 on success, -errno otherwise.
4065 int sata_link_hardreset(struct ata_link *link, const unsigned long *timing,
4066 unsigned long deadline)
4073 if (sata_set_spd_needed(link)) {
4074 /* SATA spec says nothing about how to reconfigure
4075 * spd. To be on the safe side, turn off phy during
4076 * reconfiguration. This works for at least ICH7 AHCI
4079 if ((rc = sata_scr_read(link, SCR_CONTROL, &scontrol)))
4082 scontrol = (scontrol & 0x0f0) | 0x304;
4084 if ((rc = sata_scr_write(link, SCR_CONTROL, scontrol)))
4090 /* issue phy wake/reset */
4091 if ((rc = sata_scr_read(link, SCR_CONTROL, &scontrol)))
4094 scontrol = (scontrol & 0x0f0) | 0x301;
4096 if ((rc = sata_scr_write_flush(link, SCR_CONTROL, scontrol)))
4099 /* Couldn't find anything in SATA I/II specs, but AHCI-1.1
4100 * 10.4.2 says at least 1 ms.
4104 /* bring link back */
4105 rc = sata_link_resume(link, timing, deadline);
4107 DPRINTK("EXIT, rc=%d\n", rc);
4112 * sata_std_hardreset - reset host port via SATA phy reset
4113 * @link: link to reset
4114 * @class: resulting class of attached device
4115 * @deadline: deadline jiffies for the operation
4117 * SATA phy-reset host port using DET bits of SControl register,
4118 * wait for !BSY and classify the attached device.
4121 * Kernel thread context (may sleep)
4124 * 0 on success, -errno otherwise.
4126 int sata_std_hardreset(struct ata_link *link, unsigned int *class,
4127 unsigned long deadline)
4129 struct ata_port *ap = link->ap;
4130 const unsigned long *timing = sata_ehc_deb_timing(&link->eh_context);
4136 rc = sata_link_hardreset(link, timing, deadline);
4138 ata_link_printk(link, KERN_ERR,
4139 "COMRESET failed (errno=%d)\n", rc);
4143 /* TODO: phy layer with polling, timeouts, etc. */
4144 if (ata_link_offline(link)) {
4145 *class = ATA_DEV_NONE;
4146 DPRINTK("EXIT, link offline\n");
4150 /* wait a while before checking status */
4151 ata_wait_after_reset(ap, deadline);
4153 /* If PMP is supported, we have to do follow-up SRST. Note
4154 * that some PMPs don't send D2H Reg FIS after hardreset at
4155 * all if the first port is empty. Wait for it just for a
4156 * second and request follow-up SRST.
4158 if (ap->flags & ATA_FLAG_PMP) {
4159 ata_wait_ready(ap, jiffies + HZ);
4163 rc = ata_wait_ready(ap, deadline);
4164 /* link occupied, -ENODEV too is an error */
4166 ata_link_printk(link, KERN_ERR,
4167 "COMRESET failed (errno=%d)\n", rc);
4171 ap->ops->dev_select(ap, 0); /* probably unnecessary */
4173 *class = ata_dev_try_classify(link->device, 1, NULL);
4175 DPRINTK("EXIT, class=%u\n", *class);
4180 * ata_std_postreset - standard postreset callback
4181 * @link: the target ata_link
4182 * @classes: classes of attached devices
4184 * This function is invoked after a successful reset. Note that
4185 * the device might have been reset more than once using
4186 * different reset methods before postreset is invoked.
4189 * Kernel thread context (may sleep)
4191 void ata_std_postreset(struct ata_link *link, unsigned int *classes)
4193 struct ata_port *ap = link->ap;
4198 /* print link status */
4199 sata_print_link_status(link);
4202 if (sata_scr_read(link, SCR_ERROR, &serror) == 0)
4203 sata_scr_write(link, SCR_ERROR, serror);
4204 link->eh_info.serror = 0;
4206 /* is double-select really necessary? */
4207 if (classes[0] != ATA_DEV_NONE)
4208 ap->ops->dev_select(ap, 1);
4209 if (classes[1] != ATA_DEV_NONE)
4210 ap->ops->dev_select(ap, 0);
4212 /* bail out if no device is present */
4213 if (classes[0] == ATA_DEV_NONE && classes[1] == ATA_DEV_NONE) {
4214 DPRINTK("EXIT, no device\n");
4218 /* set up device control */
4219 if (ap->ioaddr.ctl_addr)
4220 iowrite8(ap->ctl, ap->ioaddr.ctl_addr);
4226 * ata_dev_same_device - Determine whether new ID matches configured device
4227 * @dev: device to compare against
4228 * @new_class: class of the new device
4229 * @new_id: IDENTIFY page of the new device
4231 * Compare @new_class and @new_id against @dev and determine
4232 * whether @dev is the device indicated by @new_class and @new_id.
4239 * 1 if @dev matches @new_class and @new_id, 0 otherwise.
4241 static int ata_dev_same_device(struct ata_device *dev, unsigned int new_class,
4244 const u16 *old_id = dev->id;
4245 unsigned char model[2][ATA_ID_PROD_LEN + 1];
4246 unsigned char serial[2][ATA_ID_SERNO_LEN + 1];
4248 if (dev->class != new_class) {
4249 ata_dev_printk(dev, KERN_INFO, "class mismatch %d != %d\n",
4250 dev->class, new_class);
4254 ata_id_c_string(old_id, model[0], ATA_ID_PROD, sizeof(model[0]));
4255 ata_id_c_string(new_id, model[1], ATA_ID_PROD, sizeof(model[1]));
4256 ata_id_c_string(old_id, serial[0], ATA_ID_SERNO, sizeof(serial[0]));
4257 ata_id_c_string(new_id, serial[1], ATA_ID_SERNO, sizeof(serial[1]));
4259 if (strcmp(model[0], model[1])) {
4260 ata_dev_printk(dev, KERN_INFO, "model number mismatch "
4261 "'%s' != '%s'\n", model[0], model[1]);
4265 if (strcmp(serial[0], serial[1])) {
4266 ata_dev_printk(dev, KERN_INFO, "serial number mismatch "
4267 "'%s' != '%s'\n", serial[0], serial[1]);
4275 * ata_dev_reread_id - Re-read IDENTIFY data
4276 * @dev: target ATA device
4277 * @readid_flags: read ID flags
4279 * Re-read IDENTIFY page and make sure @dev is still attached to the port.
4283 * Kernel thread context (may sleep)
4286 * 0 on success, negative errno otherwise
4288 int ata_dev_reread_id(struct ata_device *dev, unsigned int readid_flags)
4290 unsigned int class = dev->class;
4291 u16 *id = (void *)dev->link->ap->sector_buf;
4295 rc = ata_dev_read_id(dev, &class, readid_flags, id);
4299 /* is the device still there? */
4300 if (!ata_dev_same_device(dev, class, id))
4303 memcpy(dev->id, id, sizeof(id[0]) * ATA_ID_WORDS);
4308 * ata_dev_revalidate - Revalidate ATA device
4309 * @dev: device to revalidate
4310 * @new_class: new class code
4311 * @readid_flags: read ID flags
4313 * Re-read IDENTIFY page, make sure @dev is still attached to the
4314 * port and reconfigure it according to the new IDENTIFY page.
4317 * Kernel thread context (may sleep)
4320 * 0 on success, negative errno otherwise
4322 int ata_dev_revalidate(struct ata_device *dev, unsigned int new_class,
4323 unsigned int readid_flags)
4325 u64 n_sectors = dev->n_sectors;
4328 if (!ata_dev_enabled(dev))
4331 /* fail early if !ATA && !ATAPI to avoid issuing [P]IDENTIFY to PMP */
4332 if (ata_class_enabled(new_class) &&
4333 new_class != ATA_DEV_ATA && new_class != ATA_DEV_ATAPI) {
4334 ata_dev_printk(dev, KERN_INFO, "class mismatch %u != %u\n",
4335 dev->class, new_class);
4341 rc = ata_dev_reread_id(dev, readid_flags);
4345 /* configure device according to the new ID */
4346 rc = ata_dev_configure(dev);
4350 /* verify n_sectors hasn't changed */
4351 if (dev->class == ATA_DEV_ATA && n_sectors &&
4352 dev->n_sectors != n_sectors) {
4353 ata_dev_printk(dev, KERN_INFO, "n_sectors mismatch "
4354 "%llu != %llu\n",
4355 (unsigned long long)n_sectors,
4356 (unsigned long long)dev->n_sectors);
4358 /* restore original n_sectors */
4359 dev->n_sectors = n_sectors;
4368 ata_dev_printk(dev, KERN_ERR, "revalidation failed (errno=%d)\n", rc);
4372 struct ata_blacklist_entry {
4373 const char *model_num;
4374 const char *model_rev;
4375 unsigned long horkage;
4378 static const struct ata_blacklist_entry ata_device_blacklist [] = {
4379 /* Devices with DMA related problems under Linux */
4380 { "WDC AC11000H", NULL, ATA_HORKAGE_NODMA },
4381 { "WDC AC22100H", NULL, ATA_HORKAGE_NODMA },
4382 { "WDC AC32500H", NULL, ATA_HORKAGE_NODMA },
4383 { "WDC AC33100H", NULL, ATA_HORKAGE_NODMA },
4384 { "WDC AC31600H", NULL, ATA_HORKAGE_NODMA },
4385 { "WDC AC32100H", "24.09P07", ATA_HORKAGE_NODMA },
4386 { "WDC AC23200L", "21.10N21", ATA_HORKAGE_NODMA },
4387 { "Compaq CRD-8241B", NULL, ATA_HORKAGE_NODMA },
4388 { "CRD-8400B", NULL, ATA_HORKAGE_NODMA },
4389 { "CRD-8480B", NULL, ATA_HORKAGE_NODMA },
4390 { "CRD-8482B", NULL, ATA_HORKAGE_NODMA },
4391 { "CRD-84", NULL, ATA_HORKAGE_NODMA },
4392 { "SanDisk SDP3B", NULL, ATA_HORKAGE_NODMA },
4393 { "SanDisk SDP3B-64", NULL, ATA_HORKAGE_NODMA },
4394 { "SANYO CD-ROM CRD", NULL, ATA_HORKAGE_NODMA },
4395 { "HITACHI CDR-8", NULL, ATA_HORKAGE_NODMA },
4396 { "HITACHI CDR-8335", NULL, ATA_HORKAGE_NODMA },
4397 { "HITACHI CDR-8435", NULL, ATA_HORKAGE_NODMA },
4398 { "Toshiba CD-ROM XM-6202B", NULL, ATA_HORKAGE_NODMA },
4399 { "TOSHIBA CD-ROM XM-1702BC", NULL, ATA_HORKAGE_NODMA },
4400 { "CD-532E-A", NULL, ATA_HORKAGE_NODMA },
4401 { "E-IDE CD-ROM CR-840",NULL, ATA_HORKAGE_NODMA },
4402 { "CD-ROM Drive/F5A", NULL, ATA_HORKAGE_NODMA },
4403 { "WPI CDD-820", NULL, ATA_HORKAGE_NODMA },
4404 { "SAMSUNG CD-ROM SC-148C", NULL, ATA_HORKAGE_NODMA },
4405 { "SAMSUNG CD-ROM SC", NULL, ATA_HORKAGE_NODMA },
4406 { "ATAPI CD-ROM DRIVE 40X MAXIMUM",NULL,ATA_HORKAGE_NODMA },
4407 { "_NEC DV5800A", NULL, ATA_HORKAGE_NODMA },
4408 { "SAMSUNG CD-ROM SN-124", "N001", ATA_HORKAGE_NODMA },
4409 { "Seagate STT20000A", NULL, ATA_HORKAGE_NODMA },
4410 /* Odd clown on sil3726/4726 PMPs */
4411 { "Config Disk", NULL, ATA_HORKAGE_NODMA |
4412 ATA_HORKAGE_SKIP_PM },
4414 /* Weird ATAPI devices */
4415 { "TORiSAN DVD-ROM DRD-N216", NULL, ATA_HORKAGE_MAX_SEC_128 },
4417 /* Devices we expect to fail diagnostics */
4419 /* Devices where NCQ should be avoided */
4421 { "WDC WD740ADFD-00", NULL, ATA_HORKAGE_NONCQ },
4422 { "WDC WD740ADFD-00NLR1", NULL, ATA_HORKAGE_NONCQ, },
4423 /* http://thread.gmane.org/gmane.linux.ide/14907 */
4424 { "FUJITSU MHT2060BH", NULL, ATA_HORKAGE_NONCQ },
4426 { "Maxtor *", "BANC*", ATA_HORKAGE_NONCQ },
4427 { "Maxtor 7V300F0", "VA111630", ATA_HORKAGE_NONCQ },
4428 { "ST380817AS", "3.42", ATA_HORKAGE_NONCQ },
4429 { "ST3160023AS", "3.42", ATA_HORKAGE_NONCQ },
4431 /* Blacklist entries taken from Silicon Image 3124/3132
4432 Windows driver .inf file - also several Linux problem reports */
4433 { "HTS541060G9SA00", "MB3OC60D", ATA_HORKAGE_NONCQ, },
4434 { "HTS541080G9SA00", "MB4OC60D", ATA_HORKAGE_NONCQ, },
4435 { "HTS541010G9SA00", "MBZOC60D", ATA_HORKAGE_NONCQ, },
4437 /* devices which puke on READ_NATIVE_MAX */
4438 { "HDS724040KLSA80", "KFAOA20N", ATA_HORKAGE_BROKEN_HPA, },
4439 { "WDC WD3200JD-00KLB0", "WD-WCAMR1130137", ATA_HORKAGE_BROKEN_HPA },
4440 { "WDC WD2500JD-00HBB0", "WD-WMAL71490727", ATA_HORKAGE_BROKEN_HPA },
4441 { "MAXTOR 6L080L4", "A93.0500", ATA_HORKAGE_BROKEN_HPA },
4443 /* Devices which report 1 sector over size HPA */
4444 { "ST340823A", NULL, ATA_HORKAGE_HPA_SIZE, },
4445 { "ST320413A", NULL, ATA_HORKAGE_HPA_SIZE, },
4446 { "ST310211A", NULL, ATA_HORKAGE_HPA_SIZE, },
4448 /* Devices which get the IVB wrong */
4449 { "QUANTUM FIREBALLlct10 05", "A03.0900", ATA_HORKAGE_IVB, },
4450 { "TSSTcorp CDDVDW SH-S202J", "SB00", ATA_HORKAGE_IVB, },
4451 { "TSSTcorp CDDVDW SH-S202J", "SB01", ATA_HORKAGE_IVB, },
4452 { "TSSTcorp CDDVDW SH-S202N", "SB00", ATA_HORKAGE_IVB, },
4453 { "TSSTcorp CDDVDW SH-S202N", "SB01", ATA_HORKAGE_IVB, },
4459 static int strn_pattern_cmp(const char *patt, const char *name, int wildchar)
4465 * check for trailing wildcard: *\0
4467 p = strchr(patt, wildchar);
4468 if (p && ((*(p + 1)) == 0))
4479 return strncmp(patt, name, len);
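/*
 * Worked example: the "Maxtor *" blacklist entry above carries a trailing
 * wildcard, so only the seven characters before '*' are compared and
 * strn_pattern_cmp("Maxtor *", "Maxtor 7V300F0", '*') returns 0 (match);
 * a pattern without a trailing wildcard must match the whole string.
 */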
4482 static unsigned long ata_dev_blacklisted(const struct ata_device *dev)
4484 unsigned char model_num[ATA_ID_PROD_LEN + 1];
4485 unsigned char model_rev[ATA_ID_FW_REV_LEN + 1];
4486 const struct ata_blacklist_entry *ad = ata_device_blacklist;
4488 ata_id_c_string(dev->id, model_num, ATA_ID_PROD, sizeof(model_num));
4489 ata_id_c_string(dev->id, model_rev, ATA_ID_FW_REV, sizeof(model_rev));
4491 while (ad->model_num) {
4492 if (!strn_pattern_cmp(ad->model_num, model_num, '*')) {
4493 if (ad->model_rev == NULL)
4495 if (!strn_pattern_cmp(ad->model_rev, model_rev, '*'))
4503 static int ata_dma_blacklisted(const struct ata_device *dev)
4505 /* We don't support polling DMA.
4506 * DMA blacklist those ATAPI devices with CDB-intr (and use PIO)
4507 * if the LLDD handles only interrupts in the HSM_ST_LAST state.
4509 if ((dev->link->ap->flags & ATA_FLAG_PIO_POLLING) &&
4510 (dev->flags & ATA_DFLAG_CDB_INTR))
4512 return (dev->horkage & ATA_HORKAGE_NODMA) ? 1 : 0;
4516 * ata_is_40wire - check drive side detection
4519 * Perform drive side detection decoding, allowing for device vendors
4520 * who can't follow the documentation.
4523 static int ata_is_40wire(struct ata_device *dev)
4525 if (dev->horkage & ATA_HORKAGE_IVB)
4526 return ata_drive_40wire_relaxed(dev->id);
4527 return ata_drive_40wire(dev->id);
4531 * ata_dev_xfermask - Compute supported xfermask of the given device
4532 * @dev: Device to compute xfermask for
4534 * Compute supported xfermask of @dev and store it in
4535 * dev->*_mask. This function is responsible for applying all
4536 * known limits including host controller limits, device
4542 static void ata_dev_xfermask(struct ata_device *dev)
4544 struct ata_link *link = dev->link;
4545 struct ata_port *ap = link->ap;
4546 struct ata_host *host = ap->host;
4547 unsigned long xfer_mask;
4549 /* controller modes available */
4550 xfer_mask = ata_pack_xfermask(ap->pio_mask,
4551 ap->mwdma_mask, ap->udma_mask);
4553 /* drive modes available */
4554 xfer_mask &= ata_pack_xfermask(dev->pio_mask,
4555 dev->mwdma_mask, dev->udma_mask);
4556 xfer_mask &= ata_id_xfermask(dev->id);
4559 * CFA Advanced TrueIDE timings are not allowed on a shared
4562 if (ata_dev_pair(dev)) {
4563 /* No PIO5 or PIO6 */
4564 xfer_mask &= ~(0x03 << (ATA_SHIFT_PIO + 5));
4565 /* No MWDMA3 or MWDMA 4 */
4566 xfer_mask &= ~(0x03 << (ATA_SHIFT_MWDMA + 3));
4569 if (ata_dma_blacklisted(dev)) {
4570 xfer_mask &= ~(ATA_MASK_MWDMA | ATA_MASK_UDMA);
4571 ata_dev_printk(dev, KERN_WARNING,
4572 "device is on DMA blacklist, disabling DMA\n");
4575 if ((host->flags & ATA_HOST_SIMPLEX) &&
4576 host->simplex_claimed && host->simplex_claimed != ap) {
4577 xfer_mask &= ~(ATA_MASK_MWDMA | ATA_MASK_UDMA);
4578 ata_dev_printk(dev, KERN_WARNING, "simplex DMA is claimed by "
4579 "other device, disabling DMA\n");
4582 if (ap->flags & ATA_FLAG_NO_IORDY)
4583 xfer_mask &= ata_pio_mask_no_iordy(dev);
4585 if (ap->ops->mode_filter)
4586 xfer_mask = ap->ops->mode_filter(dev, xfer_mask);
4588 /* Apply cable rule here. Don't apply it early because when
4589 * we handle hot plug the cable type can itself change.
4590 * Check this last so that we know if the transfer rate was
4591 * solely limited by the cable.
4592 * Unknown or 80 wire cables reported host side are checked
4593 * drive side as well. Cases where we know a 40wire cable
4594 * is used safely for 80 are not checked here.
4596 if (xfer_mask & (0xF8 << ATA_SHIFT_UDMA))
4597 /* UDMA/44 or higher would be available */
4598 if ((ap->cbl == ATA_CBL_PATA40) ||
4599 (ata_is_40wire(dev) &&
4600 (ap->cbl == ATA_CBL_PATA_UNK ||
4601 ap->cbl == ATA_CBL_PATA80))) {
4602 ata_dev_printk(dev, KERN_WARNING,
4603 "limited to UDMA/33 due to 40-wire cable\n");
4604 xfer_mask &= ~(0xF8 << ATA_SHIFT_UDMA);
4607 ata_unpack_xfermask(xfer_mask, &dev->pio_mask,
4608 &dev->mwdma_mask, &dev->udma_mask);
4612 * ata_dev_set_xfermode - Issue SET FEATURES - XFER MODE command
4613 * @dev: Device to which command will be sent
4615 * Issue SET FEATURES - XFER MODE command to device @dev
4619 * PCI/etc. bus probe sem.
4622 * 0 on success, AC_ERR_* mask otherwise.
4625 static unsigned int ata_dev_set_xfermode(struct ata_device *dev)
4627 struct ata_taskfile tf;
4628 unsigned int err_mask;
4630 /* set up set-features taskfile */
4631 DPRINTK("set features - xfer mode\n");
4633 /* Some controllers and ATAPI devices show flaky interrupt
4634 * behavior after setting xfer mode. Use polling instead.
4636 ata_tf_init(dev, &tf);
4637 tf.command = ATA_CMD_SET_FEATURES;
4638 tf.feature = SETFEATURES_XFER;
4639 tf.flags |= ATA_TFLAG_ISADDR | ATA_TFLAG_DEVICE | ATA_TFLAG_POLLING;
4640 tf.protocol = ATA_PROT_NODATA;
4641 /* If we are using IORDY we must send the mode setting command */
4642 if (ata_pio_need_iordy(dev))
4643 tf.nsect = dev->xfer_mode;
4644 /* If the device has IORDY and the controller does not - turn it off */
4645 else if (ata_id_has_iordy(dev->id))
4646 tf.nsect = 0x01;
4647 else /* In the ancient relic department - skip all of this */
4648 return 0;
4650 err_mask = ata_exec_internal(dev, &tf, NULL, DMA_NONE, NULL, 0, 0);
4652 DPRINTK("EXIT, err_mask=%x\n", err_mask);
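/*
 * Worked example (values from the standard XFER_* encoding): for a drive
 * being configured for UDMA/100, dev->xfer_mode == XFER_UDMA_5 (0x45),
 * so the taskfile above issues SET FEATURES subcommand SETFEATURES_XFER
 * with the sector count register holding 0x45.
 */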
4656 * ata_dev_set_feature - Issue SET FEATURES - SATA FEATURES
4657 * @dev: Device to which command will be sent
4658 * @enable: Whether to enable or disable the feature
4659 * @feature: The sector count represents the feature to set
4661 * Issue SET FEATURES - SATA FEATURES command to device @dev
4662 * on port @ap with the sector count set to @feature.
4665 * PCI/etc. bus probe sem.
4668 * 0 on success, AC_ERR_* mask otherwise.
4670 static unsigned int ata_dev_set_feature(struct ata_device *dev, u8 enable,
4673 struct ata_taskfile tf;
4674 unsigned int err_mask;
4676 /* set up set-features taskfile */
4677 DPRINTK("set features - SATA features\n");
4679 ata_tf_init(dev, &tf);
4680 tf.command = ATA_CMD_SET_FEATURES;
4681 tf.feature = enable;
4682 tf.flags |= ATA_TFLAG_ISADDR | ATA_TFLAG_DEVICE;
4683 tf.protocol = ATA_PROT_NODATA;
4686 err_mask = ata_exec_internal(dev, &tf, NULL, DMA_NONE, NULL, 0, 0);
4688 DPRINTK("EXIT, err_mask=%x\n", err_mask);
4693 * ata_dev_init_params - Issue INIT DEV PARAMS command
4694 * @dev: Device to which command will be sent
4695 * @heads: Number of heads (taskfile parameter)
4696 * @sectors: Number of sectors (taskfile parameter)
4699 * Kernel thread context (may sleep)
4702 * 0 on success, AC_ERR_* mask otherwise.
4704 static unsigned int ata_dev_init_params(struct ata_device *dev,
4705 u16 heads, u16 sectors)
4707 struct ata_taskfile tf;
4708 unsigned int err_mask;
4710 /* Number of sectors per track 1-255. Number of heads 1-16 */
4711 if (sectors < 1 || sectors > 255 || heads < 1 || heads > 16)
4712 return AC_ERR_INVALID;
4714 /* set up init dev params taskfile */
4715 DPRINTK("init dev params\n");
4717 ata_tf_init(dev, &tf);
4718 tf.command = ATA_CMD_INIT_DEV_PARAMS;
4719 tf.flags |= ATA_TFLAG_ISADDR | ATA_TFLAG_DEVICE;
4720 tf.protocol = ATA_PROT_NODATA;
4722 tf.device |= (heads - 1) & 0x0f; /* max head = num. of heads - 1 */
4724 err_mask = ata_exec_internal(dev, &tf, NULL, DMA_NONE, NULL, 0, 0);
4725 /* A clean abort indicates an original or just-out-of-spec drive,
4726 and we should continue as we issue the setup based on the
4727 drive-reported working geometry */
4728 if (err_mask == AC_ERR_DEV && (tf.feature & ATA_ABORTED))
4729 err_mask = 0;
4731 DPRINTK("EXIT, err_mask=%x\n", err_mask);
4732 return err_mask;
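/*
 * Added illustrative sketch: INIT DEV PARAMS is normally issued with
 * the geometry the drive reports in its IDENTIFY data (word 3 =
 * default heads, word 6 = default sectors per track), which is what
 * ata_dev_configure() does during CHS setup.
 */
static unsigned int __maybe_unused ata_example_init_chs(struct ata_device *dev)
{
	const u16 *id = dev->id;

	return ata_dev_init_params(dev, id[3], id[6]);
}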
4736 * ata_sg_clean - Unmap DMA memory associated with command
4737 * @qc: Command containing DMA memory to be released
4739 * Unmap all mapped DMA memory associated with this command.
4742 * spin_lock_irqsave(host lock)
4744 void ata_sg_clean(struct ata_queued_cmd *qc)
4746 struct ata_port *ap = qc->ap;
4747 struct scatterlist *sg = qc->sg;
4748 int dir = qc->dma_dir;
4750 WARN_ON(sg == NULL);
4752 VPRINTK("unmapping %u sg elements\n", qc->n_elem);
4754 if (qc->n_elem)
4755 dma_unmap_sg(ap->dev, sg, qc->n_elem, dir);
4757 qc->flags &= ~ATA_QCFLAG_DMAMAP;
4758 qc->sg = NULL;
4762 * ata_fill_sg - Fill PCI IDE PRD table
4763 * @qc: Metadata associated with taskfile to be transferred
4765 * Fill PCI IDE PRD (scatter-gather) table with segments
4766 * associated with the current disk command.
4769 * spin_lock_irqsave(host lock)
4772 static void ata_fill_sg(struct ata_queued_cmd *qc)
4774 struct ata_port *ap = qc->ap;
4775 struct scatterlist *sg;
4776 unsigned int si, pi;
4778 pi = 0;
4779 for_each_sg(qc->sg, sg, qc->n_elem, si) {
4780 u32 addr, offset;
4781 u32 sg_len, len;
4783 /* determine if physical DMA addr spans 64K boundary.
4784 * Note h/w doesn't support 64-bit, so we unconditionally
4785 * truncate dma_addr_t to u32.
4786 */
4787 addr = (u32) sg_dma_address(sg);
4788 sg_len = sg_dma_len(sg);
4790 while (sg_len) {
4791 offset = addr & 0xffff;
4792 len = sg_len;
4793 if ((offset + sg_len) > 0x10000)
4794 len = 0x10000 - offset;
4796 ap->prd[pi].addr = cpu_to_le32(addr);
4797 ap->prd[pi].flags_len = cpu_to_le32(len & 0xffff);
4798 VPRINTK("PRD[%u] = (0x%X, 0x%X)\n", pi, addr, len);
4800 pi++;
4801 sg_len -= len;
4802 addr += len;
4803 }
4804 }
4806 ap->prd[pi - 1].flags_len |= cpu_to_le32(ATA_PRD_EOT);
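/*
 * Added worked example: an sg entry with DMA address 0x0000fff0 and
 * length 0x8020 straddles a 64K boundary, so the loop above emits two
 * PRDs: (0x0000fff0, 0x0010) and (0x00010000, 0x8010).  A full 64K
 * segment is encoded with a length field of 0x0000 per the spec,
 * which is exactly what ata_fill_sg_dumb() below exists to avoid.
 */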
4810 * ata_fill_sg_dumb - Fill PCI IDE PRD table
4811 * @qc: Metadata associated with taskfile to be transferred
4813 * Fill PCI IDE PRD (scatter-gather) table with segments
4814 * associated with the current disk command. Perform the fill
4815 * so that we avoid writing any length 64K records for
4816 * controllers that don't follow the spec.
4819 * spin_lock_irqsave(host lock)
4822 static void ata_fill_sg_dumb(struct ata_queued_cmd *qc)
4824 struct ata_port *ap = qc->ap;
4825 struct scatterlist *sg;
4826 unsigned int si, pi;
4828 pi = 0;
4829 for_each_sg(qc->sg, sg, qc->n_elem, si) {
4830 u32 addr, offset;
4831 u32 sg_len, len, blen;
4833 /* determine if physical DMA addr spans 64K boundary.
4834 * Note h/w doesn't support 64-bit, so we unconditionally
4835 * truncate dma_addr_t to u32.
4836 */
4837 addr = (u32) sg_dma_address(sg);
4838 sg_len = sg_dma_len(sg);
4840 while (sg_len) {
4841 offset = addr & 0xffff;
4842 len = sg_len;
4843 if ((offset + sg_len) > 0x10000)
4844 len = 0x10000 - offset;
4846 blen = len & 0xffff;
4847 ap->prd[pi].addr = cpu_to_le32(addr);
4848 if (blen == 0) {
4849 /* Some PATA chipsets like the CS5530 can't
4850 cope with 0x0000 meaning 64K as the spec says */
4851 ap->prd[pi].flags_len = cpu_to_le32(0x8000);
4852 blen = 0x8000;
4853 ap->prd[++pi].addr = cpu_to_le32(addr + 0x8000);
4854 }
4855 ap->prd[pi].flags_len = cpu_to_le32(blen);
4856 VPRINTK("PRD[%u] = (0x%X, 0x%X)\n", pi, addr, len);
4858 pi++;
4859 sg_len -= len;
4860 addr += len;
4861 }
4862 }
4864 ap->prd[pi - 1].flags_len |= cpu_to_le32(ATA_PRD_EOT);
4868 * ata_check_atapi_dma - Check whether ATAPI DMA can be supported
4869 * @qc: Metadata associated with taskfile to check
4871 * Allow low-level driver to filter ATA PACKET commands, returning
4872 * a status indicating whether or not it is OK to use DMA for the
4873 * supplied PACKET command.
4876 * spin_lock_irqsave(host lock)
4878 * RETURNS: 0 when ATAPI DMA can be used
4881 int ata_check_atapi_dma(struct ata_queued_cmd *qc)
4883 struct ata_port *ap = qc->ap;
4885 /* Don't allow DMA if it isn't multiple of 16 bytes. Quite a
4886 * few ATAPI devices choke on such DMA requests.
4888 if (unlikely(qc->nbytes & 15))
4889 return 1;
4891 if (ap->ops->check_atapi_dma)
4892 return ap->ops->check_atapi_dma(qc);
4894 return 0;
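/*
 * Added illustrative sketch: a low-level driver hook suitable for
 * ->check_atapi_dma on controllers that only handle bulk data
 * reliably.  "example_check_atapi_dma" is hypothetical; nonzero means
 * "fall back to PIO for this PACKET command".
 */
static int __maybe_unused example_check_atapi_dma(struct ata_queued_cmd *qc)
{
	switch (qc->cdb[0]) {
	case READ_10:
	case READ_12:
	case WRITE_10:
	case WRITE_12:
		return 0;	/* bulk transfer, DMA is fine */
	default:
		return 1;	/* control/odd-sized data, use PIO */
	}
}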
4898 * ata_std_qc_defer - Check whether a qc needs to be deferred
4899 * @qc: ATA command in question
4901 * Non-NCQ commands cannot run with any other command, NCQ or
4902 * not. As the upper layer only knows the queue depth, we are
4903 * responsible for maintaining exclusion. This function checks
4904 * whether a new command @qc can be issued.
4907 * spin_lock_irqsave(host lock)
4910 * ATA_DEFER_* if deferring is needed, 0 otherwise.
4912 int ata_std_qc_defer(struct ata_queued_cmd *qc)
4914 struct ata_link *link = qc->dev->link;
4916 if (qc->tf.protocol == ATA_PROT_NCQ) {
4917 if (!ata_tag_valid(link->active_tag))
4918 return 0;
4919 } else {
4920 if (!ata_tag_valid(link->active_tag) && !link->sactive)
4921 return 0;
4922 }
4924 return ATA_DEFER_LINK;
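/*
 * Added illustrative sketch: an NCQ-capable LLD simply points its
 * qc_defer hook at the standard helper.  "example_port_ops" is
 * hypothetical and elides every other hook.
 */
static const struct ata_port_operations example_port_ops __maybe_unused = {
	.qc_defer	= ata_std_qc_defer,
	/* ... remaining hooks omitted ... */
};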
4928 * ata_qc_prep - Prepare taskfile for submission
4929 * @qc: Metadata associated with taskfile to be prepared
4931 * Prepare ATA taskfile for submission.
4934 * spin_lock_irqsave(host lock)
4936 void ata_qc_prep(struct ata_queued_cmd *qc)
4938 if (!(qc->flags & ATA_QCFLAG_DMAMAP))
4939 return;
4941 ata_fill_sg(qc);
4945 * ata_dumb_qc_prep - Prepare taskfile for submission
4946 * @qc: Metadata associated with taskfile to be prepared
4948 * Prepare ATA taskfile for submission.
4951 * spin_lock_irqsave(host lock)
4953 void ata_dumb_qc_prep(struct ata_queued_cmd *qc)
4955 if (!(qc->flags & ATA_QCFLAG_DMAMAP))
4956 return;
4958 ata_fill_sg_dumb(qc);
4961 void ata_noop_qc_prep(struct ata_queued_cmd *qc) { }
4964 * ata_sg_init - Associate command with scatter-gather table.
4965 * @qc: Command to be associated
4966 * @sg: Scatter-gather table.
4967 * @n_elem: Number of elements in s/g table.
4969 * Initialize the data-related elements of queued_cmd @qc
4970 * to point to a scatter-gather table @sg, containing @n_elem
4971 * elements.
4974 * spin_lock_irqsave(host lock)
4976 void ata_sg_init(struct ata_queued_cmd *qc, struct scatterlist *sg,
4977 unsigned int n_elem)
4979 qc->sg = sg;
4980 qc->n_elem = n_elem;
4981 qc->cursg = qc->sg;
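/*
 * Added illustrative sketch: this is how the SCSI translation layer
 * attaches a scsi_cmnd's scatter-gather table to a qc (compare
 * ata_scsi_qc_new() in libata-scsi.c).
 */
static void __maybe_unused example_attach_sg(struct ata_queued_cmd *qc,
					     struct scsi_cmnd *cmd)
{
	ata_sg_init(qc, scsi_sglist(cmd), scsi_sg_count(cmd));
	qc->dma_dir = cmd->sc_data_direction;
}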
4985 * ata_sg_setup - DMA-map the scatter-gather table associated with a command.
4986 * @qc: Command with scatter-gather table to be mapped.
4988 * DMA-map the scatter-gather table associated with queued_cmd @qc.
4991 * spin_lock_irqsave(host lock)
4994 * Zero on success, negative on error.
4997 static int ata_sg_setup(struct ata_queued_cmd *qc)
4999 struct ata_port *ap = qc->ap;
5000 unsigned int n_elem;
5002 VPRINTK("ENTER, ata%u\n", ap->print_id);
5004 n_elem = dma_map_sg(ap->dev, qc->sg, qc->n_elem, qc->dma_dir);
5005 if (n_elem < 1)
5006 return -1;
5008 DPRINTK("%d sg elements mapped\n", n_elem);
5010 qc->n_elem = n_elem;
5011 qc->flags |= ATA_QCFLAG_DMAMAP;
5013 return 0;
5017 * swap_buf_le16 - swap halves of 16-bit words in place
5018 * @buf: Buffer to swap
5019 * @buf_words: Number of 16-bit words in buffer.
5021 * Swap halves of 16-bit words if needed to convert from
5022 * little-endian byte order to native cpu byte order, or
5023 * vice-versa.
5026 * Inherited from caller.
5028 void swap_buf_le16(u16 *buf, unsigned int buf_words)
5030 #ifdef __BIG_ENDIAN
5031 unsigned int i;
5033 for (i = 0; i < buf_words; i++)
5034 buf[i] = le16_to_cpu(buf[i]);
5035 #endif /* __BIG_ENDIAN */
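/*
 * Added illustrative sketch: IDENTIFY data arrives as 16-bit
 * little-endian words, so readers byte-swap it in place on big-endian
 * machines, as ata_dev_read_id() does with the real id buffer.
 */
static void __maybe_unused example_fix_id_endianness(u16 *id)
{
	swap_buf_le16(id, ATA_ID_WORDS);
}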
5039 * ata_data_xfer - Transfer data by PIO
5040 * @dev: device to target
5041 * @buf: data buffer
5042 * @buflen: buffer length
5043 * @rw: read/write
5045 * Transfer data from/to the device data register by PIO.
5048 * Inherited from caller.
5053 unsigned int ata_data_xfer(struct ata_device *dev, unsigned char *buf,
5054 unsigned int buflen, int rw)
5056 struct ata_port *ap = dev->link->ap;
5057 void __iomem *data_addr = ap->ioaddr.data_addr;
5058 unsigned int words = buflen >> 1;
5060 /* Transfer multiple of 2 bytes */
5061 if (rw == READ)
5062 ioread16_rep(data_addr, buf, words);
5063 else
5064 iowrite16_rep(data_addr, buf, words);
5066 /* Transfer trailing 1 byte, if any. */
5067 if (unlikely(buflen & 0x01)) {
5068 __le16 align_buf[1] = { 0 };
5069 unsigned char *trailing_buf = buf + buflen - 1;
5071 if (rw == READ) {
5072 align_buf[0] = cpu_to_le16(ioread16(data_addr));
5073 memcpy(trailing_buf, align_buf, 1);
5074 } else {
5075 memcpy(align_buf, trailing_buf, 1);
5076 iowrite16(le16_to_cpu(align_buf[0]), data_addr);
5077 }
5078 words++;
5079 }
5081 return words << 1;
5085 * ata_data_xfer_noirq - Transfer data by PIO
5086 * @dev: device to target
5087 * @buf: data buffer
5088 * @buflen: buffer length
5089 * @rw: read/write
5091 * Transfer data from/to the device data register by PIO. Do the
5092 * transfer with interrupts disabled.
5095 * Inherited from caller.
5100 unsigned int ata_data_xfer_noirq(struct ata_device *dev, unsigned char *buf,
5101 unsigned int buflen, int rw)
5103 unsigned long flags;
5104 unsigned int consumed;
5106 local_irq_save(flags);
5107 consumed = ata_data_xfer(dev, buf, buflen, rw);
5108 local_irq_restore(flags);
5110 return consumed;
5115 * ata_pio_sector - Transfer a sector of data.
5116 * @qc: Command on going
5118 * Transfer qc->sect_size bytes of data from/to the ATA device.
5121 * Inherited from caller.
5124 static void ata_pio_sector(struct ata_queued_cmd *qc)
5126 int do_write = (qc->tf.flags & ATA_TFLAG_WRITE);
5127 struct ata_port *ap = qc->ap;
5128 struct page *page;
5129 unsigned int offset;
5130 unsigned char *buf;
5132 if (qc->curbytes == qc->nbytes - qc->sect_size)
5133 ap->hsm_task_state = HSM_ST_LAST;
5135 page = sg_page(qc->cursg);
5136 offset = qc->cursg->offset + qc->cursg_ofs;
5138 /* get the current page and offset */
5139 page = nth_page(page, (offset >> PAGE_SHIFT));
5140 offset %= PAGE_SIZE;
5142 DPRINTK("data %s\n", qc->tf.flags & ATA_TFLAG_WRITE ? "write" : "read");
5144 if (PageHighMem(page)) {
5145 unsigned long flags;
5147 /* FIXME: use a bounce buffer */
5148 local_irq_save(flags);
5149 buf = kmap_atomic(page, KM_IRQ0);
5151 /* do the actual data transfer */
5152 ap->ops->data_xfer(qc->dev, buf + offset, qc->sect_size, do_write);
5154 kunmap_atomic(buf, KM_IRQ0);
5155 local_irq_restore(flags);
5157 buf = page_address(page);
5158 ap->ops->data_xfer(qc->dev, buf + offset, qc->sect_size, do_write);
5161 qc->curbytes += qc->sect_size;
5162 qc->cursg_ofs += qc->sect_size;
5164 if (qc->cursg_ofs == qc->cursg->length) {
5165 qc->cursg = sg_next(qc->cursg);
5166 qc->cursg_ofs = 0;
5171 * ata_pio_sectors - Transfer one or many sectors.
5172 * @qc: Command on going
5174 * Transfer one or many sectors of data from/to the
5175 * ATA device for the DRQ request.
5178 * Inherited from caller.
5181 static void ata_pio_sectors(struct ata_queued_cmd *qc)
5183 if (is_multi_taskfile(&qc->tf)) {
5184 /* READ/WRITE MULTIPLE */
5185 unsigned int nsect;
5187 WARN_ON(qc->dev->multi_count == 0);
5189 nsect = min((qc->nbytes - qc->curbytes) / qc->sect_size,
5190 qc->dev->multi_count);
5191 while (nsect--)
5192 ata_pio_sector(qc);
5193 } else
5194 ata_pio_sector(qc);
5196 ata_altstatus(qc->ap); /* flush */
5200 * atapi_send_cdb - Write CDB bytes to hardware
5201 * @ap: Port to which ATAPI device is attached.
5202 * @qc: Taskfile currently active
5204 * When device has indicated its readiness to accept
5205 * a CDB, this function is called. Send the CDB.
5211 static void atapi_send_cdb(struct ata_port *ap, struct ata_queued_cmd *qc)
5214 DPRINTK("send cdb\n");
5215 WARN_ON(qc->dev->cdb_len < 12);
5217 ap->ops->data_xfer(qc->dev, qc->cdb, qc->dev->cdb_len, 1);
5218 ata_altstatus(ap); /* flush */
5220 switch (qc->tf.protocol) {
5221 case ATAPI_PROT_PIO:
5222 ap->hsm_task_state = HSM_ST;
5223 break;
5224 case ATAPI_PROT_NODATA:
5225 ap->hsm_task_state = HSM_ST_LAST;
5226 break;
5227 case ATAPI_PROT_DMA:
5228 ap->hsm_task_state = HSM_ST_LAST;
5229 /* initiate bmdma */
5230 ap->ops->bmdma_start(qc);
5231 break;
5236 * __atapi_pio_bytes - Transfer data from/to the ATAPI device.
5237 * @qc: Command on going
5238 * @bytes: number of bytes
5240 * Transfer data from/to the ATAPI device.
5243 * Inherited from caller.
5246 static int __atapi_pio_bytes(struct ata_queued_cmd *qc, unsigned int bytes)
5248 int rw = (qc->tf.flags & ATA_TFLAG_WRITE) ? WRITE : READ;
5249 struct ata_port *ap = qc->ap;
5250 struct ata_device *dev = qc->dev;
5251 struct ata_eh_info *ehi = &dev->link->eh_info;
5252 struct scatterlist *sg;
5253 struct page *page;
5254 unsigned char *buf;
5255 unsigned int offset, count, consumed;
5257 next_sg:
5258 sg = qc->cursg;
5259 if (unlikely(!sg)) {
5260 ata_ehi_push_desc(ehi, "unexpected or too much trailing data "
5261 "buf=%u cur=%u bytes=%u",
5262 qc->nbytes, qc->curbytes, bytes);
5263 return -1;
5264 }
5266 page = sg_page(sg);
5267 offset = sg->offset + qc->cursg_ofs;
5269 /* get the current page and offset */
5270 page = nth_page(page, (offset >> PAGE_SHIFT));
5271 offset %= PAGE_SIZE;
5273 /* don't overrun current sg */
5274 count = min(sg->length - qc->cursg_ofs, bytes);
5276 /* don't cross page boundaries */
5277 count = min(count, (unsigned int)PAGE_SIZE - offset);
5279 DPRINTK("data %s\n", qc->tf.flags & ATA_TFLAG_WRITE ? "write" : "read");
5281 if (PageHighMem(page)) {
5282 unsigned long flags;
5284 /* FIXME: use bounce buffer */
5285 local_irq_save(flags);
5286 buf = kmap_atomic(page, KM_IRQ0);
5288 /* do the actual data transfer */
5289 consumed = ap->ops->data_xfer(dev, buf + offset, count, rw);
5291 kunmap_atomic(buf, KM_IRQ0);
5292 local_irq_restore(flags);
5294 buf = page_address(page);
5295 consumed = ap->ops->data_xfer(dev, buf + offset, count, rw);
5298 bytes -= min(bytes, consumed);
5299 qc->curbytes += count;
5300 qc->cursg_ofs += count;
5302 if (qc->cursg_ofs == sg->length) {
5303 qc->cursg = sg_next(qc->cursg);
5304 qc->cursg_ofs = 0;
5307 /* consumed can be larger than count only for the last transfer */
5308 WARN_ON(qc->cursg && count != consumed);
5310 if (bytes)
5311 goto next_sg;
5312 return 0;
5316 * atapi_pio_bytes - Transfer data from/to the ATAPI device.
5317 * @qc: Command on going
5319 * Transfer data from/to the ATAPI device.
5322 * Inherited from caller.
5325 static void atapi_pio_bytes(struct ata_queued_cmd *qc)
5327 struct ata_port *ap = qc->ap;
5328 struct ata_device *dev = qc->dev;
5329 struct ata_eh_info *ehi = &dev->link->eh_info;
5330 unsigned int ireason, bc_lo, bc_hi, bytes;
5331 int i_write, do_write = (qc->tf.flags & ATA_TFLAG_WRITE) ? 1 : 0;
5333 /* Abuse qc->result_tf for temp storage of intermediate TF
5334 * here to save some kernel stack usage.
5335 * For normal completion, qc->result_tf is not relevant. For
5336 * error, qc->result_tf is later overwritten by ata_qc_complete().
5337 * So, the correctness of qc->result_tf is not affected.
5339 ap->ops->tf_read(ap, &qc->result_tf);
5340 ireason = qc->result_tf.nsect;
5341 bc_lo = qc->result_tf.lbam;
5342 bc_hi = qc->result_tf.lbah;
5343 bytes = (bc_hi << 8) | bc_lo;
5345 /* shall be cleared to zero, indicating xfer of data */
5346 if (unlikely(ireason & (1 << 0)))
5347 goto atapi_check;
5349 /* make sure transfer direction matches expected */
5350 i_write = ((ireason & (1 << 1)) == 0) ? 1 : 0;
5351 if (unlikely(do_write != i_write))
5352 goto atapi_check;
5354 if (unlikely(!bytes))
5355 goto atapi_check;
5357 VPRINTK("ata%u: xferring %d bytes\n", ap->print_id, bytes);
5359 if (unlikely(__atapi_pio_bytes(qc, bytes)))
5360 goto err_out;
5361 ata_altstatus(ap); /* flush */
5363 return;
5365 atapi_check:
5366 ata_ehi_push_desc(ehi, "ATAPI check failed (ireason=0x%x bytes=%u)",
5367 ireason, bytes);
5368 err_out:
5369 qc->err_mask |= AC_ERR_HSM;
5370 ap->hsm_task_state = HSM_ST_ERR;
5374 * ata_hsm_ok_in_wq - Check if the qc can be handled in the workqueue.
5375 * @ap: the target ata_port
5376 * @qc: qc on going
5379 * 1 if ok in workqueue, 0 otherwise.
5382 static inline int ata_hsm_ok_in_wq(struct ata_port *ap, struct ata_queued_cmd *qc)
5384 if (qc->tf.flags & ATA_TFLAG_POLLING)
5385 return 1;
5387 if (ap->hsm_task_state == HSM_ST_FIRST) {
5388 if (qc->tf.protocol == ATA_PROT_PIO &&
5389 (qc->tf.flags & ATA_TFLAG_WRITE))
5390 return 1;
5392 if (ata_is_atapi(qc->tf.protocol) &&
5393 !(qc->dev->flags & ATA_DFLAG_CDB_INTR))
5394 return 1;
5395 }
5397 return 0;
5401 * ata_hsm_qc_complete - finish a qc running on standard HSM
5402 * @qc: Command to complete
5403 * @in_wq: 1 if called from workqueue, 0 otherwise
5405 * Finish @qc which is running on standard HSM.
5408 * If @in_wq is zero, spin_lock_irqsave(host lock).
5409 * Otherwise, none on entry and grabs host lock.
5411 static void ata_hsm_qc_complete(struct ata_queued_cmd *qc, int in_wq)
5413 struct ata_port *ap = qc->ap;
5414 unsigned long flags;
5416 if (ap->ops->error_handler) {
5417 if (in_wq) {
5418 spin_lock_irqsave(ap->lock, flags);
5420 /* EH might have kicked in while host lock is
5421 * released.
5422 */
5423 qc = ata_qc_from_tag(ap, qc->tag);
5424 if (qc) {
5425 if (likely(!(qc->err_mask & AC_ERR_HSM))) {
5426 ap->ops->irq_on(ap);
5427 ata_qc_complete(qc);
5428 } else
5429 ata_port_freeze(ap);
5430 }
5432 spin_unlock_irqrestore(ap->lock, flags);
5433 } else {
5434 if (likely(!(qc->err_mask & AC_ERR_HSM)))
5435 ata_qc_complete(qc);
5436 else
5437 ata_port_freeze(ap);
5438 }
5439 } else {
5440 if (in_wq) {
5441 spin_lock_irqsave(ap->lock, flags);
5442 ap->ops->irq_on(ap);
5443 ata_qc_complete(qc);
5444 spin_unlock_irqrestore(ap->lock, flags);
5445 } else
5446 ata_qc_complete(qc);
5447 }
5451 * ata_hsm_move - move the HSM to the next state.
5452 * @ap: the target ata_port
5453 * @qc: qc on going
5454 * @status: current device status
5455 * @in_wq: 1 if called from workqueue, 0 otherwise
5458 * 1 when poll next status needed, 0 otherwise.
5460 int ata_hsm_move(struct ata_port *ap, struct ata_queued_cmd *qc,
5461 u8 status, int in_wq)
5463 unsigned long flags = 0;
5464 int poll_next;
5466 WARN_ON((qc->flags & ATA_QCFLAG_ACTIVE) == 0);
5468 /* Make sure ata_qc_issue_prot() does not throw things
5469 * like DMA polling into the workqueue. Notice that
5470 * in_wq is not equivalent to (qc->tf.flags & ATA_TFLAG_POLLING).
5472 WARN_ON(in_wq != ata_hsm_ok_in_wq(ap, qc));
5474 fsm_start:
5475 DPRINTK("ata%u: protocol %d task_state %d (dev_stat 0x%X)\n",
5476 ap->print_id, qc->tf.protocol, ap->hsm_task_state, status);
5478 switch (ap->hsm_task_state) {
5479 case HSM_ST_FIRST:
5480 /* Send first data block or PACKET CDB */
5482 /* If polling, we will stay in the work queue after
5483 * sending the data. Otherwise, interrupt handler
5484 * takes over after sending the data.
5486 poll_next = (qc->tf.flags & ATA_TFLAG_POLLING);
5488 /* check device status */
5489 if (unlikely((status & ATA_DRQ) == 0)) {
5490 /* handle BSY=0, DRQ=0 as error */
5491 if (likely(status & (ATA_ERR | ATA_DF)))
5492 /* device stops HSM for abort/error */
5493 qc->err_mask |= AC_ERR_DEV;
5494 else
5495 /* HSM violation. Let EH handle this */
5496 qc->err_mask |= AC_ERR_HSM;
5498 ap->hsm_task_state = HSM_ST_ERR;
5499 goto fsm_start;
5502 /* Device should not ask for data transfer (DRQ=1)
5503 * when it finds something wrong.
5504 * We ignore DRQ here and stop the HSM by
5505 * changing hsm_task_state to HSM_ST_ERR and
5506 * let the EH abort the command or reset the device.
5508 if (unlikely(status & (ATA_ERR | ATA_DF))) {
5509 /* Some ATAPI tape drives forget to clear the ERR bit
5510 * when doing the next command (mostly request sense).
5511 * We ignore ERR here to workaround and proceed sending
5512 * the CDB.
5513 */
5514 if (!(qc->dev->horkage & ATA_HORKAGE_STUCK_ERR)) {
5515 ata_port_printk(ap, KERN_WARNING,
5516 "DRQ=1 with device error, "
5517 "dev_stat 0x%X\n", status);
5518 qc->err_mask |= AC_ERR_HSM;
5519 ap->hsm_task_state = HSM_ST_ERR;
5520 goto fsm_start;
5524 /* Send the CDB (atapi) or the first data block (ata pio out).
5525 * During the state transition, interrupt handler shouldn't
5526 * be invoked before the data transfer is complete and
5527 * hsm_task_state is changed. Hence, the following locking.
5529 if (in_wq)
5530 spin_lock_irqsave(ap->lock, flags);
5532 if (qc->tf.protocol == ATA_PROT_PIO) {
5533 /* PIO data out protocol.
5534 * send first data block.
5537 /* ata_pio_sectors() might change the state
5538 * to HSM_ST_LAST. so, the state is changed here
5539 * before ata_pio_sectors().
5541 ap->hsm_task_state = HSM_ST;
5542 ata_pio_sectors(qc);
5543 } else
5544 /* send CDB */
5545 atapi_send_cdb(ap, qc);
5547 if (in_wq)
5548 spin_unlock_irqrestore(ap->lock, flags);
5550 /* if polling, ata_pio_task() handles the rest.
5551 * otherwise, interrupt handler takes over from here.
5552 */
5553 break;
5555 case HSM_ST:
5556 /* complete command or read/write the data register */
5557 if (qc->tf.protocol == ATAPI_PROT_PIO) {
5558 /* ATAPI PIO protocol */
5559 if ((status & ATA_DRQ) == 0) {
5560 /* No more data to transfer or device error.
5561 * Device error will be tagged in HSM_ST_LAST.
5563 ap->hsm_task_state = HSM_ST_LAST;
5564 goto fsm_start;
5567 /* Device should not ask for data transfer (DRQ=1)
5568 * when it finds something wrong.
5569 * We ignore DRQ here and stop the HSM by
5570 * changing hsm_task_state to HSM_ST_ERR and
5571 * let the EH abort the command or reset the device.
5573 if (unlikely(status & (ATA_ERR | ATA_DF))) {
5574 ata_port_printk(ap, KERN_WARNING, "DRQ=1 with "
5575 "device error, dev_stat 0x%X\n",
5577 qc->err_mask |= AC_ERR_HSM;
5578 ap->hsm_task_state = HSM_ST_ERR;
5579 goto fsm_start;
5582 atapi_pio_bytes(qc);
5584 if (unlikely(ap->hsm_task_state == HSM_ST_ERR))
5585 /* bad ireason reported by device */
5586 goto fsm_start;
5588 } else {
5589 /* ATA PIO protocol */
5590 if (unlikely((status & ATA_DRQ) == 0)) {
5591 /* handle BSY=0, DRQ=0 as error */
5592 if (likely(status & (ATA_ERR | ATA_DF)))
5593 /* device stops HSM for abort/error */
5594 qc->err_mask |= AC_ERR_DEV;
5595 else {
5596 /* HSM violation. Let EH handle this.
5597 * Phantom devices also trigger this
5598 * condition. Mark hint.
5600 qc->err_mask |= AC_ERR_HSM |
5601 AC_ERR_NODEV_HINT;
5602 }
5603 ap->hsm_task_state = HSM_ST_ERR;
5604 goto fsm_start;
5607 /* For PIO reads, some devices may ask for
5608 * data transfer (DRQ=1) along with ERR=1.
5609 * We respect DRQ here and transfer one
5610 * block of junk data before changing the
5611 * hsm_task_state to HSM_ST_ERR.
5613 * For PIO writes, ERR=1 DRQ=1 doesn't make
5614 * sense since the data block has been
5615 * transferred to the device.
5617 if (unlikely(status & (ATA_ERR | ATA_DF))) {
5618 /* data might be corrupted */
5619 qc->err_mask |= AC_ERR_DEV;
5621 if (!(qc->tf.flags & ATA_TFLAG_WRITE)) {
5622 ata_pio_sectors(qc);
5623 status = ata_wait_idle(ap);
5626 if (status & (ATA_BUSY | ATA_DRQ))
5627 qc->err_mask |= AC_ERR_HSM;
5629 /* ata_pio_sectors() might change the
5630 * state to HSM_ST_LAST. so, the state
5631 * is changed after ata_pio_sectors().
5633 ap->hsm_task_state = HSM_ST_ERR;
5634 goto fsm_start;
5637 ata_pio_sectors(qc);
5639 if (ap->hsm_task_state == HSM_ST_LAST &&
5640 (!(qc->tf.flags & ATA_TFLAG_WRITE))) {
5641 /* all data read */
5642 status = ata_wait_idle(ap);
5643 goto fsm_start;
5647 poll_next = 1;
5648 break;
5650 case HSM_ST_LAST:
5651 if (unlikely(!ata_ok(status))) {
5652 qc->err_mask |= __ac_err_mask(status);
5653 ap->hsm_task_state = HSM_ST_ERR;
5654 goto fsm_start;
5657 /* no more data to transfer */
5658 DPRINTK("ata%u: dev %u command complete, drv_stat 0x%x\n",
5659 ap->print_id, qc->dev->devno, status);
5661 WARN_ON(qc->err_mask);
5663 ap->hsm_task_state = HSM_ST_IDLE;
5665 /* complete taskfile transaction */
5666 ata_hsm_qc_complete(qc, in_wq);
5668 poll_next = 0;
5669 break;
5671 case HSM_ST_ERR:
5672 /* make sure qc->err_mask is available to
5673 * know what's wrong and recover
5675 WARN_ON(qc->err_mask == 0);
5677 ap->hsm_task_state = HSM_ST_IDLE;
5679 /* complete taskfile transaction */
5680 ata_hsm_qc_complete(qc, in_wq);
5682 poll_next = 0;
5683 break;
5685 default:
5686 poll_next = 0;
5687 BUG();
5690 return poll_next;
5692 static void ata_pio_task(struct work_struct *work)
5694 struct ata_port *ap =
5695 container_of(work, struct ata_port, port_task.work);
5696 struct ata_queued_cmd *qc = ap->port_task_data;
5697 u8 status;
5698 int poll_next;
5700 fsm_start:
5701 WARN_ON(ap->hsm_task_state == HSM_ST_IDLE);
5704 * This is purely heuristic. This is a fast path.
5705 * Sometimes when we enter, BSY will be cleared in
5706 * a chk-status or two. If not, the drive is probably seeking
5707 * or something. Snooze for a couple msecs, then
5708 * chk-status again. If still busy, queue delayed work.
5710 status = ata_busy_wait(ap, ATA_BUSY, 5);
5711 if (status & ATA_BUSY) {
5712 msleep(2);
5713 status = ata_busy_wait(ap, ATA_BUSY, 10);
5714 if (status & ATA_BUSY) {
5715 ata_pio_queue_task(ap, qc, ATA_SHORT_PAUSE);
5716 return;
5721 poll_next = ata_hsm_move(ap, qc, status, 1);
5723 /* another command or interrupt handler
5724 * may be running at this point.
5725 */
5726 if (poll_next)
5727 goto fsm_start;
5731 * ata_qc_new - Request an available ATA command, for queueing
5732 * @ap: Port with which the command will be associated
5739 static struct ata_queued_cmd *ata_qc_new(struct ata_port *ap)
5741 struct ata_queued_cmd *qc = NULL;
5742 unsigned int i;
5744 /* no command while frozen */
5745 if (unlikely(ap->pflags & ATA_PFLAG_FROZEN))
5746 return NULL;
5748 /* the last tag is reserved for internal command. */
5749 for (i = 0; i < ATA_MAX_QUEUE - 1; i++)
5750 if (!test_and_set_bit(i, &ap->qc_allocated)) {
5751 qc = __ata_qc_from_tag(ap, i);
5752 break;
5753 }
5755 if (qc)
5756 qc->tag = i;
5758 return qc;
5762 * ata_qc_new_init - Request an available ATA command, and initialize it
5763 * @dev: Device from whom we request an available command structure
5769 struct ata_queued_cmd *ata_qc_new_init(struct ata_device *dev)
5771 struct ata_port *ap = dev->link->ap;
5772 struct ata_queued_cmd *qc;
5774 qc = ata_qc_new(ap);
5775 if (qc) {
5776 qc->scsicmd = NULL;
5777 qc->ap = ap;
5778 qc->dev = dev;
5780 ata_qc_reinit(qc);
5781 }
5783 return qc;
5787 * ata_qc_free - free unused ata_queued_cmd
5788 * @qc: Command to complete
5790 * Designed to free unused ata_queued_cmd object
5791 * in case something prevents using it.
5794 * spin_lock_irqsave(host lock)
5796 void ata_qc_free(struct ata_queued_cmd *qc)
5798 struct ata_port *ap = qc->ap;
5799 unsigned int tag;
5801 WARN_ON(qc == NULL); /* ata_qc_from_tag _might_ return NULL */
5803 qc->flags = 0;
5804 tag = qc->tag;
5805 if (likely(ata_tag_valid(tag))) {
5806 qc->tag = ATA_TAG_POISON;
5807 clear_bit(tag, &ap->qc_allocated);
5811 void __ata_qc_complete(struct ata_queued_cmd *qc)
5813 struct ata_port *ap = qc->ap;
5814 struct ata_link *link = qc->dev->link;
5816 WARN_ON(qc == NULL); /* ata_qc_from_tag _might_ return NULL */
5817 WARN_ON(!(qc->flags & ATA_QCFLAG_ACTIVE));
5819 if (likely(qc->flags & ATA_QCFLAG_DMAMAP))
5820 ata_sg_clean(qc);
5822 /* command should be marked inactive atomically with qc completion */
5823 if (qc->tf.protocol == ATA_PROT_NCQ) {
5824 link->sactive &= ~(1 << qc->tag);
5825 if (!link->sactive)
5826 ap->nr_active_links--;
5827 } else {
5828 link->active_tag = ATA_TAG_POISON;
5829 ap->nr_active_links--;
5832 /* clear exclusive status */
5833 if (unlikely(qc->flags & ATA_QCFLAG_CLEAR_EXCL &&
5834 ap->excl_link == link))
5835 ap->excl_link = NULL;
5837 /* atapi: mark qc as inactive to prevent the interrupt handler
5838 * from completing the command twice later, before the error handler
5839 * is called. (when rc != 0 and atapi request sense is needed)
5841 qc->flags &= ~ATA_QCFLAG_ACTIVE;
5842 ap->qc_active &= ~(1 << qc->tag);
5844 /* call completion callback */
5845 qc->complete_fn(qc);
5848 static void fill_result_tf(struct ata_queued_cmd *qc)
5850 struct ata_port *ap = qc->ap;
5852 qc->result_tf.flags = qc->tf.flags;
5853 ap->ops->tf_read(ap, &qc->result_tf);
5856 static void ata_verify_xfer(struct ata_queued_cmd *qc)
5858 struct ata_device *dev = qc->dev;
5860 if (ata_tag_internal(qc->tag))
5861 return;
5863 if (ata_is_nodata(qc->tf.protocol))
5864 return;
5866 if ((dev->mwdma_mask || dev->udma_mask) && ata_is_pio(qc->tf.protocol))
5867 return;
5869 dev->flags &= ~ATA_DFLAG_DUBIOUS_XFER;
5873 * ata_qc_complete - Complete an active ATA command
5874 * @qc: Command to complete
5877 * Indicate to the mid and upper layers that an ATA
5878 * command has completed, with either an ok or not-ok status.
5881 * spin_lock_irqsave(host lock)
5883 void ata_qc_complete(struct ata_queued_cmd *qc)
5885 struct ata_port *ap = qc->ap;
5887 /* XXX: New EH and old EH use different mechanisms to
5888 * synchronize EH with regular execution path.
5890 * In new EH, a failed qc is marked with ATA_QCFLAG_FAILED.
5891 * Normal execution path is responsible for not accessing a
5892 * failed qc. libata core enforces the rule by returning NULL
5893 * from ata_qc_from_tag() for failed qcs.
5895 * Old EH depends on ata_qc_complete() nullifying completion
5896 * requests if ATA_QCFLAG_EH_SCHEDULED is set. Old EH does
5897 * not synchronize with interrupt handler. Only PIO task is
5900 if (ap->ops->error_handler) {
5901 struct ata_device *dev = qc->dev;
5902 struct ata_eh_info *ehi = &dev->link->eh_info;
5904 WARN_ON(ap->pflags & ATA_PFLAG_FROZEN);
5906 if (unlikely(qc->err_mask))
5907 qc->flags |= ATA_QCFLAG_FAILED;
5909 if (unlikely(qc->flags & ATA_QCFLAG_FAILED)) {
5910 if (!ata_tag_internal(qc->tag)) {
5911 /* always fill result TF for failed qc */
5912 fill_result_tf(qc);
5913 ata_qc_schedule_eh(qc);
5914 return;
5918 /* read result TF if requested */
5919 if (qc->flags & ATA_QCFLAG_RESULT_TF)
5920 fill_result_tf(qc);
5922 /* Some commands need post-processing after successful
5923 * completion.
5924 */
5925 switch (qc->tf.command) {
5926 case ATA_CMD_SET_FEATURES:
5927 if (qc->tf.feature != SETFEATURES_WC_ON &&
5928 qc->tf.feature != SETFEATURES_WC_OFF)
5929 break;
5930 /* fall through */
5931 case ATA_CMD_INIT_DEV_PARAMS: /* CHS translation changed */
5932 case ATA_CMD_SET_MULTI: /* multi_count changed */
5933 /* revalidate device */
5934 ehi->dev_action[dev->devno] |= ATA_EH_REVALIDATE;
5935 ata_port_schedule_eh(ap);
5936 break;
5938 case ATA_CMD_SLEEP:
5939 dev->flags |= ATA_DFLAG_SLEEPING;
5940 break;
5943 if (unlikely(dev->flags & ATA_DFLAG_DUBIOUS_XFER))
5944 ata_verify_xfer(qc);
5946 __ata_qc_complete(qc);
5948 if (qc->flags & ATA_QCFLAG_EH_SCHEDULED)
5949 return;
5951 /* read result TF if failed or requested */
5952 if (qc->err_mask || qc->flags & ATA_QCFLAG_RESULT_TF)
5953 fill_result_tf(qc);
5955 __ata_qc_complete(qc);
5960 * ata_qc_complete_multiple - Complete multiple qcs successfully
5961 * @ap: port in question
5962 * @qc_active: new qc_active mask
5963 * @finish_qc: LLDD callback invoked before completing a qc
5965 * Complete in-flight commands. This function is meant to be
5966 * called from low-level driver's interrupt routine to complete
5967 * requests normally. ap->qc_active and @qc_active are compared
5968 * and commands are completed accordingly.
5971 * spin_lock_irqsave(host lock)
5974 * Number of completed commands on success, -errno otherwise.
5976 int ata_qc_complete_multiple(struct ata_port *ap, u32 qc_active,
5977 void (*finish_qc)(struct ata_queued_cmd *))
5979 int nr_done = 0;
5980 u32 done_mask;
5981 int i;
5983 done_mask = ap->qc_active ^ qc_active;
5985 if (unlikely(done_mask & qc_active)) {
5986 ata_port_printk(ap, KERN_ERR, "illegal qc_active transition "
5987 "(%08x->%08x)\n", ap->qc_active, qc_active);
5988 return -EINVAL;
5989 }
5991 for (i = 0; i < ATA_MAX_QUEUE; i++) {
5992 struct ata_queued_cmd *qc;
5994 if (!(done_mask & (1 << i)))
5995 continue;
5997 if ((qc = ata_qc_from_tag(ap, i))) {
5998 if (finish_qc)
5999 finish_qc(qc);
6000 ata_qc_complete(qc);
6001 nr_done++;
6002 }
6003 }
6005 return nr_done;
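/*
 * Added illustrative sketch: an NCQ LLD's interrupt handler derives
 * the new qc_active mask from its hardware and lets libata complete
 * whatever dropped out of it.  example_read_active_tags() is a
 * hypothetical stand-in for reading the controller's active-tag
 * register.
 */
static u32 __maybe_unused example_read_active_tags(struct ata_port *ap)
{
	return ap->qc_active;	/* stand-in for an MMIO read */
}

static void __maybe_unused example_complete_ncq(struct ata_port *ap)
{
	u32 qc_active = example_read_active_tags(ap);

	ata_qc_complete_multiple(ap, qc_active, NULL);
}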
6009 * ata_qc_issue - issue taskfile to device
6010 * @qc: command to issue to device
6012 * Prepare an ATA command for submission to the device.
6013 * This includes mapping the data into a DMA-able
6014 * area, filling in the S/G table, and finally
6015 * writing the taskfile to hardware, starting the command.
6018 * spin_lock_irqsave(host lock)
6020 void ata_qc_issue(struct ata_queued_cmd *qc)
6022 struct ata_port *ap = qc->ap;
6023 struct ata_link *link = qc->dev->link;
6024 u8 prot = qc->tf.protocol;
6026 /* Make sure only one non-NCQ command is outstanding. The
6027 * check is skipped for old EH because it reuses active qc to
6028 * request ATAPI sense.
6030 WARN_ON(ap->ops->error_handler && ata_tag_valid(link->active_tag));
6032 if (ata_is_ncq(prot)) {
6033 WARN_ON(link->sactive & (1 << qc->tag));
6035 if (!link->sactive)
6036 ap->nr_active_links++;
6037 link->sactive |= 1 << qc->tag;
6038 } else {
6039 WARN_ON(link->sactive);
6041 ap->nr_active_links++;
6042 link->active_tag = qc->tag;
6045 qc->flags |= ATA_QCFLAG_ACTIVE;
6046 ap->qc_active |= 1 << qc->tag;
6048 /* We guarantee to LLDs that they will have at least one
6049 * non-zero sg if the command is a data command.
6051 BUG_ON(ata_is_data(prot) && (!qc->sg || !qc->n_elem || !qc->nbytes));
6053 if (ata_is_dma(prot) || (ata_is_pio(prot) &&
6054 (ap->flags & ATA_FLAG_PIO_DMA)))
6055 if (ata_sg_setup(qc))
6056 goto sg_err;
6058 /* if device is sleeping, schedule softreset and abort the link */
6059 if (unlikely(qc->dev->flags & ATA_DFLAG_SLEEPING)) {
6060 link->eh_info.action |= ATA_EH_SOFTRESET;
6061 ata_ehi_push_desc(&link->eh_info, "waking up from sleep");
6062 ata_link_abort(link);
6063 return;
6066 ap->ops->qc_prep(qc);
6068 qc->err_mask |= ap->ops->qc_issue(qc);
6069 if (unlikely(qc->err_mask))
6070 goto err;
6071 return;
6073 sg_err:
6074 qc->err_mask |= AC_ERR_SYSTEM;
6075 err:
6076 ata_qc_complete(qc);
6080 * ata_qc_issue_prot - issue taskfile to device in proto-dependent manner
6081 * @qc: command to issue to device
6083 * Using various libata functions and hooks, this function
6084 * starts an ATA command. ATA commands are grouped into
6085 * classes called "protocols", and issuing each type of protocol
6086 * is slightly different.
6088 * May be used as the qc_issue() entry in ata_port_operations.
6091 * spin_lock_irqsave(host lock)
6094 * Zero on success, AC_ERR_* mask on failure
6097 unsigned int ata_qc_issue_prot(struct ata_queued_cmd *qc)
6099 struct ata_port *ap = qc->ap;
6101 /* Use polling pio if the LLD doesn't handle
6102 * interrupt driven pio and atapi CDB interrupt.
6104 if (ap->flags & ATA_FLAG_PIO_POLLING) {
6105 switch (qc->tf.protocol) {
6106 case ATA_PROT_PIO:
6107 case ATA_PROT_NODATA:
6108 case ATAPI_PROT_PIO:
6109 case ATAPI_PROT_NODATA:
6110 qc->tf.flags |= ATA_TFLAG_POLLING;
6111 break;
6112 case ATAPI_PROT_DMA:
6113 if (qc->dev->flags & ATA_DFLAG_CDB_INTR)
6114 /* see ata_dma_blacklisted() */
6115 BUG();
6116 break;
6122 /* select the device */
6123 ata_dev_select(ap, qc->dev->devno, 1, 0);
6125 /* start the command */
6126 switch (qc->tf.protocol) {
6127 case ATA_PROT_NODATA:
6128 if (qc->tf.flags & ATA_TFLAG_POLLING)
6129 ata_qc_set_polling(qc);
6131 ata_tf_to_host(ap, &qc->tf);
6132 ap->hsm_task_state = HSM_ST_LAST;
6134 if (qc->tf.flags & ATA_TFLAG_POLLING)
6135 ata_pio_queue_task(ap, qc, 0);
6137 break;
6139 case ATA_PROT_DMA:
6140 WARN_ON(qc->tf.flags & ATA_TFLAG_POLLING);
6142 ap->ops->tf_load(ap, &qc->tf); /* load tf registers */
6143 ap->ops->bmdma_setup(qc); /* set up bmdma */
6144 ap->ops->bmdma_start(qc); /* initiate bmdma */
6145 ap->hsm_task_state = HSM_ST_LAST;
6146 break;
6148 case ATA_PROT_PIO:
6149 if (qc->tf.flags & ATA_TFLAG_POLLING)
6150 ata_qc_set_polling(qc);
6152 ata_tf_to_host(ap, &qc->tf);
6154 if (qc->tf.flags & ATA_TFLAG_WRITE) {
6155 /* PIO data out protocol */
6156 ap->hsm_task_state = HSM_ST_FIRST;
6157 ata_pio_queue_task(ap, qc, 0);
6159 /* always send first data block using
6160 * the ata_pio_task() codepath.
6161 */
6162 } else {
6163 /* PIO data in protocol */
6164 ap->hsm_task_state = HSM_ST;
6166 if (qc->tf.flags & ATA_TFLAG_POLLING)
6167 ata_pio_queue_task(ap, qc, 0);
6169 /* if polling, ata_pio_task() handles the rest.
6170 * otherwise, interrupt handler takes over from here.
6171 */
6172 }
6174 break;
6176 case ATAPI_PROT_PIO:
6177 case ATAPI_PROT_NODATA:
6178 if (qc->tf.flags & ATA_TFLAG_POLLING)
6179 ata_qc_set_polling(qc);
6181 ata_tf_to_host(ap, &qc->tf);
6183 ap->hsm_task_state = HSM_ST_FIRST;
6185 /* send cdb by polling if no cdb interrupt */
6186 if ((!(qc->dev->flags & ATA_DFLAG_CDB_INTR)) ||
6187 (qc->tf.flags & ATA_TFLAG_POLLING))
6188 ata_pio_queue_task(ap, qc, 0);
6189 break;
6191 case ATAPI_PROT_DMA:
6192 WARN_ON(qc->tf.flags & ATA_TFLAG_POLLING);
6194 ap->ops->tf_load(ap, &qc->tf); /* load tf registers */
6195 ap->ops->bmdma_setup(qc); /* set up bmdma */
6196 ap->hsm_task_state = HSM_ST_FIRST;
6198 /* send cdb by polling if no cdb interrupt */
6199 if (!(qc->dev->flags & ATA_DFLAG_CDB_INTR))
6200 ata_pio_queue_task(ap, qc, 0);
6201 break;
6203 default:
6204 WARN_ON(1);
6205 return AC_ERR_SYSTEM;
6206 }
6208 return 0;
6212 * ata_host_intr - Handle host interrupt for given (port, task)
6213 * @ap: Port on which interrupt arrived (possibly...)
6214 * @qc: Taskfile currently active in engine
6216 * Handle host interrupt for given queued command. Currently,
6217 * only DMA interrupts are handled. All other commands are
6218 * handled via polling with interrupts disabled (nIEN bit).
6221 * spin_lock_irqsave(host lock)
6224 * One if interrupt was handled, zero if not (shared irq).
6227 inline unsigned int ata_host_intr(struct ata_port *ap,
6228 struct ata_queued_cmd *qc)
6230 struct ata_eh_info *ehi = &ap->link.eh_info;
6231 u8 status, host_stat = 0;
6233 VPRINTK("ata%u: protocol %d task_state %d\n",
6234 ap->print_id, qc->tf.protocol, ap->hsm_task_state);
6236 /* Check whether we are expecting interrupt in this state */
6237 switch (ap->hsm_task_state) {
6238 case HSM_ST_FIRST:
6239 /* Some pre-ATAPI-4 devices assert INTRQ
6240 * at this state when ready to receive CDB.
6243 /* Check the ATA_DFLAG_CDB_INTR flag is enough here.
6244 * The flag was turned on only for atapi devices. No
6245 * need to check ata_is_atapi(qc->tf.protocol) again.
6247 if (!(qc->dev->flags & ATA_DFLAG_CDB_INTR))
6248 goto idle_irq;
6249 break;
6250 case HSM_ST_LAST:
6251 if (qc->tf.protocol == ATA_PROT_DMA ||
6252 qc->tf.protocol == ATAPI_PROT_DMA) {
6253 /* check status of DMA engine */
6254 host_stat = ap->ops->bmdma_status(ap);
6255 VPRINTK("ata%u: host_stat 0x%X\n",
6256 ap->print_id, host_stat);
6258 /* if it's not our irq... */
6259 if (!(host_stat & ATA_DMA_INTR))
6260 goto idle_irq;
6262 /* before we do anything else, clear DMA-Start bit */
6263 ap->ops->bmdma_stop(qc);
6265 if (unlikely(host_stat & ATA_DMA_ERR)) {
6266 /* error when transferring data to/from memory */
6267 qc->err_mask |= AC_ERR_HOST_BUS;
6268 ap->hsm_task_state = HSM_ST_ERR;
6269 }
6270 }
6271 break;
6272 case HSM_ST:
6273 break;
6274 default:
6275 goto idle_irq;
6276 }
6278 /* check altstatus */
6279 status = ata_altstatus(ap);
6280 if (status & ATA_BUSY)
6281 goto idle_irq;
6283 /* check main status, clearing INTRQ */
6284 status = ata_chk_status(ap);
6285 if (unlikely(status & ATA_BUSY))
6286 goto idle_irq;
6288 /* ack bmdma irq events */
6289 ap->ops->irq_clear(ap);
6291 ata_hsm_move(ap, qc, status, 0);
6293 if (unlikely(qc->err_mask) && (qc->tf.protocol == ATA_PROT_DMA ||
6294 qc->tf.protocol == ATAPI_PROT_DMA))
6295 ata_ehi_push_desc(ehi, "BMDMA stat 0x%x", host_stat);
6297 return 1; /* irq handled */
6299 idle_irq:
6300 ap->stats.idle_irq++;
6302 #ifdef ATA_IRQ_TRAP
6303 if ((ap->stats.idle_irq % 1000) == 0) {
6304 ata_chk_status(ap);
6305 ap->ops->irq_clear(ap);
6306 ata_port_printk(ap, KERN_WARNING, "irq trap\n");
6307 return 1;
6308 }
6309 #endif
6310 return 0; /* irq not handled */
6314 * ata_interrupt - Default ATA host interrupt handler
6315 * @irq: irq line (unused)
6316 * @dev_instance: pointer to our ata_host information structure
6318 * Default interrupt handler for PCI IDE devices. Calls
6319 * ata_host_intr() for each port that is not disabled.
6322 * Obtains host lock during operation.
6325 * IRQ_NONE or IRQ_HANDLED.
6328 irqreturn_t ata_interrupt(int irq, void *dev_instance)
6330 struct ata_host *host = dev_instance;
6331 unsigned int i;
6332 unsigned int handled = 0;
6333 unsigned long flags;
6335 /* TODO: make _irqsave conditional on x86 PCI IDE legacy mode */
6336 spin_lock_irqsave(&host->lock, flags);
6338 for (i = 0; i < host->n_ports; i++) {
6339 struct ata_port *ap;
6341 ap = host->ports[i];
6342 if (ap &&
6343 !(ap->flags & ATA_FLAG_DISABLED)) {
6344 struct ata_queued_cmd *qc;
6346 qc = ata_qc_from_tag(ap, ap->link.active_tag);
6347 if (qc && (!(qc->tf.flags & ATA_TFLAG_POLLING)) &&
6348 (qc->flags & ATA_QCFLAG_ACTIVE))
6349 handled |= ata_host_intr(ap, qc);
6353 spin_unlock_irqrestore(&host->lock, flags);
6355 return IRQ_RETVAL(handled);
6359 * sata_scr_valid - test whether SCRs are accessible
6360 * @link: ATA link to test SCR accessibility for
6362 * Test whether SCRs are accessible for @link.
6368 * 1 if SCRs are accessible, 0 otherwise.
6370 int sata_scr_valid(struct ata_link *link)
6372 struct ata_port *ap = link->ap;
6374 return (ap->flags & ATA_FLAG_SATA) && ap->ops->scr_read;
6378 * sata_scr_read - read SCR register of the specified port
6379 * @link: ATA link to read SCR for
6380 * @reg: SCR to read
6381 * @val: Place to store read value
6383 * Read SCR register @reg of @link into *@val. This function is
6384 * guaranteed to succeed if @link is ap->link, the cable type of
6385 * the port is SATA and the port implements ->scr_read.
6388 * None if @link is ap->link. Kernel thread context otherwise.
6391 * 0 on success, negative errno on failure.
6393 int sata_scr_read(struct ata_link *link, int reg, u32 *val)
6395 if (ata_is_host_link(link)) {
6396 struct ata_port *ap = link->ap;
6398 if (sata_scr_valid(link))
6399 return ap->ops->scr_read(ap, reg, val);
6400 return -EOPNOTSUPP;
6403 return sata_pmp_scr_read(link, reg, val);
6407 * sata_scr_write - write SCR register of the specified port
6408 * @link: ATA link to write SCR for
6409 * @reg: SCR to write
6410 * @val: value to write
6412 * Write @val to SCR register @reg of @link. This function is
6413 * guaranteed to succeed if @link is ap->link, the cable type of
6414 * the port is SATA and the port implements ->scr_write.
6417 * None if @link is ap->link. Kernel thread context otherwise.
6420 * 0 on success, negative errno on failure.
6422 int sata_scr_write(struct ata_link *link, int reg, u32 val)
6424 if (ata_is_host_link(link)) {
6425 struct ata_port *ap = link->ap;
6427 if (sata_scr_valid(link))
6428 return ap->ops->scr_write(ap, reg, val);
6429 return -EOPNOTSUPP;
6432 return sata_pmp_scr_write(link, reg, val);
6436 * sata_scr_write_flush - write SCR register of the specified port and flush
6437 * @link: ATA link to write SCR for
6438 * @reg: SCR to write
6439 * @val: value to write
6441 * This function is identical to sata_scr_write() except that this
6442 * function performs flush after writing to the register.
6445 * None if @link is ap->link. Kernel thread context otherwise.
6448 * 0 on success, negative errno on failure.
6450 int sata_scr_write_flush(struct ata_link *link, int reg, u32 val)
6452 if (ata_is_host_link(link)) {
6453 struct ata_port *ap = link->ap;
6454 int rc;
6456 if (sata_scr_valid(link)) {
6457 rc = ap->ops->scr_write(ap, reg, val);
6458 if (rc == 0)
6459 rc = ap->ops->scr_read(ap, reg, &val);
6460 return rc;
6461 }
6462 return -EOPNOTSUPP;
6465 return sata_pmp_scr_write(link, reg, val);
6469 * ata_link_online - test whether the given link is online
6470 * @link: ATA link to test
6472 * Test whether @link is online. Note that this function returns
6473 * 0 if online status of @link cannot be obtained, so
6474 * ata_link_online(link) != !ata_link_offline(link).
6480 * 1 if the port online status is available and online.
6482 int ata_link_online(struct ata_link *link)
6484 u32 sstatus;
6486 if (sata_scr_read(link, SCR_STATUS, &sstatus) == 0 &&
6487 (sstatus & 0xf) == 0x3)
6488 return 1;
6489 return 0;
6493 * ata_link_offline - test whether the given link is offline
6494 * @link: ATA link to test
6496 * Test whether @link is offline. Note that this function
6497 * returns 0 if offline status of @link cannot be obtained, so
6498 * ata_link_online(link) != !ata_link_offline(link).
6504 * 1 if the port offline status is available and offline.
6506 int ata_link_offline(struct ata_link *link)
6508 u32 sstatus;
6510 if (sata_scr_read(link, SCR_STATUS, &sstatus) == 0 &&
6511 (sstatus & 0xf) != 0x3)
6512 return 1;
6513 return 0;
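/*
 * Added illustrative sketch: reading the negotiated link speed out of
 * SStatus with the SCR helpers above.  SPD occupies bits 7:4;
 * 1 = 1.5 Gbps, 2 = 3.0 Gbps.
 */
static int __maybe_unused example_link_spd(struct ata_link *link)
{
	u32 sstatus;

	if (sata_scr_read(link, SCR_STATUS, &sstatus))
		return 0;	/* SCRs inaccessible */
	return (sstatus >> 4) & 0xf;
}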
6516 int ata_flush_cache(struct ata_device *dev)
6518 unsigned int err_mask;
6519 u8 cmd;
6521 if (!ata_try_flush_cache(dev))
6522 return 0;
6524 if (dev->flags & ATA_DFLAG_FLUSH_EXT)
6525 cmd = ATA_CMD_FLUSH_EXT;
6526 else
6527 cmd = ATA_CMD_FLUSH;
6529 /* This is wrong. On a failed flush we get back the LBA of the lost
6530 sector and we should (assuming it wasn't aborted as unknown) issue
6531 a further flush command to continue the writeback until it
6532 does not error */
6533 err_mask = ata_do_simple_cmd(dev, cmd);
6534 if (err_mask) {
6535 ata_dev_printk(dev, KERN_ERR, "failed to flush cache\n");
6536 return -EIO;
6537 }
6539 return 0;
6543 static int ata_host_request_pm(struct ata_host *host, pm_message_t mesg,
6544 unsigned int action, unsigned int ehi_flags,
6547 unsigned long flags;
6548 int i, rc;
6550 for (i = 0; i < host->n_ports; i++) {
6551 struct ata_port *ap = host->ports[i];
6552 struct ata_link *link;
6554 /* Previous resume operation might still be in
6555 * progress. Wait for PM_PENDING to clear.
6557 if (ap->pflags & ATA_PFLAG_PM_PENDING) {
6558 ata_port_wait_eh(ap);
6559 WARN_ON(ap->pflags & ATA_PFLAG_PM_PENDING);
6562 /* request PM ops to EH */
6563 spin_lock_irqsave(ap->lock, flags);
6565 ap->pm_mesg = mesg;
6566 if (wait) {
6567 rc = 0;
6568 ap->pm_result = &rc;
6569 }
6571 ap->pflags |= ATA_PFLAG_PM_PENDING;
6572 __ata_port_for_each_link(link, ap) {
6573 link->eh_info.action |= action;
6574 link->eh_info.flags |= ehi_flags;
6577 ata_port_schedule_eh(ap);
6579 spin_unlock_irqrestore(ap->lock, flags);
6581 /* wait and check result */
6582 if (wait) {
6583 ata_port_wait_eh(ap);
6584 WARN_ON(ap->pflags & ATA_PFLAG_PM_PENDING);
6585 if (rc)
6586 return rc;
6587 }
6588 }
6590 return 0;
6594 * ata_host_suspend - suspend host
6595 * @host: host to suspend
6596 * @mesg: PM message
6598 * Suspend @host. Actual operation is performed by EH. This
6599 * function requests EH to perform PM operations and waits for EH
6600 * to finish.
6603 * Kernel thread context (may sleep).
6606 * 0 on success, -errno on failure.
6608 int ata_host_suspend(struct ata_host *host, pm_message_t mesg)
6610 int rc;
6613 * disable link pm on all ports before requesting
6616 ata_lpm_enable(host);
6618 rc = ata_host_request_pm(host, mesg, 0, ATA_EHI_QUIET, 1);
6619 if (rc == 0)
6620 host->dev->power.power_state = mesg;
6621 return rc;
6625 * ata_host_resume - resume host
6626 * @host: host to resume
6628 * Resume @host. Actual operation is performed by EH. This
6629 * function requests EH to perform PM operations and returns.
6630 * Note that all resume operations are performed in parallel.
6633 * Kernel thread context (may sleep).
6635 void ata_host_resume(struct ata_host *host)
6637 ata_host_request_pm(host, PMSG_ON, ATA_EH_SOFTRESET,
6638 ATA_EHI_NO_AUTOPSY | ATA_EHI_QUIET, 0);
6639 host->dev->power.power_state = PMSG_ON;
6641 /* reenable link pm */
6642 ata_lpm_disable(host);
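/*
 * Added illustrative sketch: a PCI LLD's suspend hook normally
 * delegates to the helpers above, quiescing through EH first and only
 * then powering the PCI device down (compare ata_pci_device_suspend()).
 */
static int __maybe_unused example_pci_suspend(struct pci_dev *pdev,
					      pm_message_t mesg)
{
	struct ata_host *host = dev_get_drvdata(&pdev->dev);
	int rc;

	rc = ata_host_suspend(host, mesg);
	if (rc)
		return rc;

	ata_pci_device_do_suspend(pdev, mesg);
	return 0;
}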
6647 * ata_port_start - Set port up for dma.
6648 * @ap: Port to initialize
6650 * Called just after data structures for each port are
6651 * initialized. Allocates space for PRD table.
6653 * May be used as the port_start() entry in ata_port_operations.
6656 * Inherited from caller.
6658 int ata_port_start(struct ata_port *ap)
6660 struct device *dev = ap->dev;
6662 ap->prd = dmam_alloc_coherent(dev, ATA_PRD_TBL_SZ, &ap->prd_dma,
6663 GFP_KERNEL);
6664 if (!ap->prd)
6665 return -ENOMEM;
6667 return 0;
6671 * ata_dev_init - Initialize an ata_device structure
6672 * @dev: Device structure to initialize
6674 * Initialize @dev in preparation for probing.
6677 * Inherited from caller.
6679 void ata_dev_init(struct ata_device *dev)
6681 struct ata_link *link = dev->link;
6682 struct ata_port *ap = link->ap;
6683 unsigned long flags;
6685 /* SATA spd limit is bound to the first device */
6686 link->sata_spd_limit = link->hw_sata_spd_limit;
6689 /* High bits of dev->flags are used to record warm plug
6690 * requests which occur asynchronously. Synchronize using
6693 spin_lock_irqsave(ap->lock, flags);
6694 dev->flags &= ~ATA_DFLAG_INIT_MASK;
6695 dev->horkage = 0;
6696 spin_unlock_irqrestore(ap->lock, flags);
6698 memset((void *)dev + ATA_DEVICE_CLEAR_OFFSET, 0,
6699 sizeof(*dev) - ATA_DEVICE_CLEAR_OFFSET);
6700 dev->pio_mask = UINT_MAX;
6701 dev->mwdma_mask = UINT_MAX;
6702 dev->udma_mask = UINT_MAX;
6706 * ata_link_init - Initialize an ata_link structure
6707 * @ap: ATA port link is attached to
6708 * @link: Link structure to initialize
6709 * @pmp: Port multiplier port number
6714 * Kernel thread context (may sleep)
6716 void ata_link_init(struct ata_port *ap, struct ata_link *link, int pmp)
6720 /* clear everything except for devices */
6721 memset(link, 0, offsetof(struct ata_link, device[0]));
6723 link->ap = ap;
6724 link->pmp = pmp;
6725 link->active_tag = ATA_TAG_POISON;
6726 link->hw_sata_spd_limit = UINT_MAX;
6728 /* can't use iterator, ap isn't initialized yet */
6729 for (i = 0; i < ATA_MAX_DEVICES; i++) {
6730 struct ata_device *dev = &link->device[i];
6732 dev->link = link;
6733 dev->devno = dev - link->device;
6734 ata_dev_init(dev);
6739 * sata_link_init_spd - Initialize link->sata_spd_limit
6740 * @link: Link to configure sata_spd_limit for
6742 * Initialize @link->[hw_]sata_spd_limit to the currently
6743 * configured value.
6746 * Kernel thread context (may sleep).
6749 * 0 on success, -errno on failure.
6751 int sata_link_init_spd(struct ata_link *link)
6757 rc = sata_scr_read(link, SCR_CONTROL, &scontrol);
6758 if (rc)
6759 return rc;
6761 spd = (scontrol >> 4) & 0xf;
6762 if (spd)
6763 link->hw_sata_spd_limit &= (1 << spd) - 1;
6765 ata_force_spd_limit(link);
6767 link->sata_spd_limit = link->hw_sata_spd_limit;
6769 return 0;
6773 * ata_port_alloc - allocate and initialize basic ATA port resources
6774 * @host: ATA host this allocated port belongs to
6776 * Allocate and initialize basic ATA port resources.
6779 * Allocated ATA port on success, NULL on failure.
6782 * Inherited from calling layer (may sleep).
6784 struct ata_port *ata_port_alloc(struct ata_host *host)
6786 struct ata_port *ap;
6790 ap = kzalloc(sizeof(*ap), GFP_KERNEL);
6791 if (!ap)
6792 return NULL;
6794 ap->pflags |= ATA_PFLAG_INITIALIZING;
6795 ap->lock = &host->lock;
6796 ap->flags = ATA_FLAG_DISABLED;
6797 ap->print_id = -1;
6798 ap->ctl = ATA_DEVCTL_OBS;
6799 ap->host = host;
6800 ap->dev = host->dev;
6801 ap->last_ctl = 0xFF;
6803 #if defined(ATA_VERBOSE_DEBUG)
6804 /* turn on all debugging levels */
6805 ap->msg_enable = 0x00FF;
6806 #elif defined(ATA_DEBUG)
6807 ap->msg_enable = ATA_MSG_DRV | ATA_MSG_INFO | ATA_MSG_CTL | ATA_MSG_WARN | ATA_MSG_ERR;
6809 ap->msg_enable = ATA_MSG_DRV | ATA_MSG_ERR | ATA_MSG_WARN;
6812 INIT_DELAYED_WORK(&ap->port_task, ata_pio_task);
6813 INIT_DELAYED_WORK(&ap->hotplug_task, ata_scsi_hotplug);
6814 INIT_WORK(&ap->scsi_rescan_task, ata_scsi_dev_rescan);
6815 INIT_LIST_HEAD(&ap->eh_done_q);
6816 init_waitqueue_head(&ap->eh_wait_q);
6817 init_timer_deferrable(&ap->fastdrain_timer);
6818 ap->fastdrain_timer.function = ata_eh_fastdrain_timerfn;
6819 ap->fastdrain_timer.data = (unsigned long)ap;
6821 ap->cbl = ATA_CBL_NONE;
6823 ata_link_init(ap, &ap->link, 0);
6825 #ifdef ATA_IRQ_TRAP
6826 ap->stats.unhandled_irq = 1;
6827 ap->stats.idle_irq = 1;
6828 #endif
6830 return ap;
6832 static void ata_host_release(struct device *gendev, void *res)
6834 struct ata_host *host = dev_get_drvdata(gendev);
6835 int i;
6837 for (i = 0; i < host->n_ports; i++) {
6838 struct ata_port *ap = host->ports[i];
6840 if (!ap)
6841 continue;
6843 if (ap->scsi_host)
6844 scsi_host_put(ap->scsi_host);
6846 kfree(ap->pmp_link);
6847 kfree(ap);
6848 host->ports[i] = NULL;
6851 dev_set_drvdata(gendev, NULL);
6855 * ata_host_alloc - allocate and init basic ATA host resources
6856 * @dev: generic device this host is associated with
6857 * @max_ports: maximum number of ATA ports associated with this host
6859 * Allocate and initialize basic ATA host resources. LLD calls
6860 * this function to allocate a host, initializes it fully and
6861 * attaches it using ata_host_register().
6863 * @max_ports ports are allocated and host->n_ports is
6864 * initialized to @max_ports. The caller is allowed to decrease
6865 * host->n_ports before calling ata_host_register(). The unused
6866 * ports will be automatically freed on registration.
6869 * Allocated ATA host on success, NULL on failure.
6872 * Inherited from calling layer (may sleep).
6874 struct ata_host *ata_host_alloc(struct device *dev, int max_ports)
6876 struct ata_host *host;
6877 size_t sz;
6878 int i;
6882 if (!devres_open_group(dev, NULL, GFP_KERNEL))
6883 return NULL;
6885 /* alloc a container for our list of ATA ports (buses) */
6886 sz = sizeof(struct ata_host) + (max_ports + 1) * sizeof(void *);
6888 host = devres_alloc(ata_host_release, sz, GFP_KERNEL);
6889 if (!host)
6890 goto err_out;
6892 devres_add(dev, host);
6893 dev_set_drvdata(dev, host);
6895 spin_lock_init(&host->lock);
6896 host->dev = dev;
6897 host->n_ports = max_ports;
6899 /* allocate ports bound to this host */
6900 for (i = 0; i < max_ports; i++) {
6901 struct ata_port *ap;
6903 ap = ata_port_alloc(host);
6904 if (!ap)
6905 goto err_out;
6907 ap->port_no = i;
6908 host->ports[i] = ap;
6911 devres_remove_group(dev, NULL);
6912 return host;
6914 err_out:
6915 devres_release_group(dev, NULL);
6916 return NULL;
6920 * ata_host_alloc_pinfo - alloc host and init with port_info array
6921 * @dev: generic device this host is associated with
6922 * @ppi: array of ATA port_info to initialize host with
6923 * @n_ports: number of ATA ports attached to this host
6925 * Allocate ATA host and initialize with info from @ppi. If NULL
6926 * terminated, @ppi may contain fewer entries than @n_ports. The
6927 * last entry will be used for the remaining ports.
6930 * Allocated ATA host on success, NULL on failure.
6933 * Inherited from calling layer (may sleep).
6935 struct ata_host *ata_host_alloc_pinfo(struct device *dev,
6936 const struct ata_port_info * const * ppi,
6939 const struct ata_port_info *pi;
6940 struct ata_host *host;
6941 int i, j;
6943 host = ata_host_alloc(dev, n_ports);
6944 if (!host)
6945 return NULL;
6947 for (i = 0, j = 0, pi = NULL; i < host->n_ports; i++) {
6948 struct ata_port *ap = host->ports[i];
6950 if (ppi[j])
6951 pi = ppi[j++];
6953 ap->pio_mask = pi->pio_mask;
6954 ap->mwdma_mask = pi->mwdma_mask;
6955 ap->udma_mask = pi->udma_mask;
6956 ap->flags |= pi->flags;
6957 ap->link.flags |= pi->link_flags;
6958 ap->ops = pi->port_ops;
6960 if (!host->ops && (pi->port_ops != &ata_dummy_port_ops))
6961 host->ops = pi->port_ops;
6962 if (!host->private_data && pi->private_data)
6963 host->private_data = pi->private_data;
6966 return host;
6969 static void ata_host_stop(struct device *gendev, void *res)
6971 struct ata_host *host = dev_get_drvdata(gendev);
6974 WARN_ON(!(host->flags & ATA_HOST_STARTED));
6976 for (i = 0; i < host->n_ports; i++) {
6977 struct ata_port *ap = host->ports[i];
6979 if (ap->ops->port_stop)
6980 ap->ops->port_stop(ap);
6983 if (host->ops->host_stop)
6984 host->ops->host_stop(host);
6988 * ata_host_start - start and freeze ports of an ATA host
6989 * @host: ATA host to start ports for
6991 * Start and then freeze ports of @host. Started status is
6992 * recorded in host->flags, so this function can be called
6993 * multiple times. Ports are guaranteed to get started only
6994 * once. If host->ops isn't initialized yet, it's set to the
6995 * first non-dummy port ops.
6998 * Inherited from calling layer (may sleep).
7001 * 0 if all ports are started successfully, -errno otherwise.
7003 int ata_host_start(struct ata_host *host)
7005 int have_stop = 0;
7006 void *start_dr = NULL;
7007 int i, rc;
7009 if (host->flags & ATA_HOST_STARTED)
7010 return 0;
7012 for (i = 0; i < host->n_ports; i++) {
7013 struct ata_port *ap = host->ports[i];
7015 if (!host->ops && !ata_port_is_dummy(ap))
7016 host->ops = ap->ops;
7018 if (ap->ops->port_stop)
7019 have_stop = 1;
7020 }
7022 if (host->ops->host_stop)
7023 have_stop = 1;
7025 if (have_stop) {
7026 start_dr = devres_alloc(ata_host_stop, 0, GFP_KERNEL);
7027 if (!start_dr)
7028 return -ENOMEM;
7029 }
7031 for (i = 0; i < host->n_ports; i++) {
7032 struct ata_port *ap = host->ports[i];
7034 if (ap->ops->port_start) {
7035 rc = ap->ops->port_start(ap);
7036 if (rc) {
7037 if (rc != -ENODEV)
7038 dev_printk(KERN_ERR, host->dev,
7039 "failed to start port %d "
7040 "(errno=%d)\n", i, rc);
7044 ata_eh_freeze_port(ap);
7047 if (start_dr)
7048 devres_add(host->dev, start_dr);
7049 host->flags |= ATA_HOST_STARTED;
7050 return 0;
7052 err_out:
7053 while (--i >= 0) {
7054 struct ata_port *ap = host->ports[i];
7056 if (ap->ops->port_stop)
7057 ap->ops->port_stop(ap);
7059 devres_free(start_dr);
7060 return rc;
7064 * ata_sas_host_init - Initialize a host struct
7065 * @host: host to initialize
7066 * @dev: device host is attached to
7067 * @flags: host flags
7071 * PCI/etc. bus probe sem.
7074 /* KILLME - the only user left is ipr */
7075 void ata_host_init(struct ata_host *host, struct device *dev,
7076 unsigned long flags, const struct ata_port_operations *ops)
7078 spin_lock_init(&host->lock);
7079 host->dev = dev;
7080 host->flags = flags;
7081 host->ops = ops;
7085 * ata_host_register - register initialized ATA host
7086 * @host: ATA host to register
7087 * @sht: template for SCSI host
7089 * Register initialized ATA host. @host is allocated using
7090 * ata_host_alloc() and fully initialized by LLD. This function
7091 * starts ports, registers @host with ATA and SCSI layers and
7092 * probe registered devices.
7095 * Inherited from calling layer (may sleep).
7098 * 0 on success, -errno otherwise.
7100 int ata_host_register(struct ata_host *host, struct scsi_host_template *sht)
7104 /* host must have been started */
7105 if (!(host->flags & ATA_HOST_STARTED)) {
7106 dev_printk(KERN_ERR, host->dev,
7107 "BUG: trying to register unstarted host\n");
7112 /* Blow away unused ports. This happens when LLD can't
7113 * determine the exact number of ports to allocate at
7114 * allocation time.
7116 for (i = host->n_ports; host->ports[i]; i++)
7117 kfree(host->ports[i]);
7119 /* give ports names and add SCSI hosts */
7120 for (i = 0; i < host->n_ports; i++)
7121 host->ports[i]->print_id = ata_print_id++;
7123 rc = ata_scsi_add_hosts(host, sht);
7124 if (rc)
7125 return rc;
7127 /* associate with ACPI nodes */
7128 ata_acpi_associate(host);
7130 /* set cable, sata_spd_limit and report */
7131 for (i = 0; i < host->n_ports; i++) {
7132 struct ata_port *ap = host->ports[i];
7133 unsigned long xfer_mask;
7135 /* set SATA cable type if still unset */
7136 if (ap->cbl == ATA_CBL_NONE && (ap->flags & ATA_FLAG_SATA))
7137 ap->cbl = ATA_CBL_SATA;
7139 /* init sata_spd_limit to the current value */
7140 sata_link_init_spd(&ap->link);
7142 /* print per-port info to dmesg */
7143 xfer_mask = ata_pack_xfermask(ap->pio_mask, ap->mwdma_mask,
7144 ap->udma_mask);
7146 if (!ata_port_is_dummy(ap)) {
7147 ata_port_printk(ap, KERN_INFO,
7148 "%cATA max %s %s\n",
7149 (ap->flags & ATA_FLAG_SATA) ? 'S' : 'P',
7150 ata_mode_string(xfer_mask),
7151 ap->link.eh_info.desc);
7152 ata_ehi_clear_desc(&ap->link.eh_info);
7154 ata_port_printk(ap, KERN_INFO, "DUMMY\n");
7157 /* perform each probe synchronously */
7158 DPRINTK("probe begin\n");
7159 for (i = 0; i < host->n_ports; i++) {
7160 struct ata_port *ap = host->ports[i];
7163 if (ap->ops->error_handler) {
7164 struct ata_eh_info *ehi = &ap->link.eh_info;
7165 unsigned long flags;
7169 /* kick EH for boot probing */
7170 spin_lock_irqsave(ap->lock, flags);
7172 ehi->probe_mask =
7173 (1 << ata_link_max_devices(&ap->link)) - 1;
7174 ehi->action |= ATA_EH_SOFTRESET;
7175 ehi->flags |= ATA_EHI_NO_AUTOPSY | ATA_EHI_QUIET;
7177 ap->pflags &= ~ATA_PFLAG_INITIALIZING;
7178 ap->pflags |= ATA_PFLAG_LOADING;
7179 ata_port_schedule_eh(ap);
7181 spin_unlock_irqrestore(ap->lock, flags);
7183 /* wait for EH to finish */
7184 ata_port_wait_eh(ap);
7186 DPRINTK("ata%u: bus probe begin\n", ap->print_id);
7187 rc = ata_bus_probe(ap);
7188 DPRINTK("ata%u: bus probe end\n", ap->print_id);
7190 if (rc) {
7191 /* FIXME: do something useful here?
7192 * Current libata behavior will
7193 * tear down everything when
7194 * the module is removed
7195 * or the h/w is unplugged.
7201 /* probes are done, now scan each port's disk(s) */
7202 DPRINTK("host probe begin\n");
7203 for (i = 0; i < host->n_ports; i++) {
7204 struct ata_port *ap = host->ports[i];
7206 ata_scsi_scan_host(ap, 1);
7207 ata_lpm_schedule(ap, ap->pm_policy);
7210 return 0;
7214 * ata_host_activate - start host, request IRQ and register it
7215 * @host: target ATA host
7216 * @irq: IRQ to request
7217 * @irq_handler: irq_handler used when requesting IRQ
7218 * @irq_flags: irq_flags used when requesting IRQ
7219 * @sht: scsi_host_template to use when registering the host
7221 * After allocating an ATA host and initializing it, most libata
7222 * LLDs perform three steps to activate the host - start host,
7223 * request IRQ and register it. This helper takes necessary
7224 * arguments and performs the three steps in one go.
7226 * An invalid IRQ skips the IRQ registration and expects the host to
7227 * have set polling mode on the port. In this case, @irq_handler
7228 * should be NULL.
7231 * Inherited from calling layer (may sleep).
7234 * 0 on success, -errno otherwise.
7236 int ata_host_activate(struct ata_host *host, int irq,
7237 irq_handler_t irq_handler, unsigned long irq_flags,
7238 struct scsi_host_template *sht)
7242 rc = ata_host_start(host);
7243 if (rc)
7244 return rc;
7246 /* Special case for polling mode */
7247 if (!irq) {
7248 WARN_ON(irq_handler);
7249 return ata_host_register(host, sht);
7250 }
7252 rc = devm_request_irq(host->dev, irq, irq_handler, irq_flags,
7253 dev_driver_string(host->dev), host);
7254 if (rc)
7255 return rc;
7257 for (i = 0; i < host->n_ports; i++)
7258 ata_port_desc(host->ports[i], "irq %d", irq);
7260 rc = ata_host_register(host, sht);
7261 /* if failed, just free the IRQ and leave ports alone */
7262 if (rc)
7263 devm_free_irq(host->dev, irq, host);
7265 return rc;
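/*
 * Added illustrative sketch of the usual LLD probe sequence built on
 * the helpers above: allocate from port_info, set up resources, then
 * activate.  "example_sht" and "example_port_info" are hypothetical
 * zero-filled placeholders; a real driver provides both.
 */
static struct scsi_host_template example_sht;		/* hypothetical */
static struct ata_port_info example_port_info;		/* hypothetical */

static int __maybe_unused example_probe(struct pci_dev *pdev)
{
	const struct ata_port_info *ppi[] = { &example_port_info, NULL };
	struct ata_host *host;

	host = ata_host_alloc_pinfo(&pdev->dev, ppi, 2);
	if (!host)
		return -ENOMEM;

	/* ... map BARs and fill each host->ports[i]->ioaddr here ... */

	return ata_host_activate(host, pdev->irq, ata_interrupt,
				 IRQF_SHARED, &example_sht);
}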
7269 * ata_port_detach - Detach ATA port in preparation for device removal
7270 * @ap: ATA port to be detached
7272 * Detach all ATA devices and the associated SCSI devices of @ap;
7273 * then, remove the associated SCSI host. @ap is guaranteed to
7274 * be quiescent on return from this function.
7277 * Kernel thread context (may sleep).
7279 static void ata_port_detach(struct ata_port *ap)
7281 unsigned long flags;
7282 struct ata_link *link;
7283 struct ata_device *dev;
7285 if (!ap->ops->error_handler)
7288 /* tell EH we're leaving & flush EH */
7289 spin_lock_irqsave(ap->lock, flags);
7290 ap->pflags |= ATA_PFLAG_UNLOADING;
7291 spin_unlock_irqrestore(ap->lock, flags);
7293 ata_port_wait_eh(ap);
7295 /* EH is now guaranteed to see UNLOADING - EH context belongs
7296 * to us. Disable all existing devices.
7298 ata_port_for_each_link(link, ap) {
7299 ata_link_for_each_dev(dev, link)
7300 ata_dev_disable(dev);
7303 /* Final freeze & EH. All in-flight commands are aborted. EH
7304 * will be skipped and retrials will be terminated with bad
7307 spin_lock_irqsave(ap->lock, flags);
7308 ata_port_freeze(ap); /* won't be thawed */
7309 spin_unlock_irqrestore(ap->lock, flags);
7311 ata_port_wait_eh(ap);
7312 cancel_rearming_delayed_work(&ap->hotplug_task);
7315 /* remove the associated SCSI host */
7316 scsi_remove_host(ap->scsi_host);
/**
 *	ata_host_detach - Detach all ports of an ATA host
 *	@host: Host to detach
 *
 *	Detach all ports of @host.
 *
 *	LOCKING:
 *	Kernel thread context (may sleep).
 */
void ata_host_detach(struct ata_host *host)
{
	int i;

	for (i = 0; i < host->n_ports; i++)
		ata_port_detach(host->ports[i]);

	/* the host is dead now, dissociate ACPI */
	ata_acpi_dissociate(host);
}
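
/*
 * Illustrative sketch (not part of libata, kept out of the build):
 * PCI drivers get this wrapped by ata_pci_remove_one() below; a
 * hypothetical platform driver would detach the host the same way
 * from its own ->remove() callback.
 */
#if 0
static int foo_platform_remove(struct platform_device *pdev)
{
	struct ata_host *host = dev_get_drvdata(&pdev->dev);

	ata_host_detach(host);	/* quiesces and unregisters all ports */
	return 0;
}
#endif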
/**
 *	ata_std_ports - initialize ioaddr with standard port offsets.
 *	@ioaddr: IO address structure to be initialized
 *
 *	Utility function which initializes data_addr, error_addr,
 *	feature_addr, nsect_addr, lbal_addr, lbam_addr, lbah_addr,
 *	device_addr, status_addr, and command_addr to standard offsets
 *	relative to cmd_addr.
 *
 *	Does not set ctl_addr, altstatus_addr, bmdma_addr, or scr_addr.
 */
void ata_std_ports(struct ata_ioports *ioaddr)
{
	ioaddr->data_addr = ioaddr->cmd_addr + ATA_REG_DATA;
	ioaddr->error_addr = ioaddr->cmd_addr + ATA_REG_ERR;
	ioaddr->feature_addr = ioaddr->cmd_addr + ATA_REG_FEATURE;
	ioaddr->nsect_addr = ioaddr->cmd_addr + ATA_REG_NSECT;
	ioaddr->lbal_addr = ioaddr->cmd_addr + ATA_REG_LBAL;
	ioaddr->lbam_addr = ioaddr->cmd_addr + ATA_REG_LBAM;
	ioaddr->lbah_addr = ioaddr->cmd_addr + ATA_REG_LBAH;
	ioaddr->device_addr = ioaddr->cmd_addr + ATA_REG_DEVICE;
	ioaddr->status_addr = ioaddr->cmd_addr + ATA_REG_STATUS;
	ioaddr->command_addr = ioaddr->cmd_addr + ATA_REG_CMD;
}
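
/*
 * Illustrative sketch (not part of libata, kept out of the build):
 * an LLD fills in cmd_addr and the addresses ata_std_ports() does
 * not touch, then lets the helper derive the rest.  The function and
 * its mapped-register parameters are hypothetical.
 */
#if 0
static void foo_setup_ioaddr(struct ata_port *ap, void __iomem *cmd,
			     void __iomem *ctl, void __iomem *bmdma)
{
	struct ata_ioports *ioaddr = &ap->ioaddr;

	ioaddr->cmd_addr = cmd;
	ioaddr->ctl_addr = ioaddr->altstatus_addr = ctl;
	ioaddr->bmdma_addr = bmdma;
	ata_std_ports(ioaddr);	/* derive data/error/.../command */
}
#endif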
#ifdef CONFIG_PCI

/**
 *	ata_pci_remove_one - PCI layer callback for device removal
 *	@pdev: PCI device that was removed
 *
 *	PCI layer indicates to libata via this hook that hot-unplug or
 *	module unload event has occurred.  Detach all ports.  Resource
 *	release is handled via devres.
 *
 *	LOCKING:
 *	Inherited from PCI layer (may sleep).
 */
void ata_pci_remove_one(struct pci_dev *pdev)
{
	struct device *dev = &pdev->dev;
	struct ata_host *host = dev_get_drvdata(dev);

	ata_host_detach(host);
}
/* move to PCI subsystem */
int pci_test_config_bits(struct pci_dev *pdev, const struct pci_bits *bits)
{
	unsigned long tmp = 0;

	switch (bits->width) {
	case 1: {
		u8 tmp8 = 0;
		pci_read_config_byte(pdev, bits->reg, &tmp8);
		tmp = tmp8;
		break;
	}
	case 2: {
		u16 tmp16 = 0;
		pci_read_config_word(pdev, bits->reg, &tmp16);
		tmp = tmp16;
		break;
	}
	case 4: {
		u32 tmp32 = 0;
		pci_read_config_dword(pdev, bits->reg, &tmp32);
		tmp = tmp32;
		break;
	}

	default:
		return -EINVAL;
	}

	tmp &= bits->mask;

	return (tmp == bits->val) ? 1 : 0;
}
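
/*
 * Illustrative sketch (not part of libata, kept out of the build):
 * pci_test_config_bits() is typically used to check whether a legacy
 * IDE channel is enabled before touching it.  The register offset,
 * mask and value below are hypothetical.
 */
#if 0
static int foo_port_enabled(struct pci_dev *pdev)
{
	/* { reg, width (bytes), mask, val } */
	static const struct pci_bits foo_enable_bits = {
		0x41, 1, 0x80, 0x80,	/* e.g. bit 7 of config byte 0x41 */
	};

	return pci_test_config_bits(pdev, &foo_enable_bits);
}
#endif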
#ifdef CONFIG_PM
void ata_pci_device_do_suspend(struct pci_dev *pdev, pm_message_t mesg)
{
	pci_save_state(pdev);
	pci_disable_device(pdev);

	if (mesg.event & PM_EVENT_SLEEP)
		pci_set_power_state(pdev, PCI_D3hot);
}

int ata_pci_device_do_resume(struct pci_dev *pdev)
{
	int rc;

	pci_set_power_state(pdev, PCI_D0);
	pci_restore_state(pdev);

	rc = pcim_enable_device(pdev);
	if (rc) {
		dev_printk(KERN_ERR, &pdev->dev,
			   "failed to enable device after resume (%d)\n", rc);
		return rc;
	}

	pci_set_master(pdev);
	return 0;
}

int ata_pci_device_suspend(struct pci_dev *pdev, pm_message_t mesg)
{
	struct ata_host *host = dev_get_drvdata(&pdev->dev);
	int rc = 0;

	rc = ata_host_suspend(host, mesg);
	if (rc)
		return rc;

	ata_pci_device_do_suspend(pdev, mesg);

	return 0;
}

int ata_pci_device_resume(struct pci_dev *pdev)
{
	struct ata_host *host = dev_get_drvdata(&pdev->dev);
	int rc;

	rc = ata_pci_device_do_resume(pdev);
	if (rc == 0)
		ata_host_resume(host);
	return rc;
}
#endif /* CONFIG_PM */

#endif /* CONFIG_PCI */
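
/*
 * Illustrative sketch (not part of libata, kept out of the build):
 * a hypothetical LLD that needs no controller-specific PM work can
 * point its pci_driver callbacks straight at the helpers above.
 * foo_pci_ids and foo_init_one are assumed driver definitions.
 */
#if 0
static struct pci_driver foo_pci_driver = {
	.name			= "foo_ata",
	.id_table		= foo_pci_ids,
	.probe			= foo_init_one,
	.remove			= ata_pci_remove_one,
#ifdef CONFIG_PM
	.suspend		= ata_pci_device_suspend,
	.resume			= ata_pci_device_resume,
#endif
};
#endif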
static int __init ata_parse_force_one(char **cur,
				      struct ata_force_ent *force_ent,
				      const char **reason)
{
	/* FIXME: Currently, there's no way to tag init const data and
	 * using __initdata causes build failure on some versions of
	 * gcc.  Once __initdataconst is implemented, add const to the
	 * following structure.
	 */
	static struct ata_force_param force_tbl[] __initdata = {
		{ "40c",	.cbl		= ATA_CBL_PATA40 },
		{ "80c",	.cbl		= ATA_CBL_PATA80 },
		{ "short40c",	.cbl		= ATA_CBL_PATA40_SHORT },
		{ "unk",	.cbl		= ATA_CBL_PATA_UNK },
		{ "ign",	.cbl		= ATA_CBL_PATA_IGN },
		{ "sata",	.cbl		= ATA_CBL_SATA },
		{ "1.5Gbps",	.spd_limit	= 1 },
		{ "3.0Gbps",	.spd_limit	= 2 },
		{ "noncq",	.horkage_on	= ATA_HORKAGE_NONCQ },
		{ "ncq",	.horkage_off	= ATA_HORKAGE_NONCQ },
		{ "pio0",	.xfer_mask	= 1 << (ATA_SHIFT_PIO + 0) },
		{ "pio1",	.xfer_mask	= 1 << (ATA_SHIFT_PIO + 1) },
		{ "pio2",	.xfer_mask	= 1 << (ATA_SHIFT_PIO + 2) },
		{ "pio3",	.xfer_mask	= 1 << (ATA_SHIFT_PIO + 3) },
		{ "pio4",	.xfer_mask	= 1 << (ATA_SHIFT_PIO + 4) },
		{ "pio5",	.xfer_mask	= 1 << (ATA_SHIFT_PIO + 5) },
		{ "pio6",	.xfer_mask	= 1 << (ATA_SHIFT_PIO + 6) },
		{ "mwdma0",	.xfer_mask	= 1 << (ATA_SHIFT_MWDMA + 0) },
		{ "mwdma1",	.xfer_mask	= 1 << (ATA_SHIFT_MWDMA + 1) },
		{ "mwdma2",	.xfer_mask	= 1 << (ATA_SHIFT_MWDMA + 2) },
		{ "mwdma3",	.xfer_mask	= 1 << (ATA_SHIFT_MWDMA + 3) },
		{ "mwdma4",	.xfer_mask	= 1 << (ATA_SHIFT_MWDMA + 4) },
		{ "udma0",	.xfer_mask	= 1 << (ATA_SHIFT_UDMA + 0) },
		{ "udma16",	.xfer_mask	= 1 << (ATA_SHIFT_UDMA + 0) },
		{ "udma/16",	.xfer_mask	= 1 << (ATA_SHIFT_UDMA + 0) },
		{ "udma1",	.xfer_mask	= 1 << (ATA_SHIFT_UDMA + 1) },
		{ "udma25",	.xfer_mask	= 1 << (ATA_SHIFT_UDMA + 1) },
		{ "udma/25",	.xfer_mask	= 1 << (ATA_SHIFT_UDMA + 1) },
		{ "udma2",	.xfer_mask	= 1 << (ATA_SHIFT_UDMA + 2) },
		{ "udma33",	.xfer_mask	= 1 << (ATA_SHIFT_UDMA + 2) },
		{ "udma/33",	.xfer_mask	= 1 << (ATA_SHIFT_UDMA + 2) },
		{ "udma3",	.xfer_mask	= 1 << (ATA_SHIFT_UDMA + 3) },
		{ "udma44",	.xfer_mask	= 1 << (ATA_SHIFT_UDMA + 3) },
		{ "udma/44",	.xfer_mask	= 1 << (ATA_SHIFT_UDMA + 3) },
		{ "udma4",	.xfer_mask	= 1 << (ATA_SHIFT_UDMA + 4) },
		{ "udma66",	.xfer_mask	= 1 << (ATA_SHIFT_UDMA + 4) },
		{ "udma/66",	.xfer_mask	= 1 << (ATA_SHIFT_UDMA + 4) },
		{ "udma5",	.xfer_mask	= 1 << (ATA_SHIFT_UDMA + 5) },
		{ "udma100",	.xfer_mask	= 1 << (ATA_SHIFT_UDMA + 5) },
		{ "udma/100",	.xfer_mask	= 1 << (ATA_SHIFT_UDMA + 5) },
		{ "udma6",	.xfer_mask	= 1 << (ATA_SHIFT_UDMA + 6) },
		{ "udma133",	.xfer_mask	= 1 << (ATA_SHIFT_UDMA + 6) },
		{ "udma/133",	.xfer_mask	= 1 << (ATA_SHIFT_UDMA + 6) },
		{ "udma7",	.xfer_mask	= 1 << (ATA_SHIFT_UDMA + 7) },
	};
	char *start = *cur, *p = *cur;
	char *id, *val, *endp;
	const struct ata_force_param *match_fp = NULL;
	int nr_matches = 0, i;
	/* find where this param ends and update *cur */
	while (*p != '\0' && *p != ',')
		p++;

	if (*p == ',')
		*p++ = '\0';

	*cur = p;

	/* parse the first field */
	p = strchr(start, ':');
	if (!p) {
		val = strstrip(start);
		goto parse_val;
	}
	*p = '\0';

	id = strstrip(start);
	val = strstrip(p + 1);

	/* parse id */
	p = strchr(id, '.');
	if (p) {
		*p++ = '\0';
		force_ent->device = simple_strtoul(p, &endp, 10);
		if (p == endp || *endp != '\0') {
			*reason = "invalid device";
			return -EINVAL;
		}
	}

	force_ent->port = simple_strtoul(id, &endp, 10);
	/* check the conversion of @id, not the stale @p */
	if (id == endp || *endp != '\0') {
		*reason = "invalid port/link";
		return -EINVAL;
	}

 parse_val:
	/* parse val, allow shortcuts so that both 1.5 and 1.5Gbps work */
	for (i = 0; i < ARRAY_SIZE(force_tbl); i++) {
		const struct ata_force_param *fp = &force_tbl[i];

		if (strncasecmp(val, fp->name, strlen(val)))
			continue;

		nr_matches++;
		match_fp = fp;

		if (strcasecmp(val, fp->name) == 0) {
			nr_matches = 1;
			break;
		}
	}

	if (!nr_matches) {
		*reason = "unknown value";
		return -EINVAL;
	}
	if (nr_matches > 1) {
		*reason = "ambiguous value";
		return -EINVAL;
	}

	force_ent->param = *match_fp;

	return 0;
}
static void __init ata_parse_force_param(void)
{
	int idx = 0, size = 1;
	int last_port = -1, last_device = -1;
	char *p, *cur, *next;

	/* calculate maximum number of params and allocate force_tbl */
	for (p = ata_force_param_buf; *p; p++)
		if (*p == ',')
			size++;

	ata_force_tbl = kzalloc(sizeof(ata_force_tbl[0]) * size, GFP_KERNEL);
	if (!ata_force_tbl) {
		printk(KERN_WARNING "ata: failed to extend force table, "
		       "libata.force ignored\n");
		return;
	}

	/* parse and populate the table */
	for (cur = ata_force_param_buf; *cur != '\0'; cur = next) {
		const char *reason = "";
		struct ata_force_ent te = { .port = -1, .device = -1 };

		next = cur;
		if (ata_parse_force_one(&next, &te, &reason)) {
			printk(KERN_WARNING "ata: failed to parse force "
			       "parameter \"%s\" (%s)\n",
			       cur, reason);
			continue;
		}

		/* an entry without an ID inherits the previous one's ID */
		if (te.port == -1) {
			te.port = last_port;
			te.device = last_device;
		}

		ata_force_tbl[idx++] = te;

		last_port = te.port;
		last_device = te.device;
	}

	ata_force_tbl_size = idx;
}
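
/*
 * Illustrative examples (not from the original source): the grammar
 * accepted above is a comma-separated list of [PORT[.DEVICE]:]VALUE,
 * where VALUE is matched case-insensitively (with unambiguous
 * prefixes allowed) against force_tbl, e.g.:
 *
 *	libata.force=3.0Gbps			all ports
 *	libata.force=2:noncq			port 2 only
 *	libata.force=1:udma/33,1.1:pio4		per-port and per-device
 *
 * See Documentation/kernel-parameters.txt for the authoritative
 * description.
 */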
static int __init ata_init(void)
{
	ata_probe_timeout *= HZ;

	ata_parse_force_param();

	ata_wq = create_workqueue("ata");
	if (!ata_wq) {
		kfree(ata_force_tbl);	/* don't leak the force table */
		return -ENOMEM;
	}

	ata_aux_wq = create_singlethread_workqueue("ata_aux");
	if (!ata_aux_wq) {
		destroy_workqueue(ata_wq);
		kfree(ata_force_tbl);
		return -ENOMEM;
	}

	printk(KERN_DEBUG "libata version " DRV_VERSION " loaded.\n");
	return 0;
}

static void __exit ata_exit(void)
{
	kfree(ata_force_tbl);
	destroy_workqueue(ata_wq);
	destroy_workqueue(ata_aux_wq);
}

subsys_initcall(ata_init);
module_exit(ata_exit);
static unsigned long ratelimit_time;
static DEFINE_SPINLOCK(ata_ratelimit_lock);

int ata_ratelimit(void)
{
	int rc;
	unsigned long flags;

	spin_lock_irqsave(&ata_ratelimit_lock, flags);

	if (time_after(jiffies, ratelimit_time)) {
		rc = 1;
		ratelimit_time = jiffies + (HZ/5);
	} else
		rc = 0;

	spin_unlock_irqrestore(&ata_ratelimit_lock, flags);

	return rc;
}
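
/*
 * Illustrative sketch (not part of libata, kept out of the build):
 * ata_ratelimit() allows at most one message per HZ/5 jiffies and is
 * meant to gate warnings that could otherwise flood the log from
 * interrupt or EH paths.  irq_stat and FOO_ERR are hypothetical.
 */
#if 0
	if (unlikely(irq_stat & FOO_ERR) && ata_ratelimit())
		ata_port_printk(ap, KERN_WARNING,
				"ignoring spurious error interrupt\n");
#endif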
/**
 *	ata_wait_register - wait until register value changes
 *	@reg: IO-mapped register
 *	@mask: Mask to apply to read register value
 *	@val: Wait condition
 *	@interval_msec: polling interval in milliseconds
 *	@timeout_msec: timeout in milliseconds
 *
 *	Waiting for some bits of register to change is a common
 *	operation for ATA controllers.  This function reads 32-bit LE
 *	IO-mapped register @reg and tests for the following condition.
 *
 *	(*@reg & mask) != val
 *
 *	If the condition is met, it returns; otherwise, the process is
 *	repeated after @interval_msec until timeout.
 *
 *	LOCKING:
 *	Kernel thread context (may sleep)
 *
 *	RETURNS:
 *	The final register value.
 */
u32 ata_wait_register(void __iomem *reg, u32 mask, u32 val,
		      unsigned long interval_msec,
		      unsigned long timeout_msec)
{
	unsigned long timeout;
	u32 tmp;

	tmp = ioread32(reg);

	/* Calculate timeout _after_ the first read to make sure
	 * preceding writes reach the controller before starting to
	 * eat away the timeout.
	 */
	timeout = jiffies + (timeout_msec * HZ) / 1000;

	while ((tmp & mask) == val && time_before(jiffies, timeout)) {
		msleep(interval_msec);
		tmp = ioread32(reg);
	}

	return tmp;
}
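
/*
 * Illustrative sketch (not part of libata, kept out of the build):
 * waiting for a controller to clear a reset bit.  mmio, FOO_CTL,
 * FOO_CTL_RST and the 10ms/100ms values are hypothetical.
 */
#if 0
	u32 tmp;

	/* loop while the bit is still set; poll every 10ms, 100ms max */
	tmp = ata_wait_register(mmio + FOO_CTL, FOO_CTL_RST, FOO_CTL_RST,
				10, 100);
	if (tmp & FOO_CTL_RST)
		ata_port_printk(ap, KERN_ERR, "controller reset timed out\n");
#endif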
/*
 * Dummy port_ops
 */
static void ata_dummy_noret(struct ata_port *ap)	{ }
static int ata_dummy_ret0(struct ata_port *ap)	{ return 0; }
static void ata_dummy_qc_noret(struct ata_queued_cmd *qc) { }

static u8 ata_dummy_check_status(struct ata_port *ap)
{
	return ATA_DRDY;	/* dummy port always reports ready */
}

static unsigned int ata_dummy_qc_issue(struct ata_queued_cmd *qc)
{
	return AC_ERR_SYSTEM;
}

const struct ata_port_operations ata_dummy_port_ops = {
	.check_status		= ata_dummy_check_status,
	.check_altstatus	= ata_dummy_check_status,
	.dev_select		= ata_noop_dev_select,
	.qc_prep		= ata_noop_qc_prep,
	.qc_issue		= ata_dummy_qc_issue,
	.freeze			= ata_dummy_noret,
	.thaw			= ata_dummy_noret,
	.error_handler		= ata_dummy_noret,
	.post_internal_cmd	= ata_dummy_qc_noret,
	.irq_clear		= ata_dummy_noret,
	.port_start		= ata_dummy_ret0,
	.port_stop		= ata_dummy_noret,
};

const struct ata_port_info ata_dummy_port_info = {
	.port_ops		= &ata_dummy_port_ops,
};
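
/*
 * Illustrative sketch (not part of libata, kept out of the build):
 * hardware with a dead or absent second channel can hand
 * ata_dummy_port_info to the host allocator so port numbering stays
 * stable.  foo_port_info is hypothetical.
 */
#if 0
	const struct ata_port_info *ppi[] =
		{ &foo_port_info, &ata_dummy_port_info };
	struct ata_host *host = ata_host_alloc_pinfo(&pdev->dev, ppi, 2);
#endif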
/*
 * libata is essentially a library of internal helper functions for
 * low-level ATA host controller drivers.  As such, the API/ABI is
 * likely to change as new drivers are added and updated.
 * Do not depend on ABI/API stability.
 */
EXPORT_SYMBOL_GPL(sata_deb_timing_normal);
EXPORT_SYMBOL_GPL(sata_deb_timing_hotplug);
EXPORT_SYMBOL_GPL(sata_deb_timing_long);
EXPORT_SYMBOL_GPL(ata_dummy_port_ops);
EXPORT_SYMBOL_GPL(ata_dummy_port_info);
EXPORT_SYMBOL_GPL(ata_std_bios_param);
EXPORT_SYMBOL_GPL(ata_std_ports);
EXPORT_SYMBOL_GPL(ata_host_init);
EXPORT_SYMBOL_GPL(ata_host_alloc);
EXPORT_SYMBOL_GPL(ata_host_alloc_pinfo);
EXPORT_SYMBOL_GPL(ata_host_start);
EXPORT_SYMBOL_GPL(ata_host_register);
EXPORT_SYMBOL_GPL(ata_host_activate);
EXPORT_SYMBOL_GPL(ata_host_detach);
EXPORT_SYMBOL_GPL(ata_sg_init);
EXPORT_SYMBOL_GPL(ata_hsm_move);
EXPORT_SYMBOL_GPL(ata_qc_complete);
EXPORT_SYMBOL_GPL(ata_qc_complete_multiple);
EXPORT_SYMBOL_GPL(ata_qc_issue_prot);
EXPORT_SYMBOL_GPL(ata_tf_load);
EXPORT_SYMBOL_GPL(ata_tf_read);
EXPORT_SYMBOL_GPL(ata_noop_dev_select);
EXPORT_SYMBOL_GPL(ata_std_dev_select);
EXPORT_SYMBOL_GPL(sata_print_link_status);
EXPORT_SYMBOL_GPL(atapi_cmd_type);
EXPORT_SYMBOL_GPL(ata_tf_to_fis);
EXPORT_SYMBOL_GPL(ata_tf_from_fis);
EXPORT_SYMBOL_GPL(ata_pack_xfermask);
EXPORT_SYMBOL_GPL(ata_unpack_xfermask);
EXPORT_SYMBOL_GPL(ata_xfer_mask2mode);
EXPORT_SYMBOL_GPL(ata_xfer_mode2mask);
EXPORT_SYMBOL_GPL(ata_xfer_mode2shift);
EXPORT_SYMBOL_GPL(ata_mode_string);
EXPORT_SYMBOL_GPL(ata_id_xfermask);
EXPORT_SYMBOL_GPL(ata_check_status);
EXPORT_SYMBOL_GPL(ata_altstatus);
EXPORT_SYMBOL_GPL(ata_exec_command);
EXPORT_SYMBOL_GPL(ata_port_start);
EXPORT_SYMBOL_GPL(ata_sff_port_start);
EXPORT_SYMBOL_GPL(ata_interrupt);
EXPORT_SYMBOL_GPL(ata_do_set_mode);
EXPORT_SYMBOL_GPL(ata_data_xfer);
EXPORT_SYMBOL_GPL(ata_data_xfer_noirq);
EXPORT_SYMBOL_GPL(ata_std_qc_defer);
EXPORT_SYMBOL_GPL(ata_qc_prep);
EXPORT_SYMBOL_GPL(ata_dumb_qc_prep);
EXPORT_SYMBOL_GPL(ata_noop_qc_prep);
EXPORT_SYMBOL_GPL(ata_bmdma_setup);
EXPORT_SYMBOL_GPL(ata_bmdma_start);
EXPORT_SYMBOL_GPL(ata_bmdma_irq_clear);
EXPORT_SYMBOL_GPL(ata_bmdma_status);
EXPORT_SYMBOL_GPL(ata_bmdma_stop);
EXPORT_SYMBOL_GPL(ata_bmdma_freeze);
EXPORT_SYMBOL_GPL(ata_bmdma_thaw);
EXPORT_SYMBOL_GPL(ata_bmdma_drive_eh);
EXPORT_SYMBOL_GPL(ata_bmdma_error_handler);
EXPORT_SYMBOL_GPL(ata_bmdma_post_internal_cmd);
EXPORT_SYMBOL_GPL(ata_port_probe);
EXPORT_SYMBOL_GPL(ata_dev_disable);
EXPORT_SYMBOL_GPL(sata_set_spd);
EXPORT_SYMBOL_GPL(sata_link_debounce);
EXPORT_SYMBOL_GPL(sata_link_resume);
EXPORT_SYMBOL_GPL(ata_bus_reset);
EXPORT_SYMBOL_GPL(ata_std_prereset);
EXPORT_SYMBOL_GPL(ata_std_softreset);
EXPORT_SYMBOL_GPL(sata_link_hardreset);
EXPORT_SYMBOL_GPL(sata_std_hardreset);
EXPORT_SYMBOL_GPL(ata_std_postreset);
EXPORT_SYMBOL_GPL(ata_dev_classify);
EXPORT_SYMBOL_GPL(ata_dev_pair);
EXPORT_SYMBOL_GPL(ata_port_disable);
EXPORT_SYMBOL_GPL(ata_ratelimit);
EXPORT_SYMBOL_GPL(ata_wait_register);
EXPORT_SYMBOL_GPL(ata_busy_sleep);
EXPORT_SYMBOL_GPL(ata_wait_after_reset);
EXPORT_SYMBOL_GPL(ata_wait_ready);
EXPORT_SYMBOL_GPL(ata_scsi_ioctl);
EXPORT_SYMBOL_GPL(ata_scsi_queuecmd);
EXPORT_SYMBOL_GPL(ata_scsi_slave_config);
EXPORT_SYMBOL_GPL(ata_scsi_slave_destroy);
EXPORT_SYMBOL_GPL(ata_scsi_change_queue_depth);
EXPORT_SYMBOL_GPL(ata_host_intr);
EXPORT_SYMBOL_GPL(sata_scr_valid);
EXPORT_SYMBOL_GPL(sata_scr_read);
EXPORT_SYMBOL_GPL(sata_scr_write);
EXPORT_SYMBOL_GPL(sata_scr_write_flush);
EXPORT_SYMBOL_GPL(ata_link_online);
EXPORT_SYMBOL_GPL(ata_link_offline);
#ifdef CONFIG_PM
EXPORT_SYMBOL_GPL(ata_host_suspend);
EXPORT_SYMBOL_GPL(ata_host_resume);
#endif /* CONFIG_PM */
EXPORT_SYMBOL_GPL(ata_id_string);
EXPORT_SYMBOL_GPL(ata_id_c_string);
EXPORT_SYMBOL_GPL(ata_scsi_simulate);

EXPORT_SYMBOL_GPL(ata_pio_need_iordy);
EXPORT_SYMBOL_GPL(ata_timing_find_mode);
EXPORT_SYMBOL_GPL(ata_timing_compute);
EXPORT_SYMBOL_GPL(ata_timing_merge);
EXPORT_SYMBOL_GPL(ata_timing_cycle2mode);

#ifdef CONFIG_PCI
EXPORT_SYMBOL_GPL(pci_test_config_bits);
EXPORT_SYMBOL_GPL(ata_pci_init_sff_host);
EXPORT_SYMBOL_GPL(ata_pci_init_bmdma);
EXPORT_SYMBOL_GPL(ata_pci_prepare_sff_host);
EXPORT_SYMBOL_GPL(ata_pci_activate_sff_host);
EXPORT_SYMBOL_GPL(ata_pci_init_one);
EXPORT_SYMBOL_GPL(ata_pci_remove_one);
#ifdef CONFIG_PM
EXPORT_SYMBOL_GPL(ata_pci_device_do_suspend);
EXPORT_SYMBOL_GPL(ata_pci_device_do_resume);
EXPORT_SYMBOL_GPL(ata_pci_device_suspend);
EXPORT_SYMBOL_GPL(ata_pci_device_resume);
#endif /* CONFIG_PM */
EXPORT_SYMBOL_GPL(ata_pci_default_filter);
EXPORT_SYMBOL_GPL(ata_pci_clear_simplex);
#endif /* CONFIG_PCI */

EXPORT_SYMBOL_GPL(sata_pmp_qc_defer_cmd_switch);
EXPORT_SYMBOL_GPL(sata_pmp_std_prereset);
EXPORT_SYMBOL_GPL(sata_pmp_std_hardreset);
EXPORT_SYMBOL_GPL(sata_pmp_std_postreset);
EXPORT_SYMBOL_GPL(sata_pmp_do_eh);

EXPORT_SYMBOL_GPL(__ata_ehi_push_desc);
EXPORT_SYMBOL_GPL(ata_ehi_push_desc);
EXPORT_SYMBOL_GPL(ata_ehi_clear_desc);
EXPORT_SYMBOL_GPL(ata_port_desc);
#ifdef CONFIG_PCI
EXPORT_SYMBOL_GPL(ata_port_pbar_desc);
#endif /* CONFIG_PCI */
EXPORT_SYMBOL_GPL(ata_port_schedule_eh);
EXPORT_SYMBOL_GPL(ata_link_abort);
EXPORT_SYMBOL_GPL(ata_port_abort);
EXPORT_SYMBOL_GPL(ata_port_freeze);
EXPORT_SYMBOL_GPL(sata_async_notification);
EXPORT_SYMBOL_GPL(ata_eh_freeze_port);
EXPORT_SYMBOL_GPL(ata_eh_thaw_port);
EXPORT_SYMBOL_GPL(ata_eh_qc_complete);
EXPORT_SYMBOL_GPL(ata_eh_qc_retry);
EXPORT_SYMBOL_GPL(ata_do_eh);
EXPORT_SYMBOL_GPL(ata_irq_on);
EXPORT_SYMBOL_GPL(ata_dev_try_classify);

EXPORT_SYMBOL_GPL(ata_cable_40wire);
EXPORT_SYMBOL_GPL(ata_cable_80wire);
EXPORT_SYMBOL_GPL(ata_cable_unknown);
EXPORT_SYMBOL_GPL(ata_cable_ignore);
EXPORT_SYMBOL_GPL(ata_cable_sata);