/**
* ata_rwcmd_protocol - set taskfile r/w commands and protocol
- * @qc: command to examine and configure
+ * @tf: command to examine and configure
+ * @dev: device tf belongs to
*
* Examine the device configuration and tf->flags to calculate
* the proper read/write commands and protocol to use.
* LOCKING:
 * Inherited from caller.
*/
-int ata_rwcmd_protocol(struct ata_queued_cmd *qc)
+static int ata_rwcmd_protocol(struct ata_taskfile *tf, struct ata_device *dev)
{
- struct ata_taskfile *tf = &qc->tf;
- struct ata_device *dev = qc->dev;
u8 cmd;
int index, fua, lba48, write;
if (dev->flags & ATA_DFLAG_PIO) {
tf->protocol = ATA_PROT_PIO;
index = dev->multi_count ? 0 : 8;
- } else if (lba48 && (qc->ap->flags & ATA_FLAG_PIO_LBA48)) {
+ } else if (lba48 && (dev->ap->flags & ATA_FLAG_PIO_LBA48)) {
/* Unable to use DMA due to host limitation */
tf->protocol = ATA_PROT_PIO;
index = dev->multi_count ? 0 : 8;
return -1;
}
+/**
+ * ata_tf_read_block - Read block address from ATA taskfile
+ * @tf: ATA taskfile of interest
+ * @dev: ATA device @tf belongs to
+ *
+ * LOCKING:
+ * None.
+ *
+ * Read block address from @tf. This function can handle all
+ * three address formats - LBA, LBA48 and CHS. tf->protocol and
+ * flags select the address format to use.
+ *
+ * RETURNS:
+ * Block address read from @tf.
+ */
+u64 ata_tf_read_block(struct ata_taskfile *tf, struct ata_device *dev)
+{
+ u64 block = 0;
+
+ if (tf->flags & ATA_TFLAG_LBA) {
+ if (tf->flags & ATA_TFLAG_LBA48) {
+ block |= (u64)tf->hob_lbah << 40;
+ block |= (u64)tf->hob_lbam << 32;
+ block |= (u64)tf->hob_lbal << 24;
+ } else
+ block |= (tf->device & 0xf) << 24;
+
+ block |= tf->lbah << 16;
+ block |= tf->lbam << 8;
+ block |= tf->lbal;
+ } else {
+ u32 cyl, head, sect;
+
+ cyl = tf->lbam | (tf->lbah << 8);
+ head = tf->device & 0xf;
+ sect = tf->lbal;
+
+ block = (cyl * dev->heads + head) * dev->sectors + sect;
+ }
+
+ return block;
+}
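To make the decode above concrete: the 48-bit block address is split across six 8-bit taskfile registers, the low 24 bits in lbal/lbam/lbah and the high 24 bits in their hob_ (high-order byte) copies, and each byte must be widened to u64 before shifting or the 32-bit int promotion overflows. A minimal, self-contained userspace sketch (struct and helper names are stand-ins, not kernel types):

#include <stdint.h>
#include <stdio.h>

/* stand-in for the six LBA bytes of struct ata_taskfile */
struct tf_lba {
	uint8_t hob_lbah, hob_lbam, hob_lbal;	/* bits 47:24 */
	uint8_t lbah, lbam, lbal;		/* bits 23:0 */
};

static uint64_t read_lba48(const struct tf_lba *tf)
{
	uint64_t block = 0;

	/* widen before shifting: 0xff << 24 would overflow a 32-bit int */
	block |= (uint64_t)tf->hob_lbah << 40;
	block |= (uint64_t)tf->hob_lbam << 32;
	block |= (uint64_t)tf->hob_lbal << 24;
	block |= (uint64_t)tf->lbah << 16;
	block |= (uint64_t)tf->lbam << 8;
	block |= tf->lbal;
	return block;
}

int main(void)
{
	/* 0x0000ff000001 round-trips through the six byte registers */
	struct tf_lba tf = { 0x00, 0x00, 0xff, 0x00, 0x00, 0x01 };

	printf("block = 0x%012llx\n", (unsigned long long)read_lba48(&tf));
	return 0;
}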
+
+/**
+ * ata_build_rw_tf - Build ATA taskfile for given read/write request
+ * @tf: Target ATA taskfile
+ * @dev: ATA device @tf belongs to
+ * @block: Block address
+ * @n_block: Number of blocks
+ * @tf_flags: RW/FUA etc...
+ * @tag: tag
+ *
+ * LOCKING:
+ * None.
+ *
+ * Build ATA taskfile @tf for read/write request described by
+ * @block, @n_block, @tf_flags and @tag on @dev.
+ *
+ * RETURNS:
+ * 0 on success, -ERANGE if the request is too large for @dev,
+ * -EINVAL if the request is invalid.
+ */
+int ata_build_rw_tf(struct ata_taskfile *tf, struct ata_device *dev,
+ u64 block, u32 n_block, unsigned int tf_flags,
+ unsigned int tag)
+{
+ tf->flags |= ATA_TFLAG_ISADDR | ATA_TFLAG_DEVICE;
+ tf->flags |= tf_flags;
+
+ if ((dev->flags & (ATA_DFLAG_PIO | ATA_DFLAG_NCQ_OFF |
+ ATA_DFLAG_NCQ)) == ATA_DFLAG_NCQ &&
+ likely(tag != ATA_TAG_INTERNAL)) {
+ /* yay, NCQ */
+ if (!lba_48_ok(block, n_block))
+ return -ERANGE;
+
+ tf->protocol = ATA_PROT_NCQ;
+ tf->flags |= ATA_TFLAG_LBA | ATA_TFLAG_LBA48;
+
+ if (tf->flags & ATA_TFLAG_WRITE)
+ tf->command = ATA_CMD_FPDMA_WRITE;
+ else
+ tf->command = ATA_CMD_FPDMA_READ;
+
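+ /* FPDMA commands carry the sector count in the FEATURE pair and
+  * the NCQ tag in bits 7:3 of the sector count register */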
+ tf->nsect = tag << 3;
+ tf->hob_feature = (n_block >> 8) & 0xff;
+ tf->feature = n_block & 0xff;
+
+ tf->hob_lbah = (block >> 40) & 0xff;
+ tf->hob_lbam = (block >> 32) & 0xff;
+ tf->hob_lbal = (block >> 24) & 0xff;
+ tf->lbah = (block >> 16) & 0xff;
+ tf->lbam = (block >> 8) & 0xff;
+ tf->lbal = block & 0xff;
+
+ tf->device = 1 << 6;
+ if (tf->flags & ATA_TFLAG_FUA)
+ tf->device |= 1 << 7;
+ } else if (dev->flags & ATA_DFLAG_LBA) {
+ tf->flags |= ATA_TFLAG_LBA;
+
+ if (lba_28_ok(block, n_block)) {
+ /* use LBA28 */
+ tf->device |= (block >> 24) & 0xf;
+ } else if (lba_48_ok(block, n_block)) {
+ if (!(dev->flags & ATA_DFLAG_LBA48))
+ return -ERANGE;
+
+ /* use LBA48 */
+ tf->flags |= ATA_TFLAG_LBA48;
+
+ tf->hob_nsect = (n_block >> 8) & 0xff;
+
+ tf->hob_lbah = (block >> 40) & 0xff;
+ tf->hob_lbam = (block >> 32) & 0xff;
+ tf->hob_lbal = (block >> 24) & 0xff;
+ } else
+ /* request too large even for LBA48 */
+ return -ERANGE;
+
+ if (unlikely(ata_rwcmd_protocol(tf, dev) < 0))
+ return -EINVAL;
+
+ tf->nsect = n_block & 0xff;
+
+ tf->lbah = (block >> 16) & 0xff;
+ tf->lbam = (block >> 8) & 0xff;
+ tf->lbal = block & 0xff;
+
+ tf->device |= ATA_LBA;
+ } else {
+ /* CHS */
+ u32 sect, head, cyl, track;
+
+ /* The request -may- be too large for CHS addressing. */
+ if (!lba_28_ok(block, n_block))
+ return -ERANGE;
+
+ if (unlikely(ata_rwcmd_protocol(tf, dev) < 0))
+ return -EINVAL;
+
+ /* Convert LBA to CHS */
+ track = (u32)block / dev->sectors;
+ cyl = track / dev->heads;
+ head = track % dev->heads;
+ sect = (u32)block % dev->sectors + 1;
+
+ DPRINTK("block %u track %u cyl %u head %u sect %u\n",
+ (u32)block, track, cyl, head, sect);
+
+ /* Check whether the converted CHS can fit.
+ Cylinder: 0-65535
+ Head: 0-15
+ Sector: 1-255 */
+ if ((cyl >> 16) || (head >> 4) || (sect >> 8) || (!sect))
+ return -ERANGE;
+
+ tf->nsect = n_block & 0xff; /* Sector count 0 means 256 sectors */
+ tf->lbal = sect;
+ tf->lbam = cyl;
+ tf->lbah = cyl >> 8;
+ tf->device |= head;
+ }
+
+ return 0;
+}
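The NCQ branch above is easy to misread because FPDMA commands repurpose the registers: the sector count moves into the FEATURE/HOB_FEATURE pair and the tag sits in bits 7:3 of nsect. A standalone round-trip check of that encoding (all names invented for illustration):

#include <assert.h>
#include <stdint.h>
#include <stdio.h>

int main(void)
{
	uint32_t n_block = 0x1234;	/* transfer length in sectors */
	unsigned int tag = 5;		/* NCQ tag, 0-31 */

	/* pack as ata_build_rw_tf() does for FPDMA commands */
	uint8_t nsect = tag << 3;
	uint8_t hob_feature = (n_block >> 8) & 0xff;
	uint8_t feature = n_block & 0xff;

	/* unpack and verify the encoding is lossless */
	assert((unsigned int)(nsect >> 3) == tag);
	assert((unsigned int)((hob_feature << 8) | feature) == n_block);
	printf("tag %u, count %u sectors\n", tag,
	       (unsigned int)((hob_feature << 8) | feature));
	return 0;
}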
+
/**
* ata_pack_xfermask - Pack pio, mwdma and udma masks into xfer_mask
* @pio_mask: pio_mask
* the PIO timing number for the maximum. Turn it into
* a mask.
*/
- u8 mode = id[ATA_ID_OLD_PIO_MODES] & 0xFF;
+ u8 mode = (id[ATA_ID_OLD_PIO_MODES] >> 8) & 0xFF;
if (mode < 5) /* Valid PIO range */
pio_mask = (2 << mode) - 1;
else
* ata_port_queue_task - Queue port_task
* @ap: The ata_port to queue port_task for
* @fn: workqueue function to be scheduled
- * @data: data value to pass to workqueue function
+ * @data: data for @fn to use
* @delay: delay time for workqueue function
*
* Schedule @fn(@data) for execution after @delay jiffies using
* LOCKING:
* Inherited from caller.
*/
-void ata_port_queue_task(struct ata_port *ap, void (*fn)(void *), void *data,
+void ata_port_queue_task(struct ata_port *ap, work_func_t fn, void *data,
unsigned long delay)
{
int rc;
if (ap->pflags & ATA_PFLAG_FLUSH_PORT_TASK)
return;
- PREPARE_WORK(&ap->port_task, fn, data);
+ PREPARE_DELAYED_WORK(&ap->port_task, fn);
+ ap->port_task_data = data;
- if (!delay)
- rc = queue_work(ata_wq, &ap->port_task);
- else
- rc = queue_delayed_work(ata_wq, &ap->port_task, delay);
+ rc = queue_delayed_work(ata_wq, &ap->port_task, delay);
/* rc == 0 means that another user is using the port task */
WARN_ON(rc == 0);
ata_port_printk(ap, KERN_DEBUG, "%s: EXIT\n", __FUNCTION__);
}
-void ata_qc_complete_internal(struct ata_queued_cmd *qc)
+static void ata_qc_complete_internal(struct ata_queued_cmd *qc)
{
struct completion *waiting = qc->private_data;
}
/**
- * ata_exec_internal - execute libata internal command
+ * ata_exec_internal_sg - execute libata internal command
* @dev: Device to which the command is sent
* @tf: Taskfile registers for the command and the result
* @cdb: CDB for packet command
 * @dma_dir: Data transfer direction of the command
- * @buf: Data buffer of the command
- * @buflen: Length of data buffer
+ * @sg: sg list for the data buffer of the command
+ * @n_elem: Number of sg entries
*
* Executes libata internal command with timeout. @tf contains
* command on entry and result on return. Timeout and error
* RETURNS:
* Zero on success, AC_ERR_* mask on failure
*/
-unsigned ata_exec_internal(struct ata_device *dev,
- struct ata_taskfile *tf, const u8 *cdb,
- int dma_dir, void *buf, unsigned int buflen)
+unsigned ata_exec_internal_sg(struct ata_device *dev,
+ struct ata_taskfile *tf, const u8 *cdb,
+ int dma_dir, struct scatterlist *sg,
+ unsigned int n_elem)
{
struct ata_port *ap = dev->ap;
u8 command = tf->command;
qc->flags |= ATA_QCFLAG_RESULT_TF;
qc->dma_dir = dma_dir;
if (dma_dir != DMA_NONE) {
- ata_sg_init_one(qc, buf, buflen);
- qc->nsect = buflen / ATA_SECT_SIZE;
+ unsigned int i, buflen = 0;
+
+ for (i = 0; i < n_elem; i++)
+ buflen += sg[i].length;
+
+ ata_sg_init(qc, sg, n_elem);
+ qc->nbytes = buflen;
}
qc->private_data = &wait;
if (ap->ops->post_internal_cmd)
ap->ops->post_internal_cmd(qc);
- if (qc->flags & ATA_QCFLAG_FAILED && !qc->err_mask) {
+ if ((qc->flags & ATA_QCFLAG_FAILED) && !qc->err_mask) {
if (ata_msg_warn(ap))
ata_dev_printk(dev, KERN_WARNING,
"zero err_mask for failed "
return err_mask;
}
+/**
+ * ata_exec_internal - execute libata internal command
+ * @dev: Device to which the command is sent
+ * @tf: Taskfile registers for the command and the result
+ * @cdb: CDB for packet command
+ * @dma_dir: Data transfer direction of the command
+ * @buf: Data buffer of the command
+ * @buflen: Length of data buffer
+ *
+ * Wrapper around ata_exec_internal_sg() which takes a simple
+ * buffer instead of an sg list.
+ *
+ * LOCKING:
+ * None. Should be called with kernel context, might sleep.
+ *
+ * RETURNS:
+ * Zero on success, AC_ERR_* mask on failure
+ */
+unsigned ata_exec_internal(struct ata_device *dev,
+ struct ata_taskfile *tf, const u8 *cdb,
+ int dma_dir, void *buf, unsigned int buflen)
+{
+ struct scatterlist *psg = NULL, sg;
+ unsigned int n_elem = 0;
+
+ if (dma_dir != DMA_NONE) {
+ WARN_ON(!buf);
+ sg_init_one(&sg, buf, buflen);
+ psg = &sg;
+ n_elem++;
+ }
+
+ return ata_exec_internal_sg(dev, tf, cdb, dma_dir, psg, n_elem);
+}
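The new ata_exec_internal() is a thin convenience wrapper: it builds a one-element sg table on the stack and funnels everything through the vectored path, so there is a single execution path to maintain. A generic, runnable userspace analogue of the pattern (names invented, not libata API):

#include <stddef.h>
#include <stdio.h>

struct vec { void *base; size_t len; };

static size_t exec_vectored(const struct vec *v, unsigned int n)
{
	size_t total = 0;

	for (unsigned int i = 0; i < n; i++)
		total += v[i].len;	/* stand-in for real I/O */
	return total;
}

static size_t exec_simple(void *buf, size_t len)
{
	struct vec v = { buf, len };

	return exec_vectored(&v, 1);	/* one-entry vector, like sg_init_one() */
}

int main(void)
{
	char buf[512];

	printf("transferred %zu bytes\n", exec_simple(buf, sizeof(buf)));
	return 0;
}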
+
/**
* ata_do_simple_cmd - execute simple internal command
* @dev: Device to which the command is sent
}
tf.protocol = ATA_PROT_PIO;
-
- /* presence detection using polling IDENTIFY? */
- if (flags & ATA_READID_DETECT)
- tf.flags |= ATA_TFLAG_POLLING;
+ tf.flags |= ATA_TFLAG_POLLING; /* for polling presence detection */
err_mask = ata_exec_internal(dev, &tf, NULL, DMA_FROM_DEVICE,
id, sizeof(id[0]) * ATA_ID_WORDS);
if (err_mask) {
- if ((flags & ATA_READID_DETECT) &&
- (err_mask & AC_ERR_NODEV_HINT)) {
+ if (err_mask & AC_ERR_NODEV_HINT) {
DPRINTK("ata%u.%d: NODEV after polling detection\n",
ap->id, dev->devno);
return -ENOENT;
* DMA cycle timing is slower/equal than the fastest PIO timing.
*/
- if (speed > XFER_PIO_4) {
+ if (speed > XFER_PIO_6) {
ata_timing_compute(adev, adev->pio_mode, &p, T, UT);
ata_timing_merge(&p, t, t, ATA_TIMING_ALL);
}
int i, rc = 0, used_dma = 0, found = 0;
/* has private set_mode? */
- if (ap->ops->set_mode) {
- /* FIXME: make ->set_mode handle no device case and
- * return error code and failing device on failure.
- */
- for (i = 0; i < ATA_MAX_DEVICES; i++) {
- if (ata_dev_ready(&ap->device[i])) {
- ap->ops->set_mode(ap);
- break;
- }
- }
- return 0;
- }
+ if (ap->ops->set_mode)
+ return ap->ops->set_mode(ap, r_failed_dev);
/* step 1: calculate xfer_mask */
for (i = 0; i < ATA_MAX_DEVICES; i++) {
for (i = 0; i < ATA_MAX_DEVICES; i++) {
dev = &ap->device[i];
- /* don't udpate suspended devices' xfer mode */
+ /* don't update suspended devices' xfer mode */
if (!ata_dev_ready(dev))
continue;
const u16 *new_id)
{
const u16 *old_id = dev->id;
- unsigned char model[2][41], serial[2][21];
+ unsigned char model[2][ATA_ID_PROD_LEN + 1];
+ unsigned char serial[2][ATA_ID_SERNO_LEN + 1];
u64 new_n_sectors;
if (dev->class != new_class) {
return 0;
}
- ata_id_c_string(old_id, model[0], ATA_ID_PROD_OFS, sizeof(model[0]));
- ata_id_c_string(new_id, model[1], ATA_ID_PROD_OFS, sizeof(model[1]));
- ata_id_c_string(old_id, serial[0], ATA_ID_SERNO_OFS, sizeof(serial[0]));
- ata_id_c_string(new_id, serial[1], ATA_ID_SERNO_OFS, sizeof(serial[1]));
+ ata_id_c_string(old_id, model[0], ATA_ID_PROD, sizeof(model[0]));
+ ata_id_c_string(new_id, model[1], ATA_ID_PROD, sizeof(model[1]));
+ ata_id_c_string(old_id, serial[0], ATA_ID_SERNO, sizeof(serial[0]));
+ ata_id_c_string(new_id, serial[1], ATA_ID_SERNO, sizeof(serial[1]));
new_n_sectors = ata_id_n_sectors(new_id);
if (strcmp(model[0], model[1])) {
{ }
};
-static int ata_strim(char *s, size_t len)
-{
- len = strnlen(s, len);
-
- /* ATAPI specifies that empty space is blank-filled; remove blanks */
- while ((len > 0) && (s[len - 1] == ' ')) {
- len--;
- s[len] = 0;
- }
- return len;
-}
-
unsigned long ata_device_blacklisted(const struct ata_device *dev)
{
- unsigned char model_num[40];
- unsigned char model_rev[16];
- unsigned int nlen, rlen;
+ unsigned char model_num[ATA_ID_PROD_LEN + 1];
+ unsigned char model_rev[ATA_ID_FW_REV_LEN + 1];
const struct ata_blacklist_entry *ad = ata_device_blacklist;
- ata_id_string(dev->id, model_num, ATA_ID_PROD_OFS,
- sizeof(model_num));
- ata_id_string(dev->id, model_rev, ATA_ID_FW_REV_OFS,
- sizeof(model_rev));
- nlen = ata_strim(model_num, sizeof(model_num));
- rlen = ata_strim(model_rev, sizeof(model_rev));
+ ata_id_c_string(dev->id, model_num, ATA_ID_PROD, sizeof(model_num));
+ ata_id_c_string(dev->id, model_rev, ATA_ID_FW_REV, sizeof(model_rev));
while (ad->model_num) {
- if (!strncmp(ad->model_num, model_num, nlen)) {
+ if (!strcmp(ad->model_num, model_num)) {
if (ad->model_rev == NULL)
return ad->horkage;
- if (!strncmp(ad->model_rev, model_rev, rlen))
+ if (!strcmp(ad->model_rev, model_rev))
return ad->horkage;
}
ad++;
* LOCKING:
* spin_lock_irqsave(host lock)
*/
-
-static void ata_sg_clean(struct ata_queued_cmd *qc)
+void ata_sg_clean(struct ata_queued_cmd *qc)
{
struct ata_port *ap = qc->ap;
struct scatterlist *sg = qc->__sg;
unsigned int offset;
unsigned char *buf;
- if (qc->cursect == (qc->nsect - 1))
+ if (qc->curbytes == qc->nbytes - ATA_SECT_SIZE)
ap->hsm_task_state = HSM_ST_LAST;
page = sg[qc->cursg].page;
- offset = sg[qc->cursg].offset + qc->cursg_ofs * ATA_SECT_SIZE;
+ offset = sg[qc->cursg].offset + qc->cursg_ofs;
/* get the current page and offset */
page = nth_page(page, (offset >> PAGE_SHIFT));
ap->ops->data_xfer(qc->dev, buf + offset, ATA_SECT_SIZE, do_write);
}
- qc->cursect++;
- qc->cursg_ofs++;
+ qc->curbytes += ATA_SECT_SIZE;
+ qc->cursg_ofs += ATA_SECT_SIZE;
- if ((qc->cursg_ofs * ATA_SECT_SIZE) == (&sg[qc->cursg])->length) {
+ if (qc->cursg_ofs == (&sg[qc->cursg])->length) {
qc->cursg++;
qc->cursg_ofs = 0;
}
WARN_ON(qc->dev->multi_count == 0);
- nsect = min(qc->nsect - qc->cursect, qc->dev->multi_count);
+ nsect = min((qc->nbytes - qc->curbytes) / ATA_SECT_SIZE,
+ qc->dev->multi_count);
while (nsect--)
ata_pio_sector(qc);
} else
return poll_next;
}
-static void ata_pio_task(void *_data)
+static void ata_pio_task(struct work_struct *work)
{
- struct ata_queued_cmd *qc = _data;
- struct ata_port *ap = qc->ap;
+ struct ata_port *ap =
+ container_of(work, struct ata_port, port_task.work);
+ struct ata_queued_cmd *qc = ap->port_task_data;
u8 status;
int poll_next;
qc->complete_fn(qc);
}
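The ata_pio_task() conversion follows the work_struct API change: the callback now receives the work pointer and must recover its ata_port via container_of(), with the qc passed through ap->port_task_data. A runnable userspace sketch of the container_of() pattern (structure names are stand-ins):

#include <stddef.h>
#include <stdio.h>

#define container_of(ptr, type, member) \
	((type *)((char *)(ptr) - offsetof(type, member)))

struct work { int pending; };

struct port {
	int id;
	struct work task;	/* embedded, like ap->port_task */
};

static void task_fn(struct work *w)
{
	struct port *p = container_of(w, struct port, task);

	printf("callback recovered port %d\n", p->id);
}

int main(void)
{
	struct port p = { .id = 3 };

	task_fn(&p.task);	/* the queueing layer would pass &p.task */
	return 0;
}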
+static void fill_result_tf(struct ata_queued_cmd *qc)
+{
+ struct ata_port *ap = qc->ap;
+
+ ap->ops->tf_read(ap, &qc->result_tf);
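+ /* keep the command's flags so consumers of result_tf (e.g.
+  * ata_tf_read_block()) know which address format to decode */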
+ qc->result_tf.flags = qc->tf.flags;
+}
+
/**
* ata_qc_complete - Complete an active ATA command
* @qc: Command to complete
if (unlikely(qc->flags & ATA_QCFLAG_FAILED)) {
if (!ata_tag_internal(qc->tag)) {
/* always fill result TF for failed qc */
- ap->ops->tf_read(ap, &qc->result_tf);
+ fill_result_tf(qc);
ata_qc_schedule_eh(qc);
return;
}
/* read result TF if requested */
if (qc->flags & ATA_QCFLAG_RESULT_TF)
- ap->ops->tf_read(ap, &qc->result_tf);
+ fill_result_tf(qc);
__ata_qc_complete(qc);
} else {
/* read result TF if failed or requested */
if (qc->err_mask || qc->flags & ATA_QCFLAG_RESULT_TF)
- ap->ops->tf_read(ap, &qc->result_tf);
+ fill_result_tf(qc);
__ata_qc_complete(qc);
}
if (ap->flags & ATA_FLAG_PIO_POLLING) {
switch (qc->tf.protocol) {
case ATA_PROT_PIO:
+ case ATA_PROT_NODATA:
case ATA_PROT_ATAPI:
case ATA_PROT_ATAPI_NODATA:
qc->tf.flags |= ATA_TFLAG_POLLING;
}
}
+ /* Some controllers show flaky interrupt behavior after
+ * setting xfer mode. Use polling instead.
+ */
+ if (unlikely(qc->tf.command == ATA_CMD_SET_FEATURES &&
+ qc->tf.feature == SETFEATURES_XFER) &&
+ (ap->flags & ATA_FLAG_SETXFER_POLLING))
+ qc->tf.flags |= ATA_TFLAG_POLLING;
+
/* select the device */
ata_dev_select(ap, qc->dev->devno, 1, 0);
inline unsigned int ata_host_intr (struct ata_port *ap,
struct ata_queued_cmd *qc)
{
+ struct ata_eh_info *ehi = &ap->eh_info;
u8 status, host_stat = 0;
VPRINTK("ata%u: protocol %d task_state %d\n",
ap->ops->irq_clear(ap);
ata_hsm_move(ap, qc, status, 0);
+
+ if (unlikely(qc->err_mask) && (qc->tf.protocol == ATA_PROT_DMA ||
+ qc->tf.protocol == ATA_PROT_ATAPI_DMA))
+ ata_ehi_push_desc(ehi, "BMDMA stat 0x%x", host_stat);
+
return 1; /* irq handled */
idle_irq:
ap->msg_enable = ATA_MSG_DRV | ATA_MSG_ERR | ATA_MSG_WARN;
#endif
- INIT_WORK(&ap->port_task, NULL, NULL);
- INIT_WORK(&ap->hotplug_task, ata_scsi_hotplug, ap);
- INIT_WORK(&ap->scsi_rescan_task, ata_scsi_dev_rescan, ap);
+ INIT_DELAYED_WORK(&ap->port_task, NULL);
+ INIT_DELAYED_WORK(&ap->hotplug_task, ata_scsi_hotplug);
+ INIT_WORK(&ap->scsi_rescan_task, ata_scsi_dev_rescan);
INIT_LIST_HEAD(&ap->eh_done_q);
init_waitqueue_head(&ap->eh_wait_q);
int rc;
DPRINTK("ENTER\n");
-
+
if (ent->irq == 0) {
dev_printk(KERN_ERR, dev, "is not available: No interrupt assigned.\n");
return 0;
ap->ioaddr.bmdma_addr,
irq_line);
- ata_chk_status(ap);
- host->ops->irq_clear(ap);
- ata_eh_freeze_port(ap); /* freeze port before requesting IRQ */
+ /* freeze port before requesting IRQ */
+ ata_eh_freeze_port(ap);
}
/* obtain irq, that may be shared between channels */
}
}
-void ata_pci_device_do_resume(struct pci_dev *pdev)
+int ata_pci_device_do_resume(struct pci_dev *pdev)
{
+ int rc;
+
pci_set_power_state(pdev, PCI_D0);
pci_restore_state(pdev);
- pci_enable_device(pdev);
+
+ rc = pci_enable_device(pdev);
+ if (rc) {
+ dev_printk(KERN_ERR, &pdev->dev,
+ "failed to enable device after resume (%d)\n", rc);
+ return rc;
+ }
+
pci_set_master(pdev);
+ return 0;
}
int ata_pci_device_suspend(struct pci_dev *pdev, pm_message_t mesg)
int ata_pci_device_resume(struct pci_dev *pdev)
{
struct ata_host *host = dev_get_drvdata(&pdev->dev);
+ int rc;
- ata_pci_device_do_resume(pdev);
- ata_host_resume(host);
- return 0;
+ rc = ata_pci_device_do_resume(pdev);
+ if (rc == 0)
+ ata_host_resume(host);
+ return rc;
}
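Since ata_pci_device_do_resume() can now fail, low-level drivers with their own resume hooks should propagate the error rather than resuming the host unconditionally. A hypothetical hook showing the intended call sequence ("foo" is invented; a real driver would do controller-specific reinit between the two calls):

#include <linux/pci.h>
#include <linux/libata.h>

static int foo_pci_device_resume(struct pci_dev *pdev)
{
	struct ata_host *host = dev_get_drvdata(&pdev->dev);
	int rc;

	rc = ata_pci_device_do_resume(pdev);
	if (rc)
		return rc;

	/* controller-specific re-initialization would go here */

	ata_host_resume(host);
	return 0;
}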
#endif /* CONFIG_PCI */