/*
 *  libata-eh.c - libata error handling
 *
 *  Maintained by:  Jeff Garzik <jgarzik@pobox.com>
 *  		    Please ALWAYS copy linux-ide@vger.kernel.org
 *		    on emails.
 *
 *  Copyright 2006 Tejun Heo <htejun@gmail.com>
 *
 *
 *  This program is free software; you can redistribute it and/or
 *  modify it under the terms of the GNU General Public License as
 *  published by the Free Software Foundation; either version 2, or
 *  (at your option) any later version.
 *
 *  This program is distributed in the hope that it will be useful,
 *  but WITHOUT ANY WARRANTY; without even the implied warranty of
 *  MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
 *  General Public License for more details.
 *
 *  You should have received a copy of the GNU General Public License
 *  along with this program; see the file COPYING.  If not, write to
 *  the Free Software Foundation, 675 Mass Ave, Cambridge, MA 02139,
 *  USA.
 *
 *  libata documentation is available via 'make {ps|pdf}docs',
 *  as Documentation/DocBook/libata.*
 *
 *  Hardware documentation available from http://www.t13.org/ and
 *  http://www.sata-io.org/
 */
#include <linux/config.h>
#include <linux/kernel.h>
#include <scsi/scsi.h>
#include <scsi/scsi_host.h>
#include <scsi/scsi_eh.h>
#include <scsi/scsi_device.h>
#include <scsi/scsi_cmnd.h>
#include "scsi_transport_api.h"

#include <linux/libata.h>

#include "libata.h"
48 static void __ata_port_freeze(struct ata_port *ap);
49 static void ata_eh_finish(struct ata_port *ap);
51 static void ata_ering_record(struct ata_ering *ering, int is_io,
52 unsigned int err_mask)
54 struct ata_ering_entry *ent;
59 ering->cursor %= ATA_ERING_SIZE;
61 ent = &ering->ring[ering->cursor];
63 ent->err_mask = err_mask;
64 ent->timestamp = get_jiffies_64();
67 static struct ata_ering_entry * ata_ering_top(struct ata_ering *ering)
69 struct ata_ering_entry *ent = &ering->ring[ering->cursor];
75 static int ata_ering_map(struct ata_ering *ering,
76 int (*map_fn)(struct ata_ering_entry *, void *),
80 struct ata_ering_entry *ent;
84 ent = &ering->ring[idx];
87 rc = map_fn(ent, arg);
90 idx = (idx - 1 + ATA_ERING_SIZE) % ATA_ERING_SIZE;
91 } while (idx != ering->cursor);
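/* Note on traversal order (added commentary, not from the original
 * file): ata_ering_map() starts at ering->cursor, i.e. the most
 * recently recorded entry, and steps backwards through the ring, so a
 * callback such as speed_down_needed_cb() below sees errors in
 * newest-first order.  For example, with a ring size of 32 and
 * cursor == 2, the visiting order is 2, 1, 0, 31, 30, ... until the
 * cursor is reached again or the callback returns non-zero.
 */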
static void ata_eh_clear_action(struct ata_device *dev,
				struct ata_eh_info *ehi, unsigned int action)
{
	int i;

	if (!dev) {
		ehi->action &= ~action;
		for (i = 0; i < ATA_MAX_DEVICES; i++)
			ehi->dev_action[i] &= ~action;
	} else {
		/* doesn't make sense for port-wide EH actions */
		WARN_ON(!(action & ATA_EH_PERDEV_MASK));

		/* break ehi->action into ehi->dev_action */
		if (ehi->action & action) {
			for (i = 0; i < ATA_MAX_DEVICES; i++)
				ehi->dev_action[i] |= ehi->action & action;
			ehi->action &= ~action;
		}

		/* turn off the specified per-dev action */
		ehi->dev_action[dev->devno] &= ~action;
	}
}
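/* Worked example (added commentary): suppose ehi->action has
 * ATA_EH_REVALIDATE set port-wide and ata_eh_clear_action(dev, ehi,
 * ATA_EH_REVALIDATE) is called for device 0.  The bit is first copied
 * into dev_action[] of every device, then cleared from ehi->action and
 * from dev_action[0] only, so device 1 still gets revalidated while
 * the device that is about to be handled does not.
 */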
122 * ata_scsi_timed_out - SCSI layer time out callback
123 * @cmd: timed out SCSI command
 *	Handles SCSI layer timeout.  We race with normal completion of
 *	the qc for @cmd.  If the qc is already gone, we lose and let
 *	the scsi command finish (EH_HANDLED).  Otherwise, the qc has
 *	timed out and EH should be invoked.  Prevent ata_qc_complete()
 *	from finishing it by setting EH_SCHEDULED and return
 *	EH_NOT_HANDLED.
132 * TODO: kill this function once old EH is gone.
135 * Called from timer context
138 * EH_HANDLED or EH_NOT_HANDLED
140 enum scsi_eh_timer_return ata_scsi_timed_out(struct scsi_cmnd *cmd)
142 struct Scsi_Host *host = cmd->device->host;
143 struct ata_port *ap = ata_shost_to_port(host);
145 struct ata_queued_cmd *qc;
146 enum scsi_eh_timer_return ret;
150 if (ap->ops->error_handler) {
151 ret = EH_NOT_HANDLED;
156 spin_lock_irqsave(ap->lock, flags);
	qc = ata_qc_from_tag(ap, ap->active_tag);
	if (qc) {
		WARN_ON(qc->scsicmd != cmd);
160 qc->flags |= ATA_QCFLAG_EH_SCHEDULED;
161 qc->err_mask |= AC_ERR_TIMEOUT;
162 ret = EH_NOT_HANDLED;
164 spin_unlock_irqrestore(ap->lock, flags);
167 DPRINTK("EXIT, ret=%d\n", ret);
172 * ata_scsi_error - SCSI layer error handler callback
173 * @host: SCSI host on which error occurred
175 * Handles SCSI-layer-thrown error events.
178 * Inherited from SCSI layer (none, can sleep)
183 void ata_scsi_error(struct Scsi_Host *host)
185 struct ata_port *ap = ata_shost_to_port(host);
186 spinlock_t *ap_lock = ap->lock;
187 int i, repeat_cnt = ATA_EH_MAX_REPEAT;
192 /* synchronize with port task */
193 ata_port_flush_task(ap);
195 /* synchronize with host_set lock and sort out timeouts */
	/* For new EH, all qcs are finished in one of three ways -
	 * normal completion, error completion, and SCSI timeout.
	 * Both completions can race against SCSI timeout.  When normal
	 * completion wins, the qc never reaches EH.  When error
	 * completion wins, the qc has ATA_QCFLAG_FAILED set.
	 *
	 * When SCSI timeout wins, things are a bit more complex.
	 * Normal or error completion can occur after the timeout but
	 * before this point.  In such cases, both types of
	 * completions are honored.  A scmd is determined to have
	 * timed out iff its associated qc is active and not failed.
	 */
209 if (ap->ops->error_handler) {
210 struct scsi_cmnd *scmd, *tmp;
213 spin_lock_irqsave(ap_lock, flags);
215 list_for_each_entry_safe(scmd, tmp, &host->eh_cmd_q, eh_entry) {
216 struct ata_queued_cmd *qc;
218 for (i = 0; i < ATA_MAX_QUEUE; i++) {
219 qc = __ata_qc_from_tag(ap, i);
				if (qc->flags & ATA_QCFLAG_ACTIVE &&
				    qc->scsicmd == scmd)
					break;
225 if (i < ATA_MAX_QUEUE) {
226 /* the scmd has an associated qc */
227 if (!(qc->flags & ATA_QCFLAG_FAILED)) {
228 /* which hasn't failed yet, timeout */
229 qc->err_mask |= AC_ERR_TIMEOUT;
230 qc->flags |= ATA_QCFLAG_FAILED;
234 /* Normal completion occurred after
235 * SCSI timeout but before this point.
236 * Successfully complete it.
238 scmd->retries = scmd->allowed;
239 scsi_eh_finish_cmd(scmd, &ap->eh_done_q);
		/* If we have timed out qcs, they belong to EH from
		 * this point on, but the state of the controller is
		 * unknown.  Freeze the port to make sure the IRQ
		 * handler doesn't diddle with those qcs.  This must
		 * be done atomically w.r.t. setting ATA_QCFLAG_FAILED.
		 */
250 __ata_port_freeze(ap);
252 spin_unlock_irqrestore(ap_lock, flags);
254 spin_unlock_wait(ap_lock);
257 /* invoke error handler */
258 if (ap->ops->error_handler) {
259 /* fetch & clear EH info */
260 spin_lock_irqsave(ap_lock, flags);
262 memset(&ap->eh_context, 0, sizeof(ap->eh_context));
263 ap->eh_context.i = ap->eh_info;
264 memset(&ap->eh_info, 0, sizeof(ap->eh_info));
266 ap->flags |= ATA_FLAG_EH_IN_PROGRESS;
267 ap->flags &= ~ATA_FLAG_EH_PENDING;
269 spin_unlock_irqrestore(ap_lock, flags);
271 /* invoke EH. if unloading, just finish failed qcs */
272 if (!(ap->flags & ATA_FLAG_UNLOADING))
273 ap->ops->error_handler(ap);
		/* Exception might have happened after ->error_handler
		 * recovered the port but before this point.  Repeat
		 * EH in such case.
		 */
281 spin_lock_irqsave(ap_lock, flags);
283 if (ap->flags & ATA_FLAG_EH_PENDING) {
285 ata_port_printk(ap, KERN_INFO,
286 "EH pending after completion, "
287 "repeating EH (cnt=%d)\n", repeat_cnt);
288 spin_unlock_irqrestore(ap_lock, flags);
291 ata_port_printk(ap, KERN_ERR, "EH pending after %d "
292 "tries, giving up\n", ATA_EH_MAX_REPEAT);
295 /* this run is complete, make sure EH info is clear */
296 memset(&ap->eh_info, 0, sizeof(ap->eh_info));
		/* Clear host_eh_scheduled while holding ap_lock such
		 * that if exception occurs after this point but
		 * before EH completion, SCSI midlayer will
		 * re-initiate EH.
		 */
303 host->host_eh_scheduled = 0;
305 spin_unlock_irqrestore(ap_lock, flags);
307 WARN_ON(ata_qc_from_tag(ap, ap->active_tag) == NULL);
308 ap->ops->eng_timeout(ap);
311 /* finish or retry handled scmd's and clean up */
312 WARN_ON(host->host_failed || !list_empty(&host->eh_cmd_q));
314 scsi_eh_flush_done_q(&ap->eh_done_q);
317 spin_lock_irqsave(ap_lock, flags);
319 if (ap->flags & ATA_FLAG_LOADING) {
320 ap->flags &= ~ATA_FLAG_LOADING;
322 if (ap->flags & ATA_FLAG_SCSI_HOTPLUG)
323 queue_work(ata_aux_wq, &ap->hotplug_task);
324 if (ap->flags & ATA_FLAG_RECOVERED)
325 ata_port_printk(ap, KERN_INFO, "EH complete\n");
328 ap->flags &= ~(ATA_FLAG_SCSI_HOTPLUG | ATA_FLAG_RECOVERED);
330 /* tell wait_eh that we're done */
331 ap->flags &= ~ATA_FLAG_EH_IN_PROGRESS;
332 wake_up_all(&ap->eh_wait_q);
334 spin_unlock_irqrestore(ap_lock, flags);
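/* Summary of the flow above (added commentary): ata_scsi_error()
 * snapshots ap->eh_info into ap->eh_context, invokes
 * ap->ops->error_handler(), and repeats up to ATA_EH_MAX_REPEAT times
 * if new exceptions were recorded while EH was running.  Only after
 * that are the handled scmds given back to the SCSI midlayer via
 * scsi_eh_flush_done_q() and waiters woken through eh_wait_q.
 */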
340 * ata_port_wait_eh - Wait for the currently pending EH to complete
341 * @ap: Port to wait EH for
343 * Wait until the currently pending EH is complete.
346 * Kernel thread context (may sleep).
348 void ata_port_wait_eh(struct ata_port *ap)
354 spin_lock_irqsave(ap->lock, flags);
356 while (ap->flags & (ATA_FLAG_EH_PENDING | ATA_FLAG_EH_IN_PROGRESS)) {
357 prepare_to_wait(&ap->eh_wait_q, &wait, TASK_UNINTERRUPTIBLE);
358 spin_unlock_irqrestore(ap->lock, flags);
360 spin_lock_irqsave(ap->lock, flags);
362 finish_wait(&ap->eh_wait_q, &wait);
364 spin_unlock_irqrestore(ap->lock, flags);
366 /* make sure SCSI EH is complete */
367 if (scsi_host_in_recovery(ap->host)) {
374 * ata_qc_timeout - Handle timeout of queued command
375 * @qc: Command that timed out
377 * Some part of the kernel (currently, only the SCSI layer)
378 * has noticed that the active command on port @ap has not
379 * completed after a specified length of time. Handle this
380 * condition by disabling DMA (if necessary) and completing
381 * transactions, with error if necessary.
383 * This also handles the case of the "lost interrupt", where
384 * for some reason (possibly hardware bug, possibly driver bug)
385 * an interrupt was not delivered to the driver, even though the
386 * transaction completed successfully.
388 * TODO: kill this function once old EH is gone.
391 * Inherited from SCSI layer (none, can sleep)
393 static void ata_qc_timeout(struct ata_queued_cmd *qc)
395 struct ata_port *ap = qc->ap;
396 u8 host_stat = 0, drv_stat;
401 ap->hsm_task_state = HSM_ST_IDLE;
403 spin_lock_irqsave(ap->lock, flags);
405 switch (qc->tf.protocol) {
408 case ATA_PROT_ATAPI_DMA:
409 host_stat = ap->ops->bmdma_status(ap);
411 /* before we do anything else, clear DMA-Start bit */
412 ap->ops->bmdma_stop(qc);
418 drv_stat = ata_chk_status(ap);
420 /* ack bmdma irq events */
421 ap->ops->irq_clear(ap);
423 ata_dev_printk(qc->dev, KERN_ERR, "command 0x%x timeout, "
424 "stat 0x%x host_stat 0x%x\n",
425 qc->tf.command, drv_stat, host_stat);
427 /* complete taskfile transaction */
428 qc->err_mask |= AC_ERR_TIMEOUT;
432 spin_unlock_irqrestore(ap->lock, flags);
434 ata_eh_qc_complete(qc);
440 * ata_eng_timeout - Handle timeout of queued command
441 * @ap: Port on which timed-out command is active
443 * Some part of the kernel (currently, only the SCSI layer)
444 * has noticed that the active command on port @ap has not
445 * completed after a specified length of time. Handle this
446 * condition by disabling DMA (if necessary) and completing
447 * transactions, with error if necessary.
449 * This also handles the case of the "lost interrupt", where
450 * for some reason (possibly hardware bug, possibly driver bug)
451 * an interrupt was not delivered to the driver, even though the
452 * transaction completed successfully.
454 * TODO: kill this function once old EH is gone.
457 * Inherited from SCSI layer (none, can sleep)
459 void ata_eng_timeout(struct ata_port *ap)
463 ata_qc_timeout(ata_qc_from_tag(ap, ap->active_tag));
469 * ata_qc_schedule_eh - schedule qc for error handling
470 * @qc: command to schedule error handling for
472 * Schedule error handling for @qc. EH will kick in as soon as
473 * other commands are drained.
476 * spin_lock_irqsave(host_set lock)
478 void ata_qc_schedule_eh(struct ata_queued_cmd *qc)
480 struct ata_port *ap = qc->ap;
482 WARN_ON(!ap->ops->error_handler);
484 qc->flags |= ATA_QCFLAG_FAILED;
485 qc->ap->flags |= ATA_FLAG_EH_PENDING;
487 /* The following will fail if timeout has already expired.
488 * ata_scsi_error() takes care of such scmds on EH entry.
489 * Note that ATA_QCFLAG_FAILED is unconditionally set after
490 * this function completes.
492 scsi_req_abort_cmd(qc->scsicmd);
496 * ata_port_schedule_eh - schedule error handling without a qc
497 * @ap: ATA port to schedule EH for
499 * Schedule error handling for @ap. EH will kick in as soon as
500 * all commands are drained.
503 * spin_lock_irqsave(host_set lock)
505 void ata_port_schedule_eh(struct ata_port *ap)
507 WARN_ON(!ap->ops->error_handler);
509 ap->flags |= ATA_FLAG_EH_PENDING;
510 scsi_schedule_eh(ap->host);
512 DPRINTK("port EH scheduled\n");
516 * ata_port_abort - abort all qc's on the port
517 * @ap: ATA port to abort qc's for
519 * Abort all active qc's of @ap and schedule EH.
522 * spin_lock_irqsave(host_set lock)
525 * Number of aborted qc's.
527 int ata_port_abort(struct ata_port *ap)
529 int tag, nr_aborted = 0;
531 WARN_ON(!ap->ops->error_handler);
533 for (tag = 0; tag < ATA_MAX_QUEUE; tag++) {
534 struct ata_queued_cmd *qc = ata_qc_from_tag(ap, tag);
537 qc->flags |= ATA_QCFLAG_FAILED;
544 ata_port_schedule_eh(ap);
550 * __ata_port_freeze - freeze port
551 * @ap: ATA port to freeze
 *	This function is called when HSM violation or some other
 *	condition disrupts normal operation of the port.  A frozen port
 *	is not allowed to perform any operation until the port is
 *	thawed, which usually follows a successful reset.
 *
 *	ap->ops->freeze() callback can be used for freezing the port
 *	hardware-wise (e.g. mask interrupt and stop DMA engine).  If a
 *	port cannot be frozen hardware-wise, the interrupt handler
 *	must ack and clear interrupts unconditionally while the port
 *	is frozen.
565 * spin_lock_irqsave(host_set lock)
static void __ata_port_freeze(struct ata_port *ap)
{
	WARN_ON(!ap->ops->error_handler);

	if (ap->ops->freeze)
		ap->ops->freeze(ap);

	ap->flags |= ATA_FLAG_FROZEN;

	DPRINTK("ata%u port frozen\n", ap->id);
}
580 * ata_port_freeze - abort & freeze port
581 * @ap: ATA port to freeze
583 * Abort and freeze @ap.
586 * spin_lock_irqsave(host_set lock)
589 * Number of aborted commands.
591 int ata_port_freeze(struct ata_port *ap)
595 WARN_ON(!ap->ops->error_handler);
597 nr_aborted = ata_port_abort(ap);
598 __ata_port_freeze(ap);
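/* Typical LLDD usage (illustrative sketch, not part of this file): an
 * interrupt or completion path that detects a fatal error can, while
 * holding the host_set lock, do something like
 *
 *	if (unlikely(fatal_error_detected))	// hypothetical condition
 *		ata_port_freeze(ap);		// aborts qcs, marks FROZEN
 *
 * EH then runs, resets the link and thaws the port again via
 * ata_eh_thaw_port().
 */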
604 * ata_eh_freeze_port - EH helper to freeze port
605 * @ap: ATA port to freeze
612 void ata_eh_freeze_port(struct ata_port *ap)
	if (!ap->ops->error_handler)
		return;
619 spin_lock_irqsave(ap->lock, flags);
620 __ata_port_freeze(ap);
621 spin_unlock_irqrestore(ap->lock, flags);
 *	ata_eh_thaw_port - EH helper to thaw port
626 * @ap: ATA port to thaw
628 * Thaw frozen port @ap.
633 void ata_eh_thaw_port(struct ata_port *ap)
	if (!ap->ops->error_handler)
		return;
	spin_lock_irqsave(ap->lock, flags);

	ap->flags &= ~ATA_FLAG_FROZEN;

	if (ap->ops->thaw)
		ap->ops->thaw(ap);

	spin_unlock_irqrestore(ap->lock, flags);

	DPRINTK("ata%u port thawed\n", ap->id);
static void ata_eh_scsidone(struct scsi_cmnd *scmd)
{
	/* nada */
}
657 static void __ata_eh_qc_complete(struct ata_queued_cmd *qc)
659 struct ata_port *ap = qc->ap;
660 struct scsi_cmnd *scmd = qc->scsicmd;
663 spin_lock_irqsave(ap->lock, flags);
664 qc->scsidone = ata_eh_scsidone;
665 __ata_qc_complete(qc);
666 WARN_ON(ata_tag_valid(qc->tag));
667 spin_unlock_irqrestore(ap->lock, flags);
669 scsi_eh_finish_cmd(scmd, &ap->eh_done_q);
673 * ata_eh_qc_complete - Complete an active ATA command from EH
674 * @qc: Command to complete
676 * Indicate to the mid and upper layers that an ATA command has
677 * completed. To be used from EH.
679 void ata_eh_qc_complete(struct ata_queued_cmd *qc)
681 struct scsi_cmnd *scmd = qc->scsicmd;
682 scmd->retries = scmd->allowed;
683 __ata_eh_qc_complete(qc);
687 * ata_eh_qc_retry - Tell midlayer to retry an ATA command after EH
688 * @qc: Command to retry
690 * Indicate to the mid and upper layers that an ATA command
691 * should be retried. To be used from EH.
693 * SCSI midlayer limits the number of retries to scmd->allowed.
694 * scmd->retries is decremented for commands which get retried
695 * due to unrelated failures (qc->err_mask is zero).
void ata_eh_qc_retry(struct ata_queued_cmd *qc)
{
	struct scsi_cmnd *scmd = qc->scsicmd;
	if (!qc->err_mask && scmd->retries)
		scmd->retries--;
	__ata_eh_qc_complete(qc);
}
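/* Example (added commentary): a command that failed only because the
 * port was being error-handled has qc->err_mask == 0, so the
 * scmd->retries-- above refunds the attempt it just lost and
 * EH-induced retries do not eat into the midlayer's scmd->allowed
 * budget.
 */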
706 * ata_eh_detach_dev - detach ATA device
707 * @dev: ATA device to detach
714 static void ata_eh_detach_dev(struct ata_device *dev)
716 struct ata_port *ap = dev->ap;
719 ata_dev_disable(dev);
721 spin_lock_irqsave(ap->lock, flags);
723 dev->flags &= ~ATA_DFLAG_DETACH;
725 if (ata_scsi_offline_dev(dev)) {
726 dev->flags |= ATA_DFLAG_DETACHED;
727 ap->flags |= ATA_FLAG_SCSI_HOTPLUG;
730 spin_unlock_irqrestore(ap->lock, flags);
734 * ata_eh_about_to_do - about to perform eh_action
735 * @ap: target ATA port
736 * @dev: target ATA dev for per-dev action (can be NULL)
737 * @action: action about to be performed
 *	Called just before performing EH actions to clear related bits
 *	in @ap->eh_info such that eh actions are not unnecessarily
 *	repeated.
746 static void ata_eh_about_to_do(struct ata_port *ap, struct ata_device *dev,
751 spin_lock_irqsave(ap->lock, flags);
752 ata_eh_clear_action(dev, &ap->eh_info, action);
753 ap->flags |= ATA_FLAG_RECOVERED;
754 spin_unlock_irqrestore(ap->lock, flags);
758 * ata_eh_done - EH action complete
759 * @ap: target ATA port
760 * @dev: target ATA dev for per-dev action (can be NULL)
761 * @action: action just completed
763 * Called right after performing EH actions to clear related bits
764 * in @ap->eh_context.
769 static void ata_eh_done(struct ata_port *ap, struct ata_device *dev,
772 ata_eh_clear_action(dev, &ap->eh_context.i, action);
776 * ata_err_string - convert err_mask to descriptive string
777 * @err_mask: error mask to convert to string
 *	Convert @err_mask to descriptive string.  Errors are
 *	prioritized according to severity and only the most severe
 *	error is reported.
787 * Descriptive string for @err_mask
789 static const char * ata_err_string(unsigned int err_mask)
791 if (err_mask & AC_ERR_HOST_BUS)
792 return "host bus error";
793 if (err_mask & AC_ERR_ATA_BUS)
794 return "ATA bus error";
	if (err_mask & AC_ERR_TIMEOUT)
		return "timeout";
797 if (err_mask & AC_ERR_HSM)
798 return "HSM violation";
799 if (err_mask & AC_ERR_SYSTEM)
800 return "internal error";
801 if (err_mask & AC_ERR_MEDIA)
802 return "media error";
803 if (err_mask & AC_ERR_INVALID)
804 return "invalid argument";
805 if (err_mask & AC_ERR_DEV)
806 return "device error";
807 return "unknown error";
811 * ata_read_log_page - read a specific log page
812 * @dev: target device
813 * @page: page to read
814 * @buf: buffer to store read page
815 * @sectors: number of sectors to read
817 * Read log page using READ_LOG_EXT command.
820 * Kernel thread context (may sleep).
823 * 0 on success, AC_ERR_* mask otherwise.
825 static unsigned int ata_read_log_page(struct ata_device *dev,
826 u8 page, void *buf, unsigned int sectors)
828 struct ata_taskfile tf;
829 unsigned int err_mask;
831 DPRINTK("read log page - page %d\n", page);
	ata_tf_init(dev, &tf);
	tf.command = ATA_CMD_READ_LOG_EXT;
	tf.lbal = page;
	tf.nsect = sectors;
	tf.hob_nsect = sectors >> 8;
	tf.flags |= ATA_TFLAG_ISADDR | ATA_TFLAG_LBA48 | ATA_TFLAG_DEVICE;
	tf.protocol = ATA_PROT_PIO;
841 err_mask = ata_exec_internal(dev, &tf, NULL, DMA_FROM_DEVICE,
842 buf, sectors * ATA_SECT_SIZE);
844 DPRINTK("EXIT, err_mask=%x\n", err_mask);
849 * ata_eh_read_log_10h - Read log page 10h for NCQ error details
850 * @dev: Device to read log page 10h from
851 * @tag: Resulting tag of the failed command
852 * @tf: Resulting taskfile registers of the failed command
 *	Read log page 10h to obtain NCQ error details and clear error
 *	condition.
858 * Kernel thread context (may sleep).
861 * 0 on success, -errno otherwise.
863 static int ata_eh_read_log_10h(struct ata_device *dev,
864 int *tag, struct ata_taskfile *tf)
866 u8 *buf = dev->ap->sector_buf;
867 unsigned int err_mask;
871 err_mask = ata_read_log_page(dev, ATA_LOG_SATA_NCQ, buf, 1);
	for (i = 0; i < ATA_SECT_SIZE; i++)
		csum += buf[i];
	if (csum)
		ata_dev_printk(dev, KERN_WARNING,
			       "invalid checksum 0x%x on log page 10h\n", csum);
	*tag = buf[0] & 0x1f;

	tf->command = buf[2];
	tf->feature = buf[3];
	tf->lbal = buf[4];
	tf->lbam = buf[5];
	tf->lbah = buf[6];
	tf->device = buf[7];
	tf->hob_lbal = buf[8];
	tf->hob_lbam = buf[9];
	tf->hob_lbah = buf[10];
	tf->nsect = buf[12];
	tf->hob_nsect = buf[13];
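	/* Layout consumed above (added commentary, following the SATA
	 * NCQ error log definition): byte 0 carries the tag of the
	 * failed command in bits 4:0, bytes 2 and 3 hold the status
	 * and error registers (stored into tf->command and
	 * tf->feature), and bytes 4..13 hold the LBA, device and
	 * sector count registers together with their hob counterparts.
	 */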
903 * atapi_eh_request_sense - perform ATAPI REQUEST_SENSE
904 * @dev: device to perform REQUEST_SENSE to
905 * @sense_buf: result sense data buffer (SCSI_SENSE_BUFFERSIZE bytes long)
 *	Perform ATAPI REQUEST_SENSE after the device reported CHECK
 *	CONDITION.  This function is an EH helper.
911 * Kernel thread context (may sleep).
914 * 0 on success, AC_ERR_* mask on failure
916 static unsigned int atapi_eh_request_sense(struct ata_device *dev,
917 unsigned char *sense_buf)
919 struct ata_port *ap = dev->ap;
920 struct ata_taskfile tf;
921 u8 cdb[ATAPI_CDB_LEN];
923 DPRINTK("ATAPI request sense\n");
925 ata_tf_init(dev, &tf);
927 /* FIXME: is this needed? */
928 memset(sense_buf, 0, SCSI_SENSE_BUFFERSIZE);
930 /* XXX: why tf_read here? */
931 ap->ops->tf_read(ap, &tf);
	/* fill these in, for the case where they are -not- overwritten */
	sense_buf[0] = 0x70;
	sense_buf[2] = tf.feature >> 4;
937 memset(cdb, 0, ATAPI_CDB_LEN);
938 cdb[0] = REQUEST_SENSE;
939 cdb[4] = SCSI_SENSE_BUFFERSIZE;
941 tf.flags |= ATA_TFLAG_ISADDR | ATA_TFLAG_DEVICE;
942 tf.command = ATA_CMD_PACKET;
944 /* is it pointless to prefer PIO for "safety reasons"? */
945 if (ap->flags & ATA_FLAG_PIO_DMA) {
946 tf.protocol = ATA_PROT_ATAPI_DMA;
947 tf.feature |= ATAPI_PKT_DMA;
949 tf.protocol = ATA_PROT_ATAPI;
950 tf.lbam = (8 * 1024) & 0xff;
951 tf.lbah = (8 * 1024) >> 8;
954 return ata_exec_internal(dev, &tf, cdb, DMA_FROM_DEVICE,
955 sense_buf, SCSI_SENSE_BUFFERSIZE);
959 * ata_eh_analyze_serror - analyze SError for a failed port
960 * @ap: ATA port to analyze SError for
 *	Analyze SError if available and further determine cause of
 *	failure.
968 static void ata_eh_analyze_serror(struct ata_port *ap)
970 struct ata_eh_context *ehc = &ap->eh_context;
971 u32 serror = ehc->i.serror;
972 unsigned int err_mask = 0, action = 0;
974 if (serror & SERR_PERSISTENT) {
975 err_mask |= AC_ERR_ATA_BUS;
976 action |= ATA_EH_HARDRESET;
	if (serror &
	    (SERR_DATA_RECOVERED | SERR_COMM_RECOVERED | SERR_DATA)) {
980 err_mask |= AC_ERR_ATA_BUS;
981 action |= ATA_EH_SOFTRESET;
983 if (serror & SERR_PROTOCOL) {
984 err_mask |= AC_ERR_HSM;
985 action |= ATA_EH_SOFTRESET;
987 if (serror & SERR_INTERNAL) {
988 err_mask |= AC_ERR_SYSTEM;
989 action |= ATA_EH_SOFTRESET;
991 if (serror & (SERR_PHYRDY_CHG | SERR_DEV_XCHG))
992 ata_ehi_hotplugged(&ehc->i);
994 ehc->i.err_mask |= err_mask;
995 ehc->i.action |= action;
999 * ata_eh_analyze_ncq_error - analyze NCQ error
1000 * @ap: ATA port to analyze NCQ error for
 *	Read log page 10h, determine the offending qc and acquire
 *	error status TF.  For NCQ device errors, all LLDDs have to do
 *	is setting AC_ERR_DEV in ehi->err_mask.  This function takes
 *	care of the rest.
1008 * Kernel thread context (may sleep).
1010 static void ata_eh_analyze_ncq_error(struct ata_port *ap)
1012 struct ata_eh_context *ehc = &ap->eh_context;
1013 struct ata_device *dev = ap->device;
1014 struct ata_queued_cmd *qc;
1015 struct ata_taskfile tf;
1018 /* if frozen, we can't do much */
1019 if (ap->flags & ATA_FLAG_FROZEN)
1022 /* is it NCQ device error? */
1023 if (!ap->sactive || !(ehc->i.err_mask & AC_ERR_DEV))
1026 /* has LLDD analyzed already? */
1027 for (tag = 0; tag < ATA_MAX_QUEUE; tag++) {
1028 qc = __ata_qc_from_tag(ap, tag);
1030 if (!(qc->flags & ATA_QCFLAG_FAILED))
1037 /* okay, this error is ours */
1038 rc = ata_eh_read_log_10h(dev, &tag, &tf);
1040 ata_port_printk(ap, KERN_ERR, "failed to read log page 10h "
1041 "(errno=%d)\n", rc);
1045 if (!(ap->sactive & (1 << tag))) {
1046 ata_port_printk(ap, KERN_ERR, "log page 10h reported "
1047 "inactive tag %d\n", tag);
1051 /* we've got the perpetrator, condemn it */
1052 qc = __ata_qc_from_tag(ap, tag);
1053 memcpy(&qc->result_tf, &tf, sizeof(tf));
1054 qc->err_mask |= AC_ERR_DEV;
1055 ehc->i.err_mask &= ~AC_ERR_DEV;
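/* At this point (added commentary) the offending NCQ command carries
 * its own result_tf and AC_ERR_DEV, just like a non-NCQ device error,
 * so the rest of EH - ata_eh_analyze_tf(), reporting and retry -
 * needs no NCQ-specific handling.
 */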
1059 * ata_eh_analyze_tf - analyze taskfile of a failed qc
1060 * @qc: qc to analyze
1061 * @tf: Taskfile registers to analyze
 *	Analyze taskfile of @qc and further determine cause of
 *	failure.  This function also requests ATAPI sense data if
 *	available.
1068 * Kernel thread context (may sleep).
1071 * Determined recovery action
1073 static unsigned int ata_eh_analyze_tf(struct ata_queued_cmd *qc,
1074 const struct ata_taskfile *tf)
1076 unsigned int tmp, action = 0;
1077 u8 stat = tf->command, err = tf->feature;
1079 if ((stat & (ATA_BUSY | ATA_DRQ | ATA_DRDY)) != ATA_DRDY) {
1080 qc->err_mask |= AC_ERR_HSM;
1081 return ATA_EH_SOFTRESET;
	if (!(qc->err_mask & AC_ERR_DEV))
		return action;

	switch (qc->dev->class) {
	case ATA_DEV_ATA:
		if (err & ATA_ICRC)
			qc->err_mask |= AC_ERR_ATA_BUS;
		if (err & ATA_UNC)
			qc->err_mask |= AC_ERR_MEDIA;
		if (err & ATA_IDNF)
			qc->err_mask |= AC_ERR_INVALID;
		break;
	case ATA_DEV_ATAPI:
		tmp = atapi_eh_request_sense(qc->dev,
					     qc->scsicmd->sense_buffer);
1101 /* ATA_QCFLAG_SENSE_VALID is used to tell
1102 * atapi_qc_complete() that sense data is
1105 * TODO: interpret sense data and set
1106 * appropriate err_mask.
1108 qc->flags |= ATA_QCFLAG_SENSE_VALID;
1110 qc->err_mask |= tmp;
1113 if (qc->err_mask & (AC_ERR_HSM | AC_ERR_TIMEOUT | AC_ERR_ATA_BUS))
1114 action |= ATA_EH_SOFTRESET;
1119 static int ata_eh_categorize_ering_entry(struct ata_ering_entry *ent)
1121 if (ent->err_mask & (AC_ERR_ATA_BUS | AC_ERR_TIMEOUT))
1125 if (ent->err_mask & AC_ERR_HSM)
1127 if ((ent->err_mask &
1128 (AC_ERR_DEV|AC_ERR_MEDIA|AC_ERR_INVALID)) == AC_ERR_DEV)
1135 struct speed_down_needed_arg {
1140 static int speed_down_needed_cb(struct ata_ering_entry *ent, void *void_arg)
1142 struct speed_down_needed_arg *arg = void_arg;
1144 if (ent->timestamp < arg->since)
1147 arg->nr_errors[ata_eh_categorize_ering_entry(ent)]++;
 *	ata_eh_speed_down_needed - Determine whether speed down is necessary
 *	@dev: Device of interest
 *
 *	This function examines the error ring of @dev and determines
 *	whether speed down is necessary.  Speed down is necessary if
 *	there have been more than 3 Cat-1 errors or 10 Cat-2 errors
 *	during the last 15 minutes.
 *
 *	Cat-1 errors are ATA_BUS, TIMEOUT for any command and HSM
 *	violation for known supported commands.
 *
 *	Cat-2 errors are unclassified DEV error for known supported
 *	commands.
1167 * Inherited from caller.
1170 * 1 if speed down is necessary, 0 otherwise
1172 static int ata_eh_speed_down_needed(struct ata_device *dev)
1174 const u64 interval = 15LLU * 60 * HZ;
1175 static const int err_limits[3] = { -1, 3, 10 };
1176 struct speed_down_needed_arg arg;
1177 struct ata_ering_entry *ent;
1181 ent = ata_ering_top(&dev->ering);
1185 err_cat = ata_eh_categorize_ering_entry(ent);
1189 memset(&arg, 0, sizeof(arg));
1191 j64 = get_jiffies_64();
	if (j64 >= interval)
		arg.since = j64 - interval;
	else
		arg.since = 0;
1197 ata_ering_map(&dev->ering, speed_down_needed_cb, &arg);
1199 return arg.nr_errors[err_cat] > err_limits[err_cat];
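/* Example (added commentary): a device that produced four ATA bus
 * errors in the last 15 minutes exceeds the Cat-1 limit of 3 from
 * err_limits[] above, so this returns 1 and ata_eh_speed_down() below
 * will first try to lower the SATA link speed and then the transfer
 * mode.
 */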
1203 * ata_eh_speed_down - record error and speed down if necessary
1204 * @dev: Failed device
1205 * @is_io: Did the device fail during normal IO?
1206 * @err_mask: err_mask of the error
 *	Record error and examine error history to determine whether
 *	adjusting transmission speed is necessary.  It also sets
 *	transmission limits appropriately if such adjustment is
 *	necessary.
1214 * Kernel thread context (may sleep).
1217 * 0 on success, -errno otherwise
1219 static int ata_eh_speed_down(struct ata_device *dev, int is_io,
1220 unsigned int err_mask)
1225 /* record error and determine whether speed down is necessary */
1226 ata_ering_record(&dev->ering, is_io, err_mask);
1228 if (!ata_eh_speed_down_needed(dev))
1231 /* speed down SATA link speed if possible */
1232 if (sata_down_spd_limit(dev->ap) == 0)
1233 return ATA_EH_HARDRESET;
1235 /* lower transfer mode */
1236 if (ata_down_xfermask_limit(dev, 0) == 0)
1237 return ATA_EH_SOFTRESET;
1239 ata_dev_printk(dev, KERN_ERR,
1240 "speed down requested but no transfer mode left\n");
1245 * ata_eh_autopsy - analyze error and determine recovery action
1246 * @ap: ATA port to perform autopsy on
1248 * Analyze why @ap failed and determine which recovery action is
1249 * needed. This function also sets more detailed AC_ERR_* values
 *	and fills sense data for ATAPI CHECK CONDITION.
1253 * Kernel thread context (may sleep).
1255 static void ata_eh_autopsy(struct ata_port *ap)
1257 struct ata_eh_context *ehc = &ap->eh_context;
1258 unsigned int action = ehc->i.action;
1259 struct ata_device *failed_dev = NULL;
1260 unsigned int all_err_mask = 0;
1267 /* obtain and analyze SError */
1268 rc = sata_scr_read(ap, SCR_ERROR, &serror);
1270 ehc->i.serror |= serror;
1271 ata_eh_analyze_serror(ap);
1272 } else if (rc != -EOPNOTSUPP)
1273 action |= ATA_EH_HARDRESET;
1275 /* analyze NCQ failure */
1276 ata_eh_analyze_ncq_error(ap);
1278 /* any real error trumps AC_ERR_OTHER */
1279 if (ehc->i.err_mask & ~AC_ERR_OTHER)
1280 ehc->i.err_mask &= ~AC_ERR_OTHER;
1282 all_err_mask |= ehc->i.err_mask;
1284 for (tag = 0; tag < ATA_MAX_QUEUE; tag++) {
1285 struct ata_queued_cmd *qc = __ata_qc_from_tag(ap, tag);
1287 if (!(qc->flags & ATA_QCFLAG_FAILED))
1290 /* inherit upper level err_mask */
1291 qc->err_mask |= ehc->i.err_mask;
1294 action |= ata_eh_analyze_tf(qc, &qc->result_tf);
1296 /* DEV errors are probably spurious in case of ATA_BUS error */
1297 if (qc->err_mask & AC_ERR_ATA_BUS)
1298 qc->err_mask &= ~(AC_ERR_DEV | AC_ERR_MEDIA |
1301 /* any real error trumps unknown error */
1302 if (qc->err_mask & ~AC_ERR_OTHER)
1303 qc->err_mask &= ~AC_ERR_OTHER;
1305 /* SENSE_VALID trumps dev/unknown error and revalidation */
1306 if (qc->flags & ATA_QCFLAG_SENSE_VALID) {
1307 qc->err_mask &= ~(AC_ERR_DEV | AC_ERR_OTHER);
1308 action &= ~ATA_EH_REVALIDATE;
1311 /* accumulate error info */
1312 failed_dev = qc->dev;
1313 all_err_mask |= qc->err_mask;
1314 if (qc->flags & ATA_QCFLAG_IO)
1318 /* enforce default EH actions */
1319 if (ap->flags & ATA_FLAG_FROZEN ||
1320 all_err_mask & (AC_ERR_HSM | AC_ERR_TIMEOUT))
1321 action |= ATA_EH_SOFTRESET;
1322 else if (all_err_mask)
1323 action |= ATA_EH_REVALIDATE;
	/* if we have offending qcs and the associated failed device */
	if (failed_dev) {
		action |= ata_eh_speed_down(failed_dev, is_io, all_err_mask);
1330 /* perform per-dev EH action only on the offending device */
1331 ehc->i.dev_action[failed_dev->devno] |=
1332 action & ATA_EH_PERDEV_MASK;
1333 action &= ~ATA_EH_PERDEV_MASK;
1336 /* record autopsy result */
1337 ehc->i.dev = failed_dev;
1338 ehc->i.action = action;
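/* The autopsy result (added commentary) is a single (dev, action,
 * err_mask) summary in ehc->i; per-device bits such as
 * ATA_EH_REVALIDATE have already been pushed into ehc->i.dev_action[]
 * above so that recovery only acts on the offending device.
 */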
1344 * ata_eh_report - report error handling to user
1345 * @ap: ATA port EH is going on
1347 * Report EH to user.
1352 static void ata_eh_report(struct ata_port *ap)
1354 struct ata_eh_context *ehc = &ap->eh_context;
1355 const char *frozen, *desc;
1356 int tag, nr_failed = 0;
	desc = NULL;
	if (ehc->i.desc[0] != '\0')
		desc = ehc->i.desc;
1362 for (tag = 0; tag < ATA_MAX_QUEUE; tag++) {
1363 struct ata_queued_cmd *qc = __ata_qc_from_tag(ap, tag);
1365 if (!(qc->flags & ATA_QCFLAG_FAILED))
1367 if (qc->flags & ATA_QCFLAG_SENSE_VALID && !qc->err_mask)
	if (!nr_failed && !ehc->i.err_mask)
		return;

	frozen = "";
	if (ap->flags & ATA_FLAG_FROZEN)
		frozen = " frozen";

	if (ehc->i.dev) {
		ata_dev_printk(ehc->i.dev, KERN_ERR, "exception Emask 0x%x "
			       "SAct 0x%x SErr 0x%x action 0x%x%s\n",
			       ehc->i.err_mask, ap->sactive, ehc->i.serror,
			       ehc->i.action, frozen);
		if (desc)
			ata_dev_printk(ehc->i.dev, KERN_ERR, "(%s)\n", desc);
	} else {
		ata_port_printk(ap, KERN_ERR, "exception Emask 0x%x "
				"SAct 0x%x SErr 0x%x action 0x%x%s\n",
				ehc->i.err_mask, ap->sactive, ehc->i.serror,
				ehc->i.action, frozen);
		if (desc)
			ata_port_printk(ap, KERN_ERR, "(%s)\n", desc);
	}
1396 for (tag = 0; tag < ATA_MAX_QUEUE; tag++) {
1397 struct ata_queued_cmd *qc = __ata_qc_from_tag(ap, tag);
1399 if (!(qc->flags & ATA_QCFLAG_FAILED) || !qc->err_mask)
1402 ata_dev_printk(qc->dev, KERN_ERR, "tag %d cmd 0x%x "
1403 "Emask 0x%x stat 0x%x err 0x%x (%s)\n",
1404 qc->tag, qc->tf.command, qc->err_mask,
1405 qc->result_tf.command, qc->result_tf.feature,
1406 ata_err_string(qc->err_mask));
1410 static int ata_do_reset(struct ata_port *ap, ata_reset_fn_t reset,
1411 unsigned int *classes)
1415 for (i = 0; i < ATA_MAX_DEVICES; i++)
1416 classes[i] = ATA_DEV_UNKNOWN;
1418 rc = reset(ap, classes);
	/* If any class isn't ATA_DEV_UNKNOWN, consider classification
	 * is complete and convert all ATA_DEV_UNKNOWN to
	 * ATA_DEV_NONE.
	 */
1426 for (i = 0; i < ATA_MAX_DEVICES; i++)
1427 if (classes[i] != ATA_DEV_UNKNOWN)
1430 if (i < ATA_MAX_DEVICES)
1431 for (i = 0; i < ATA_MAX_DEVICES; i++)
1432 if (classes[i] == ATA_DEV_UNKNOWN)
1433 classes[i] = ATA_DEV_NONE;
1438 static int ata_eh_followup_srst_needed(int rc, int classify,
1439 const unsigned int *classes)
1445 if (classify && classes[0] == ATA_DEV_UNKNOWN)
1450 static int ata_eh_reset(struct ata_port *ap, int classify,
1451 ata_prereset_fn_t prereset, ata_reset_fn_t softreset,
1452 ata_reset_fn_t hardreset, ata_postreset_fn_t postreset)
1454 struct ata_eh_context *ehc = &ap->eh_context;
1455 unsigned int *classes = ehc->classes;
1456 int tries = ATA_EH_RESET_TRIES;
1457 int verbose = !(ap->flags & ATA_FLAG_LOADING);
1458 unsigned int action;
1459 ata_reset_fn_t reset;
1460 int i, did_followup_srst, rc;
1462 /* Determine which reset to use and record in ehc->i.action.
1463 * prereset() may examine and modify it.
1465 action = ehc->i.action;
1466 ehc->i.action &= ~ATA_EH_RESET_MASK;
1467 if (softreset && (!hardreset || (!sata_set_spd_needed(ap) &&
1468 !(action & ATA_EH_HARDRESET))))
1469 ehc->i.action |= ATA_EH_SOFTRESET;
1471 ehc->i.action |= ATA_EH_HARDRESET;
1476 ata_port_printk(ap, KERN_ERR,
1477 "prereset failed (errno=%d)\n", rc);
1482 /* prereset() might have modified ehc->i.action */
1483 if (ehc->i.action & ATA_EH_HARDRESET)
1485 else if (ehc->i.action & ATA_EH_SOFTRESET)
1488 /* prereset told us not to reset, bang classes and return */
1489 for (i = 0; i < ATA_MAX_DEVICES; i++)
1490 classes[i] = ATA_DEV_NONE;
1494 /* did prereset() screw up? if so, fix up to avoid oopsing */
1496 ata_port_printk(ap, KERN_ERR, "BUG: prereset() requested "
1497 "invalid reset type\n");
	/* shut up during boot probing */
	if (verbose)
		ata_port_printk(ap, KERN_INFO, "%s resetting port\n",
				reset == softreset ? "soft" : "hard");
1511 ata_eh_about_to_do(ap, NULL, ATA_EH_RESET_MASK);
1512 ehc->i.flags |= ATA_EHI_DID_RESET;
1514 rc = ata_do_reset(ap, reset, classes);
1516 did_followup_srst = 0;
1517 if (reset == hardreset &&
1518 ata_eh_followup_srst_needed(rc, classify, classes)) {
1519 /* okay, let's do follow-up softreset */
1520 did_followup_srst = 1;
			ata_port_printk(ap, KERN_ERR,
					"follow-up softreset required "
					"but no softreset available\n");
1530 ata_eh_about_to_do(ap, NULL, ATA_EH_RESET_MASK);
1531 rc = ata_do_reset(ap, reset, classes);
1533 if (rc == 0 && classify &&
1534 classes[0] == ATA_DEV_UNKNOWN) {
1535 ata_port_printk(ap, KERN_ERR,
1536 "classification failed\n");
1541 if (rc && --tries) {
1544 if (reset == softreset) {
1545 if (did_followup_srst)
1546 type = "follow-up soft";
1552 ata_port_printk(ap, KERN_WARNING,
1553 "%sreset failed, retrying in 5 secs\n", type);
1556 if (reset == hardreset)
1557 sata_down_spd_limit(ap);
1564 /* After the reset, the device state is PIO 0 and the
1565 * controller state is undefined. Record the mode.
1567 for (i = 0; i < ATA_MAX_DEVICES; i++)
1568 ap->device[i].pio_mode = XFER_PIO_0;
1571 postreset(ap, classes);
1573 /* reset successful, schedule revalidation */
1574 ata_eh_done(ap, NULL, ATA_EH_RESET_MASK);
1575 ehc->i.action |= ATA_EH_REVALIDATE;
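/* Reset selection above, in short (added commentary): softreset is
 * preferred unless the caller requested a hardreset or the link speed
 * has to be renegotiated (sata_set_spd_needed()); prereset() may
 * override the choice, and a hardreset that could not classify the
 * devices is followed by a softreset when one is available.
 */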
1581 static int ata_eh_revalidate_and_attach(struct ata_port *ap,
1582 struct ata_device **r_failed_dev)
1584 struct ata_eh_context *ehc = &ap->eh_context;
1585 struct ata_device *dev;
1586 unsigned long flags;
1591 for (i = 0; i < ATA_MAX_DEVICES; i++) {
1592 unsigned int action;
1594 dev = &ap->device[i];
1595 action = ehc->i.action | ehc->i.dev_action[dev->devno];
1597 if (action & ATA_EH_REVALIDATE && ata_dev_enabled(dev)) {
1598 if (ata_port_offline(ap)) {
1603 ata_eh_about_to_do(ap, dev, ATA_EH_REVALIDATE);
1604 rc = ata_dev_revalidate(dev,
1605 ehc->i.flags & ATA_EHI_DID_RESET);
1609 ata_eh_done(ap, dev, ATA_EH_REVALIDATE);
1611 /* schedule the scsi_rescan_device() here */
1612 queue_work(ata_aux_wq, &(ap->scsi_rescan_task));
1613 } else if (dev->class == ATA_DEV_UNKNOWN &&
1614 ehc->tries[dev->devno] &&
1615 ata_class_enabled(ehc->classes[dev->devno])) {
1616 dev->class = ehc->classes[dev->devno];
1618 rc = ata_dev_read_id(dev, &dev->class, 1, dev->id);
1620 rc = ata_dev_configure(dev, 1);
1623 dev->class = ATA_DEV_UNKNOWN;
1627 spin_lock_irqsave(ap->lock, flags);
1628 ap->flags |= ATA_FLAG_SCSI_HOTPLUG;
1629 spin_unlock_irqrestore(ap->lock, flags);
1634 *r_failed_dev = dev;
1640 static int ata_port_nr_enabled(struct ata_port *ap)
1644 for (i = 0; i < ATA_MAX_DEVICES; i++)
1645 if (ata_dev_enabled(&ap->device[i]))
1650 static int ata_port_nr_vacant(struct ata_port *ap)
1654 for (i = 0; i < ATA_MAX_DEVICES; i++)
1655 if (ap->device[i].class == ATA_DEV_UNKNOWN)
1660 static int ata_eh_skip_recovery(struct ata_port *ap)
1662 struct ata_eh_context *ehc = &ap->eh_context;
1665 if (ap->flags & ATA_FLAG_FROZEN || ata_port_nr_enabled(ap))
1668 /* skip if class codes for all vacant slots are ATA_DEV_NONE */
1669 for (i = 0; i < ATA_MAX_DEVICES; i++) {
1670 struct ata_device *dev = &ap->device[i];
1672 if (dev->class == ATA_DEV_UNKNOWN &&
1673 ehc->classes[dev->devno] != ATA_DEV_NONE)
1681 * ata_eh_recover - recover host port after error
1682 * @ap: host port to recover
1683 * @prereset: prereset method (can be NULL)
1684 * @softreset: softreset method (can be NULL)
1685 * @hardreset: hardreset method (can be NULL)
1686 * @postreset: postreset method (can be NULL)
 *	This is the alpha and omega, yin and yang, heart and soul of
1689 * libata exception handling. On entry, actions required to
1690 * recover the port and hotplug requests are recorded in
1691 * eh_context. This function executes all the operations with
 *	appropriate retries and fallbacks to resurrect failed
1693 * devices, detach goners and greet newcomers.
1696 * Kernel thread context (may sleep).
1699 * 0 on success, -errno on failure.
1701 static int ata_eh_recover(struct ata_port *ap, ata_prereset_fn_t prereset,
1702 ata_reset_fn_t softreset, ata_reset_fn_t hardreset,
1703 ata_postreset_fn_t postreset)
1705 struct ata_eh_context *ehc = &ap->eh_context;
1706 struct ata_device *dev;
1707 int down_xfermask, i, rc;
1711 /* prep for recovery */
1712 for (i = 0; i < ATA_MAX_DEVICES; i++) {
1713 dev = &ap->device[i];
1715 ehc->tries[dev->devno] = ATA_EH_DEV_TRIES;
1717 /* process hotplug request */
1718 if (dev->flags & ATA_DFLAG_DETACH)
1719 ata_eh_detach_dev(dev);
1721 if (!ata_dev_enabled(dev) &&
1722 ((ehc->i.probe_mask & (1 << dev->devno)) &&
1723 !(ehc->did_probe_mask & (1 << dev->devno)))) {
1724 ata_eh_detach_dev(dev);
1726 ehc->did_probe_mask |= (1 << dev->devno);
1727 ehc->i.action |= ATA_EH_SOFTRESET;
1735 /* if UNLOADING, finish immediately */
1736 if (ap->flags & ATA_FLAG_UNLOADING)
1739 /* skip EH if possible. */
1740 if (ata_eh_skip_recovery(ap))
1743 for (i = 0; i < ATA_MAX_DEVICES; i++)
1744 ehc->classes[i] = ATA_DEV_UNKNOWN;
1747 if (ehc->i.action & ATA_EH_RESET_MASK) {
1748 ata_eh_freeze_port(ap);
1750 rc = ata_eh_reset(ap, ata_port_nr_vacant(ap), prereset,
1751 softreset, hardreset, postreset);
1753 ata_port_printk(ap, KERN_ERR,
1754 "reset failed, giving up\n");
1758 ata_eh_thaw_port(ap);
1761 /* revalidate existing devices and attach new ones */
1762 rc = ata_eh_revalidate_and_attach(ap, &dev);
1766 /* configure transfer mode if the port has been reset */
1767 if (ehc->i.flags & ATA_EHI_DID_RESET) {
1768 rc = ata_set_mode(ap, &dev);
1780 /* device missing, schedule probing */
1781 ehc->i.probe_mask |= (1 << dev->devno);
1783 ehc->tries[dev->devno] = 0;
1786 sata_down_spd_limit(ap);
1788 ehc->tries[dev->devno]--;
1789 if (down_xfermask &&
1790 ata_down_xfermask_limit(dev, ehc->tries[dev->devno] == 1))
1791 ehc->tries[dev->devno] = 0;
1794 if (ata_dev_enabled(dev) && !ehc->tries[dev->devno]) {
1795 /* disable device if it has used up all its chances */
1796 ata_dev_disable(dev);
1798 /* detach if offline */
1799 if (ata_port_offline(ap))
1800 ata_eh_detach_dev(dev);
1802 /* probe if requested */
1803 if ((ehc->i.probe_mask & (1 << dev->devno)) &&
1804 !(ehc->did_probe_mask & (1 << dev->devno))) {
1805 ata_eh_detach_dev(dev);
1808 ehc->tries[dev->devno] = ATA_EH_DEV_TRIES;
1809 ehc->did_probe_mask |= (1 << dev->devno);
1810 ehc->i.action |= ATA_EH_SOFTRESET;
1813 /* soft didn't work? be haaaaard */
			if (ehc->i.flags & ATA_EHI_DID_RESET)
				ehc->i.action |= ATA_EH_HARDRESET;
			else
				ehc->i.action |= ATA_EH_SOFTRESET;
1820 if (ata_port_nr_enabled(ap)) {
1821 ata_port_printk(ap, KERN_WARNING, "failed to recover some "
1822 "devices, retrying in 5 secs\n");
1825 /* no device left, repeat fast */
1833 for (i = 0; i < ATA_MAX_DEVICES; i++)
1834 ata_dev_disable(&ap->device[i]);
1837 DPRINTK("EXIT, rc=%d\n", rc);
1842 * ata_eh_finish - finish up EH
1843 * @ap: host port to finish EH for
 *	Recovery is complete.  Clean up EH states and retry or finish
 *	failed qcs.
1851 static void ata_eh_finish(struct ata_port *ap)
1855 /* retry or finish qcs */
1856 for (tag = 0; tag < ATA_MAX_QUEUE; tag++) {
1857 struct ata_queued_cmd *qc = __ata_qc_from_tag(ap, tag);
1859 if (!(qc->flags & ATA_QCFLAG_FAILED))
1863 /* FIXME: Once EH migration is complete,
1864 * generate sense data in this function,
1865 * considering both err_mask and tf.
1867 if (qc->err_mask & AC_ERR_INVALID)
1868 ata_eh_qc_complete(qc);
1870 ata_eh_qc_retry(qc);
1872 if (qc->flags & ATA_QCFLAG_SENSE_VALID) {
1873 ata_eh_qc_complete(qc);
1875 /* feed zero TF to sense generation */
1876 memset(&qc->result_tf, 0, sizeof(qc->result_tf));
1877 ata_eh_qc_retry(qc);
1884 * ata_do_eh - do standard error handling
1885 * @ap: host port to handle error for
1886 * @prereset: prereset method (can be NULL)
1887 * @softreset: softreset method (can be NULL)
1888 * @hardreset: hardreset method (can be NULL)
1889 * @postreset: postreset method (can be NULL)
1891 * Perform standard error handling sequence.
1894 * Kernel thread context (may sleep).
1896 void ata_do_eh(struct ata_port *ap, ata_prereset_fn_t prereset,
1897 ata_reset_fn_t softreset, ata_reset_fn_t hardreset,
1898 ata_postreset_fn_t postreset)
	if (!(ap->flags & ATA_FLAG_LOADING)) {
		ata_eh_autopsy(ap);
		ata_eh_report(ap);
	}

	ata_eh_recover(ap, prereset, softreset, hardreset, postreset);
	ata_eh_finish(ap);
}
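/* Illustrative LLDD glue (a sketch, not part of this file): a driver
 * converted to the new EH typically points ata_port_operations at the
 * stock reset methods through a thin wrapper, e.g.
 *
 *	static void my_error_handler(struct ata_port *ap)
 *	{
 *		ata_do_eh(ap, NULL, ata_std_softreset,
 *			  sata_std_hardreset, ata_std_postreset);
 *	}
 *
 * and sets .error_handler, .freeze and .thaw in its port operations.
 * my_error_handler is a hypothetical name; the three reset functions
 * are the standard libata implementations.
 */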