/*
 *	Basic PIO and command management functionality.
 *
 *	This code was split off from ide.c. See ide.c for history and
 *	original copyrights.
 *
 * This program is free software; you can redistribute it and/or modify it
 * under the terms of the GNU General Public License as published by the
 * Free Software Foundation; either version 2, or (at your option) any
 * later version.
 *
 * This program is distributed in the hope that it will be useful, but
 * WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
 * General Public License for more details.
 *
 * For the avoidance of doubt the "preferred form" of this code is one which
 * is in an open non patent encumbered format. Where cryptographic key signing
 * forms part of the process of creating an executable the information
 * including keys needed to generate an equivalently functional executable
 * are deemed to be part of the source code.
 */

#include <linux/module.h>
#include <linux/types.h>
#include <linux/string.h>
#include <linux/kernel.h>
#include <linux/timer.h>
#include <linux/mm.h>
#include <linux/interrupt.h>
#include <linux/major.h>
#include <linux/errno.h>
#include <linux/genhd.h>
#include <linux/blkpg.h>
#include <linux/slab.h>
#include <linux/init.h>
#include <linux/pci.h>
#include <linux/delay.h>
#include <linux/ide.h>
#include <linux/completion.h>
#include <linux/reboot.h>
#include <linux/cdrom.h>
#include <linux/seq_file.h>
#include <linux/device.h>
#include <linux/kmod.h>
#include <linux/scatterlist.h>
#include <linux/bitops.h>

#include <asm/byteorder.h>
#include <asm/irq.h>
#include <asm/uaccess.h>
#include <asm/io.h>

static int __ide_end_request(ide_drive_t *drive, struct request *rq,
			     int uptodate, unsigned int nr_bytes)
{
	int ret = 1;

	/*
	 * if failfast is set on a request, override number of sectors and
	 * complete the whole request right now
	 */
	if (blk_noretry_request(rq) && end_io_error(uptodate))
		nr_bytes = rq->hard_nr_sectors << 9;

	if (!blk_fs_request(rq) && end_io_error(uptodate) && !rq->errors)
		rq->errors = -EIO;

	/*
	 * decide whether to reenable DMA -- 3 is a random magic for now,
	 * if we DMA timeout more than 3 times, just stay in PIO
	 */
	if (drive->state == DMA_PIO_RETRY && drive->retry_pio <= 3) {
		drive->state = 0;
		HWGROUP(drive)->hwif->ide_dma_on(drive);
	}

	if (!end_that_request_chunk(rq, uptodate, nr_bytes)) {
		add_disk_randomness(rq->rq_disk);
		if (!list_empty(&rq->queuelist))
			blkdev_dequeue_request(rq);
		HWGROUP(drive)->rq = NULL;
		end_that_request_last(rq, uptodate);
		ret = 0;
	}

	return ret;
}

/**
 *	ide_end_request		-	complete an IDE I/O
 *	@drive: IDE device for the I/O
 *	@uptodate: nonzero on success
 *	@nr_sectors: number of sectors completed
 *
 *	This is our end_request wrapper function. We complete the I/O
 *	update random number input and dequeue the request, which if
 *	it was tagged may be out of order.
 */

int ide_end_request (ide_drive_t *drive, int uptodate, int nr_sectors)
{
	unsigned int nr_bytes = nr_sectors << 9;
	struct request *rq;
	unsigned long flags;
	int ret = 1;

	/*
	 * room for locking improvements here, the calls below don't
	 * need the queue lock held at all
	 */
	spin_lock_irqsave(&ide_lock, flags);
	rq = HWGROUP(drive)->rq;

	if (!nr_bytes) {
		if (blk_pc_request(rq))
			nr_bytes = rq->data_len;
		else
			nr_bytes = rq->hard_cur_sectors << 9;
	}

	ret = __ide_end_request(drive, rq, uptodate, nr_bytes);

	spin_unlock_irqrestore(&ide_lock, flags);
	return ret;
}
EXPORT_SYMBOL(ide_end_request);

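#if 0
/*
 * Illustrative sketch only (not part of the original file): this is
 * roughly how a subdriver's PIO completion handler hands finished
 * sectors back to the block layer.  "nsect" is a hypothetical count of
 * sectors transferred in this interrupt.
 */
static ide_startstop_t example_pio_intr(ide_drive_t *drive, unsigned int nsect)
{
	/* ide_end_request() returns 0 once the whole request is done */
	if (!ide_end_request(drive, 1 /* uptodate */, nsect))
		return ide_stopped;

	/* more data to move; real code would re-arm a handler here */
	return ide_started;
}
#endif
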
/*
 * Power Management state machine. This one is rather trivial for now,
 * we should probably add more, like switching back to PIO on suspend
 * to help some BIOSes, re-do the door locking on resume, etc...
 */

enum {
	ide_pm_flush_cache	= ide_pm_state_start_suspend,
	idedisk_pm_standby,

	idedisk_pm_restore_pio	= ide_pm_state_start_resume,
	idedisk_pm_idle,
	ide_pm_restore_dma,
};

static void ide_complete_power_step(ide_drive_t *drive, struct request *rq, u8 stat, u8 error)
{
	struct request_pm_state *pm = rq->data;

	if (drive->media != ide_disk)
		return;

	switch (pm->pm_step) {
	case ide_pm_flush_cache:	/* Suspend step 1 (flush cache) complete */
		if (pm->pm_state == PM_EVENT_FREEZE)
			pm->pm_step = ide_pm_state_completed;
		else
			pm->pm_step = idedisk_pm_standby;
		break;
	case idedisk_pm_standby:	/* Suspend step 2 (standby) complete */
		pm->pm_step = ide_pm_state_completed;
		break;
	case idedisk_pm_restore_pio:	/* Resume step 1 complete */
		pm->pm_step = idedisk_pm_idle;
		break;
	case idedisk_pm_idle:		/* Resume step 2 (idle) complete */
		pm->pm_step = ide_pm_restore_dma;
		break;
	}
}

static ide_startstop_t ide_start_power_step(ide_drive_t *drive, struct request *rq)
{
	struct request_pm_state *pm = rq->data;
	ide_task_t *args = rq->special;

	memset(args, 0, sizeof(*args));

	switch (pm->pm_step) {
	case ide_pm_flush_cache:	/* Suspend step 1 (flush cache) */
		if (drive->media != ide_disk)
			break;
		/* Not supported? Switch to next step now. */
		if (!drive->wcache || !ide_id_has_flush_cache(drive->id)) {
			ide_complete_power_step(drive, rq, 0, 0);
			return ide_stopped;
		}
		if (ide_id_has_flush_cache_ext(drive->id))
			args->tfRegister[IDE_COMMAND_OFFSET] = WIN_FLUSH_CACHE_EXT;
		else
			args->tfRegister[IDE_COMMAND_OFFSET] = WIN_FLUSH_CACHE;
		args->command_type = IDE_DRIVE_TASK_NO_DATA;
		args->handler	   = &task_no_data_intr;
		return do_rw_taskfile(drive, args);

	case idedisk_pm_standby:	/* Suspend step 2 (standby) */
		args->tfRegister[IDE_COMMAND_OFFSET] = WIN_STANDBYNOW1;
		args->command_type = IDE_DRIVE_TASK_NO_DATA;
		args->handler	   = &task_no_data_intr;
		return do_rw_taskfile(drive, args);

	case idedisk_pm_restore_pio:	/* Resume step 1 (restore PIO) */
		ide_set_max_pio(drive);
		/*
		 * skip idedisk_pm_idle for ATAPI devices
		 */
		if (drive->media != ide_disk)
			pm->pm_step = ide_pm_restore_dma;
		else
			ide_complete_power_step(drive, rq, 0, 0);
		return ide_stopped;

	case idedisk_pm_idle:		/* Resume step 2 (idle) */
		args->tfRegister[IDE_COMMAND_OFFSET] = WIN_IDLEIMMEDIATE;
		args->command_type = IDE_DRIVE_TASK_NO_DATA;
		args->handler = task_no_data_intr;
		return do_rw_taskfile(drive, args);

	case ide_pm_restore_dma:	/* Resume step 3 (restore DMA) */
		/*
		 * Right now, all we do is call ide_set_dma(drive),
		 * we could be smarter and check for current xfer_speed
		 * in struct drive etc...
		 */
		if (drive->hwif->ide_dma_on == NULL)
			break;

		drive->hwif->dma_off_quietly(drive);
		/*
		 * TODO: respect ->using_dma setting
		 */
		ide_set_dma(drive);
		break;
	}
	pm->pm_step = ide_pm_state_completed;
	return ide_stopped;
}

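#if 0
/*
 * Illustrative sketch only (not from this file): the core suspend path
 * builds a REQ_TYPE_PM_SUSPEND request roughly like this before the
 * state machine above ever sees it.  The field usage is an assumption
 * based on how ide_start_power_step() reads rq->data and rq->special.
 */
static int example_queue_suspend(ide_drive_t *drive, pm_message_t mesg)
{
	struct request rq;
	struct request_pm_state rqpm;
	ide_task_t args;

	memset(&rq, 0, sizeof(rq));
	memset(&rqpm, 0, sizeof(rqpm));
	memset(&args, 0, sizeof(args));
	rq.cmd_type = REQ_TYPE_PM_SUSPEND;
	rq.special = &args;
	rq.data = &rqpm;
	rqpm.pm_step = ide_pm_state_start_suspend;
	rqpm.pm_state = mesg.event;

	/* wait for the whole suspend step sequence to complete */
	return blk_execute_rq(drive->queue, NULL, &rq, 0);
}
#endif
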
/**
 *	ide_end_dequeued_request	-	complete an IDE I/O
 *	@drive: IDE device for the I/O
 *	@rq: request to complete
 *	@uptodate: nonzero on success
 *	@nr_sectors: number of sectors completed
 *
 *	Complete an I/O that is no longer on the request queue. This
 *	typically occurs when we pull the request and issue a REQUEST_SENSE.
 *	We must still finish the old request but we must not tamper with the
 *	queue in the meantime.
 *
 *	NOTE: This path does not handle barrier, but barrier is not supported
 *	on ide-cd anyway.
 */

int ide_end_dequeued_request(ide_drive_t *drive, struct request *rq,
			     int uptodate, int nr_sectors)
{
	unsigned long flags;
	int ret = 1;

	spin_lock_irqsave(&ide_lock, flags);

	BUG_ON(!blk_rq_started(rq));

	/*
	 * if failfast is set on a request, override number of sectors and
	 * complete the whole request right now
	 */
	if (blk_noretry_request(rq) && end_io_error(uptodate))
		nr_sectors = rq->hard_nr_sectors;

	if (!blk_fs_request(rq) && end_io_error(uptodate) && !rq->errors)
		rq->errors = -EIO;

	/*
	 * decide whether to reenable DMA -- 3 is a random magic for now,
	 * if we DMA timeout more than 3 times, just stay in PIO
	 */
	if (drive->state == DMA_PIO_RETRY && drive->retry_pio <= 3) {
		drive->state = 0;
		HWGROUP(drive)->hwif->ide_dma_on(drive);
	}

	if (!end_that_request_first(rq, uptodate, nr_sectors)) {
		add_disk_randomness(rq->rq_disk);
		if (blk_rq_tagged(rq))
			blk_queue_end_tag(drive->queue, rq);
		end_that_request_last(rq, uptodate);
		ret = 0;
	}
	spin_unlock_irqrestore(&ide_lock, flags);
	return ret;
}
EXPORT_SYMBOL_GPL(ide_end_dequeued_request);

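#if 0
/*
 * Illustrative sketch only (not from this file): ide-cd style error
 * handling pulls a failed request off the queue, runs a REQUEST_SENSE
 * in front of it, and later finishes the old request with
 * ide_end_dequeued_request() since that request is no longer queued.
 * The "failed_rq" bookkeeping is a simplified assumption.
 */
static void example_finish_failed_rq(ide_drive_t *drive, struct request *failed_rq)
{
	/* failed_rq was dequeued earlier: complete it without touching the queue */
	ide_end_dequeued_request(drive, failed_rq, 0, failed_rq->hard_nr_sectors);
}
#endif
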
/**
 *	ide_complete_pm_request - end the current Power Management request
 *	@drive: target drive
 *	@rq: request
 *
 *	This function cleans up the current PM request and stops the queue
 *	if necessary.
 */
static void ide_complete_pm_request (ide_drive_t *drive, struct request *rq)
{
	unsigned long flags;

#ifdef DEBUG_PM
	printk("%s: completing PM request, %s\n", drive->name,
	       blk_pm_suspend_request(rq) ? "suspend" : "resume");
#endif
	spin_lock_irqsave(&ide_lock, flags);
	if (blk_pm_suspend_request(rq)) {
		blk_stop_queue(drive->queue);
	} else {
		drive->blocked = 0;
		blk_start_queue(drive->queue);
	}
	blkdev_dequeue_request(rq);
	HWGROUP(drive)->rq = NULL;
	end_that_request_last(rq, 1);
	spin_unlock_irqrestore(&ide_lock, flags);
}

/**
 *	ide_end_drive_cmd	-	end an explicit drive command
 *	@drive: drive the command ran on
 *	@stat: status bits
 *	@err: error bits
 *
 *	Clean up after success/failure of an explicit drive command.
 *	These get thrown onto the queue so they are synchronized with
 *	real I/O operations on the drive.
 *
 *	In LBA48 mode we have to read the register set twice to get
 *	all the extra information out.
 */

void ide_end_drive_cmd (ide_drive_t *drive, u8 stat, u8 err)
{
	ide_hwif_t *hwif = HWIF(drive);
	unsigned long flags;
	struct request *rq;

	spin_lock_irqsave(&ide_lock, flags);
	rq = HWGROUP(drive)->rq;
	spin_unlock_irqrestore(&ide_lock, flags);

	if (rq->cmd_type == REQ_TYPE_ATA_CMD) {
		u8 *args = (u8 *) rq->buffer;
		if (rq->errors == 0)
			rq->errors = !OK_STAT(stat, READY_STAT, BAD_STAT);

		if (args) {
			args[0] = stat;
			args[1] = err;
			args[2] = hwif->INB(IDE_NSECTOR_REG);
		}
	} else if (rq->cmd_type == REQ_TYPE_ATA_TASK) {
		u8 *args = (u8 *) rq->buffer;
		if (rq->errors == 0)
			rq->errors = !OK_STAT(stat, READY_STAT, BAD_STAT);

		if (args) {
			args[0] = stat;
			args[1] = err;
			args[2] = hwif->INB(IDE_NSECTOR_REG);
			args[3] = hwif->INB(IDE_SECTOR_REG);
			args[4] = hwif->INB(IDE_LCYL_REG);
			args[5] = hwif->INB(IDE_HCYL_REG);
			args[6] = hwif->INB(IDE_SELECT_REG);
		}
	} else if (rq->cmd_type == REQ_TYPE_ATA_TASKFILE) {
		ide_task_t *args = (ide_task_t *) rq->special;
		if (rq->errors == 0)
			rq->errors = !OK_STAT(stat, READY_STAT, BAD_STAT);

		if (args) {
			if (args->tf_in_flags.b.data) {
				u16 data = hwif->INW(IDE_DATA_REG);

				args->tfRegister[IDE_DATA_OFFSET]  = data & 0xFF;
				args->hobRegister[IDE_DATA_OFFSET] = (data >> 8) & 0xFF;
			}
			args->tfRegister[IDE_ERROR_OFFSET] = err;
			/* be sure we're looking at the low order bits */
			hwif->OUTB(drive->ctl & ~0x80, IDE_CONTROL_REG);
			args->tfRegister[IDE_NSECTOR_OFFSET] = hwif->INB(IDE_NSECTOR_REG);
			args->tfRegister[IDE_SECTOR_OFFSET]  = hwif->INB(IDE_SECTOR_REG);
			args->tfRegister[IDE_LCYL_OFFSET]    = hwif->INB(IDE_LCYL_REG);
			args->tfRegister[IDE_HCYL_OFFSET]    = hwif->INB(IDE_HCYL_REG);
			args->tfRegister[IDE_SELECT_OFFSET]  = hwif->INB(IDE_SELECT_REG);
			args->tfRegister[IDE_STATUS_OFFSET]  = stat;

			if (drive->addressing == 1) {
				hwif->OUTB(drive->ctl | 0x80, IDE_CONTROL_REG);
				args->hobRegister[IDE_FEATURE_OFFSET] = hwif->INB(IDE_FEATURE_REG);
				args->hobRegister[IDE_NSECTOR_OFFSET] = hwif->INB(IDE_NSECTOR_REG);
				args->hobRegister[IDE_SECTOR_OFFSET]  = hwif->INB(IDE_SECTOR_REG);
				args->hobRegister[IDE_LCYL_OFFSET]    = hwif->INB(IDE_LCYL_REG);
				args->hobRegister[IDE_HCYL_OFFSET]    = hwif->INB(IDE_HCYL_REG);
			}
		}
	} else if (blk_pm_request(rq)) {
		struct request_pm_state *pm = rq->data;
#ifdef DEBUG_PM
		printk("%s: complete_power_step(step: %d, stat: %x, err: %x)\n",
			drive->name, pm->pm_step, stat, err);
#endif
		ide_complete_power_step(drive, rq, stat, err);
		if (pm->pm_step == ide_pm_state_completed)
			ide_complete_pm_request(drive, rq);
		return;
	}

	spin_lock_irqsave(&ide_lock, flags);
	blkdev_dequeue_request(rq);
	HWGROUP(drive)->rq = NULL;
	rq->errors = err;
	end_that_request_last(rq, !rq->errors);
	spin_unlock_irqrestore(&ide_lock, flags);
}

EXPORT_SYMBOL(ide_end_drive_cmd);

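#if 0
/*
 * Illustrative sketch only (not from this file): once ide_end_drive_cmd()
 * has completed a REQ_TYPE_ATA_TASK request, the submitter can read the
 * register snapshot back out of the same buffer it passed in, using the
 * layout filled in above (args[0]=status, args[1]=error, args[2]=nsector).
 */
static void example_inspect_task_result(u8 *args)
{
	u8 stat  = args[0];	/* status register */
	u8 err   = args[1];	/* error register */
	u8 nsect = args[2];	/* sector count register */

	if (!OK_STAT(stat, READY_STAT, BAD_STAT))
		printk(KERN_DEBUG "task failed: stat=0x%02x err=0x%02x nsect=0x%02x\n",
		       stat, err, nsect);
}
#endif
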
/**
 *	try_to_flush_leftover_data	-	flush junk
 *	@drive: drive to flush
 *
 *	try_to_flush_leftover_data() is invoked in response to a drive
 *	unexpectedly having its DRQ_STAT bit set. As an alternative to
 *	resetting the drive, this routine tries to clear the condition
 *	by reading a sector's worth of data from the drive. Of course,
 *	this may not help if the drive is *waiting* for data from *us*.
 */
static void try_to_flush_leftover_data (ide_drive_t *drive)
{
	int i = (drive->mult_count ? drive->mult_count : 1) * SECTOR_WORDS;

	if (drive->media != ide_disk)
		return;
	while (i > 0) {
		u32 buffer[16];
		u32 wcount = (i > 16) ? 16 : i;

		i -= wcount;
		HWIF(drive)->ata_input_data(drive, buffer, wcount);
	}
}

static void ide_kill_rq(ide_drive_t *drive, struct request *rq)
{
	if (rq->rq_disk) {
		ide_driver_t *drv;

		drv = *(ide_driver_t **)rq->rq_disk->private_data;
		drv->end_request(drive, 0, 0);
	} else
		ide_end_request(drive, 0, 0);
}

static ide_startstop_t ide_ata_error(ide_drive_t *drive, struct request *rq, u8 stat, u8 err)
{
	ide_hwif_t *hwif = drive->hwif;

	if (stat & BUSY_STAT || ((stat & WRERR_STAT) && !drive->nowerr)) {
		/* other bits are useless when BUSY */
		rq->errors |= ERROR_RESET;
	} else if (stat & ERR_STAT) {
		/* err has different meaning on cdrom and tape */
		if (err == ABRT_ERR) {
			if (drive->select.b.lba &&
			    /* some newer drives don't support WIN_SPECIFY */
			    hwif->INB(IDE_COMMAND_REG) == WIN_SPECIFY)
				return ide_stopped;
		} else if ((err & BAD_CRC) == BAD_CRC) {
			/* UDMA crc error, just retry the operation */
			drive->crc_count++;
		} else if (err & (BBD_ERR | ECC_ERR)) {
			/* retries won't help these */
			rq->errors = ERROR_MAX;
		} else if (err & TRK0_ERR) {
			/* help it find track zero */
			rq->errors |= ERROR_RECAL;
		}
	}

	if ((stat & DRQ_STAT) && rq_data_dir(rq) == READ &&
	    (hwif->host_flags & IDE_HFLAG_ERROR_STOPS_FIFO) == 0)
		try_to_flush_leftover_data(drive);

	if (rq->errors >= ERROR_MAX || blk_noretry_request(rq)) {
		ide_kill_rq(drive, rq);
		return ide_stopped;
	}

	if (hwif->INB(IDE_STATUS_REG) & (BUSY_STAT|DRQ_STAT))
		rq->errors |= ERROR_RESET;

	if ((rq->errors & ERROR_RESET) == ERROR_RESET) {
		++rq->errors;
		return ide_do_reset(drive);
	}

	if ((rq->errors & ERROR_RECAL) == ERROR_RECAL)
		drive->special.b.recalibrate = 1;

	++rq->errors;

	return ide_stopped;
}

static ide_startstop_t ide_atapi_error(ide_drive_t *drive, struct request *rq, u8 stat, u8 err)
{
	ide_hwif_t *hwif = drive->hwif;

	if (stat & BUSY_STAT || ((stat & WRERR_STAT) && !drive->nowerr)) {
		/* other bits are useless when BUSY */
		rq->errors |= ERROR_RESET;
	} else {
		/* add decoding error stuff */
	}

	if (hwif->INB(IDE_STATUS_REG) & (BUSY_STAT|DRQ_STAT))
		/* force an abort */
		hwif->OUTB(WIN_IDLEIMMEDIATE, IDE_COMMAND_REG);

	if (rq->errors >= ERROR_MAX) {
		ide_kill_rq(drive, rq);
	} else {
		if ((rq->errors & ERROR_RESET) == ERROR_RESET) {
			++rq->errors;
			return ide_do_reset(drive);
		}
		++rq->errors;
	}

	return ide_stopped;
}

ide_startstop_t
__ide_error(ide_drive_t *drive, struct request *rq, u8 stat, u8 err)
{
	if (drive->media == ide_disk)
		return ide_ata_error(drive, rq, stat, err);
	return ide_atapi_error(drive, rq, stat, err);
}

EXPORT_SYMBOL_GPL(__ide_error);

/**
 *	ide_error	-	handle an error on the IDE
 *	@drive: drive the error occurred on
 *	@msg: message to report
 *	@stat: status bits
 *
 *	ide_error() takes action based on the error returned by the drive.
 *	For normal I/O that may well include retries. We deal with
 *	both new-style (taskfile) and old style command handling here.
 *	In the case of taskfile command handling there is work left to
 *	do, for legacy commands we just complete the request.
 */

ide_startstop_t ide_error (ide_drive_t *drive, const char *msg, u8 stat)
{
	struct request *rq;
	u8 err;

	err = ide_dump_status(drive, msg, stat);

	if ((rq = HWGROUP(drive)->rq) == NULL)
		return ide_stopped;

	/* retry only "normal" I/O: */
	if (!blk_fs_request(rq)) {
		rq->errors = 1;
		ide_end_drive_cmd(drive, stat, err);
		return ide_stopped;
	}

	if (rq->rq_disk) {
		ide_driver_t *drv;

		drv = *(ide_driver_t **)rq->rq_disk->private_data;
		return drv->error(drive, rq, stat, err);
	} else
		return __ide_error(drive, rq, stat, err);
}

EXPORT_SYMBOL_GPL(ide_error);

ide_startstop_t __ide_abort(ide_drive_t *drive, struct request *rq)
{
	if (drive->media != ide_disk)
		rq->errors |= ERROR_RESET;

	ide_kill_rq(drive, rq);

	return ide_stopped;
}

EXPORT_SYMBOL_GPL(__ide_abort);

/**
 *	ide_abort	-	abort pending IDE operations
 *	@drive: drive the error occurred on
 *	@msg: message to report
 *
 *	ide_abort kills and cleans up when we are about to do a
 *	host initiated reset on active commands. Longer term we
 *	want handlers to have sensible abort handling themselves.
 *
 *	This differs fundamentally from ide_error because in
 *	this case the command is doing just fine when we
 *	blow it away.
 */

ide_startstop_t ide_abort(ide_drive_t *drive, const char *msg)
{
	struct request *rq;

	if (drive == NULL || (rq = HWGROUP(drive)->rq) == NULL)
		return ide_stopped;

	/* retry only "normal" I/O: */
	if (!blk_fs_request(rq)) {
		rq->errors = 1;
		ide_end_drive_cmd(drive, BUSY_STAT, 0);
		return ide_stopped;
	}

	if (rq->rq_disk) {
		ide_driver_t *drv;

		drv = *(ide_driver_t **)rq->rq_disk->private_data;
		return drv->abort(drive, rq);
	} else
		return __ide_abort(drive, rq);
}

/**
 *	ide_cmd		-	issue a simple drive command
 *	@drive: drive the command is for
 *	@cmd: command byte
 *	@nsect: sector byte
 *	@handler: handler for the command completion
 *
 *	Issue a simple drive command with interrupts.
 *	The drive must be selected beforehand.
 */

static void ide_cmd (ide_drive_t *drive, u8 cmd, u8 nsect,
		ide_handler_t *handler)
{
	ide_hwif_t *hwif = HWIF(drive);
	if (IDE_CONTROL_REG)
		hwif->OUTB(drive->ctl, IDE_CONTROL_REG);	/* clear nIEN */
	SELECT_MASK(drive, 0);
	hwif->OUTB(nsect, IDE_NSECTOR_REG);
	ide_execute_command(drive, cmd, handler, WAIT_CMD, NULL);
}

/**
 *	drive_cmd_intr		-	drive command completion interrupt
 *	@drive: drive the completion interrupt occurred on
 *
 *	drive_cmd_intr() is invoked on completion of a special DRIVE_CMD.
 *	We do any necessary data reading and then wait for the drive to
 *	go non busy. At that point we may read the error data and complete
 *	the request.
 */

static ide_startstop_t drive_cmd_intr (ide_drive_t *drive)
{
	struct request *rq = HWGROUP(drive)->rq;
	ide_hwif_t *hwif = HWIF(drive);
	u8 *args = (u8 *) rq->buffer;
	u8 stat = hwif->INB(IDE_STATUS_REG);
	int retries = 10;

	local_irq_enable_in_hardirq();
	if ((stat & DRQ_STAT) && args && args[3]) {
		u8 io_32bit = drive->io_32bit;
		drive->io_32bit = 0;
		hwif->ata_input_data(drive, &args[4], args[3] * SECTOR_WORDS);
		drive->io_32bit = io_32bit;
		while (((stat = hwif->INB(IDE_STATUS_REG)) & BUSY_STAT) && retries--)
			udelay(100);
	}

	if (!OK_STAT(stat, READY_STAT, BAD_STAT))
		return ide_error(drive, "drive_cmd", stat);
		/* calls ide_end_drive_cmd */
	ide_end_drive_cmd(drive, stat, hwif->INB(IDE_ERROR_REG));
	return ide_stopped;
}

static void ide_init_specify_cmd(ide_drive_t *drive, ide_task_t *task)
{
	task->tfRegister[IDE_NSECTOR_OFFSET] = drive->sect;
	task->tfRegister[IDE_SECTOR_OFFSET]  = drive->sect;
	task->tfRegister[IDE_LCYL_OFFSET]    = drive->cyl;
	task->tfRegister[IDE_HCYL_OFFSET]    = drive->cyl >> 8;
	task->tfRegister[IDE_SELECT_OFFSET]  = ((drive->head - 1) | drive->select.all) & 0xBF;
	task->tfRegister[IDE_COMMAND_OFFSET] = WIN_SPECIFY;

	task->handler = &set_geometry_intr;
}

static void ide_init_restore_cmd(ide_drive_t *drive, ide_task_t *task)
{
	task->tfRegister[IDE_NSECTOR_OFFSET] = drive->sect;
	task->tfRegister[IDE_COMMAND_OFFSET] = WIN_RESTORE;

	task->handler = &recal_intr;
}

static void ide_init_setmult_cmd(ide_drive_t *drive, ide_task_t *task)
{
	task->tfRegister[IDE_NSECTOR_OFFSET] = drive->mult_req;
	task->tfRegister[IDE_COMMAND_OFFSET] = WIN_SETMULT;

	task->handler = &set_multmode_intr;
}

static ide_startstop_t ide_disk_special(ide_drive_t *drive)
{
	special_t *s = &drive->special;
	ide_task_t args;

	memset(&args, 0, sizeof(ide_task_t));
	args.command_type = IDE_DRIVE_TASK_NO_DATA;

	if (s->b.set_geometry) {
		s->b.set_geometry = 0;
		ide_init_specify_cmd(drive, &args);
	} else if (s->b.recalibrate) {
		s->b.recalibrate = 0;
		ide_init_restore_cmd(drive, &args);
	} else if (s->b.set_multmode) {
		s->b.set_multmode = 0;
		if (drive->mult_req > drive->id->max_multsect)
			drive->mult_req = drive->id->max_multsect;
		ide_init_setmult_cmd(drive, &args);
	} else if (s->all) {
		int special = s->all;
		s->all = 0;
		printk(KERN_ERR "%s: bad special flag: 0x%02x\n", drive->name, special);
		return ide_stopped;
	}

	do_rw_taskfile(drive, &args);

	return ide_started;
}

/*
 * handle HDIO_SET_PIO_MODE ioctl abusers here, eventually it will go away
 */
static int set_pio_mode_abuse(ide_hwif_t *hwif, u8 req_pio)
{
	switch (req_pio) {
	case 202:
	case 201:
	case 200:
	case 102:
	case 101:
	case 100:
		return (hwif->host_flags & IDE_HFLAG_ABUSE_DMA_MODES) ? 1 : 0;
	case 9:
	case 8:
		return (hwif->host_flags & IDE_HFLAG_ABUSE_PREFETCH) ? 1 : 0;
	case 7:
	case 6:
		return (hwif->host_flags & IDE_HFLAG_ABUSE_FAST_DEVSEL) ? 1 : 0;
	default:
		return 0;
	}
}

/**
 *	do_special		-	issue some special commands
 *	@drive: drive the command is for
 *
 *	do_special() is used to issue WIN_SPECIFY, WIN_RESTORE, and
 *	WIN_SETMULT commands to a drive. It used to do much more, but has
 *	been scaled back.
 */

static ide_startstop_t do_special (ide_drive_t *drive)
{
	special_t *s = &drive->special;

#ifdef DEBUG
	printk("%s: do_special: 0x%02x\n", drive->name, s->all);
#endif
	if (s->b.set_tune) {
		ide_hwif_t *hwif = drive->hwif;
		u8 req_pio = drive->tune_req;

		s->b.set_tune = 0;

		if (set_pio_mode_abuse(drive->hwif, req_pio)) {
			if (hwif->set_pio_mode)
				hwif->set_pio_mode(drive, req_pio);
		} else {
			int keep_dma = drive->using_dma;

			ide_set_pio(drive, req_pio);

			if (hwif->host_flags & IDE_HFLAG_SET_PIO_MODE_KEEP_DMA) {
				if (keep_dma)
					hwif->ide_dma_on(drive);
			}
		}

		return ide_stopped;
	} else {
		if (drive->media == ide_disk)
			return ide_disk_special(drive);

		s->all = 0;
		drive->mult_req = 0;
		return ide_stopped;
	}
}

void ide_map_sg(ide_drive_t *drive, struct request *rq)
{
	ide_hwif_t *hwif = drive->hwif;
	struct scatterlist *sg = hwif->sg_table;

	if (hwif->sg_mapped)	/* needed by ide-scsi */
		return;

	if (rq->cmd_type != REQ_TYPE_ATA_TASKFILE) {
		hwif->sg_nents = blk_rq_map_sg(drive->queue, rq, sg);
	} else {
		sg_init_one(sg, rq->buffer, rq->nr_sectors * SECTOR_SIZE);
		hwif->sg_nents = 1;
	}
}

EXPORT_SYMBOL_GPL(ide_map_sg);

void ide_init_sg_cmd(ide_drive_t *drive, struct request *rq)
{
	ide_hwif_t *hwif = drive->hwif;

	hwif->nsect = hwif->nleft = rq->nr_sectors;
	hwif->cursg = hwif->cursg_ofs = 0;
}

EXPORT_SYMBOL_GPL(ide_init_sg_cmd);

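#if 0
/*
 * Illustrative sketch only (not from this file): a data-bearing taskfile
 * request is expected to set up its scatter/gather state in this order,
 * exactly as execute_drive_cmd() does below for the TASKFILE_{MULTI_,}IN
 * and TASKFILE_{MULTI_,}OUT data phases.
 */
static void example_setup_sg(ide_drive_t *drive, struct request *rq)
{
	ide_init_sg_cmd(drive, rq);	/* reset nsect/nleft/cursg counters */
	ide_map_sg(drive, rq);		/* build hwif->sg_table from the rq */
}
#endif
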
/**
 *	execute_drive_cmd	-	issue special drive command
 *	@drive: the drive to issue the command on
 *	@rq: the request structure holding the command
 *
 *	execute_drive_cmd() issues a special drive command, usually
 *	initiated by ioctl() from the external hdparm program. The
 *	command can be a drive command, drive task or taskfile
 *	operation. Weirdly you can call it with NULL to wait for
 *	all commands to finish. Don't do this as that is due to change.
 */

static ide_startstop_t execute_drive_cmd (ide_drive_t *drive,
		struct request *rq)
{
	ide_hwif_t *hwif = HWIF(drive);
	if (rq->cmd_type == REQ_TYPE_ATA_TASKFILE) {
		ide_task_t *args = rq->special;

		if (!args)
			goto done;

		hwif->data_phase = args->data_phase;

		switch (hwif->data_phase) {
		case TASKFILE_MULTI_OUT:
		case TASKFILE_OUT:
		case TASKFILE_MULTI_IN:
		case TASKFILE_IN:
			ide_init_sg_cmd(drive, rq);
			ide_map_sg(drive, rq);
		default:
			break;
		}

		if (args->tf_out_flags.all != 0)
			return flagged_taskfile(drive, args);
		return do_rw_taskfile(drive, args);
	} else if (rq->cmd_type == REQ_TYPE_ATA_TASK) {
		u8 *args = rq->buffer;
		u8 sel;

		if (!args)
			goto done;
#ifdef DEBUG
		printk("%s: DRIVE_TASK_CMD ", drive->name);
		printk("cmd=0x%02x ", args[0]);
		printk("fr=0x%02x ", args[1]);
		printk("ns=0x%02x ", args[2]);
		printk("sc=0x%02x ", args[3]);
		printk("lcyl=0x%02x ", args[4]);
		printk("hcyl=0x%02x ", args[5]);
		printk("sel=0x%02x\n", args[6]);
#endif
		hwif->OUTB(args[1], IDE_FEATURE_REG);
		hwif->OUTB(args[3], IDE_SECTOR_REG);
		hwif->OUTB(args[4], IDE_LCYL_REG);
		hwif->OUTB(args[5], IDE_HCYL_REG);
		sel = (args[6] & ~0x10);
		if (drive->select.b.unit)
			sel |= 0x10;
		hwif->OUTB(sel, IDE_SELECT_REG);
		ide_cmd(drive, args[0], args[2], &drive_cmd_intr);
		return ide_started;
	} else if (rq->cmd_type == REQ_TYPE_ATA_CMD) {
		u8 *args = rq->buffer;

		if (!args)
			goto done;
#ifdef DEBUG
		printk("%s: DRIVE_CMD ", drive->name);
		printk("cmd=0x%02x ", args[0]);
		printk("sc=0x%02x ", args[1]);
		printk("fr=0x%02x ", args[2]);
		printk("xx=0x%02x\n", args[3]);
#endif
		if (args[0] == WIN_SMART) {
			hwif->OUTB(0x4f, IDE_LCYL_REG);
			hwif->OUTB(0xc2, IDE_HCYL_REG);
			hwif->OUTB(args[2], IDE_FEATURE_REG);
			hwif->OUTB(args[1], IDE_SECTOR_REG);
			ide_cmd(drive, args[0], args[3], &drive_cmd_intr);
			return ide_started;
		}
		hwif->OUTB(args[2], IDE_FEATURE_REG);
		ide_cmd(drive, args[0], args[1], &drive_cmd_intr);
		return ide_started;
	}

done:
	/*
	 * NULL is actually a valid way of waiting for
	 * all current requests to be flushed from the queue.
	 */
#ifdef DEBUG
	printk("%s: DRIVE_CMD (null)\n", drive->name);
#endif
	ide_end_drive_cmd(drive,
			hwif->INB(IDE_STATUS_REG),
			hwif->INB(IDE_ERROR_REG));
	return ide_stopped;
}

static void ide_check_pm_state(ide_drive_t *drive, struct request *rq)
{
	struct request_pm_state *pm = rq->data;

	if (blk_pm_suspend_request(rq) &&
	    pm->pm_step == ide_pm_state_start_suspend)
		/* Mark drive blocked when starting the suspend sequence. */
		drive->blocked = 1;
	else if (blk_pm_resume_request(rq) &&
		 pm->pm_step == ide_pm_state_start_resume) {
		/*
		 * The first thing we do on wakeup is to wait for BSY bit to
		 * go away (with a looong timeout) as a drive on this hwif may
		 * just be POSTing itself.
		 * We do that before even selecting as the "other" device on
		 * the bus may be broken enough to walk on our toes at this
		 * point.
		 */
		int rc;
#ifdef DEBUG_PM
		printk("%s: Wakeup request inited, waiting for !BSY...\n", drive->name);
#endif
		rc = ide_wait_not_busy(HWIF(drive), 35000);
		if (rc)
			printk(KERN_WARNING "%s: bus not ready on wakeup\n", drive->name);
		SELECT_DRIVE(drive);
		HWIF(drive)->OUTB(8, HWIF(drive)->io_ports[IDE_CONTROL_OFFSET]);
		rc = ide_wait_not_busy(HWIF(drive), 100000);
		if (rc)
			printk(KERN_WARNING "%s: drive not ready on wakeup\n", drive->name);
	}
}

/**
 *	start_request	-	start of I/O and command issuing for IDE
 *
 *	start_request() initiates handling of a new I/O request. It
 *	accepts commands and I/O (read/write) requests. It also does
 *	the final remapping for weird stuff like EZDrive. Once
 *	device mapper can work at the sector level, the EZDrive stuff
 *	can go away.
 *
 *	FIXME: this function needs a rename
 */

static ide_startstop_t start_request (ide_drive_t *drive, struct request *rq)
{
	ide_startstop_t startstop;
	sector_t block;

	BUG_ON(!blk_rq_started(rq));

#ifdef DEBUG
	printk("%s: start_request: current=0x%08lx\n",
		HWIF(drive)->name, (unsigned long) rq);
#endif

	/* bail early if we've exceeded max_failures */
	if (drive->max_failures && (drive->failures > drive->max_failures)) {
		goto kill_rq;
	}

	block    = rq->sector;
	if (blk_fs_request(rq) &&
	    (drive->media == ide_disk || drive->media == ide_floppy)) {
		block += drive->sect0;
	}
	/* Yecch - this will shift the entire interval,
	   possibly killing some innocent following sector */
	if (block == 0 && drive->remap_0_to_1 == 1)
		block = 1;  /* redirect MBR access to EZ-Drive partn table */

	if (blk_pm_request(rq))
		ide_check_pm_state(drive, rq);

	SELECT_DRIVE(drive);
	if (ide_wait_stat(&startstop, drive, drive->ready_stat, BUSY_STAT|DRQ_STAT, WAIT_READY)) {
		printk(KERN_ERR "%s: drive not ready for command\n", drive->name);
		return startstop;
	}
	if (!drive->special.all) {
		ide_driver_t *drv;

		/*
		 * We reset the drive so we need to issue a SETFEATURES.
		 * Do it _after_ do_special() restored device parameters.
		 */
		if (drive->current_speed == 0xff)
			ide_config_drive_speed(drive, drive->desired_speed);

		if (rq->cmd_type == REQ_TYPE_ATA_CMD ||
		    rq->cmd_type == REQ_TYPE_ATA_TASK ||
		    rq->cmd_type == REQ_TYPE_ATA_TASKFILE)
			return execute_drive_cmd(drive, rq);
		else if (blk_pm_request(rq)) {
			struct request_pm_state *pm = rq->data;
#ifdef DEBUG_PM
			printk("%s: start_power_step(step: %d)\n",
				drive->name, pm->pm_step);
#endif
			startstop = ide_start_power_step(drive, rq);
			if (startstop == ide_stopped &&
			    pm->pm_step == ide_pm_state_completed)
				ide_complete_pm_request(drive, rq);
			return startstop;
		}

		drv = *(ide_driver_t **)rq->rq_disk->private_data;
		return drv->do_request(drive, rq, block);
	}
	return do_special(drive);
kill_rq:
	ide_kill_rq(drive, rq);
	return ide_stopped;
}

/**
 *	ide_stall_queue		-	pause an IDE device
 *	@drive: drive to stall
 *	@timeout: time to stall for (jiffies)
 *
 *	ide_stall_queue() can be used by a drive to give excess bandwidth back
 *	to the hwgroup by sleeping for timeout jiffies.
 */

void ide_stall_queue (ide_drive_t *drive, unsigned long timeout)
{
	if (timeout > WAIT_WORSTCASE)
		timeout = WAIT_WORSTCASE;
	drive->sleep = timeout + jiffies;
	drive->sleeping = 1;
}

EXPORT_SYMBOL(ide_stall_queue);

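#if 0
/*
 * Illustrative sketch only (not from this file): a subdriver that knows
 * its device will be busy for a while (e.g. a tape repositioning) can
 * yield the hwgroup to the other drive on the cable.  The half-second
 * figure is an arbitrary example value.
 */
static void example_yield_bus(ide_drive_t *drive)
{
	ide_stall_queue(drive, HZ / 2);	/* sleep this queue for ~500ms */
}
#endif
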
#define WAKEUP(drive)	((drive)->service_start + 2 * (drive)->service_time)
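
/*
 * Worked example (not in the original source): a drive whose last request
 * started at jiffies == 1000 and took service_time == 20 jiffies gives
 * WAKEUP(drive) == 1000 + 2*20 == 1040, i.e. we anticipate it wanting
 * service again at about twice its recent service interval; choose_drive()
 * below prefers the drive with the earliest anticipated wakeup.
 */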

/**
 *	choose_drive		-	select a drive to service
 *	@hwgroup: hardware group to select on
 *
 *	choose_drive() selects the next drive which will be serviced.
 *	This is necessary because the IDE layer can't issue commands
 *	to both drives on the same cable, unlike SCSI.
 */

static inline ide_drive_t *choose_drive (ide_hwgroup_t *hwgroup)
{
	ide_drive_t *drive, *best;

repeat:
	best = NULL;
	drive = hwgroup->drive;

	/*
	 * drive is doing pre-flush, ordered write, post-flush sequence. even
	 * though that is 3 requests, it must be seen as a single transaction.
	 * we must not preempt this drive until that is complete
	 */
	if (blk_queue_flushing(drive->queue)) {
		/*
		 * small race where queue could get replugged during
		 * the 3-request flush cycle, just yank the plug since
		 * we want it to finish asap
		 */
		blk_remove_plug(drive->queue);
		return drive;
	}

	do {
		if ((!drive->sleeping || time_after_eq(jiffies, drive->sleep))
		    && !elv_queue_empty(drive->queue)) {
			if (!best
			 || (drive->sleeping && (!best->sleeping || time_before(drive->sleep, best->sleep)))
			 || (!best->sleeping && time_before(WAKEUP(drive), WAKEUP(best))))
			{
				if (!blk_queue_plugged(drive->queue))
					best = drive;
			}
		}
	} while ((drive = drive->next) != hwgroup->drive);
	if (best && best->nice1 && !best->sleeping && best != hwgroup->drive && best->service_time > WAIT_MIN_SLEEP) {
		long t = (signed long)(WAKEUP(best) - jiffies);
		if (t >= WAIT_MIN_SLEEP) {
		/*
		 * We *may* have some time to spare, but first let's see if
		 * someone can potentially benefit from our nice mood today..
		 */
			drive = best->next;
			do {
				if (!drive->sleeping
				 && time_before(jiffies - best->service_time, WAKEUP(drive))
				 && time_before(WAKEUP(drive), jiffies + t))
				{
					ide_stall_queue(best, min_t(long, t, 10 * WAIT_MIN_SLEEP));
					goto repeat;
				}
			} while ((drive = drive->next) != best);
		}
	}
	return best;
}

/*
 * Issue a new request to a drive from hwgroup
 * Caller must have already done spin_lock_irqsave(&ide_lock, ..);
 *
 * A hwgroup is a serialized group of IDE interfaces.  Usually there is
 * exactly one hwif (interface) per hwgroup, but buggy controllers (eg. CMD640)
 * may have both interfaces in a single hwgroup to "serialize" access.
 * Or possibly multiple ISA interfaces can share a common IRQ by being grouped
 * together into one hwgroup for serialized access.
 *
 * Note also that several hwgroups can end up sharing a single IRQ,
 * possibly along with many other devices.  This is especially common in
 * PCI-based systems with off-board IDE controller cards.
 *
 * The IDE driver uses the single global ide_lock spinlock to protect
 * access to the request queues, and to protect the hwgroup->busy flag.
 *
 * The first thread into the driver for a particular hwgroup sets the
 * hwgroup->busy flag to indicate that this hwgroup is now active,
 * and then initiates processing of the top request from the request queue.
 *
 * Other threads attempting entry notice the busy setting, and will simply
 * queue their new requests and exit immediately.  Note that hwgroup->busy
 * remains set even when the driver is merely awaiting the next interrupt.
 * Thus, the meaning is "this hwgroup is busy processing a request".
 *
 * When processing of a request completes, the completing thread or IRQ-handler
 * will start the next request from the queue.  If no more work remains,
 * the driver will clear the hwgroup->busy flag and exit.
 *
 * The ide_lock (spinlock) is used to protect all access to the
 * hwgroup->busy flag, but is otherwise not needed for most processing in
 * the driver. This makes the driver much friendlier to shared IRQs
 * than previous designs, while remaining 100% (?) SMP safe and capable.
 */
static void ide_do_request (ide_hwgroup_t *hwgroup, int masked_irq)
{
	ide_drive_t	*drive;
	ide_hwif_t	*hwif;
	struct request	*rq;
	ide_startstop_t	startstop;
	int             loops = 0;

	/* for atari only: POSSIBLY BROKEN HERE(?) */
	ide_get_lock(ide_intr, hwgroup);

	/* caller must own ide_lock */
	BUG_ON(!irqs_disabled());

	while (!hwgroup->busy) {
		hwgroup->busy = 1;
		drive = choose_drive(hwgroup);
		if (drive == NULL) {
			int sleeping = 0;
			unsigned long sleep = 0; /* shut up, gcc */
			hwgroup->rq = NULL;
			drive = hwgroup->drive;
			do {
				if (drive->sleeping && (!sleeping || time_before(drive->sleep, sleep))) {
					sleeping = 1;
					sleep = drive->sleep;
				}
			} while ((drive = drive->next) != hwgroup->drive);
			if (sleeping) {
		/*
		 * Take a short snooze, and then wake up this hwgroup again.
		 * This gives other hwgroups on the same IRQ a chance to
		 * play fairly with us, just in case there are big differences
		 * in relative throughputs.. don't want to hog the cpu too much.
		 */
				if (time_before(sleep, jiffies + WAIT_MIN_SLEEP))
					sleep = jiffies + WAIT_MIN_SLEEP;
#if 1
				if (timer_pending(&hwgroup->timer))
					printk(KERN_CRIT "ide_set_handler: timer already active\n");
#endif
				/* so that ide_timer_expiry knows what to do */
				hwgroup->sleeping = 1;
				hwgroup->req_gen_timer = hwgroup->req_gen;
				mod_timer(&hwgroup->timer, sleep);
				/* we purposely leave hwgroup->busy==1
				 * while sleeping */
			} else {
				/* Ugly, but how can we sleep for the lock
				 * otherwise? perhaps from tq_disk?
				 */

				/* for atari only */
				ide_release_lock();
				hwgroup->busy = 0;
			}

			/* no more work for this hwgroup (for now) */
			return;
		}
	again:
		hwif = HWIF(drive);
		if (hwgroup->hwif->sharing_irq &&
		    hwif != hwgroup->hwif &&
		    hwif->io_ports[IDE_CONTROL_OFFSET]) {
			/* set nIEN for previous hwif */
			SELECT_INTERRUPT(drive);
		}
		hwgroup->hwif = hwif;
		hwgroup->drive = drive;
		drive->sleeping = 0;
		drive->service_start = jiffies;

		if (blk_queue_plugged(drive->queue)) {
			printk(KERN_ERR "ide: huh? queue was plugged!\n");
			break;
		}

		/*
		 * we know that the queue isn't empty, but this can happen
		 * if the q->prep_rq_fn() decides to kill a request
		 */
		rq = elv_next_request(drive->queue);
		if (!rq) {
			hwgroup->busy = 0;
			break;
		}

		/*
		 * Sanity: don't accept a request that isn't a PM request
		 * if we are currently power managed. This is very important as
		 * blk_stop_queue() doesn't prevent the elv_next_request()
		 * above from returning us whatever is in the queue. Since we
		 * call ide_do_request() ourselves, we end up taking requests
		 * while the queue is blocked...
		 *
		 * We let requests forced at head of queue with ide-preempt
		 * through. I hope that doesn't happen too much, hopefully not
		 * unless the subdriver triggers such a thing in its own PM
		 * state machine.
		 *
		 * We count how many times we loop here to make sure we service
		 * all drives in the hwgroup without looping forever
		 */
		if (drive->blocked && !blk_pm_request(rq) && !(rq->cmd_flags & REQ_PREEMPT)) {
			drive = drive->next ? drive->next : hwgroup->drive;
			if (loops++ < 4 && !blk_queue_plugged(drive->queue))
				goto again;
			/* We clear busy, there should be no pending ATA command at this point. */
			hwgroup->busy = 0;
			break;
		}

		hwgroup->rq = rq;

		/*
		 * Some systems have trouble with IDE IRQs arriving while
		 * the driver is still setting things up.  So, here we disable
		 * the IRQ used by this interface while the request is being started.
		 * This may look bad at first, but pretty much the same thing
		 * happens anyway when any interrupt comes in, IDE or otherwise
		 *  -- the kernel masks the IRQ while it is being handled.
		 */
		if (masked_irq != IDE_NO_IRQ && hwif->irq != masked_irq)
			disable_irq_nosync(hwif->irq);
		spin_unlock(&ide_lock);
		local_irq_enable_in_hardirq();
			/* allow other IRQs while we start this request */
		startstop = start_request(drive, rq);
		spin_lock_irq(&ide_lock);
		if (masked_irq != IDE_NO_IRQ && hwif->irq != masked_irq)
			enable_irq(hwif->irq);
		if (startstop == ide_stopped)
			hwgroup->busy = 0;
	}
}

/*
 * Passes the stuff to ide_do_request
 */
void do_ide_request(struct request_queue *q)
{
	ide_drive_t *drive = q->queuedata;

	ide_do_request(HWGROUP(drive), IDE_NO_IRQ);
}

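#if 0
/*
 * Illustrative sketch only (not from this file): do_ide_request() is the
 * request_fn that the probe code registers for each drive's queue, with
 * ide_lock doubling as the queue lock -- roughly like this (an assumption
 * based on how blk_init_queue() is normally used):
 */
static struct request_queue *example_init_queue(ide_drive_t *drive)
{
	return blk_init_queue(do_ide_request, &ide_lock);
}
#endif
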
/*
 * un-busy the hwgroup etc, and clear any pending DMA status. we want to
 * retry the current request in pio mode instead of risking tossing it
 * all away
 */
static ide_startstop_t ide_dma_timeout_retry(ide_drive_t *drive, int error)
{
	ide_hwif_t *hwif = HWIF(drive);
	struct request *rq;
	ide_startstop_t ret = ide_stopped;

	/*
	 * end current dma transaction
	 */

	if (error < 0) {
		printk(KERN_WARNING "%s: DMA timeout error\n", drive->name);
		(void)HWIF(drive)->ide_dma_end(drive);
		ret = ide_error(drive, "dma timeout error",
						hwif->INB(IDE_STATUS_REG));
	} else {
		printk(KERN_WARNING "%s: DMA timeout retry\n", drive->name);
		hwif->dma_timeout(drive);
	}

	/*
	 * disable dma for now, but remember that we did so because of
	 * a timeout -- we'll reenable after we finish this next request
	 * (or rather the first chunk of it) in pio.
	 */
	drive->retry_pio++;
	drive->state = DMA_PIO_RETRY;
	hwif->dma_off_quietly(drive);

	/*
	 * un-busy drive etc (hwgroup->busy is cleared on return) and
	 * make sure request is sane
	 */
	rq = HWGROUP(drive)->rq;

	if (!rq)
		goto out;

	HWGROUP(drive)->rq = NULL;

	rq->errors = 0;

	if (!rq->bio)
		goto out;

	rq->sector = rq->bio->bi_sector;
	rq->current_nr_sectors = bio_iovec(rq->bio)->bv_len >> 9;
	rq->hard_cur_sectors = rq->current_nr_sectors;
	rq->buffer = bio_data(rq->bio);
out:
	return ret;
}

/**
 *	ide_timer_expiry	-	handle lack of an IDE interrupt
 *	@data: timer callback magic (hwgroup)
 *
 *	An IDE command has timed out before the expected drive return
 *	occurred. At this point we attempt to clean up the current
 *	mess. If the current handler includes an expiry handler then
 *	we invoke the expiry handler, and providing it is happy the
 *	work is done. If that fails we apply generic recovery rules
 *	invoking the handler and checking the drive DMA status. We
 *	have an excessively incestuous relationship with the DMA
 *	logic that wants cleaning up.
 */

void ide_timer_expiry (unsigned long data)
{
	ide_hwgroup_t	*hwgroup = (ide_hwgroup_t *) data;
	ide_handler_t	*handler;
	ide_expiry_t	*expiry;
	unsigned long	flags;
	unsigned long	wait = -1;

	spin_lock_irqsave(&ide_lock, flags);

	if (((handler = hwgroup->handler) == NULL) ||
	    (hwgroup->req_gen != hwgroup->req_gen_timer)) {
		/*
		 * Either a marginal timeout occurred
		 * (got the interrupt just as timer expired),
		 * or we were "sleeping" to give other devices a chance.
		 * Either way, we don't really want to complain about anything.
		 */
		if (hwgroup->sleeping) {
			hwgroup->sleeping = 0;
			hwgroup->busy = 0;
		}
	} else {
		ide_drive_t *drive = hwgroup->drive;
		if (!drive) {
			printk(KERN_ERR "ide_timer_expiry: hwgroup->drive was NULL\n");
			hwgroup->handler = NULL;
		} else {
			ide_hwif_t *hwif;
			ide_startstop_t startstop = ide_stopped;
			if (!hwgroup->busy) {
				hwgroup->busy = 1;	/* paranoia */
				printk(KERN_ERR "%s: ide_timer_expiry: hwgroup->busy was 0 ??\n", drive->name);
			}
			if ((expiry = hwgroup->expiry) != NULL) {
				/* continue */
				if ((wait = expiry(drive)) > 0) {
					/* reset timer */
					hwgroup->timer.expires  = jiffies + wait;
					hwgroup->req_gen_timer = hwgroup->req_gen;
					add_timer(&hwgroup->timer);
					spin_unlock_irqrestore(&ide_lock, flags);
					return;
				}
			}
			hwgroup->handler = NULL;
			/*
			 * We need to simulate a real interrupt when invoking
			 * the handler() function, which means we need to
			 * globally mask the specific IRQ:
			 */
			spin_unlock(&ide_lock);
			hwif  = HWIF(drive);
#if DISABLE_IRQ_NOSYNC
			disable_irq_nosync(hwif->irq);
#else
			/* disable_irq_nosync ?? */
			disable_irq(hwif->irq);
#endif /* DISABLE_IRQ_NOSYNC */
			/* local CPU only,
			 * as if we were handling an interrupt */
			local_irq_disable();
			if (hwgroup->polling) {
				startstop = handler(drive);
			} else if (drive_is_ready(drive)) {
				if (drive->waiting_for_dma)
					hwgroup->hwif->dma_lost_irq(drive);
				(void)ide_ack_intr(hwif);
				printk(KERN_WARNING "%s: lost interrupt\n", drive->name);
				startstop = handler(drive);
			} else {
				if (drive->waiting_for_dma) {
					startstop = ide_dma_timeout_retry(drive, wait);
				} else
					startstop =
					ide_error(drive, "irq timeout", hwif->INB(IDE_STATUS_REG));
			}
			drive->service_time = jiffies - drive->service_start;
			spin_lock_irq(&ide_lock);
			enable_irq(hwif->irq);
			if (startstop == ide_stopped)
				hwgroup->busy = 0;
		}
	}
	ide_do_request(hwgroup, IDE_NO_IRQ);
	spin_unlock_irqrestore(&ide_lock, flags);
}

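#if 0
/*
 * Illustrative sketch only (not from this file): an expiry callback is
 * registered alongside a handler via ide_set_handler() (declared in
 * <linux/ide.h>).  Returning a positive value from it, as seen above,
 * simply re-arms the timer for that many jiffies.
 */
static int example_expiry(ide_drive_t *drive)
{
	if (HWIF(drive)->INB(IDE_STATUS_REG) & BUSY_STAT)
		return HZ / 10;	/* still busy: poll again in ~100ms */
	return 0;		/* give up: fall through to timeout handling */
}
#endif
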
/**
 *	unexpected_intr		-	handle an unexpected IDE interrupt
 *	@irq: interrupt line
 *	@hwgroup: hwgroup being processed
 *
 *	There's nothing really useful we can do with an unexpected interrupt,
 *	other than reading the status register (to clear it), and logging it.
 *	There should be no way that an irq can happen before we're ready for it,
 *	so we needn't worry much about losing an "important" interrupt here.
 *
 *	On laptops (and "green" PCs), an unexpected interrupt occurs whenever
 *	the drive enters "idle", "standby", or "sleep" mode, so if the status
 *	looks "good", we just ignore the interrupt completely.
 *
 *	This routine assumes __cli() is in effect when called.
 *
 *	If an unexpected interrupt happens on irq15 while we are handling irq14
 *	and if the two interfaces are "serialized" (CMD640), then it looks like
 *	we could screw up by interfering with a new request being set up for
 *	irq15.
 *
 *	In reality, this is a non-issue.  The new command is not sent unless
 *	the drive is ready to accept one, in which case we know the drive is
 *	not trying to interrupt us.  And ide_set_handler() is always invoked
 *	before completing the issuance of any new drive command, so we will not
 *	be accidentally invoked as a result of any valid command completion
 *	interrupt.
 *
 *	Note that we must walk the entire hwgroup here. We know which hwif
 *	is doing the current command, but we don't know which hwif burped
 *	mysteriously.
 */

static void unexpected_intr (int irq, ide_hwgroup_t *hwgroup)
{
	u8 stat;
	ide_hwif_t *hwif = hwgroup->hwif;

	/*
	 * handle the unexpected interrupt
	 */
	do {
		if (hwif->irq == irq) {
			stat = hwif->INB(hwif->io_ports[IDE_STATUS_OFFSET]);
			if (!OK_STAT(stat, READY_STAT, BAD_STAT)) {
				/* Try to not flood the console with msgs */
				static unsigned long last_msgtime, count;
				++count;
				if (time_after(jiffies, last_msgtime + HZ)) {
					last_msgtime = jiffies;
					printk(KERN_ERR "%s%s: unexpected interrupt, "
						"status=0x%02x, count=%ld\n",
						hwif->name,
						(hwif->next==hwgroup->hwif) ? "" : "(?)", stat, count);
				}
			}
		}
	} while ((hwif = hwif->next) != hwgroup->hwif);
}

/**
 *	ide_intr	-	default IDE interrupt handler
 *	@irq: interrupt number
 *	@dev_id: hwif group
 *
 *	This is the default IRQ handler for the IDE layer. You should
 *	not need to override it. If you do, be aware it is subtle in
 *	places.
 *
 *	hwgroup->hwif is the interface in the group currently performing
 *	a command. hwgroup->drive is the drive and hwgroup->handler is
 *	the IRQ handler to call. As we issue a command the handlers
 *	step through multiple states, reassigning the handler to the
 *	next step in the process. Unlike a smart SCSI controller IDE
 *	expects the main processor to sequence the various transfer
 *	stages. We also manage a poll timer to catch up with most
 *	timeout situations. There are still a few where the handlers
 *	don't ever decide to give up.
 *
 *	The handler eventually returns ide_stopped to indicate the
 *	request completed. At this point we issue the next request
 *	on the hwgroup and the process begins again.
 */

irqreturn_t ide_intr (int irq, void *dev_id)
{
	unsigned long flags;
	ide_hwgroup_t *hwgroup = (ide_hwgroup_t *)dev_id;
	ide_hwif_t *hwif;
	ide_drive_t *drive;
	ide_handler_t *handler;
	ide_startstop_t startstop;

	spin_lock_irqsave(&ide_lock, flags);
	hwif = hwgroup->hwif;

	if (!ide_ack_intr(hwif)) {
		spin_unlock_irqrestore(&ide_lock, flags);
		return IRQ_NONE;
	}

	if ((handler = hwgroup->handler) == NULL || hwgroup->polling) {
		/*
		 * Not expecting an interrupt from this drive.
		 * That means this could be:
		 *	(1) an interrupt from another PCI device
		 *	sharing the same PCI INT# as us.
		 * or	(2) a drive just entered sleep or standby mode,
		 *	and is interrupting to let us know.
		 * or	(3) a spurious interrupt of unknown origin.
		 *
		 * For PCI, we cannot tell the difference,
		 * so in that case we just ignore it and hope it goes away.
		 *
		 * FIXME: unexpected_intr should be hwif-> then we can
		 * remove all the ifdef PCI crap
		 */
#ifdef CONFIG_BLK_DEV_IDEPCI
		if (hwif->pci_dev && !hwif->pci_dev->vendor)
#endif	/* CONFIG_BLK_DEV_IDEPCI */
		{
			/*
			 * Probably not a shared PCI interrupt,
			 * so we can safely try to do something about it:
			 */
			unexpected_intr(irq, hwgroup);
#ifdef CONFIG_BLK_DEV_IDEPCI
		} else {
			/*
			 * Whack the status register, just in case
			 * we have a leftover pending IRQ.
			 */
			(void) hwif->INB(hwif->io_ports[IDE_STATUS_OFFSET]);
#endif /* CONFIG_BLK_DEV_IDEPCI */
		}
		spin_unlock_irqrestore(&ide_lock, flags);
		return IRQ_NONE;
	}
	drive = hwgroup->drive;
	if (!drive) {
		/*
		 * This should NEVER happen, and there isn't much
		 * we could do about it here.
		 *
		 * [Note - this can occur if the drive is hot unplugged]
		 */
		spin_unlock_irqrestore(&ide_lock, flags);
		return IRQ_HANDLED;
	}
	if (!drive_is_ready(drive)) {
		/*
		 * This happens regularly when we share a PCI IRQ with
		 * another device.  Unfortunately, it can also happen
		 * with some buggy drives that trigger the IRQ before
		 * their status register is up to date.  Hopefully we have
		 * enough advance overhead that the latter isn't a problem.
		 */
		spin_unlock_irqrestore(&ide_lock, flags);
		return IRQ_NONE;
	}
	if (!hwgroup->busy) {
		hwgroup->busy = 1;	/* paranoia */
		printk(KERN_ERR "%s: ide_intr: hwgroup->busy was 0 ??\n", drive->name);
	}
	hwgroup->handler = NULL;
	hwgroup->req_gen++;
	del_timer(&hwgroup->timer);
	spin_unlock(&ide_lock);

	/* Some controllers might set DMA INTR no matter DMA or PIO;
	 * bmdma status might need to be cleared even for
	 * PIO interrupts to prevent spurious/lost irq.
	 */
	if (hwif->ide_dma_clear_irq && !(drive->waiting_for_dma))
		/* ide_dma_end() needs bmdma status for error checking.
		 * So, skip clearing bmdma status here and leave it
		 * to ide_dma_end() if this is dma interrupt.
		 */
		hwif->ide_dma_clear_irq(drive);

	if (drive->unmask)
		local_irq_enable_in_hardirq();
	/* service this interrupt, may set handler for next interrupt */
	startstop = handler(drive);
	spin_lock_irq(&ide_lock);

	/*
	 * Note that handler() may have set things up for another
	 * interrupt to occur soon, but it cannot happen until
	 * we exit from this routine, because it will be the
	 * same irq as is currently being serviced here, and Linux
	 * won't allow another of the same (on any CPU) until we return.
	 */
	drive->service_time = jiffies - drive->service_start;
	if (startstop == ide_stopped) {
		if (hwgroup->handler == NULL) {	/* paranoia */
			hwgroup->busy = 0;
			ide_do_request(hwgroup, hwif->irq);
		} else {
			printk(KERN_ERR "%s: ide_intr: huh? expected NULL handler "
				"on exit\n", drive->name);
		}
	}
	spin_unlock_irqrestore(&ide_lock, flags);
	return IRQ_HANDLED;
}

/**
 *	ide_init_drive_cmd	-	initialize a drive command request
 *	@rq: request object
 *
 *	Initialize a request before we fill it in and send it down to
 *	ide_do_drive_cmd. Commands must be set up by this function. Right
 *	now it doesn't do a lot, but if that changes abusers will have a
 *	nasty surprise.
 */

void ide_init_drive_cmd (struct request *rq)
{
	memset(rq, 0, sizeof(*rq));
	rq->cmd_type = REQ_TYPE_ATA_CMD;
	rq->ref_count = 1;
}

EXPORT_SYMBOL(ide_init_drive_cmd);

/**
 *	ide_do_drive_cmd	-	issue IDE special command
 *	@drive: device to issue command
 *	@rq: request to issue
 *	@action: action for processing
 *
 *	This function issues a special IDE device request
 *	onto the request queue.
 *
 *	If action is ide_wait, then the rq is queued at the end of the
 *	request queue, and the function sleeps until it has been processed.
 *	This is for use when invoked from an ioctl handler.
 *
 *	If action is ide_preempt, then the rq is queued at the head of
 *	the request queue, displacing the currently-being-processed
 *	request and this function returns immediately without waiting
 *	for the new rq to be completed. This is VERY DANGEROUS, and is
 *	intended for careful use by the ATAPI tape/cdrom driver code.
 *
 *	If action is ide_end, then the rq is queued at the end of the
 *	request queue, and the function returns immediately without waiting
 *	for the new rq to be completed. This is again intended for careful
 *	use by the ATAPI tape/cdrom driver code.
 */

int ide_do_drive_cmd (ide_drive_t *drive, struct request *rq, ide_action_t action)
{
	unsigned long flags;
	ide_hwgroup_t *hwgroup = HWGROUP(drive);
	DECLARE_COMPLETION_ONSTACK(wait);
	int where = ELEVATOR_INSERT_BACK, err;
	int must_wait = (action == ide_wait || action == ide_head_wait);

	rq->errors = 0;

	/*
	 * we need to hold an extra reference to request for safe inspection
	 * after completion
	 */
	if (must_wait) {
		rq->ref_count++;
		rq->end_io_data = &wait;
		rq->end_io = blk_end_sync_rq;
	}

	spin_lock_irqsave(&ide_lock, flags);
	if (action == ide_preempt)
		hwgroup->rq = NULL;
	if (action == ide_preempt || action == ide_head_wait) {
		where = ELEVATOR_INSERT_FRONT;
		rq->cmd_flags |= REQ_PREEMPT;
	}
	__elv_add_request(drive->queue, rq, where, 0);
	ide_do_request(hwgroup, IDE_NO_IRQ);
	spin_unlock_irqrestore(&ide_lock, flags);

	err = 0;
	if (must_wait) {
		wait_for_completion(&wait);
		if (rq->errors)
			err = -EIO;

		blk_put_request(rq);
	}

	return err;
}

EXPORT_SYMBOL(ide_do_drive_cmd);
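
#if 0
/*
 * Illustrative sketch only (not from this file): the classic hdparm-style
 * ioctl path builds a REQ_TYPE_ATA_CMD request on the stack and waits for
 * it.  args[] follows the [cmd, nsect, feature, xx] layout that
 * execute_drive_cmd() expects; results come back in the same buffer via
 * ide_end_drive_cmd().
 */
static int example_wait_cmd(ide_drive_t *drive, u8 cmd)
{
	struct request rq;
	u8 args[4] = { cmd, 0, 0, 0 };

	ide_init_drive_cmd(&rq);
	rq.buffer = (char *)args;

	return ide_do_drive_cmd(drive, &rq, ide_wait);
}
#endif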