}
static void scsi_run_queue(struct request_queue *q);
-static void scsi_release_buffers(struct scsi_cmnd *cmd);
/*
* Function: scsi_unprep_request()
*
* Purpose: Undo the preparation done for a request, releasing its
* associated scsi_cmnd so that the request can be requeued and
* prepared again from scratch later.
*/
static void scsi_unprep_request(struct request *req)
{
struct scsi_cmnd *cmd = req->special;

req->flags &= ~REQ_DONTPREP;
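/* a request issued via scsi_do_req() (REQ_SPECIAL) keeps its
* scsi_request in ->special; any other request is re-prepared from
* scratch the next time it passes through the prep function */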
req->special = (req->flags & REQ_SPECIAL) ? cmd->sc_request : NULL;
- scsi_release_buffers(cmd);
scsi_put_command(cmd);
}
void scsi_device_unbusy(struct scsi_device *sdev)
{
struct Scsi_Host *shost = sdev->host;
unsigned long flags;

spin_lock_irqsave(shost->host_lock, flags);
shost->host_busy--;
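/*
* One fewer command is now outstanding on this host; if error
* recovery is in progress and commands have failed, this may be
* the last completion the error handler was waiting for, so give
* it a chance to wake up.
*/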
- if (unlikely((shost->shost_state == SHOST_RECOVERY) &&
+ if (unlikely(scsi_host_in_recovery(shost) &&
shost->host_failed))
scsi_eh_wakeup(shost);
spin_unlock(shost->host_lock);
/*
* if sg table allocation fails, requeue request later.
*/
sgpnt = scsi_alloc_sgtable(cmd, GFP_ATOMIC);
- if (unlikely(!sgpnt))
+ if (unlikely(!sgpnt)) {
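+ /*
+ * The request will be retried later via BLKPREP_DEFER; release
+ * the scsi_cmnd now, otherwise it would be leaked while the
+ * request sits on the queue.
+ */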
+ scsi_unprep_request(req);
return BLKPREP_DEFER;
+ }
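/* the sg table becomes the command's data buffer; nr_sectors counts
* 512-byte sectors, so shifting left by 9 converts to bytes */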
cmd->request_buffer = (char *) sgpnt;
cmd->request_bufflen = req->nr_sectors << 9;
*/
ret = scsi_init_io(cmd);
switch(ret) {
+ /* For BLKPREP_KILL/DEFER the cmd was released */
case BLKPREP_KILL:
- /* BLKPREP_KILL return also releases the command */
goto kill;
case BLKPREP_DEFER:
goto defer;
static inline int scsi_host_queue_ready(struct request_queue *q,
struct Scsi_Host *shost,
struct scsi_device *sdev)
{
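/* a host in error recovery must not be sent any new commands */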
- if (shost->shost_state == SHOST_RECOVERY)
+ if (scsi_host_in_recovery(shost))
return 0;
if (shost->host_busy == 0 && shost->host_blocked) {
/*
* Decrementing device_busy without checking it is OK, as all such
* cases (host limits or settings) should run the queue at some
* later time.
*/
- scsi_unprep_request(req);
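/*
* The request is left fully prepared (REQ_DONTPREP is still set),
* so its scsi_cmnd stays attached and is reused when the queue is
* run again.
*/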
spin_lock_irq(q->queue_lock);
blk_requeue_request(q, req);
sdev->device_busy--;