X-Git-Url: https://err.no/cgi-bin/gitweb.cgi?a=blobdiff_plain;f=drivers%2Fblock%2Fub.c;h=3a281ef11ffa9a96543358dc91c817805eedc981;hb=4f9faaace217b854ae1e1746ee4ac44688ddadbd;hp=a70c1c29a7aa2c28e99c9fa352fa6f42b2048d17;hpb=6208e77e7fa9e69f399fddc55b1cf9527fbde599;p=linux-2.6

diff --git a/drivers/block/ub.c b/drivers/block/ub.c
index a70c1c29a7..3a281ef11f 100644
--- a/drivers/block/ub.c
+++ b/drivers/block/ub.c
@@ -8,6 +8,7 @@
  * and is not licensed separately. See file COPYING for details.
  *
  * TODO (sorted by decreasing priority)
+ *  -- Return sense now that rq allows it (we always auto-sense anyway).
  *  -- set readonly flag for CDs, set removable flag for CF readers
  *  -- do inquiry and verify we got a disk and not a tape (for LUN mismatch)
  *  -- verify the 13 conditions and do bulk resets
@@ -204,6 +205,7 @@ struct ub_scsi_cmd {
 	unsigned char key, asc, ascq;	/* May be valid if error==-EIO */
 
 	int stat_count;			/* Retries getting status. */
+	unsigned int timeo;		/* jiffies until rq->timeout changes */
 
 	unsigned int len;		/* Requested length */
 	unsigned int current_sg;
@@ -317,6 +319,7 @@ struct ub_dev {
 	int openc;			/* protected by ub_lock! */
 					/* kref is too implicit for our taste */
 	int reset;			/* Reset is running */
+	int bad_resid;
 	unsigned int tagcnt;
 	char name[12];
 	struct usb_device *dev;
@@ -359,7 +362,8 @@ static void ub_cmd_build_block(struct ub_dev *sc, struct ub_lun *lun,
 static void ub_cmd_build_packet(struct ub_dev *sc, struct ub_lun *lun,
     struct ub_scsi_cmd *cmd, struct ub_request *urq);
 static void ub_rw_cmd_done(struct ub_dev *sc, struct ub_scsi_cmd *cmd);
-static void ub_end_rq(struct request *rq, unsigned int status);
+static void ub_end_rq(struct request *rq, unsigned int status,
+    unsigned int cmd_len);
 static int ub_rw_cmd_retry(struct ub_dev *sc, struct ub_lun *lun,
     struct ub_request *urq, struct ub_scsi_cmd *cmd);
 static int ub_submit_scsi(struct ub_dev *sc, struct ub_scsi_cmd *cmd);
@@ -642,13 +646,13 @@ static int ub_request_fn_1(struct ub_lun *lun, struct request *rq)
 
 	if (atomic_read(&sc->poison)) {
 		blkdev_dequeue_request(rq);
-		ub_end_rq(rq, DID_NO_CONNECT << 16);
+		ub_end_rq(rq, DID_NO_CONNECT << 16, blk_rq_bytes(rq));
 		return 0;
 	}
 
 	if (lun->changed && !blk_pc_request(rq)) {
 		blkdev_dequeue_request(rq);
-		ub_end_rq(rq, SAM_STAT_CHECK_CONDITION);
+		ub_end_rq(rq, SAM_STAT_CHECK_CONDITION, blk_rq_bytes(rq));
 		return 0;
 	}
 
@@ -657,7 +661,6 @@ static int ub_request_fn_1(struct ub_lun *lun, struct request *rq)
 	if ((cmd = ub_get_cmd(lun)) == NULL)
 		return -1;
 	memset(cmd, 0, sizeof(struct ub_scsi_cmd));
-	sg_init_table(cmd->sgv, UB_MAX_REQ_SG);
 
 	blkdev_dequeue_request(rq);
 
@@ -668,6 +671,7 @@ static int ub_request_fn_1(struct ub_lun *lun, struct request *rq)
 	/*
 	 * get scatterlist from block layer
 	 */
+	sg_init_table(&urq->sgv[0], UB_MAX_REQ_SG);
 	n_elem = blk_rq_map_sg(lun->disk->queue, rq, &urq->sgv[0]);
 	if (n_elem < 0) {
 		/* Impossible, because blk_rq_map_sg should not hit ENOMEM. */
@@ -701,7 +705,7 @@ static int ub_request_fn_1(struct ub_lun *lun, struct request *rq)
 
 drop:
 	ub_put_cmd(lun, cmd);
-	ub_end_rq(rq, DID_ERROR << 16);
+	ub_end_rq(rq, DID_ERROR << 16, blk_rq_bytes(rq));
 	return 0;
 }
 
@@ -762,6 +766,12 @@ static void ub_cmd_build_packet(struct ub_dev *sc, struct ub_lun *lun,
 	cmd->cdb_len = rq->cmd_len;
 
 	cmd->len = rq->data_len;
+
+	/*
+	 * To reapply this to every URB is not as incorrect as it looks.
+	 * In return, we avoid any complicated tracking calculations.
+	 */
+	cmd->timeo = rq->timeout;
 }
 
 static void ub_rw_cmd_done(struct ub_dev *sc, struct ub_scsi_cmd *cmd)
@@ -770,6 +780,7 @@ static void ub_rw_cmd_done(struct ub_dev *sc, struct ub_scsi_cmd *cmd)
 	struct ub_request *urq = cmd->back;
 	struct request *rq;
 	unsigned int scsi_status;
+	unsigned int cmd_len;
 
 	rq = urq->rq;
 
@@ -779,8 +790,14 @@ static void ub_rw_cmd_done(struct ub_dev *sc, struct ub_scsi_cmd *cmd)
 				rq->data_len = 0;
 			else
 				rq->data_len -= cmd->act_len;
+			scsi_status = 0;
+		} else {
+			if (cmd->act_len != cmd->len) {
+				scsi_status = SAM_STAT_CHECK_CONDITION;
+			} else {
+				scsi_status = 0;
+			}
 		}
-		scsi_status = 0;
 	} else {
 		if (blk_pc_request(rq)) {
 			/* UB_SENSE_SIZE is smaller than SCSI_SENSE_BUFFERSIZE */
@@ -791,7 +808,10 @@ static void ub_rw_cmd_done(struct ub_dev *sc, struct ub_scsi_cmd *cmd)
 			else
 				scsi_status = DID_ERROR << 16;
 		} else {
-			if (cmd->error == -EIO) {
+			if (cmd->error == -EIO &&
+			    (cmd->key == 0 ||
+			     cmd->key == MEDIUM_ERROR ||
+			     cmd->key == UNIT_ATTENTION)) {
 				if (ub_rw_cmd_retry(sc, lun, urq, cmd) == 0)
 					return;
 			}
@@ -801,14 +821,17 @@ static void ub_rw_cmd_done(struct ub_dev *sc, struct ub_scsi_cmd *cmd)
 
 	urq->rq = NULL;
 
+	cmd_len = cmd->len;
 	ub_put_cmd(lun, cmd);
-	ub_end_rq(rq, scsi_status);
+	ub_end_rq(rq, scsi_status, cmd_len);
 	blk_start_queue(lun->disk->queue);
 }
 
-static void ub_end_rq(struct request *rq, unsigned int scsi_status)
+static void ub_end_rq(struct request *rq, unsigned int scsi_status,
+    unsigned int cmd_len)
 {
 	int error;
+	long rqlen;
 
 	if (scsi_status == 0) {
 		error = 0;
@@ -816,8 +839,12 @@ static void ub_end_rq(struct request *rq, unsigned int scsi_status)
 		error = -EIO;
 		rq->errors = scsi_status;
 	}
-	if (__blk_end_request(rq, error, blk_rq_bytes(rq)))
-		BUG();
+	rqlen = blk_rq_bytes(rq); /* Oddly enough, this is the residue. */
+	if (__blk_end_request(rq, error, cmd_len)) {
+		printk(KERN_WARNING DRV_NAME
+		    ": __blk_end_request blew, %s-cmd total %u rqlen %ld\n",
+		    blk_pc_request(rq)? "pc": "fs", cmd_len, rqlen);
+	}
 }
 
 static int ub_rw_cmd_retry(struct ub_dev *sc, struct ub_lun *lun,
@@ -1239,14 +1266,19 @@ static void ub_scsi_urb_compl(struct ub_dev *sc, struct ub_scsi_cmd *cmd)
 			return;
 		}
 
-		len = le32_to_cpu(bcs->Residue);
-		if (len != cmd->len - cmd->act_len) {
-			/*
-			 * It is all right to transfer less, the caller has
-			 * to check. But it's not all right if the device
-			 * counts disagree with our counts.
-			 */
-			goto Bad_End;
+		if (!sc->bad_resid) {
+			len = le32_to_cpu(bcs->Residue);
+			if (len != cmd->len - cmd->act_len) {
+				/*
+				 * Only start ignoring if this cmd ended well.
+				 */
+				if (cmd->len == cmd->act_len) {
+					printk(KERN_NOTICE "%s: "
+					    "bad residual %d of %d, ignoring\n",
+					    sc->name, len, cmd->len);
+					sc->bad_resid = 1;
+				}
+			}
 		}
 
 		switch (bcs->Status) {
@@ -1277,8 +1309,7 @@ static void ub_scsi_urb_compl(struct ub_dev *sc, struct ub_scsi_cmd *cmd)
 		ub_state_done(sc, cmd, -EIO);
 
 	} else {
-		printk(KERN_WARNING "%s: "
-		    "wrong command state %d\n",
+		printk(KERN_WARNING "%s: wrong command state %d\n",
 		    sc->name, cmd->state);
 		ub_state_done(sc, cmd, -EINVAL);
 		return;
@@ -1316,7 +1347,10 @@ static void ub_data_start(struct ub_dev *sc, struct ub_scsi_cmd *cmd)
 		return;
 	}
 
-	sc->work_timer.expires = jiffies + UB_DATA_TIMEOUT;
+	if (cmd->timeo)
+		sc->work_timer.expires = jiffies + cmd->timeo;
+	else
+		sc->work_timer.expires = jiffies + UB_DATA_TIMEOUT;
 	add_timer(&sc->work_timer);
 
 	cmd->state = UB_CMDST_DATA;
@@ -1356,7 +1390,10 @@ static int __ub_state_stat(struct ub_dev *sc, struct ub_scsi_cmd *cmd)
 		return -1;
 	}
 
-	sc->work_timer.expires = jiffies + UB_STAT_TIMEOUT;
+	if (cmd->timeo)
+		sc->work_timer.expires = jiffies + cmd->timeo;
+	else
+		sc->work_timer.expires = jiffies + UB_STAT_TIMEOUT;
 	add_timer(&sc->work_timer);
 	return 0;
 }
@@ -1495,8 +1532,7 @@ static void ub_top_sense_done(struct ub_dev *sc, struct ub_scsi_cmd *scmd)
 		return;
 	}
 	if (cmd->state != UB_CMDST_SENSE) {
-		printk(KERN_WARNING "%s: "
-		    "sense done with bad cmd state %d\n",
+		printk(KERN_WARNING "%s: sense done with bad cmd state %d\n",
 		    sc->name, cmd->state);
 		return;
 	}
@@ -1700,7 +1736,7 @@ static int ub_bd_ioctl(struct inode *inode, struct file *filp,
 }
 
 /*
- * This is called once a new disk was seen by the block layer or by ub_probe().
+ * This is called by check_disk_change if we reported a media change.
  * The main onjective here is to discover the features of the media such as
  * the capacity, read-only status, etc. USB storage generally does not
  * need to be spun up, but if we needed it, this would be the place.
@@ -2116,8 +2152,7 @@ static int ub_get_pipes(struct ub_dev *sc, struct usb_device *dev,
 	}
 
 	if (ep_in == NULL || ep_out == NULL) {
-		printk(KERN_NOTICE "%s: failed endpoint check\n",
-		    sc->name);
+		printk(KERN_NOTICE "%s: failed endpoint check\n", sc->name);
 		return -ENODEV;
 	}
 
@@ -2334,7 +2369,7 @@ static void ub_disconnect(struct usb_interface *intf)
 	spin_unlock_irqrestore(&ub_lock, flags);
 
 	/*
-	 * Fence stall clearnings, operations triggered by unlinkings and so on.
+	 * Fence stall clearings, operations triggered by unlinkings and so on.
 	 * We do not attempt to unlink any URBs, because we do not trust the
 	 * unlink paths in HC drivers. Also, we get -84 upon disconnect anyway.
 	 */
@@ -2379,7 +2414,7 @@ static void ub_disconnect(struct usb_interface *intf)
 		del_gendisk(lun->disk);
 		/*
 		 * I wish I could do:
-		 *    set_bit(QUEUE_FLAG_DEAD, &q->queue_flags);
+		 *    queue_flag_set(QUEUE_FLAG_DEAD, q);
 		 * As it is, we rely on our internal poisoning and let
 		 * the upper levels to spin furiously failing all the I/O.
 		 */
@@ -2397,7 +2432,7 @@ static void ub_disconnect(struct usb_interface *intf)
 	spin_unlock_irqrestore(sc->lock, flags);
 
 	/*
-	 * There is virtually no chance that other CPU runs times so long
+	 * There is virtually no chance that other CPU runs a timeout so long
 	 * after ub_urb_complete should have called del_timer, but only if HCD
 	 * didn't forget to deliver a callback on unlink.
 	 */