2 * The low performance USB storage driver (ub).
4 * Copyright (c) 1999, 2000 Matthew Dharm (mdharm-usb@one-eyed-alien.net)
5 * Copyright (C) 2004 Pete Zaitcev (zaitcev@yahoo.com)
7 * This work is a part of Linux kernel, is derived from it,
8 * and is not licensed separately. See file COPYING for details.
10 * TODO (sorted by decreasing priority)
11 * -- set readonly flag for CDs, set removable flag for CF readers
12 * -- do inquiry and verify we got a disk and not a tape (for LUN mismatch)
13 * -- special case some senses, e.g. 3a/0 -> no media present, reduce retries
14 * -- verify the 13 conditions and do bulk resets
15 * -- kill last_pipe and simply do two-state clearing on both pipes
17 * -- move top_sense and work_bcs into separate allocations (if they survive)
18 * for cache purists and esoteric architectures.
19 * -- Allocate structure for LUN 0 before the first ub_sync_tur, avoid NULL. ?
20 * -- prune comments, they are too voluminous
21 * -- Exterminate P3 printks
23 * -- Redo "benh's retries", perhaps have spin-up code to handle them. V:D=?
24 * -- CLEAR, CLR2STS, CLRRS seem to be ripe for refactoring.
26 #include <linux/kernel.h>
27 #include <linux/module.h>
28 #include <linux/usb.h>
29 #include <linux/usb_usual.h>
30 #include <linux/blkdev.h>
31 #include <linux/devfs_fs_kernel.h>
32 #include <linux/timer.h>
33 #include <scsi/scsi.h>
36 #define DEVFS_NAME DRV_NAME
41 * The command state machine is the key model for understanding of this driver.
43 * The general rule is that all transitions are done towards the bottom
44 * of the diagram, thus preventing any loops.
46 * An exception to that is how the STAT state is handled. A counter allows it
47 * to be re-entered along the path marked with [C].
53 * ub_scsi_cmd_start fails ->--------------------------------------\
60 * was -EPIPE -->-------------------------------->! CLEAR ! !
63 * was error -->------------------------------------- ! --------->\
65 * /--<-- cmd->dir == NONE ? ! !
72 * ! was -EPIPE -->--------------->! CLR2STS ! ! !
75 * ! ! was error -->---- ! --------->\
76 * ! was error -->--------------------- ! ------------- ! --------->\
79 * \--->+--------+ ! ! !
80 * ! STAT !<--------------------------/ ! !
83 * [C] was -EPIPE -->-----------\ ! !
85 * +<---- len == 0 ! ! !
87 * ! was error -->--------------------------------------!---------->\
89 * +<---- bad CSW ! ! !
90 * +<---- bad tag ! ! !
96 * \------- ! --------------------[C]--------\ ! !
98 * cmd->error---\ +--------+ ! !
99 * ! +--------------->! SENSE !<----------/ !
100 * STAT_FAIL----/ +--------+ !
103 * \--------------------------------\--------------------->! DONE !
108 * This many LUNs per USB device.
109 * Every one of them takes a host, see UB_MAX_HOSTS.
111 #define UB_MAX_LUNS 9
116 #define UB_PARTS_PER_LUN 8
118 #define UB_MAX_CDB_SIZE 16 /* Corresponds to Bulk */
120 #define UB_SENSE_SIZE 18
125 /* command block wrapper */
126 struct bulk_cb_wrap {
127 __le32 Signature; /* contains 'USBC' */
128 u32 Tag; /* unique per command id */
129 __le32 DataTransferLength; /* size of data */
130 u8 Flags; /* direction in bit 0 */
132 u8 Length; /* of the CDB */
133 u8 CDB[UB_MAX_CDB_SIZE]; /* max command */
136 #define US_BULK_CB_WRAP_LEN 31
137 #define US_BULK_CB_SIGN 0x43425355 /* spells out 'USBC' */
138 #define US_BULK_FLAG_IN 1
139 #define US_BULK_FLAG_OUT 0
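/*
 * Illustrative example (values assumed, not quoted from the spec): a READ(10)
 * of one 512-byte block at LBA 0 on LUN 0 would be wrapped roughly as
 *
 *	Signature          = 'USBC' (US_BULK_CB_SIGN)
 *	Tag                = <the command's tag>
 *	DataTransferLength = 512
 *	Flags              = 0x80 (device-to-host)
 *	Lun                = 0
 *	Length             = 10
 *	CDB[0..9]          = { READ_10, 0, 0, 0, 0, 0, 0, 0, 1, 0 }
 *
 * See ub_scsi_cmd_start() and ub_cmd_build_block() below for how the driver
 * actually fills these fields.
 */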
141 /* command status wrapper */
142 struct bulk_cs_wrap {
143 __le32 Signature; /* should = 'USBS' */
144 u32 Tag; /* same as original command */
145 __le32 Residue; /* amount not transferred */
146 u8 Status; /* see below */
149 #define US_BULK_CS_WRAP_LEN 13
150 #define US_BULK_CS_SIGN 0x53425355 /* spells out 'USBS' */
151 #define US_BULK_STAT_OK 0
152 #define US_BULK_STAT_FAIL 1
153 #define US_BULK_STAT_PHASE 2
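/*
 * How the STAT state consumes these (see the STAT branch of ub_scsi_urb_compl):
 * US_BULK_STAT_OK lets the command complete, US_BULK_STAT_FAIL triggers an
 * automatic REQUEST SENSE via ub_state_sense, and a phase error or an unknown
 * status ends the command with an error.
 */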
155 /* bulk-only class specific requests */
156 #define US_BULK_RESET_REQUEST 0xff
157 #define US_BULK_GET_MAX_LUN 0xfe
163 #define UB_MAX_REQ_SG 9 /* cdrecord requires 32KB and maybe a header */
164 #define UB_MAX_SECTORS 64
167 * A second is more than enough for a 32K transfer (UB_MAX_SECTORS)
168 * even if a webcam hogs the bus, but some devices need time to spin up.
170 #define UB_URB_TIMEOUT (HZ*2)
171 #define UB_DATA_TIMEOUT (HZ*5) /* ZIP does spin-ups in the data phase */
172 #define UB_STAT_TIMEOUT (HZ*5) /* Same spinups and eject for a dataless cmd. */
173 #define UB_CTRL_TIMEOUT (HZ/2) /* 500ms ought to be enough to clear a stall */
176 * An instance of a SCSI command in transit.
178 #define UB_DIR_NONE 0
179 #define UB_DIR_READ 1
180 #define UB_DIR_ILLEGAL2 2
181 #define UB_DIR_WRITE 3
183 #define UB_DIR_CHAR(c) (((c)==UB_DIR_WRITE)? 'w': \
184 (((c)==UB_DIR_READ)? 'r': 'n'))
186 enum ub_scsi_cmd_state {
187 UB_CMDST_INIT, /* Initial state */
188 UB_CMDST_CMD, /* Command submitted */
189 UB_CMDST_DATA, /* Data phase */
190 UB_CMDST_CLR2STS, /* Clearing before requesting status */
191 UB_CMDST_STAT, /* Status phase */
192 UB_CMDST_CLEAR, /* Clearing a stall (halt, actually) */
193 UB_CMDST_CLRRS, /* Clearing before retrying status */
194 UB_CMDST_SENSE, /* Sending Request Sense */
195 UB_CMDST_DONE /* Final state */
198 static char *ub_scsi_cmd_stname[] = {
211 unsigned char cdb[UB_MAX_CDB_SIZE];
212 unsigned char cdb_len;
214 unsigned char dir; /* 0 - none, 1 - read, 3 - write. */
215 unsigned char trace_index;
216 enum ub_scsi_cmd_state state;
218 struct ub_scsi_cmd *next;
220 int error; /* Return code - valid upon done */
221 unsigned int act_len; /* Return size */
222 unsigned char key, asc, ascq; /* May be valid if error==-EIO */
224 int stat_count; /* Retries getting status. */
226 unsigned int len; /* Requested length */
227 unsigned int current_sg;
228 unsigned int nsg; /* sgv[nsg] */
229 struct scatterlist sgv[UB_MAX_REQ_SG];
232 void (*done)(struct ub_dev *, struct ub_scsi_cmd *);
238 unsigned int current_try;
239 unsigned int nsg; /* sgv[nsg] */
240 struct scatterlist sgv[UB_MAX_REQ_SG];
246 unsigned long nsec; /* Linux size - 512 byte sectors */
247 unsigned int bsize; /* Linux hardsect_size */
248 unsigned int bshift; /* Shift between 512 and hard sects */
252 * The SCSI command tracing structure.
255 #define SCMD_ST_HIST_SZ 8
256 #define SCMD_TRACE_SZ 63 /* Less than 4KB of 61-byte lines */
258 struct ub_scsi_cmd_trace {
261 unsigned int req_size, act_size;
264 unsigned char key, asc, ascq;
265 char st_hst[SCMD_ST_HIST_SZ];
268 struct ub_scsi_trace {
270 struct ub_scsi_cmd_trace vec[SCMD_TRACE_SZ];
274 * This is a direct take-off from linux/include/completion.h
275 * The difference is that I do not wait on this thing, just poll.
276 * When I want to wait (ub_probe), I just use the stock completion.
278 * Note that INIT_COMPLETION takes no lock. It is correct. But why
279 * in the bloody hell that thing takes a struct instead of a pointer to a struct
280 * is quite beyond me. I just copied it from the stock completion.
282 struct ub_completion {
287 static inline void ub_init_completion(struct ub_completion *x)
290 spin_lock_init(&x->lock);
293 #define UB_INIT_COMPLETION(x) ((x).done = 0)
295 static void ub_complete(struct ub_completion *x)
299 spin_lock_irqsave(&x->lock, flags);
301 spin_unlock_irqrestore(&x->lock, flags);
304 static int ub_is_completed(struct ub_completion *x)
309 spin_lock_irqsave(&x->lock, flags);
311 spin_unlock_irqrestore(&x->lock, flags);
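/*
 * Typical use in this driver (a summary of the call sites below): the work URB
 * path does UB_INIT_COMPLETION(sc->work_done) just before usb_submit_urb(),
 * ub_complete(&sc->work_done) from ub_urb_complete(), and polls
 * ub_is_completed(&sc->work_done) in ub_urb_timeout() and ub_scsi_dispatch()
 * to tell whether the URB is still in flight.
 */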
317 struct ub_scsi_cmd_queue {
319 struct ub_scsi_cmd *head, *tail;
323 * The block device instance (one per LUN).
327 struct list_head link;
328 struct gendisk *disk;
329 int id; /* Host index */
330 int num; /* LUN number */
333 int changed; /* Media was changed */
337 struct ub_request urq;
339 /* Use Ingo's mempool if or when we have more than one command. */
341 * Currently we never need more than one command for the whole device.
342 * However, giving every LUN a command is a cheap and automatic way
343 * to enforce fairness between them.
346 struct ub_scsi_cmd cmdv[1];
348 struct ub_capacity capacity;
352 * The USB device instance.
356 atomic_t poison; /* The USB device is disconnected */
357 int openc; /* protected by ub_lock! */
358 /* kref is too implicit for our taste */
359 int reset; /* Reset is running */
362 struct usb_device *dev;
363 struct usb_interface *intf;
365 struct list_head luns;
367 unsigned int send_bulk_pipe; /* cached pipe values */
368 unsigned int recv_bulk_pipe;
369 unsigned int send_ctrl_pipe;
370 unsigned int recv_ctrl_pipe;
372 struct tasklet_struct tasklet;
374 struct ub_scsi_cmd_queue cmd_queue;
375 struct ub_scsi_cmd top_rqs_cmd; /* REQUEST SENSE */
376 unsigned char top_sense[UB_SENSE_SIZE];
378 struct ub_completion work_done;
380 struct timer_list work_timer;
381 int last_pipe; /* What might need clearing */
382 __le32 signature; /* Learned signature */
383 struct bulk_cb_wrap work_bcb;
384 struct bulk_cs_wrap work_bcs;
385 struct usb_ctrlrequest work_cr;
387 struct work_struct reset_work;
388 wait_queue_head_t reset_wait;
391 struct ub_scsi_trace tr;
396 static void ub_cleanup(struct ub_dev *sc);
397 static int ub_request_fn_1(struct ub_lun *lun, struct request *rq);
398 static void ub_cmd_build_block(struct ub_dev *sc, struct ub_lun *lun,
399 struct ub_scsi_cmd *cmd, struct ub_request *urq);
400 static void ub_cmd_build_packet(struct ub_dev *sc, struct ub_lun *lun,
401 struct ub_scsi_cmd *cmd, struct ub_request *urq);
402 static void ub_rw_cmd_done(struct ub_dev *sc, struct ub_scsi_cmd *cmd);
403 static void ub_end_rq(struct request *rq, int uptodate);
404 static int ub_rw_cmd_retry(struct ub_dev *sc, struct ub_lun *lun,
405 struct ub_request *urq, struct ub_scsi_cmd *cmd);
406 static int ub_submit_scsi(struct ub_dev *sc, struct ub_scsi_cmd *cmd);
407 static void ub_urb_complete(struct urb *urb, struct pt_regs *pt);
408 static void ub_scsi_action(unsigned long _dev);
409 static void ub_scsi_dispatch(struct ub_dev *sc);
410 static void ub_scsi_urb_compl(struct ub_dev *sc, struct ub_scsi_cmd *cmd);
411 static void ub_data_start(struct ub_dev *sc, struct ub_scsi_cmd *cmd);
412 static void ub_state_done(struct ub_dev *sc, struct ub_scsi_cmd *cmd, int rc);
413 static int __ub_state_stat(struct ub_dev *sc, struct ub_scsi_cmd *cmd);
414 static void ub_state_stat(struct ub_dev *sc, struct ub_scsi_cmd *cmd);
415 static void ub_state_stat_counted(struct ub_dev *sc, struct ub_scsi_cmd *cmd);
416 static void ub_state_sense(struct ub_dev *sc, struct ub_scsi_cmd *cmd);
417 static int ub_submit_clear_stall(struct ub_dev *sc, struct ub_scsi_cmd *cmd,
419 static void ub_top_sense_done(struct ub_dev *sc, struct ub_scsi_cmd *scmd);
420 static void ub_reset_enter(struct ub_dev *sc, int try);
421 static void ub_reset_task(void *arg);
422 static int ub_sync_tur(struct ub_dev *sc, struct ub_lun *lun);
423 static int ub_sync_read_cap(struct ub_dev *sc, struct ub_lun *lun,
424 struct ub_capacity *ret);
425 static int ub_sync_reset(struct ub_dev *sc);
426 static int ub_probe_clear_stall(struct ub_dev *sc, int stalled_pipe);
427 static int ub_probe_lun(struct ub_dev *sc, int lnum);
431 #ifdef CONFIG_USB_LIBUSUAL
433 #define ub_usb_ids storage_usb_ids
436 static struct usb_device_id ub_usb_ids[] = {
437 { USB_INTERFACE_INFO(USB_CLASS_MASS_STORAGE, US_SC_SCSI, US_PR_BULK) },
441 MODULE_DEVICE_TABLE(usb, ub_usb_ids);
442 #endif /* CONFIG_USB_LIBUSUAL */
445 * Find me a way to identify "next free minor" for add_disk(),
446 * and the array disappears the next day. However, the number of
447 * hosts has something to do with the naming and /proc/partitions.
448 * This has to be thought out in detail before changing.
449 * If UB_MAX_HOST was 1000, we'd use a bitmap. Or a better data structure.
451 #define UB_MAX_HOSTS 26
452 static char ub_hostv[UB_MAX_HOSTS];
454 #define UB_QLOCK_NUM 5
455 static spinlock_t ub_qlockv[UB_QLOCK_NUM];
456 static int ub_qlock_next = 0;
458 static DEFINE_SPINLOCK(ub_lock); /* Locks globals and ->openc */
461 * The SCSI command tracing procedures.
464 static void ub_cmdtr_new(struct ub_dev *sc, struct ub_scsi_cmd *cmd)
467 struct ub_scsi_cmd_trace *t;
469 if ((n = sc->tr.cur + 1) == SCMD_TRACE_SZ) n = 0;
472 memset(t, 0, sizeof(struct ub_scsi_cmd_trace));
476 t->req_size = cmd->len;
477 t->st_hst[0] = cmd->state;
480 cmd->trace_index = n;
483 static void ub_cmdtr_state(struct ub_dev *sc, struct ub_scsi_cmd *cmd)
486 struct ub_scsi_cmd_trace *t;
488 t = &sc->tr.vec[cmd->trace_index];
489 if (t->tag == cmd->tag) {
490 if ((n = t->hcur + 1) == SCMD_ST_HIST_SZ) n = 0;
491 t->st_hst[n] = cmd->state;
496 static void ub_cmdtr_act_len(struct ub_dev *sc, struct ub_scsi_cmd *cmd)
498 struct ub_scsi_cmd_trace *t;
500 t = &sc->tr.vec[cmd->trace_index];
501 if (t->tag == cmd->tag)
502 t->act_size = cmd->act_len;
505 static void ub_cmdtr_sense(struct ub_dev *sc, struct ub_scsi_cmd *cmd,
506 unsigned char *sense)
508 struct ub_scsi_cmd_trace *t;
510 t = &sc->tr.vec[cmd->trace_index];
511 if (t->tag == cmd->tag) {
512 t->key = sense[2] & 0x0F;
518 static ssize_t ub_diag_show(struct device *dev, struct device_attribute *attr,
521 struct usb_interface *intf;
529 struct ub_scsi_cmd_trace *t;
531 intf = to_usb_interface(dev);
532 sc = usb_get_intfdata(intf);
537 spin_lock_irqsave(sc->lock, flags);
539 cnt += sprintf(page + cnt,
540 "poison %d reset %d\n",
541 atomic_read(&sc->poison), sc->reset);
542 cnt += sprintf(page + cnt,
544 sc->cmd_queue.qlen, sc->cmd_queue.qmax);
545 cnt += sprintf(page + cnt,
546 "sg %d %d %d %d %d .. %d\n",
554 list_for_each (p, &sc->luns) {
555 lun = list_entry(p, struct ub_lun, link);
556 cnt += sprintf(page + cnt,
557 "lun %u changed %d removable %d readonly %d\n",
558 lun->num, lun->changed, lun->removable, lun->readonly);
561 if ((nc = sc->tr.cur + 1) == SCMD_TRACE_SZ) nc = 0;
562 for (j = 0; j < SCMD_TRACE_SZ; j++) {
565 cnt += sprintf(page + cnt, "%08x %02x", t->tag, t->op);
566 if (t->op == REQUEST_SENSE) {
567 cnt += sprintf(page + cnt, " [sense %x %02x %02x]",
568 t->key, t->asc, t->ascq);
570 cnt += sprintf(page + cnt, " %c", UB_DIR_CHAR(t->dir));
571 cnt += sprintf(page + cnt, " [%5d %5d]",
572 t->req_size, t->act_size);
574 if ((nh = t->hcur + 1) == SCMD_ST_HIST_SZ) nh = 0;
575 for (i = 0; i < SCMD_ST_HIST_SZ; i++) {
576 cnt += sprintf(page + cnt, " %s",
577 ub_scsi_cmd_stname[(int)t->st_hst[nh]]);
578 if (++nh == SCMD_ST_HIST_SZ) nh = 0;
580 cnt += sprintf(page + cnt, "\n");
582 if (++nc == SCMD_TRACE_SZ) nc = 0;
585 spin_unlock_irqrestore(sc->lock, flags);
589 static DEVICE_ATTR(diag, S_IRUGO, ub_diag_show, NULL); /* N.B. World readable */
594 * This also stores the host for indexing by minor, which is somewhat dirty.
596 static int ub_id_get(void)
601 spin_lock_irqsave(&ub_lock, flags);
602 for (i = 0; i < UB_MAX_HOSTS; i++) {
603 if (ub_hostv[i] == 0) {
605 spin_unlock_irqrestore(&ub_lock, flags);
609 spin_unlock_irqrestore(&ub_lock, flags);
613 static void ub_id_put(int id)
617 if (id < 0 || id >= UB_MAX_HOSTS) {
618 printk(KERN_ERR DRV_NAME ": bad host ID %d\n", id);
622 spin_lock_irqsave(&ub_lock, flags);
623 if (ub_hostv[id] == 0) {
624 spin_unlock_irqrestore(&ub_lock, flags);
625 printk(KERN_ERR DRV_NAME ": freeing free host ID %d\n", id);
629 spin_unlock_irqrestore(&ub_lock, flags);
633 * This is necessitated by the fact that blk_cleanup_queue does not
634 * necessarily destroy the queue. Instead, it may merely decrease q->refcnt.
635 * Since our blk_init_queue() passes a spinlock common with ub_dev,
636 * we have lifetime issues when ub_cleanup frees ub_dev.
638 static spinlock_t *ub_next_lock(void)
643 spin_lock_irqsave(&ub_lock, flags);
644 ret = &ub_qlockv[ub_qlock_next];
645 ub_qlock_next = (ub_qlock_next + 1) % UB_QLOCK_NUM;
646 spin_unlock_irqrestore(&ub_lock, flags);
651 * Downcount for deallocation. This rides on two assumptions:
652 * - once something is poisoned, its refcount cannot grow
653 * - opens cannot happen at this time (del_gendisk was done)
654 * If the above is true, we can drop the lock, which we need for
655 * blk_cleanup_queue(): the silly thing may attempt to sleep.
656 * [Actually, it never needs to sleep for us, but it calls might_sleep()]
658 static void ub_put(struct ub_dev *sc)
662 spin_lock_irqsave(&ub_lock, flags);
664 if (sc->openc == 0 && atomic_read(&sc->poison)) {
665 spin_unlock_irqrestore(&ub_lock, flags);
668 spin_unlock_irqrestore(&ub_lock, flags);
673 * Final cleanup and deallocation.
675 static void ub_cleanup(struct ub_dev *sc)
681 while (!list_empty(&sc->luns)) {
683 lun = list_entry(p, struct ub_lun, link);
686 /* I don't think queue can be NULL. But... Stolen from sx8.c */
687 if ((q = lun->disk->queue) != NULL)
688 blk_cleanup_queue(q);
690 * If we zero disk->private_data BEFORE put_disk, we have
691 * to check for NULL all over the place in open, release,
692 * check_media and revalidate, because the block level
693 * semaphore is well inside the put_disk.
694 * But we cannot zero after the call, because *disk is gone.
695 * The sd.c is blatantly racy in this area.
697 /* disk->private_data = NULL; */
709 * The "command allocator".
711 static struct ub_scsi_cmd *ub_get_cmd(struct ub_lun *lun)
713 struct ub_scsi_cmd *ret;
722 static void ub_put_cmd(struct ub_lun *lun, struct ub_scsi_cmd *cmd)
724 if (cmd != &lun->cmdv[0]) {
725 printk(KERN_WARNING "%s: releasing a foreign cmd %p\n",
730 printk(KERN_WARNING "%s: releasing a free cmd\n", lun->name);
739 static void ub_cmdq_add(struct ub_dev *sc, struct ub_scsi_cmd *cmd)
741 struct ub_scsi_cmd_queue *t = &sc->cmd_queue;
743 if (t->qlen++ == 0) {
751 if (t->qlen > t->qmax)
755 static void ub_cmdq_insert(struct ub_dev *sc, struct ub_scsi_cmd *cmd)
757 struct ub_scsi_cmd_queue *t = &sc->cmd_queue;
759 if (t->qlen++ == 0) {
767 if (t->qlen > t->qmax)
771 static struct ub_scsi_cmd *ub_cmdq_pop(struct ub_dev *sc)
773 struct ub_scsi_cmd_queue *t = &sc->cmd_queue;
774 struct ub_scsi_cmd *cmd;
786 #define ub_cmdq_peek(sc) ((sc)->cmd_queue.head)
789 * The request function is our main entry point
792 static void ub_request_fn(request_queue_t *q)
794 struct ub_lun *lun = q->queuedata;
797 while ((rq = elv_next_request(q)) != NULL) {
798 if (ub_request_fn_1(lun, rq) != 0) {
805 static int ub_request_fn_1(struct ub_lun *lun, struct request *rq)
807 struct ub_dev *sc = lun->udev;
808 struct ub_scsi_cmd *cmd;
809 struct ub_request *urq;
812 if (atomic_read(&sc->poison) || lun->changed) {
813 blkdev_dequeue_request(rq);
818 if (lun->urq.rq != NULL)
820 if ((cmd = ub_get_cmd(lun)) == NULL)
822 memset(cmd, 0, sizeof(struct ub_scsi_cmd));
824 blkdev_dequeue_request(rq);
827 memset(urq, 0, sizeof(struct ub_request));
831 * get scatterlist from block layer
833 n_elem = blk_rq_map_sg(lun->disk->queue, rq, &urq->sgv[0]);
835 printk(KERN_INFO "%s: failed request map (%d)\n",
836 lun->name, n_elem); /* P3 */
839 if (n_elem > UB_MAX_REQ_SG) { /* Paranoia */
840 printk(KERN_WARNING "%s: request with %d segments\n",
845 sc->sg_stat[n_elem < 5 ? n_elem : 5]++;
847 if (blk_pc_request(rq)) {
848 ub_cmd_build_packet(sc, lun, cmd, urq);
850 ub_cmd_build_block(sc, lun, cmd, urq);
852 cmd->state = UB_CMDST_INIT;
854 cmd->done = ub_rw_cmd_done;
857 cmd->tag = sc->tagcnt++;
858 if (ub_submit_scsi(sc, cmd) != 0)
864 ub_put_cmd(lun, cmd);
869 static void ub_cmd_build_block(struct ub_dev *sc, struct ub_lun *lun,
870 struct ub_scsi_cmd *cmd, struct ub_request *urq)
872 struct request *rq = urq->rq;
873 unsigned int block, nblks;
875 if (rq_data_dir(rq) == WRITE)
876 cmd->dir = UB_DIR_WRITE;
878 cmd->dir = UB_DIR_READ;
881 memcpy(cmd->sgv, urq->sgv, sizeof(struct scatterlist) * cmd->nsg);
886 * The call to blk_queue_hardsect_size() guarantees that the request
887 * is aligned, but it is always given in 512-byte units.
889 block = rq->sector >> lun->capacity.bshift;
890 nblks = rq->nr_sectors >> lun->capacity.bshift;
892 cmd->cdb[0] = (cmd->dir == UB_DIR_READ)? READ_10: WRITE_10;
893 /* A 10-byte CDB uses 4 bytes of LBA: 2147483648KB, 2097152MB, 2048GB */
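	/* Byte layout: 0 - opcode, 2..5 - big-endian LBA, 7..8 - big-endian block count. */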
894 cmd->cdb[2] = block >> 24;
895 cmd->cdb[3] = block >> 16;
896 cmd->cdb[4] = block >> 8;
898 cmd->cdb[7] = nblks >> 8;
902 cmd->len = rq->nr_sectors * 512;
905 static void ub_cmd_build_packet(struct ub_dev *sc, struct ub_lun *lun,
906 struct ub_scsi_cmd *cmd, struct ub_request *urq)
908 struct request *rq = urq->rq;
910 if (rq->data_len == 0) {
911 cmd->dir = UB_DIR_NONE;
913 if (rq_data_dir(rq) == WRITE)
914 cmd->dir = UB_DIR_WRITE;
916 cmd->dir = UB_DIR_READ;
920 memcpy(cmd->sgv, urq->sgv, sizeof(struct scatterlist) * cmd->nsg);
922 memcpy(&cmd->cdb, rq->cmd, rq->cmd_len);
923 cmd->cdb_len = rq->cmd_len;
925 cmd->len = rq->data_len;
928 static void ub_rw_cmd_done(struct ub_dev *sc, struct ub_scsi_cmd *cmd)
930 struct ub_lun *lun = cmd->lun;
931 struct ub_request *urq = cmd->back;
937 if (cmd->error == 0) {
940 if (blk_pc_request(rq)) {
941 if (cmd->act_len >= rq->data_len)
944 rq->data_len -= cmd->act_len;
949 if (blk_pc_request(rq)) {
950 /* UB_SENSE_SIZE is smaller than SCSI_SENSE_BUFFERSIZE */
951 memcpy(rq->sense, sc->top_sense, UB_SENSE_SIZE);
952 rq->sense_len = UB_SENSE_SIZE;
953 if (sc->top_sense[0] != 0)
954 rq->errors = SAM_STAT_CHECK_CONDITION;
956 rq->errors = DID_ERROR << 16;
958 if (cmd->error == -EIO) {
959 if (ub_rw_cmd_retry(sc, lun, urq, cmd) == 0)
967 ub_put_cmd(lun, cmd);
968 ub_end_rq(rq, uptodate);
969 blk_start_queue(lun->disk->queue);
972 static void ub_end_rq(struct request *rq, int uptodate)
974 end_that_request_first(rq, uptodate, rq->hard_nr_sectors);
975 end_that_request_last(rq, uptodate);
978 static int ub_rw_cmd_retry(struct ub_dev *sc, struct ub_lun *lun,
979 struct ub_request *urq, struct ub_scsi_cmd *cmd)
982 if (atomic_read(&sc->poison))
985 ub_reset_enter(sc, urq->current_try);
987 if (urq->current_try >= 3)
990 /* P3 */ printk("%s: dir %c len/act %d/%d "
991 "[sense %x %02x %02x] retry %d\n",
992 sc->name, UB_DIR_CHAR(cmd->dir), cmd->len, cmd->act_len,
993 cmd->key, cmd->asc, cmd->ascq, urq->current_try);
995 memset(cmd, 0, sizeof(struct ub_scsi_cmd));
996 ub_cmd_build_block(sc, lun, cmd, urq);
998 cmd->state = UB_CMDST_INIT;
1000 cmd->done = ub_rw_cmd_done;
1003 cmd->tag = sc->tagcnt++;
1005 #if 0 /* Wasteful */
1006 return ub_submit_scsi(sc, cmd);
1008 ub_cmdq_add(sc, cmd);
1014 * Submit a regular SCSI operation (not an auto-sense).
1016 * The Iron Law of Good Submit Routine is:
1017 * Zero return - callback is done, Nonzero return - callback is not done.
1020 * Host is assumed locked.
1022 static int ub_submit_scsi(struct ub_dev *sc, struct ub_scsi_cmd *cmd)
1025 if (cmd->state != UB_CMDST_INIT ||
1026 (cmd->dir != UB_DIR_NONE && cmd->len == 0)) {
1030 ub_cmdq_add(sc, cmd);
1032 * We can call ub_scsi_dispatch(sc) right away here, but it's a little
1033 * safer to jump to a tasklet, in case upper layers do something silly.
1035 tasklet_schedule(&sc->tasklet);
1040 * Submit the first URB for the queued command.
1041 * This function does not deal with queueing in any way.
1043 static int ub_scsi_cmd_start(struct ub_dev *sc, struct ub_scsi_cmd *cmd)
1045 struct bulk_cb_wrap *bcb;
1048 bcb = &sc->work_bcb;
1051 * ``If the allocation length is eighteen or greater, and a device
1052 * server returns less than eighteen bytes of data, the application
1053 * client should assume that the bytes not transferred would have been
1054 * zeroes had the device server returned those bytes.''
1056 * We zero sense for all commands so that when a packet request
1057 * fails it does not return a stale sense.
1059 memset(&sc->top_sense, 0, UB_SENSE_SIZE);
1061 /* set up the command wrapper */
1062 bcb->Signature = cpu_to_le32(US_BULK_CB_SIGN);
1063 bcb->Tag = cmd->tag; /* Endianness is not important */
1064 bcb->DataTransferLength = cpu_to_le32(cmd->len);
1065 bcb->Flags = (cmd->dir == UB_DIR_READ) ? 0x80 : 0;
1066 bcb->Lun = (cmd->lun != NULL) ? cmd->lun->num : 0;
1067 bcb->Length = cmd->cdb_len;
1069 /* copy the command payload */
1070 memcpy(bcb->CDB, cmd->cdb, UB_MAX_CDB_SIZE);
1072 UB_INIT_COMPLETION(sc->work_done);
1074 sc->last_pipe = sc->send_bulk_pipe;
1075 usb_fill_bulk_urb(&sc->work_urb, sc->dev, sc->send_bulk_pipe,
1076 bcb, US_BULK_CB_WRAP_LEN, ub_urb_complete, sc);
1078 /* Fill what we shouldn't be filling, because usb-storage did so. */
1079 sc->work_urb.actual_length = 0;
1080 sc->work_urb.error_count = 0;
1081 sc->work_urb.status = 0;
1083 if ((rc = usb_submit_urb(&sc->work_urb, GFP_ATOMIC)) != 0) {
1084 /* XXX Clear stalls */
1085 ub_complete(&sc->work_done);
1089 sc->work_timer.expires = jiffies + UB_URB_TIMEOUT;
1090 add_timer(&sc->work_timer);
1092 cmd->state = UB_CMDST_CMD;
1093 ub_cmdtr_state(sc, cmd);
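/*
 * Timeout handler for the work URB: if the URB has not completed within its
 * timeout, unlink it; the completion then arrives through ub_urb_complete()
 * with an error status.
 */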
1100 static void ub_urb_timeout(unsigned long arg)
1102 struct ub_dev *sc = (struct ub_dev *) arg;
1103 unsigned long flags;
1105 spin_lock_irqsave(sc->lock, flags);
1106 if (!ub_is_completed(&sc->work_done))
1107 usb_unlink_urb(&sc->work_urb);
1108 spin_unlock_irqrestore(sc->lock, flags);
1112 * Completion routine for the work URB.
1114 * This can be called directly from usb_submit_urb (while we have
1115 * the sc->lock taken) and from an interrupt (while we do NOT have
1116 * the sc->lock taken). Therefore, bounce this off to a tasklet.
1118 static void ub_urb_complete(struct urb *urb, struct pt_regs *pt)
1120 struct ub_dev *sc = urb->context;
1122 ub_complete(&sc->work_done);
1123 tasklet_schedule(&sc->tasklet);
1126 static void ub_scsi_action(unsigned long _dev)
1128 struct ub_dev *sc = (struct ub_dev *) _dev;
1129 unsigned long flags;
1131 spin_lock_irqsave(sc->lock, flags);
1132 ub_scsi_dispatch(sc);
1133 spin_unlock_irqrestore(sc->lock, flags);
1136 static void ub_scsi_dispatch(struct ub_dev *sc)
1138 struct ub_scsi_cmd *cmd;
1141 while (!sc->reset && (cmd = ub_cmdq_peek(sc)) != NULL) {
1142 if (cmd->state == UB_CMDST_DONE) {
1144 (*cmd->done)(sc, cmd);
1145 } else if (cmd->state == UB_CMDST_INIT) {
1146 ub_cmdtr_new(sc, cmd);
1147 if ((rc = ub_scsi_cmd_start(sc, cmd)) == 0)
1150 cmd->state = UB_CMDST_DONE;
1151 ub_cmdtr_state(sc, cmd);
1153 if (!ub_is_completed(&sc->work_done))
1155 del_timer(&sc->work_timer);
1156 ub_scsi_urb_compl(sc, cmd);
1161 static void ub_scsi_urb_compl(struct ub_dev *sc, struct ub_scsi_cmd *cmd)
1163 struct urb *urb = &sc->work_urb;
1164 struct bulk_cs_wrap *bcs;
1168 if (atomic_read(&sc->poison)) {
1169 ub_state_done(sc, cmd, -ENODEV);
1173 if (cmd->state == UB_CMDST_CLEAR) {
1174 if (urb->status == -EPIPE) {
1176 * STALL while clearing STALL.
1177 * The control pipe clears itself - nothing to do.
1179 printk(KERN_NOTICE "%s: stall on control pipe\n",
1185 * We ignore the result for the halt clear.
1188 /* reset the endpoint toggle */
1189 usb_settoggle(sc->dev, usb_pipeendpoint(sc->last_pipe),
1190 usb_pipeout(sc->last_pipe), 0);
1192 ub_state_sense(sc, cmd);
1194 } else if (cmd->state == UB_CMDST_CLR2STS) {
1195 if (urb->status == -EPIPE) {
1196 printk(KERN_NOTICE "%s: stall on control pipe\n",
1202 * We ignore the result for the halt clear.
1205 /* reset the endpoint toggle */
1206 usb_settoggle(sc->dev, usb_pipeendpoint(sc->last_pipe),
1207 usb_pipeout(sc->last_pipe), 0);
1209 ub_state_stat(sc, cmd);
1211 } else if (cmd->state == UB_CMDST_CLRRS) {
1212 if (urb->status == -EPIPE) {
1213 printk(KERN_NOTICE "%s: stall on control pipe\n",
1219 * We ignore the result for the halt clear.
1222 /* reset the endpoint toggle */
1223 usb_settoggle(sc->dev, usb_pipeendpoint(sc->last_pipe),
1224 usb_pipeout(sc->last_pipe), 0);
1226 ub_state_stat_counted(sc, cmd);
1228 } else if (cmd->state == UB_CMDST_CMD) {
1229 switch (urb->status) {
1235 rc = ub_submit_clear_stall(sc, cmd, sc->last_pipe);
1237 printk(KERN_NOTICE "%s: "
1238 "unable to submit clear (%d)\n",
1241 * This is typically ENOMEM or some other such shit.
1242 * Retrying is pointless. Just do Bad End on it...
1244 ub_state_done(sc, cmd, rc);
1247 cmd->state = UB_CMDST_CLEAR;
1248 ub_cmdtr_state(sc, cmd);
1250 case -ESHUTDOWN: /* unplug */
1251 case -EILSEQ: /* unplug timeout on uhci */
1252 ub_state_done(sc, cmd, -ENODEV);
1257 if (urb->actual_length != US_BULK_CB_WRAP_LEN) {
1261 if (cmd->dir == UB_DIR_NONE || cmd->nsg < 1) {
1262 ub_state_stat(sc, cmd);
1266 // udelay(125); // usb-storage has this
1267 ub_data_start(sc, cmd);
1269 } else if (cmd->state == UB_CMDST_DATA) {
1270 if (urb->status == -EPIPE) {
1271 rc = ub_submit_clear_stall(sc, cmd, sc->last_pipe);
1273 printk(KERN_NOTICE "%s: "
1274 "unable to submit clear (%d)\n",
1276 ub_state_done(sc, cmd, rc);
1279 cmd->state = UB_CMDST_CLR2STS;
1280 ub_cmdtr_state(sc, cmd);
1283 if (urb->status == -EOVERFLOW) {
1285 * A babble? Failure, but we must transfer CSW now.
1287 cmd->error = -EOVERFLOW; /* A cheap trick... */
1288 ub_state_stat(sc, cmd);
1292 if (cmd->dir == UB_DIR_WRITE) {
1294 * Do not continue writes in case of a failure.
1295 * Doing so would cause sectors to be mixed up,
1296 * which is worse than sectors lost.
1298 * We must try to read the CSW, or many devices
1301 len = urb->actual_length;
1302 if (urb->status != 0 ||
1303 len != cmd->sgv[cmd->current_sg].length) {
1304 cmd->act_len += len;
1305 ub_cmdtr_act_len(sc, cmd);
1308 ub_state_stat(sc, cmd);
1314 * If an error occurs on read, we record it, and
1315 * continue to fetch data in order to avoid a bubble.
1317 * As a small shortcut, we stop if we detect that
1318 * a CSW is mixed into the data.
1320 if (urb->status != 0)
1323 len = urb->actual_length;
1324 if (urb->status != 0 ||
1325 len != cmd->sgv[cmd->current_sg].length) {
1326 if ((len & 0x1FF) == US_BULK_CS_WRAP_LEN)
1331 cmd->act_len += urb->actual_length;
1332 ub_cmdtr_act_len(sc, cmd);
1334 if (++cmd->current_sg < cmd->nsg) {
1335 ub_data_start(sc, cmd);
1338 ub_state_stat(sc, cmd);
1340 } else if (cmd->state == UB_CMDST_STAT) {
1341 if (urb->status == -EPIPE) {
1342 rc = ub_submit_clear_stall(sc, cmd, sc->last_pipe);
1344 printk(KERN_NOTICE "%s: "
1345 "unable to submit clear (%d)\n",
1347 ub_state_done(sc, cmd, rc);
1352 * Having a stall when getting CSW is an error, so
1353 * make sure upper levels are not oblivious to it.
1355 cmd->error = -EIO; /* A cheap trick... */
1357 cmd->state = UB_CMDST_CLRRS;
1358 ub_cmdtr_state(sc, cmd);
1362 /* Catch everything, including -EOVERFLOW and other nasties. */
1363 if (urb->status != 0)
1366 if (urb->actual_length == 0) {
1367 ub_state_stat_counted(sc, cmd);
1372 * Check the returned Bulk protocol status.
1373 * The status block has to be validated first.
1376 bcs = &sc->work_bcs;
1378 if (sc->signature == cpu_to_le32(0)) {
1380 * This is the first reply, so do not perform the check.
1381 * Instead, remember the signature the device uses
1382 * for future checks. But do not allow a nul.
1384 sc->signature = bcs->Signature;
1385 if (sc->signature == cpu_to_le32(0)) {
1386 ub_state_stat_counted(sc, cmd);
1390 if (bcs->Signature != sc->signature) {
1391 ub_state_stat_counted(sc, cmd);
1396 if (bcs->Tag != cmd->tag) {
1398 * This usually happens when we disagree with the
1399 * device's microcode about something. For instance,
1400 * a few of them throw this after timeouts. They buffer
1401 * commands and reply to commands we timed out before.
1402 * Without flushing these replies we loop forever.
1404 ub_state_stat_counted(sc, cmd);
1408 len = le32_to_cpu(bcs->Residue);
1409 if (len != cmd->len - cmd->act_len) {
1411 * It is all right to transfer less, the caller has
1412 * to check. But it's not all right if the device
1413 * counts disagree with our counts.
1415 /* P3 */ printk("%s: resid %d len %d act %d\n",
1416 sc->name, len, cmd->len, cmd->act_len);
1420 switch (bcs->Status) {
1421 case US_BULK_STAT_OK:
1423 case US_BULK_STAT_FAIL:
1424 ub_state_sense(sc, cmd);
1426 case US_BULK_STAT_PHASE:
1427 /* P3 */ printk("%s: status PHASE\n", sc->name);
1430 printk(KERN_INFO "%s: unknown CSW status 0x%x\n",
1431 sc->name, bcs->Status);
1432 ub_state_done(sc, cmd, -EINVAL);
1436 /* Not zeroing error to preserve a babble indicator */
1437 if (cmd->error != 0) {
1438 ub_state_sense(sc, cmd);
1441 cmd->state = UB_CMDST_DONE;
1442 ub_cmdtr_state(sc, cmd);
1444 (*cmd->done)(sc, cmd);
1446 } else if (cmd->state == UB_CMDST_SENSE) {
1447 ub_state_done(sc, cmd, -EIO);
1450 printk(KERN_WARNING "%s: "
1451 "wrong command state %d\n",
1452 sc->name, cmd->state);
1453 ub_state_done(sc, cmd, -EINVAL);
1458 Bad_End: /* Little Excel is dead */
1459 ub_state_done(sc, cmd, -EIO);
1463 * Factorization helper for the command state machine:
1464 * Initiate a data segment transfer.
1466 static void ub_data_start(struct ub_dev *sc, struct ub_scsi_cmd *cmd)
1468 struct scatterlist *sg = &cmd->sgv[cmd->current_sg];
1472 UB_INIT_COMPLETION(sc->work_done);
1474 if (cmd->dir == UB_DIR_READ)
1475 pipe = sc->recv_bulk_pipe;
1477 pipe = sc->send_bulk_pipe;
1478 sc->last_pipe = pipe;
1479 usb_fill_bulk_urb(&sc->work_urb, sc->dev, pipe,
1480 page_address(sg->page) + sg->offset, sg->length,
1481 ub_urb_complete, sc);
1482 sc->work_urb.actual_length = 0;
1483 sc->work_urb.error_count = 0;
1484 sc->work_urb.status = 0;
1486 if ((rc = usb_submit_urb(&sc->work_urb, GFP_ATOMIC)) != 0) {
1487 /* XXX Clear stalls */
1488 ub_complete(&sc->work_done);
1489 ub_state_done(sc, cmd, rc);
1493 sc->work_timer.expires = jiffies + UB_DATA_TIMEOUT;
1494 add_timer(&sc->work_timer);
1496 cmd->state = UB_CMDST_DATA;
1497 ub_cmdtr_state(sc, cmd);
1501 * Factorization helper for the command state machine:
1502 * Finish the command.
1504 static void ub_state_done(struct ub_dev *sc, struct ub_scsi_cmd *cmd, int rc)
1508 cmd->state = UB_CMDST_DONE;
1509 ub_cmdtr_state(sc, cmd);
1511 (*cmd->done)(sc, cmd);
1515 * Factorization helper for the command state machine:
1516 * Submit a CSW read.
1518 static int __ub_state_stat(struct ub_dev *sc, struct ub_scsi_cmd *cmd)
1522 UB_INIT_COMPLETION(sc->work_done);
1524 sc->last_pipe = sc->recv_bulk_pipe;
1525 usb_fill_bulk_urb(&sc->work_urb, sc->dev, sc->recv_bulk_pipe,
1526 &sc->work_bcs, US_BULK_CS_WRAP_LEN, ub_urb_complete, sc);
1527 sc->work_urb.actual_length = 0;
1528 sc->work_urb.error_count = 0;
1529 sc->work_urb.status = 0;
1531 if ((rc = usb_submit_urb(&sc->work_urb, GFP_ATOMIC)) != 0) {
1532 /* XXX Clear stalls */
1533 ub_complete(&sc->work_done);
1534 ub_state_done(sc, cmd, rc);
1538 sc->work_timer.expires = jiffies + UB_STAT_TIMEOUT;
1539 add_timer(&sc->work_timer);
1544 * Factorization helper for the command state machine:
1545 * Submit a CSW read and go to STAT state.
1547 static void ub_state_stat(struct ub_dev *sc, struct ub_scsi_cmd *cmd)
1550 if (__ub_state_stat(sc, cmd) != 0)
1553 cmd->stat_count = 0;
1554 cmd->state = UB_CMDST_STAT;
1555 ub_cmdtr_state(sc, cmd);
1559 * Factorization helper for the command state machine:
1560 * Submit a CSW read and go to STAT state with counter (along [C] path).
1562 static void ub_state_stat_counted(struct ub_dev *sc, struct ub_scsi_cmd *cmd)
1565 if (++cmd->stat_count >= 4) {
1566 ub_state_sense(sc, cmd);
1570 if (__ub_state_stat(sc, cmd) != 0)
1573 cmd->state = UB_CMDST_STAT;
1574 ub_cmdtr_state(sc, cmd);
1578 * Factorization helper for the command state machine:
1579 * Submit a REQUEST SENSE and go to SENSE state.
1581 static void ub_state_sense(struct ub_dev *sc, struct ub_scsi_cmd *cmd)
1583 struct ub_scsi_cmd *scmd;
1584 struct scatterlist *sg;
1587 if (cmd->cdb[0] == REQUEST_SENSE) {
1592 scmd = &sc->top_rqs_cmd;
1593 memset(scmd, 0, sizeof(struct ub_scsi_cmd));
1594 scmd->cdb[0] = REQUEST_SENSE;
1595 scmd->cdb[4] = UB_SENSE_SIZE;
1597 scmd->dir = UB_DIR_READ;
1598 scmd->state = UB_CMDST_INIT;
1601 sg->page = virt_to_page(sc->top_sense);
1602 sg->offset = (unsigned long)sc->top_sense & (PAGE_SIZE-1);
1603 sg->length = UB_SENSE_SIZE;
1604 scmd->len = UB_SENSE_SIZE;
1605 scmd->lun = cmd->lun;
1606 scmd->done = ub_top_sense_done;
1609 scmd->tag = sc->tagcnt++;
1611 cmd->state = UB_CMDST_SENSE;
1612 ub_cmdtr_state(sc, cmd);
1614 ub_cmdq_insert(sc, scmd);
1618 ub_state_done(sc, cmd, rc);
1622 * A helper for the command's state machine:
1623 * Submit a stall clear.
1625 static int ub_submit_clear_stall(struct ub_dev *sc, struct ub_scsi_cmd *cmd,
1629 struct usb_ctrlrequest *cr;
1632 endp = usb_pipeendpoint(stalled_pipe);
1633 if (usb_pipein (stalled_pipe))
1637 cr->bRequestType = USB_RECIP_ENDPOINT;
1638 cr->bRequest = USB_REQ_CLEAR_FEATURE;
1639 cr->wValue = cpu_to_le16(USB_ENDPOINT_HALT);
1640 cr->wIndex = cpu_to_le16(endp);
1641 cr->wLength = cpu_to_le16(0);
1643 UB_INIT_COMPLETION(sc->work_done);
1645 usb_fill_control_urb(&sc->work_urb, sc->dev, sc->send_ctrl_pipe,
1646 (unsigned char*) cr, NULL, 0, ub_urb_complete, sc);
1647 sc->work_urb.actual_length = 0;
1648 sc->work_urb.error_count = 0;
1649 sc->work_urb.status = 0;
1651 if ((rc = usb_submit_urb(&sc->work_urb, GFP_ATOMIC)) != 0) {
1652 ub_complete(&sc->work_done);
1656 sc->work_timer.expires = jiffies + UB_CTRL_TIMEOUT;
1657 add_timer(&sc->work_timer);
1663 static void ub_top_sense_done(struct ub_dev *sc, struct ub_scsi_cmd *scmd)
1665 unsigned char *sense = sc->top_sense;
1666 struct ub_scsi_cmd *cmd;
1669 * Ignoring scmd->act_len, because the buffer was pre-zeroed.
1671 ub_cmdtr_sense(sc, scmd, sense);
1674 * Find the command which triggered the unit attention or a check,
1675 * save the sense into it, and advance its state machine.
1677 if ((cmd = ub_cmdq_peek(sc)) == NULL) {
1678 printk(KERN_WARNING "%s: sense done while idle\n", sc->name);
1681 if (cmd != scmd->back) {
1682 printk(KERN_WARNING "%s: "
1683 "sense done for wrong command 0x%x\n",
1684 sc->name, cmd->tag);
1687 if (cmd->state != UB_CMDST_SENSE) {
1688 printk(KERN_WARNING "%s: "
1689 "sense done with bad cmd state %d\n",
1690 sc->name, cmd->state);
1694 cmd->key = sense[2] & 0x0F;
1695 cmd->asc = sense[12];
1696 cmd->ascq = sense[13];
1698 ub_scsi_urb_compl(sc, cmd);
1703 * XXX Move usb_reset_device to khubd. Hogging kevent is not a good thing.
1704 * XXX Make usb_sync_reset asynchronous.
1707 static void ub_reset_enter(struct ub_dev *sc, int try)
1711 /* This happens often on multi-LUN devices. */
1714 sc->reset = try + 1;
1716 #if 0 /* Not needed because the disconnect waits for us. */
1717 unsigned long flags;
1718 spin_lock_irqsave(&ub_lock, flags);
1720 spin_unlock_irqrestore(&ub_lock, flags);
1723 #if 0 /* We let them stop themselves. */
1724 struct list_head *p;
1726 list_for_each(p, &sc->luns) {
1727 lun = list_entry(p, struct ub_lun, link);
1728 blk_stop_queue(lun->disk->queue);
1732 schedule_work(&sc->reset_work);
1735 static void ub_reset_task(void *arg)
1737 struct ub_dev *sc = arg;
1738 unsigned long flags;
1739 struct list_head *p;
1744 printk(KERN_WARNING "%s: Running reset unrequested\n",
1749 if (atomic_read(&sc->poison)) {
1750 printk(KERN_NOTICE "%s: Not resetting disconnected device\n",
1751 sc->name); /* P3 This floods. Remove soon. XXX */
1752 } else if ((sc->reset & 1) == 0) {
1754 msleep(700); /* usb-storage sleeps 6s (!) */
1755 ub_probe_clear_stall(sc, sc->recv_bulk_pipe);
1756 ub_probe_clear_stall(sc, sc->send_bulk_pipe);
1757 } else if (sc->dev->actconfig->desc.bNumInterfaces != 1) {
1758 printk(KERN_NOTICE "%s: Not resetting multi-interface device\n",
1759 sc->name); /* P3 This floods. Remove soon. XXX */
1761 if ((lkr = usb_lock_device_for_reset(sc->dev, sc->intf)) < 0) {
1763 "%s: usb_lock_device_for_reset failed (%d)\n",
1766 rc = usb_reset_device(sc->dev);
1768 printk(KERN_NOTICE "%s: "
1769 "usb_lock_device_for_reset failed (%d)\n",
1774 usb_unlock_device(sc->dev);
1779 * In theory, no commands can be running while reset is active,
1780 * so nobody can ask for another reset, and so we do not need any
1781 * queues of resets or anything. We do need a spinlock though,
1782 * to interact with the block layer.
1784 spin_lock_irqsave(sc->lock, flags);
1786 tasklet_schedule(&sc->tasklet);
1787 list_for_each(p, &sc->luns) {
1788 lun = list_entry(p, struct ub_lun, link);
1789 blk_start_queue(lun->disk->queue);
1791 wake_up(&sc->reset_wait);
1792 spin_unlock_irqrestore(sc->lock, flags);
1796 * This is called from a process context.
1798 static void ub_revalidate(struct ub_dev *sc, struct ub_lun *lun)
1801 lun->readonly = 0; /* XXX Query this from the device */
1803 lun->capacity.nsec = 0;
1804 lun->capacity.bsize = 512;
1805 lun->capacity.bshift = 0;
1807 if (ub_sync_tur(sc, lun) != 0)
1808 return; /* Not ready */
1811 if (ub_sync_read_cap(sc, lun, &lun->capacity) != 0) {
1813 * The retry here means something is wrong, either with the
1814 * device, with the transport, or with our code.
1815 * We keep this because sd.c has retries for capacity.
1817 if (ub_sync_read_cap(sc, lun, &lun->capacity) != 0) {
1818 lun->capacity.nsec = 0;
1819 lun->capacity.bsize = 512;
1820 lun->capacity.bshift = 0;
1827 * This is mostly needed to keep refcounting, but also to support
1828 * media checks on removable media drives.
1830 static int ub_bd_open(struct inode *inode, struct file *filp)
1832 struct gendisk *disk = inode->i_bdev->bd_disk;
1835 unsigned long flags;
1838 if ((lun = disk->private_data) == NULL)
1842 spin_lock_irqsave(&ub_lock, flags);
1843 if (atomic_read(&sc->poison)) {
1844 spin_unlock_irqrestore(&ub_lock, flags);
1848 spin_unlock_irqrestore(&ub_lock, flags);
1850 if (lun->removable || lun->readonly)
1851 check_disk_change(inode->i_bdev);
1854 * The sd.c considers ->media_present and ->changed not equivalent,
1855 * under some pretty murky conditions (a failure of READ CAPACITY).
1856 * We may need it one day.
1858 if (lun->removable && lun->changed && !(filp->f_flags & O_NDELAY)) {
1863 if (lun->readonly && (filp->f_mode & FMODE_WRITE)) {
1877 static int ub_bd_release(struct inode *inode, struct file *filp)
1879 struct gendisk *disk = inode->i_bdev->bd_disk;
1880 struct ub_lun *lun = disk->private_data;
1881 struct ub_dev *sc = lun->udev;
1888 * The ioctl interface.
1890 static int ub_bd_ioctl(struct inode *inode, struct file *filp,
1891 unsigned int cmd, unsigned long arg)
1893 struct gendisk *disk = inode->i_bdev->bd_disk;
1894 void __user *usermem = (void __user *) arg;
1896 return scsi_cmd_ioctl(filp, disk, cmd, usermem);
1900 * This is called once a new disk has been seen by the block layer or by ub_probe().
1901 * The main objective here is to discover the features of the media such as
1902 * the capacity, read-only status, etc. USB storage generally does not
1903 * need to be spun up, but if we needed it, this would be the place.
1905 * This call can sleep.
1907 * The return code is not used.
1909 static int ub_bd_revalidate(struct gendisk *disk)
1911 struct ub_lun *lun = disk->private_data;
1913 ub_revalidate(lun->udev, lun);
1915 /* XXX Support sector size switching like in sr.c */
1916 blk_queue_hardsect_size(disk->queue, lun->capacity.bsize);
1917 set_capacity(disk, lun->capacity.nsec);
1918 // set_disk_ro(sdkp->disk, lun->readonly);
1924 * The check is called by the block layer to verify if the media
1925 * is still available. It is supposed to be harmless, lightweight and
1926 * non-intrusive in case the media was not changed.
1928 * This call can sleep.
1930 * The return code is bool!
1932 static int ub_bd_media_changed(struct gendisk *disk)
1934 struct ub_lun *lun = disk->private_data;
1936 if (!lun->removable)
1940 * We always clear checks after every command, so this is not
1941 * as dangerous as it looks. If the TEST_UNIT_READY fails here,
1942 * the device is genuinely not ready, and operator or software
1943 * intervention is required. One dangerous case is a drive which
1944 * spins itself down; when the time comes to write dirty pages, the
1945 * write fails and the block layer discards the data. Since we never
1946 * spin drives up, such devices simply cannot be used with ub anyway.
1948 if (ub_sync_tur(lun->udev, lun) != 0) {
1953 return lun->changed;
1956 static struct block_device_operations ub_bd_fops = {
1957 .owner = THIS_MODULE,
1959 .release = ub_bd_release,
1960 .ioctl = ub_bd_ioctl,
1961 .media_changed = ub_bd_media_changed,
1962 .revalidate_disk = ub_bd_revalidate,
1966 * Common ->done routine for commands executed synchronously.
1968 static void ub_probe_done(struct ub_dev *sc, struct ub_scsi_cmd *cmd)
1970 struct completion *cop = cmd->back;
1975 * Test if the device has a check condition on it, synchronously.
1977 static int ub_sync_tur(struct ub_dev *sc, struct ub_lun *lun)
1979 struct ub_scsi_cmd *cmd;
1980 enum { ALLOC_SIZE = sizeof(struct ub_scsi_cmd) };
1981 unsigned long flags;
1982 struct completion compl;
1985 init_completion(&compl);
1988 if ((cmd = kzalloc(ALLOC_SIZE, GFP_KERNEL)) == NULL)
1991 cmd->cdb[0] = TEST_UNIT_READY;
1993 cmd->dir = UB_DIR_NONE;
1994 cmd->state = UB_CMDST_INIT;
1995 cmd->lun = lun; /* This may be NULL, but that's ok */
1996 cmd->done = ub_probe_done;
1999 spin_lock_irqsave(sc->lock, flags);
2000 cmd->tag = sc->tagcnt++;
2002 rc = ub_submit_scsi(sc, cmd);
2003 spin_unlock_irqrestore(sc->lock, flags);
2006 printk("ub: testing ready: submit error (%d)\n", rc); /* P3 */
2010 wait_for_completion(&compl);
2014 if (rc == -EIO && cmd->key != 0) /* Retries for benh's key */
2024 * Read the SCSI capacity synchronously (for probing).
2026 static int ub_sync_read_cap(struct ub_dev *sc, struct ub_lun *lun,
2027 struct ub_capacity *ret)
2029 struct ub_scsi_cmd *cmd;
2030 struct scatterlist *sg;
2032 enum { ALLOC_SIZE = sizeof(struct ub_scsi_cmd) + 8 };
2033 unsigned long flags;
2034 unsigned int bsize, shift;
2036 struct completion compl;
2039 init_completion(&compl);
2042 if ((cmd = kzalloc(ALLOC_SIZE, GFP_KERNEL)) == NULL)
2044 p = (char *)cmd + sizeof(struct ub_scsi_cmd);
2048 cmd->dir = UB_DIR_READ;
2049 cmd->state = UB_CMDST_INIT;
2052 sg->page = virt_to_page(p);
2053 sg->offset = (unsigned long)p & (PAGE_SIZE-1);
2057 cmd->done = ub_probe_done;
2060 spin_lock_irqsave(sc->lock, flags);
2061 cmd->tag = sc->tagcnt++;
2063 rc = ub_submit_scsi(sc, cmd);
2064 spin_unlock_irqrestore(sc->lock, flags);
2067 printk("ub: reading capacity: submit error (%d)\n", rc); /* P3 */
2071 wait_for_completion(&compl);
2073 if (cmd->error != 0) {
2074 printk("ub: reading capacity: error %d\n", cmd->error); /* P3 */
2078 if (cmd->act_len != 8) {
2079 printk("ub: reading capacity: size %d\n", cmd->act_len); /* P3 */
2084 /* sd.c special-cases sector size of 0 to mean 512. Needed? Safe? */
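	/*
	 * READ CAPACITY(10) returns eight bytes: the big-endian address of the
	 * last logical block, then the big-endian block length in bytes; hence
	 * the "+ 1" below and the conversion to 512-byte sectors via bshift.
	 */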
2085 nsec = be32_to_cpu(*(__be32 *)p) + 1;
2086 bsize = be32_to_cpu(*(__be32 *)(p + 4));
2088 case 512: shift = 0; break;
2089 case 1024: shift = 1; break;
2090 case 2048: shift = 2; break;
2091 case 4096: shift = 3; break;
2093 printk("ub: Bad sector size %u\n", bsize); /* P3 */
2099 ret->bshift = shift;
2100 ret->nsec = nsec << shift;
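/*
 * Helpers for the synchronous, probe-time URBs: both the URB completion
 * callback and the guard timer simply signal the completion that the
 * submitter is waiting on, so wait_for_completion() cannot hang forever.
 */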
2113 static void ub_probe_urb_complete(struct urb *urb, struct pt_regs *pt)
2115 struct completion *cop = urb->context;
2119 static void ub_probe_timeout(unsigned long arg)
2121 struct completion *cop = (struct completion *) arg;
2126 * Reset with a Bulk reset.
2128 static int ub_sync_reset(struct ub_dev *sc)
2130 int ifnum = sc->intf->cur_altsetting->desc.bInterfaceNumber;
2131 struct usb_ctrlrequest *cr;
2132 struct completion compl;
2133 struct timer_list timer;
2136 init_completion(&compl);
2139 cr->bRequestType = USB_TYPE_CLASS | USB_RECIP_INTERFACE;
2140 cr->bRequest = US_BULK_RESET_REQUEST;
2141 cr->wValue = cpu_to_le16(0);
2142 cr->wIndex = cpu_to_le16(ifnum);
2143 cr->wLength = cpu_to_le16(0);
2145 usb_fill_control_urb(&sc->work_urb, sc->dev, sc->send_ctrl_pipe,
2146 (unsigned char*) cr, NULL, 0, ub_probe_urb_complete, &compl);
2147 sc->work_urb.actual_length = 0;
2148 sc->work_urb.error_count = 0;
2149 sc->work_urb.status = 0;
2151 if ((rc = usb_submit_urb(&sc->work_urb, GFP_KERNEL)) != 0) {
2153 "%s: Unable to submit a bulk reset (%d)\n", sc->name, rc);
2158 timer.function = ub_probe_timeout;
2159 timer.data = (unsigned long) &compl;
2160 timer.expires = jiffies + UB_CTRL_TIMEOUT;
2163 wait_for_completion(&compl);
2165 del_timer_sync(&timer);
2166 usb_kill_urb(&sc->work_urb);
2168 return sc->work_urb.status;
2172 * Get the number of LUNs by way of the Bulk GetMaxLUN command.
2174 static int ub_sync_getmaxlun(struct ub_dev *sc)
2176 int ifnum = sc->intf->cur_altsetting->desc.bInterfaceNumber;
2178 enum { ALLOC_SIZE = 1 };
2179 struct usb_ctrlrequest *cr;
2180 struct completion compl;
2181 struct timer_list timer;
2185 init_completion(&compl);
2188 if ((p = kmalloc(ALLOC_SIZE, GFP_KERNEL)) == NULL)
2193 cr->bRequestType = USB_DIR_IN | USB_TYPE_CLASS | USB_RECIP_INTERFACE;
2194 cr->bRequest = US_BULK_GET_MAX_LUN;
2195 cr->wValue = cpu_to_le16(0);
2196 cr->wIndex = cpu_to_le16(ifnum);
2197 cr->wLength = cpu_to_le16(1);
2199 usb_fill_control_urb(&sc->work_urb, sc->dev, sc->recv_ctrl_pipe,
2200 (unsigned char*) cr, p, 1, ub_probe_urb_complete, &compl);
2201 sc->work_urb.actual_length = 0;
2202 sc->work_urb.error_count = 0;
2203 sc->work_urb.status = 0;
2205 if ((rc = usb_submit_urb(&sc->work_urb, GFP_KERNEL)) != 0) {
2207 printk("%s: Stall submitting GetMaxLUN, using 1 LUN\n",
2211 "%s: Unable to submit GetMaxLUN (%d)\n",
2218 timer.function = ub_probe_timeout;
2219 timer.data = (unsigned long) &compl;
2220 timer.expires = jiffies + UB_CTRL_TIMEOUT;
2223 wait_for_completion(&compl);
2225 del_timer_sync(&timer);
2226 usb_kill_urb(&sc->work_urb);
2228 if ((rc = sc->work_urb.status) < 0) {
2230 printk("%s: Stall at GetMaxLUN, using 1 LUN\n",
2234 "%s: Error at GetMaxLUN (%d)\n",
2240 if (sc->work_urb.actual_length != 1) {
2241 printk("%s: GetMaxLUN returned %d bytes\n", sc->name,
2242 sc->work_urb.actual_length); /* P3 */
2245 if ((nluns = *p) == 55) {
2248 /* GetMaxLUN returns the maximum LUN number */
2250 if (nluns > UB_MAX_LUNS)
2251 nluns = UB_MAX_LUNS;
2253 printk("%s: GetMaxLUN returned %d, using %d LUNs\n", sc->name,
2254 *p, nluns); /* P3 */
2268 * Clear initial stalls.
2270 static int ub_probe_clear_stall(struct ub_dev *sc, int stalled_pipe)
2273 struct usb_ctrlrequest *cr;
2274 struct completion compl;
2275 struct timer_list timer;
2278 init_completion(&compl);
2280 endp = usb_pipeendpoint(stalled_pipe);
2281 if (usb_pipein (stalled_pipe))
2285 cr->bRequestType = USB_RECIP_ENDPOINT;
2286 cr->bRequest = USB_REQ_CLEAR_FEATURE;
2287 cr->wValue = cpu_to_le16(USB_ENDPOINT_HALT);
2288 cr->wIndex = cpu_to_le16(endp);
2289 cr->wLength = cpu_to_le16(0);
2291 usb_fill_control_urb(&sc->work_urb, sc->dev, sc->send_ctrl_pipe,
2292 (unsigned char*) cr, NULL, 0, ub_probe_urb_complete, &compl);
2293 sc->work_urb.actual_length = 0;
2294 sc->work_urb.error_count = 0;
2295 sc->work_urb.status = 0;
2297 if ((rc = usb_submit_urb(&sc->work_urb, GFP_KERNEL)) != 0) {
2299 "%s: Unable to submit a probe clear (%d)\n", sc->name, rc);
2304 timer.function = ub_probe_timeout;
2305 timer.data = (unsigned long) &compl;
2306 timer.expires = jiffies + UB_CTRL_TIMEOUT;
2309 wait_for_completion(&compl);
2311 del_timer_sync(&timer);
2312 usb_kill_urb(&sc->work_urb);
2314 /* reset the endpoint toggle */
2315 usb_settoggle(sc->dev, endp, usb_pipeout(sc->last_pipe), 0);
2321 * Get the pipe settings.
2323 static int ub_get_pipes(struct ub_dev *sc, struct usb_device *dev,
2324 struct usb_interface *intf)
2326 struct usb_host_interface *altsetting = intf->cur_altsetting;
2327 struct usb_endpoint_descriptor *ep_in = NULL;
2328 struct usb_endpoint_descriptor *ep_out = NULL;
2329 struct usb_endpoint_descriptor *ep;
2333 * Find the endpoints we need.
2334 * We are expecting a minimum of 2 endpoints - in and out (bulk).
2335 * We will ignore any others.
2337 for (i = 0; i < altsetting->desc.bNumEndpoints; i++) {
2338 ep = &altsetting->endpoint[i].desc;
2340 /* Is it a BULK endpoint? */
2341 if ((ep->bmAttributes & USB_ENDPOINT_XFERTYPE_MASK)
2342 == USB_ENDPOINT_XFER_BULK) {
2343 /* BULK in or out? */
2344 if (ep->bEndpointAddress & USB_DIR_IN)
2351 if (ep_in == NULL || ep_out == NULL) {
2352 printk(KERN_NOTICE "%s: failed endpoint check\n",
2357 /* Calculate and store the pipe values */
2358 sc->send_ctrl_pipe = usb_sndctrlpipe(dev, 0);
2359 sc->recv_ctrl_pipe = usb_rcvctrlpipe(dev, 0);
2360 sc->send_bulk_pipe = usb_sndbulkpipe(dev,
2361 ep_out->bEndpointAddress & USB_ENDPOINT_NUMBER_MASK);
2362 sc->recv_bulk_pipe = usb_rcvbulkpipe(dev,
2363 ep_in->bEndpointAddress & USB_ENDPOINT_NUMBER_MASK);
2369 * Probing is done in the process context, which allows us to cheat
2370 * and not build a state machine for the discovery.
2372 static int ub_probe(struct usb_interface *intf,
2373 const struct usb_device_id *dev_id)
2380 if (usb_usual_check_type(dev_id, USB_US_TYPE_UB))
2384 if ((sc = kzalloc(sizeof(struct ub_dev), GFP_KERNEL)) == NULL)
2386 sc->lock = ub_next_lock();
2387 INIT_LIST_HEAD(&sc->luns);
2388 usb_init_urb(&sc->work_urb);
2389 tasklet_init(&sc->tasklet, ub_scsi_action, (unsigned long)sc);
2390 atomic_set(&sc->poison, 0);
2391 INIT_WORK(&sc->reset_work, ub_reset_task, sc);
2392 init_waitqueue_head(&sc->reset_wait);
2394 init_timer(&sc->work_timer);
2395 sc->work_timer.data = (unsigned long) sc;
2396 sc->work_timer.function = ub_urb_timeout;
2398 ub_init_completion(&sc->work_done);
2399 sc->work_done.done = 1; /* A little yuk, but oh well... */
2401 sc->dev = interface_to_usbdev(intf);
2403 // sc->ifnum = intf->cur_altsetting->desc.bInterfaceNumber;
2404 usb_set_intfdata(intf, sc);
2405 usb_get_dev(sc->dev);
2406 // usb_get_intf(sc->intf); /* Do we need this? */
2408 snprintf(sc->name, 12, DRV_NAME "(%d.%d)",
2409 sc->dev->bus->busnum, sc->dev->devnum);
2411 /* XXX Verify that we can handle the device (from descriptors) */
2413 if (ub_get_pipes(sc, sc->dev, intf) != 0)
2416 if (device_create_file(&sc->intf->dev, &dev_attr_diag) != 0)
2420 * At this point, all USB initialization is done, so do the upper layer.
2421 * We really hate halfway initialized structures, so from the
2422 * invariants perspective, this ub_dev is fully constructed at
2427 * This is needed to clear toggles. It is a problem only if we do
2428 * `rmmod ub && modprobe ub` without disconnects, but we like that.
2430 #if 0 /* iPod Mini fails if we do this (big white iPod works) */
2431 ub_probe_clear_stall(sc, sc->recv_bulk_pipe);
2432 ub_probe_clear_stall(sc, sc->send_bulk_pipe);
2436 * The way this is used by the startup code is a little specific.
2437 * A SCSI check causes a USB stall. Our common case code sees it
2438 * and clears the check, after which the device is ready for use.
2439 * But if a check was not present, any command other than
2440 * TEST_UNIT_READY ends with a lockup (including REQUEST_SENSE).
2442 * If we neglect to clear the SCSI check, the first real command fails
2443 * (which is the capacity readout). We clear that and retry, but why
2444 * cause spurious retries for no reason?
2446 * Revalidation may start with its own TEST_UNIT_READY, but that one
2447 * has to succeed, so we clear checks with an additional one here.
2448 * In any case it's not our business how revalidation is implemented.
2450 for (i = 0; i < 3; i++) { /* Retries for benh's key */
2451 if ((rc = ub_sync_tur(sc, NULL)) <= 0) break;
2452 if (rc != 0x6) break;
2457 for (i = 0; i < 3; i++) {
2458 if ((rc = ub_sync_getmaxlun(sc)) < 0) {
2460 * This segment is taken from usb-storage. They say
2461 * that ZIP-100 needs this, but my own ZIP-100 works
2462 * fine without this.
2463 * Still, it does not seem to hurt anything.
2466 ub_probe_clear_stall(sc, sc->recv_bulk_pipe);
2467 ub_probe_clear_stall(sc, sc->send_bulk_pipe);
2478 for (i = 0; i < nluns; i++) {
2479 ub_probe_lun(sc, i);
2483 /* device_remove_file(&sc->intf->dev, &dev_attr_diag); */
2486 usb_set_intfdata(intf, NULL);
2487 // usb_put_intf(sc->intf);
2488 usb_put_dev(sc->dev);
2494 static int ub_probe_lun(struct ub_dev *sc, int lnum)
2498 struct gendisk *disk;
2502 if ((lun = kzalloc(sizeof(struct ub_lun), GFP_KERNEL)) == NULL)
2507 if ((lun->id = ub_id_get()) == -1)
2511 list_add(&lun->link, &sc->luns);
2513 snprintf(lun->name, 16, DRV_NAME "%c(%d.%d.%d)",
2514 lun->id + 'a', sc->dev->bus->busnum, sc->dev->devnum, lun->num);
2516 lun->removable = 1; /* XXX Query this from the device */
2517 lun->changed = 1; /* ub_revalidate clears only */
2518 ub_revalidate(sc, lun);
2521 if ((disk = alloc_disk(UB_PARTS_PER_LUN)) == NULL)
2525 sprintf(disk->disk_name, DRV_NAME "%c", lun->id + 'a');
2526 sprintf(disk->devfs_name, DEVFS_NAME "/%c", lun->id + 'a');
2527 disk->major = UB_MAJOR;
2528 disk->first_minor = lun->id * UB_PARTS_PER_LUN;
2529 disk->fops = &ub_bd_fops;
2530 disk->private_data = lun;
2531 disk->driverfs_dev = &sc->intf->dev;
2534 if ((q = blk_init_queue(ub_request_fn, sc->lock)) == NULL)
2539 blk_queue_bounce_limit(q, BLK_BOUNCE_HIGH);
2540 blk_queue_max_hw_segments(q, UB_MAX_REQ_SG);
2541 blk_queue_max_phys_segments(q, UB_MAX_REQ_SG);
2542 blk_queue_segment_boundary(q, 0xffffffff); /* Dubious. */
2543 blk_queue_max_sectors(q, UB_MAX_SECTORS);
2544 blk_queue_hardsect_size(q, lun->capacity.bsize);
2548 set_capacity(disk, lun->capacity.nsec);
2550 disk->flags |= GENHD_FL_REMOVABLE;
2559 list_del(&lun->link);
2567 static void ub_disconnect(struct usb_interface *intf)
2569 struct ub_dev *sc = usb_get_intfdata(intf);
2570 struct list_head *p;
2572 struct gendisk *disk;
2573 unsigned long flags;
2576 * Prevent ub_bd_release from pulling the rug from under us.
2577 * XXX This is starting to look like a kref.
2578 * XXX Why not take this ref at probe time?
2580 spin_lock_irqsave(&ub_lock, flags);
2582 spin_unlock_irqrestore(&ub_lock, flags);
2585 * Fence stall clearings, operations triggered by unlinkings and so on.
2586 * We do not attempt to unlink any URBs, because we do not trust the
2587 * unlink paths in HC drivers. Also, we get -84 upon disconnect anyway.
2589 atomic_set(&sc->poison, 1);
2592 * Wait for reset to end, if any.
2594 wait_event(sc->reset_wait, !sc->reset);
2597 * Blow away queued commands.
2599 * Actually, this never works, because before we get here
2600 * the HCD terminates outstanding URB(s). It causes our
2601 * SCSI command queue to advance, commands fail to submit,
2602 * and the whole queue drains. So, we just use this code to
2605 spin_lock_irqsave(sc->lock, flags);
2607 struct ub_scsi_cmd *cmd;
2609 while ((cmd = ub_cmdq_peek(sc)) != NULL) {
2610 cmd->error = -ENOTCONN;
2611 cmd->state = UB_CMDST_DONE;
2612 ub_cmdtr_state(sc, cmd);
2614 (*cmd->done)(sc, cmd);
2618 printk(KERN_WARNING "%s: "
2619 "%d was queued after shutdown\n", sc->name, cnt);
2622 spin_unlock_irqrestore(sc->lock, flags);
2625 * Unregister the upper layer.
2627 list_for_each (p, &sc->luns) {
2628 lun = list_entry(p, struct ub_lun, link);
2630 if (disk->flags & GENHD_FL_UP)
2633 * I wish I could do:
2634 * set_bit(QUEUE_FLAG_DEAD, &q->queue_flags);
2635 * As it is, we rely on our internal poisoning and let
2636 * the upper levels spin furiously, failing all the I/O.
2641 * Testing for -EINPROGRESS is always a bug, so we are bending
2642 * the rules a little.
2644 spin_lock_irqsave(sc->lock, flags);
2645 if (sc->work_urb.status == -EINPROGRESS) { /* janitors: ignore */
2646 printk(KERN_WARNING "%s: "
2647 "URB is active after disconnect\n", sc->name);
2649 spin_unlock_irqrestore(sc->lock, flags);
2652 * There is virtually no chance that another CPU is still running the timer
2653 * this long after ub_urb_complete should have called del_timer, but that holds
2654 * only if the HCD didn't forget to deliver a callback on unlink.
2656 del_timer_sync(&sc->work_timer);
2659 * At this point there must be no commands coming from anyone
2660 * and no URBs left in transit.
2663 device_remove_file(&sc->intf->dev, &dev_attr_diag);
2664 usb_set_intfdata(intf, NULL);
2665 // usb_put_intf(sc->intf);
2667 usb_put_dev(sc->dev);
2673 static struct usb_driver ub_driver = {
2676 .disconnect = ub_disconnect,
2677 .id_table = ub_usb_ids,
2680 static int __init ub_init(void)
2685 for (i = 0; i < UB_QLOCK_NUM; i++)
2686 spin_lock_init(&ub_qlockv[i]);
2688 if ((rc = register_blkdev(UB_MAJOR, DRV_NAME)) != 0)
2690 devfs_mk_dir(DEVFS_NAME);
2692 if ((rc = usb_register(&ub_driver)) != 0)
2695 usb_usual_set_present(USB_US_TYPE_UB);
2699 devfs_remove(DEVFS_NAME);
2700 unregister_blkdev(UB_MAJOR, DRV_NAME);
2705 static void __exit ub_exit(void)
2707 usb_deregister(&ub_driver);
2709 devfs_remove(DEVFS_NAME);
2710 unregister_blkdev(UB_MAJOR, DRV_NAME);
2711 usb_usual_clear_present(USB_US_TYPE_UB);
2714 module_init(ub_init);
2715 module_exit(ub_exit);
2717 MODULE_LICENSE("GPL");