/*
 *  scsi_lib.c Copyright (C) 1999 Eric Youngdale
 *
 *  SCSI queueing library.
 *      Initial versions: Eric Youngdale (eric@andante.org).
 *                        Based upon conversations with large numbers
 *                        of people at Linux Expo.
 */

#include <linux/bio.h>
#include <linux/blkdev.h>
#include <linux/completion.h>
#include <linux/kernel.h>
#include <linux/mempool.h>
#include <linux/slab.h>
#include <linux/init.h>
#include <linux/pci.h>
#include <linux/delay.h>

#include <scsi/scsi.h>
#include <scsi/scsi_dbg.h>
#include <scsi/scsi_device.h>
#include <scsi/scsi_driver.h>
#include <scsi/scsi_eh.h>
#include <scsi/scsi_host.h>
#include <scsi/scsi_request.h>

#include "scsi_priv.h"
#include "scsi_logging.h"


#define SG_MEMPOOL_NR           (sizeof(scsi_sg_pools)/sizeof(struct scsi_host_sg_pool))
#define SG_MEMPOOL_SIZE         32

struct scsi_host_sg_pool {
        size_t          size;
        char            *name;
        kmem_cache_t    *slab;
        mempool_t       *pool;
};

#if (SCSI_MAX_PHYS_SEGMENTS < 32)
#error SCSI_MAX_PHYS_SEGMENTS is too small
#endif

#define SP(x) { x, "sgpool-" #x }
struct scsi_host_sg_pool scsi_sg_pools[] = {
        SP(8),
        SP(16),
        SP(32),
#if (SCSI_MAX_PHYS_SEGMENTS > 32)
        SP(64),
#if (SCSI_MAX_PHYS_SEGMENTS > 64)
        SP(128),
#if (SCSI_MAX_PHYS_SEGMENTS > 128)
        SP(256),
#if (SCSI_MAX_PHYS_SEGMENTS > 256)
#error SCSI_MAX_PHYS_SEGMENTS is too large
#endif
#endif
#endif
#endif
};
#undef SP


/*
 * Function:    scsi_insert_special_req()
 *
 * Purpose:     Insert pre-formed request into request queue.
 *
 * Arguments:   sreq    - request that is ready to be queued.
 *              at_head - boolean.  True if we should insert at head
 *                        of queue, false if we should insert at tail.
 *
 * Lock status: Assumed that lock is not held upon entry.
 *
 * Returns:     Nothing
 *
 * Notes:       This function is called from character device and from
 *              ioctl types of functions where the caller knows exactly
 *              what SCSI command needs to be issued.   The idea is that
 *              we merely inject the command into the queue (at the head
 *              for now), and then call the queue request function to actually
 *              process it.
 */
int scsi_insert_special_req(struct scsi_request *sreq, int at_head)
{
        /*
         * Because users of this function are apt to reuse requests with no
         * modification, we have to sanitise the request flags here
         */
        sreq->sr_request->flags &= ~REQ_DONTPREP;
        blk_insert_request(sreq->sr_device->request_queue, sreq->sr_request,
                           at_head, sreq);
        return 0;
}

static void scsi_run_queue(struct request_queue *q);

/*
 * Function:    scsi_queue_insert()
 *
 * Purpose:     Insert a command in the midlevel queue.
 *
 * Arguments:   cmd    - command that we are adding to queue.
 *              reason - why we are inserting command to queue.
 *
 * Lock status: Assumed that lock is not held upon entry.
 *
 * Returns:     Nothing.
 *
 * Notes:       We do this for one of two cases.  Either the host is busy
 *              and it cannot accept any more commands for the time being,
 *              or the device returned QUEUE_FULL and can accept no more
 *              commands.
 * Notes:       This could be called either from an interrupt context or a
 *              normal process context.
 */
int scsi_queue_insert(struct scsi_cmnd *cmd, int reason)
{
        struct Scsi_Host *host = cmd->device->host;
        struct scsi_device *device = cmd->device;
        struct request_queue *q = device->request_queue;
        unsigned long flags;

        SCSI_LOG_MLQUEUE(1,
                 printk("Inserting command %p into mlqueue\n", cmd));

        /*
         * We are inserting the command into the ml queue.  First, we
         * cancel the timer, so it doesn't time out.
         */
        scsi_delete_timer(cmd);

        /*
         * Next, set the appropriate busy bit for the device/host.
         *
         * If the host/device isn't busy, assume that something actually
         * completed, and that we should be able to queue a command now.
         *
         * Note that the prior mid-layer assumption that any host could
         * always queue at least one command is now broken.  The mid-layer
         * will implement a user specifiable stall (see
         * scsi_host.max_host_blocked and scsi_device.max_device_blocked)
         * if a command is requeued with no other commands outstanding
         * either for the device or for the host.
         */
        if (reason == SCSI_MLQUEUE_HOST_BUSY)
                host->host_blocked = host->max_host_blocked;
        else if (reason == SCSI_MLQUEUE_DEVICE_BUSY)
                device->device_blocked = device->max_device_blocked;

        /*
         * Register the fact that we own the thing for now.
         */
        cmd->state = SCSI_STATE_MLQUEUE;
        cmd->owner = SCSI_OWNER_MIDLEVEL;

        /*
         * Decrement the counters, since these commands are no longer
         * active on the host/device.
         */
        scsi_device_unbusy(device);

        /*
         * Requeue this command.  It will go before all other commands
         * that are already in the queue.
         *
         * NOTE: there is magic here about the way the queue is plugged if
         * we have no outstanding commands.
         *
         * Although we *don't* plug the queue, we call the request
         * function.  The SCSI request function detects the blocked condition
         * and plugs the queue appropriately.
         */
        spin_lock_irqsave(q->queue_lock, flags);
        blk_requeue_request(q, cmd->request);
        spin_unlock_irqrestore(q->queue_lock, flags);

        scsi_run_queue(q);

        return 0;
}

/*
 * Function:    scsi_do_req
 *
 * Purpose:     Queue a SCSI request
 *
 * Arguments:   sreq      - command descriptor.
 *              cmnd      - actual SCSI command to be performed.
 *              buffer    - data buffer.
 *              bufflen   - size of data buffer.
 *              done      - completion function to be run.
 *              timeout   - how long to let it run before timeout.
 *              retries   - number of retries we allow.
 *
 * Lock status: No locks held upon entry.
 *
 * Returns:     Nothing.
 *
 * Notes:       This function is only used for queueing requests for things
 *              like ioctls and character device requests - this is because
 *              we essentially just inject a request into the queue for the
 *              device.
 *
 *              In order to support the scsi_device_quiesce function, we
 *              now inject requests on the *head* of the device queue
 *              rather than the tail.
 */
void scsi_do_req(struct scsi_request *sreq, const void *cmnd,
                 void *buffer, unsigned bufflen,
                 void (*done)(struct scsi_cmnd *),
                 int timeout, int retries)
{
        /*
         * If the upper level driver is reusing these things, then
         * we should release the low-level block now.  Another one will
         * be allocated later when this request is getting queued.
         */
        __scsi_release_request(sreq);

        /*
         * Our own function scsi_done (which marks the host as not busy,
         * disables the timeout counter, etc) will be called by us or by
         * the low-level driver's command completion path; it in turn
         * needs to call the completion function for the high level
         * driver.
         */
        memcpy(sreq->sr_cmnd, cmnd, sizeof(sreq->sr_cmnd));
        sreq->sr_bufflen = bufflen;
        sreq->sr_buffer = buffer;
        sreq->sr_allowed = retries;
        sreq->sr_done = done;
        sreq->sr_timeout_per_command = timeout;

        if (sreq->sr_cmd_len == 0)
                sreq->sr_cmd_len = COMMAND_SIZE(sreq->sr_cmnd[0]);

        /*
         * head injection *required* here otherwise quiesce won't work
         */
        scsi_insert_special_req(sreq, 1);
}
EXPORT_SYMBOL(scsi_do_req);

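/*
 * Completion routine used by scsi_wait_req() (via scsi_do_req()): mark
 * the request as done, release any block layer tag it holds, and wake
 * the thread sleeping in scsi_wait_req().
 */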
static void scsi_wait_done(struct scsi_cmnd *cmd)
{
        struct request *req = cmd->request;
        struct request_queue *q = cmd->device->request_queue;
        unsigned long flags;

        req->rq_status = RQ_SCSI_DONE;  /* Busy, but indicate request done */

        spin_lock_irqsave(q->queue_lock, flags);
        if (blk_rq_tagged(req))
                blk_queue_end_tag(q, req);
        spin_unlock_irqrestore(q->queue_lock, flags);

        if (req->waiting)
                complete(req->waiting);
}

/* This is the end routine we get to if a command was never attached
 * to the request.  Simply complete the request without changing
 * rq_status; this will cause a DRIVER_ERROR. */
static void scsi_wait_req_end_io(struct request *req)
{
        BUG_ON(!req->waiting);

        complete(req->waiting);
}

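/*
 * Function:    scsi_wait_req()
 *
 * Purpose:     Issue a SCSI request and wait synchronously for its
 *              completion.
 *
 * Arguments:   as for scsi_do_req(), except that the completion
 *              function is supplied here (scsi_wait_done above).
 *
 * Lock status: No locks held upon entry; the caller must be able
 *              to sleep.
 *
 * Notes:       If the command block never gets attached to the request
 *              (see scsi_wait_req_end_io above), rq_status stays at
 *              RQ_SCSI_BUSY and DRIVER_ERROR is folded into sr_result.
 */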
void scsi_wait_req(struct scsi_request *sreq, const void *cmnd, void *buffer,
                   unsigned bufflen, int timeout, int retries)
{
        DECLARE_COMPLETION(wait);

        sreq->sr_request->waiting = &wait;
        sreq->sr_request->rq_status = RQ_SCSI_BUSY;
        sreq->sr_request->end_io = scsi_wait_req_end_io;
        scsi_do_req(sreq, cmnd, buffer, bufflen, scsi_wait_done,
                        timeout, retries);
        wait_for_completion(&wait);
        sreq->sr_request->waiting = NULL;
        if (sreq->sr_request->rq_status != RQ_SCSI_DONE)
                sreq->sr_result |= (DRIVER_ERROR << 24);

        __scsi_release_request(sreq);
}
EXPORT_SYMBOL(scsi_wait_req);
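
/*
 * A minimal usage sketch for scsi_wait_req(), illustrative only and not
 * part of this file: issue a TEST UNIT READY and wait for it.  Assumes
 * the caller holds a reference to sdev, may sleep, and obtains the
 * request block via scsi_allocate_request():
 *
 *      unsigned char scsi_cmd[MAX_COMMAND_SIZE] = { TEST_UNIT_READY, };
 *      struct scsi_request *sreq;
 *
 *      sreq = scsi_allocate_request(sdev, GFP_KERNEL);
 *      if (sreq) {
 *              sreq->sr_data_direction = DMA_NONE;
 *              scsi_wait_req(sreq, scsi_cmd, NULL, 0, 30 * HZ, 3);
 *              if (sreq->sr_result)
 *                      printk("TUR failed: 0x%x\n", sreq->sr_result);
 *              scsi_release_request(sreq);
 *      }
 */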

/*
 * Function:    scsi_init_cmd_errh()
 *
 * Purpose:     Initialize cmd fields related to error handling.
 *
 * Arguments:   cmd     - command that is ready to be queued.
 *
 * Returns:     Nothing
 *
 * Notes:       This function has the job of initializing a number of
 *              fields related to error handling.   Typically this will
 *              be called once for each command, as required.
 */
static int scsi_init_cmd_errh(struct scsi_cmnd *cmd)
{
        cmd->owner = SCSI_OWNER_MIDLEVEL;
        cmd->serial_number = 0;
        cmd->abort_reason = 0;

        memset(cmd->sense_buffer, 0, sizeof cmd->sense_buffer);

        if (cmd->cmd_len == 0)
                cmd->cmd_len = COMMAND_SIZE(cmd->cmnd[0]);

        /*
         * We need saved copies of a number of fields - this is because
         * error handling may need to overwrite these with different values
         * to run different commands, and once error handling is complete,
         * we will need to restore these values prior to running the actual
         * command.
         */
        cmd->old_use_sg = cmd->use_sg;
        cmd->old_cmd_len = cmd->cmd_len;
        cmd->sc_old_data_direction = cmd->sc_data_direction;
        cmd->old_underflow = cmd->underflow;
        memcpy(cmd->data_cmnd, cmd->cmnd, sizeof(cmd->cmnd));
        cmd->buffer = cmd->request_buffer;
        cmd->bufflen = cmd->request_bufflen;
        cmd->abort_reason = 0;

        return 1;
}

/*
 * Function:   scsi_setup_cmd_retry()
 *
 * Purpose:    Restore the command state for a retry
 *
 * Arguments:  cmd      - command to be restored
 *
 * Returns:    Nothing
 *
 * Notes:      Immediately prior to retrying a command, we need
 *             to restore certain fields that we saved above.
 */
void scsi_setup_cmd_retry(struct scsi_cmnd *cmd)
{
        memcpy(cmd->cmnd, cmd->data_cmnd, sizeof(cmd->data_cmnd));
        cmd->request_buffer = cmd->buffer;
        cmd->request_bufflen = cmd->bufflen;
        cmd->use_sg = cmd->old_use_sg;
        cmd->cmd_len = cmd->old_cmd_len;
        cmd->sc_data_direction = cmd->sc_old_data_direction;
        cmd->underflow = cmd->old_underflow;
}

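/*
 * Called when a command completes (or is requeued via scsi_queue_insert)
 * to drop the per-host and per-device busy counts taken at dispatch
 * time, and to wake the error handler if the host is in recovery and
 * has failed commands pending.
 */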
void scsi_device_unbusy(struct scsi_device *sdev)
{
        struct Scsi_Host *shost = sdev->host;
        unsigned long flags;

        spin_lock_irqsave(shost->host_lock, flags);
        shost->host_busy--;
        if (unlikely(test_bit(SHOST_RECOVERY, &shost->shost_state) &&
                     shost->host_failed))
                scsi_eh_wakeup(shost);
        spin_unlock(shost->host_lock);
        spin_lock(sdev->request_queue->queue_lock);
        sdev->device_busy--;
        spin_unlock_irqrestore(sdev->request_queue->queue_lock, flags);
}

/*
 * Called for single_lun devices on IO completion. Clear starget_sdev_user,
 * and call blk_run_queue for all the scsi_devices on the target -
 * including current_sdev first.
 *
 * Called with *no* scsi locks held.
 */
static void scsi_single_lun_run(struct scsi_device *current_sdev)
{
        struct Scsi_Host *shost = current_sdev->host;
        struct scsi_device *sdev, *tmp;
        struct scsi_target *starget = scsi_target(current_sdev);
        unsigned long flags;

        spin_lock_irqsave(shost->host_lock, flags);
        starget->starget_sdev_user = NULL;
        spin_unlock_irqrestore(shost->host_lock, flags);

        /*
         * Call blk_run_queue for all LUNs on the target, starting with
         * current_sdev. We race with others (to set starget_sdev_user),
         * but in most cases, we will be first. Ideally, each LU on the
         * target would get some limited time or requests on the target.
         */
        blk_run_queue(current_sdev->request_queue);

        spin_lock_irqsave(shost->host_lock, flags);
        if (starget->starget_sdev_user)
                goto out;
        list_for_each_entry_safe(sdev, tmp, &starget->devices,
                        same_target_siblings) {
                if (sdev == current_sdev)
                        continue;
                if (scsi_device_get(sdev))
                        continue;

                spin_unlock_irqrestore(shost->host_lock, flags);
                blk_run_queue(sdev->request_queue);
                spin_lock_irqsave(shost->host_lock, flags);

                scsi_device_put(sdev);
        }
 out:
        spin_unlock_irqrestore(shost->host_lock, flags);
}

/*
 * Function:    scsi_run_queue()
 *
 * Purpose:     Select a proper request queue to serve next
 *
 * Arguments:   q       - last request's queue
 *
 * Returns:     Nothing
 *
 * Notes:       The previous command was completely finished, start
 *              a new one if possible.
 */
static void scsi_run_queue(struct request_queue *q)
{
        struct scsi_device *sdev = q->queuedata;
        struct Scsi_Host *shost = sdev->host;
        unsigned long flags;

        if (sdev->single_lun)
                scsi_single_lun_run(sdev);

        spin_lock_irqsave(shost->host_lock, flags);
        while (!list_empty(&shost->starved_list) &&
               !shost->host_blocked && !shost->host_self_blocked &&
                !((shost->can_queue > 0) &&
                  (shost->host_busy >= shost->can_queue))) {
                /*
                 * As long as shost is accepting commands and we have
                 * starved queues, call blk_run_queue. scsi_request_fn
                 * drops the queue_lock and can add us back to the
                 * starved_list.
                 *
                 * host_lock protects the starved_list and starved_entry.
                 * scsi_request_fn must get the host_lock before checking
                 * or modifying starved_list or starved_entry.
                 */
                sdev = list_entry(shost->starved_list.next,
                                          struct scsi_device, starved_entry);
                list_del_init(&sdev->starved_entry);
                spin_unlock_irqrestore(shost->host_lock, flags);

                blk_run_queue(sdev->request_queue);

                spin_lock_irqsave(shost->host_lock, flags);
                if (unlikely(!list_empty(&sdev->starved_entry)))
                        /*
                         * sdev lost a race, and was put back on the
                         * starved list. This is unlikely but without this
                         * in theory we could loop forever.
                         */
                        break;
        }
        spin_unlock_irqrestore(shost->host_lock, flags);

        blk_run_queue(q);
}

/*
 * Function:    scsi_requeue_command()
 *
 * Purpose:     Handle post-processing of completed commands.
 *
 * Arguments:   q       - queue to operate on
 *              cmd     - command that may need to be requeued.
 *
 * Returns:     Nothing
 * Notes:       After command completion, there may be blocks left over
 *              which weren't finished by the previous command; this can
 *              happen for a number of reasons - the main one is I/O
 *              errors in the middle of the request, in which case we
 *              need to request the blocks that come after the bad
 *              sector.
 */
static void scsi_requeue_command(struct request_queue *q, struct scsi_cmnd *cmd)
{
        unsigned long flags;

        cmd->request->flags &= ~REQ_DONTPREP;

        spin_lock_irqsave(q->queue_lock, flags);
        blk_requeue_request(q, cmd->request);
        spin_unlock_irqrestore(q->queue_lock, flags);

        scsi_run_queue(q);
}

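/*
 * Release a completed command back to the pool, then run the queue so
 * that a pending request can take its place.
 */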
void scsi_next_command(struct scsi_cmnd *cmd)
{
        struct request_queue *q = cmd->device->request_queue;

        scsi_put_command(cmd);
        scsi_run_queue(q);
}

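/*
 * Run the request queue of every device on @shost; used, for example,
 * when the host becomes unblocked again (see scsi_unblock_requests).
 */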
void scsi_run_host_queues(struct Scsi_Host *shost)
{
        struct scsi_device *sdev;

        shost_for_each_device(sdev, shost)
                scsi_run_queue(sdev->request_queue);
}

/*
 * Function:    scsi_end_request()
 *
 * Purpose:     Post-processing of completed commands (usually invoked at end
 *              of upper level post-processing and scsi_io_completion).
 *
 * Arguments:   cmd      - command that is complete.
 *              uptodate - 1 if I/O indicates success, <= 0 for I/O error.
 *              bytes    - number of bytes of completed I/O
 *              requeue  - indicates whether we should requeue leftovers.
 *
 * Lock status: Assumed that lock is not held upon entry.
 *
 * Returns:     cmd if requeue done or required, NULL otherwise
 *
 * Notes:       This is called for block device requests in order to
 *              mark some number of sectors as complete.
 *
 *              We are guaranteeing that the request queue will be goosed
 *              at some point during this call.
 */
static struct scsi_cmnd *scsi_end_request(struct scsi_cmnd *cmd, int uptodate,
                                          int bytes, int requeue)
{
        request_queue_t *q = cmd->device->request_queue;
        struct request *req = cmd->request;
        unsigned long flags;

        /*
         * If there are blocks left over at the end, set up the command
         * to queue the remainder of them.
         */
        if (end_that_request_chunk(req, uptodate, bytes)) {
                int leftover = (req->hard_nr_sectors << 9);

                if (blk_pc_request(req))
                        leftover = req->data_len;

                /* kill remainder if no retries */
                if (!uptodate && blk_noretry_request(req))
                        end_that_request_chunk(req, 0, leftover);
                else {
                        if (requeue)
                                /*
                                 * Bleah.  Leftovers again.  Stick the
                                 * leftovers in the front of the
                                 * queue, and goose the queue again.
                                 */
                                scsi_requeue_command(q, cmd);

                        return cmd;
                }
        }

        add_disk_randomness(req->rq_disk);

        spin_lock_irqsave(q->queue_lock, flags);
        if (blk_rq_tagged(req))
                blk_queue_end_tag(q, req);
        end_that_request_last(req);
        spin_unlock_irqrestore(q->queue_lock, flags);

        /*
         * This will goose the queue request function at the end, so we don't
         * need to worry about launching another command.
         */
        scsi_next_command(cmd);
        return NULL;
}

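/*
 * Allocate a scatter-gather table large enough for cmd->use_sg entries
 * from the smallest suitable mempool (see scsi_sg_pools above).  The
 * chosen pool index is saved in cmd->sglist_len so scsi_free_sgtable()
 * can return the table to the right pool.  Returns NULL if use_sg
 * exceeds SCSI_MAX_PHYS_SEGMENTS or the mempool allocation fails.
 */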
static struct scatterlist *scsi_alloc_sgtable(struct scsi_cmnd *cmd, int gfp_mask)
{
        struct scsi_host_sg_pool *sgp;
        struct scatterlist *sgl;

        BUG_ON(!cmd->use_sg);

        switch (cmd->use_sg) {
        case 1 ... 8:
                cmd->sglist_len = 0;
                break;
        case 9 ... 16:
                cmd->sglist_len = 1;
                break;
        case 17 ... 32:
                cmd->sglist_len = 2;
                break;
#if (SCSI_MAX_PHYS_SEGMENTS > 32)
        case 33 ... 64:
                cmd->sglist_len = 3;
                break;
#if (SCSI_MAX_PHYS_SEGMENTS > 64)
        case 65 ... 128:
                cmd->sglist_len = 4;
                break;
#if (SCSI_MAX_PHYS_SEGMENTS > 128)
        case 129 ... 256:
                cmd->sglist_len = 5;
                break;
#endif
#endif
#endif
        default:
                return NULL;
        }

        sgp = scsi_sg_pools + cmd->sglist_len;
        sgl = mempool_alloc(sgp->pool, gfp_mask);
        if (sgl)
                memset(sgl, 0, sgp->size);
        return sgl;
}

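/*
 * Return a scatter-gather table to the mempool it came from; index is
 * the cmd->sglist_len value recorded by scsi_alloc_sgtable().
 */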
static void scsi_free_sgtable(struct scatterlist *sgl, int index)
{
        struct scsi_host_sg_pool *sgp;

        BUG_ON(index >= SG_MEMPOOL_NR);

        sgp = scsi_sg_pools + index;
        mempool_free(sgl, sgp->pool);
}

/*
 * Function:    scsi_release_buffers()
 *
 * Purpose:     Completion processing for block device I/O requests.
 *
 * Arguments:   cmd     - command that we are bailing.
 *
 * Lock status: Assumed that no lock is held upon entry.
 *
 * Returns:     Nothing
 *
 * Notes:       In the event that an upper level driver rejects a
 *              command, we must release resources allocated during
 *              the __init_io() function.  Primarily this would involve
 *              the scatter-gather table, and potentially any bounce
 *              buffers.
 */
static void scsi_release_buffers(struct scsi_cmnd *cmd)
{
        struct request *req = cmd->request;

        /*
         * Free up any indirection buffers we allocated for DMA purposes.
         */
        if (cmd->use_sg)
                scsi_free_sgtable(cmd->request_buffer, cmd->sglist_len);
        else if (cmd->request_buffer != req->buffer)
                kfree(cmd->request_buffer);

        /*
         * Zero these out.  They now point to freed memory, and it is
         * dangerous to hang onto the pointers.
         */
        cmd->buffer  = NULL;
        cmd->bufflen = 0;
        cmd->request_buffer = NULL;
        cmd->request_bufflen = 0;
}

/*
 * Function:    scsi_io_completion()
 *
 * Purpose:     Completion processing for block device I/O requests.
 *
 * Arguments:   cmd   - command that is finished.
 *
 * Lock status: Assumed that no lock is held upon entry.
 *
 * Returns:     Nothing
 *
 * Notes:       This function is matched in terms of capabilities to
 *              the function that created the scatter-gather list.
 *              In other words, if there are no bounce buffers
 *              (the normal case for most drivers), we don't need
 *              the logic to deal with cleaning up afterwards.
 *
 *              We must do one of several things here:
 *
 *              a) Call scsi_end_request.  This will finish off the
 *                 specified number of sectors.  If we are done, the
 *                 command block will be released, and the queue
 *                 function will be goosed.  If we are not done, then
 *                 scsi_end_request will directly goose the queue.
 *
 *              b) We can just use scsi_requeue_command() here.  This would
 *                 be used if we just wanted to retry, for example.
 */
void scsi_io_completion(struct scsi_cmnd *cmd, unsigned int good_bytes,
                        unsigned int block_bytes)
{
        int result = cmd->result;
        int this_count = cmd->bufflen;
        request_queue_t *q = cmd->device->request_queue;
        struct request *req = cmd->request;
        int clear_errors = 1;
        struct scsi_sense_hdr sshdr;
        int sense_valid = 0;
        int sense_deferred = 0;

        if (blk_complete_barrier_rq(q, req, good_bytes >> 9))
                return;

        /*
         * Free up any indirection buffers we allocated for DMA purposes.
         * For the case of a READ, we need to copy the data out of the
         * bounce buffer and into the real buffer.
         */
        if (cmd->use_sg)
                scsi_free_sgtable(cmd->buffer, cmd->sglist_len);
        else if (cmd->buffer != req->buffer) {
                if (rq_data_dir(req) == READ) {
                        unsigned long flags;
                        char *to = bio_kmap_irq(req->bio, &flags);
                        memcpy(to, cmd->buffer, cmd->bufflen);
                        bio_kunmap_irq(to, &flags);
                }
                kfree(cmd->buffer);
        }

        if (result) {
                sense_valid = scsi_command_normalize_sense(cmd, &sshdr);
                if (sense_valid)
                        sense_deferred = scsi_sense_is_deferred(&sshdr);
        }
        if (blk_pc_request(req)) { /* SG_IO ioctl from block level */
                req->errors = result;
                if (result) {
                        clear_errors = 0;
                        if (sense_valid && req->sense) {
                                /*
                                 * SG_IO wants current and deferred errors
                                 */
                                int len = 8 + cmd->sense_buffer[7];

                                if (len > SCSI_SENSE_BUFFERSIZE)
                                        len = SCSI_SENSE_BUFFERSIZE;
                                memcpy(req->sense, cmd->sense_buffer, len);
                                req->sense_len = len;
                        }
                } else
                        req->data_len = cmd->resid;
        }

        /*
         * Zero these out.  They now point to freed memory, and it is
         * dangerous to hang onto the pointers.
         */
        cmd->buffer  = NULL;
        cmd->bufflen = 0;
        cmd->request_buffer = NULL;
        cmd->request_bufflen = 0;

        /*
         * Next deal with any sectors which we were able to correctly
         * handle.
         */
        if (good_bytes >= 0) {
                SCSI_LOG_HLCOMPLETE(1, printk("%ld sectors total, %d bytes done.\n",
                                              req->nr_sectors, good_bytes));
                SCSI_LOG_HLCOMPLETE(1, printk("use_sg is %d\n", cmd->use_sg));

                if (clear_errors)
                        req->errors = 0;
                /*
                 * If multiple sectors are requested in one buffer, then
                 * they will have been finished off by the first command.
                 * If not, then we have a multi-buffer command.
                 *
                 * If block_bytes != 0, it means we had a medium error
                 * of some sort, and that we want to mark some number of
                 * sectors as not uptodate.  Thus we want to inhibit
                 * requeueing right here - we will requeue down below
                 * when we handle the bad sectors.
                 */
                cmd = scsi_end_request(cmd, 1, good_bytes, result == 0);

                /*
                 * If the command completed without error, then either
                 * finish off the rest of the command, or start a new one.
                 */
                if (result == 0 || cmd == NULL)
                        return;
        }
        /*
         * Now, if we were good little boys and girls, Santa left us a request
         * sense buffer.  We can extract information from this, so we
         * can choose a block to remap, etc.
         */
        if (sense_valid && !sense_deferred) {
                switch (sshdr.sense_key) {
                case UNIT_ATTENTION:
                        if (cmd->device->removable) {
                                /* detected disc change.  set a bit
                                 * and quietly refuse further access.
                                 */
                                cmd->device->changed = 1;
                                cmd = scsi_end_request(cmd, 0,
                                                this_count, 1);
                                return;
                        } else {
                                /*
                                 * Must have been a power glitch, or a
                                 * bus reset.  Could not have been a
                                 * media change, so we just retry the
                                 * request and see what happens.
                                 */
                                scsi_requeue_command(q, cmd);
                                return;
                        }
                        break;
                case ILLEGAL_REQUEST:
                        /*
                         * If we had an ILLEGAL REQUEST returned, then we may
                         * have performed an unsupported command.  The only
                         * thing this should be would be a ten byte read where
                         * only a six byte read was supported.  Also, on a
                         * system where READ CAPACITY failed, we may have read
                         * past the end of the disk.
                         */
                        if (cmd->device->use_10_for_rw &&
                            (cmd->cmnd[0] == READ_10 ||
                             cmd->cmnd[0] == WRITE_10)) {
                                cmd->device->use_10_for_rw = 0;
                                /*
                                 * This will cause a retry with a 6-byte
                                 * command.
                                 */
                                scsi_requeue_command(q, cmd);
                                result = 0;
                        } else {
                                cmd = scsi_end_request(cmd, 0, this_count, 1);
                                return;
                        }
                        break;
                case NOT_READY:
                        /*
                         * If the device is in the process of becoming ready,
                         * retry.
                         */
                        if (sshdr.asc == 0x04 && sshdr.ascq == 0x01) {
                                scsi_requeue_command(q, cmd);
                                return;
                        }
                        printk(KERN_INFO "Device %s not ready.\n",
                               req->rq_disk ? req->rq_disk->disk_name : "");
                        cmd = scsi_end_request(cmd, 0, this_count, 1);
                        return;
                case VOLUME_OVERFLOW:
                        printk(KERN_INFO "Volume overflow <%d %d %d %d> CDB: ",
                               cmd->device->host->host_no,
                               (int)cmd->device->channel,
                               (int)cmd->device->id, (int)cmd->device->lun);
                        __scsi_print_command(cmd->data_cmnd);
                        scsi_print_sense("", cmd);
                        cmd = scsi_end_request(cmd, 0, block_bytes, 1);
                        return;
                default:
                        break;
                }
        }                       /* driver byte != 0 */
        if (host_byte(result) == DID_RESET) {
                /*
                 * Third party bus reset or reset for error
                 * recovery reasons.  Just retry the request
                 * and see what happens.
                 */
                scsi_requeue_command(q, cmd);
                return;
        }
        if (result) {
                printk(KERN_INFO "SCSI error : <%d %d %d %d> return code "
                       "= 0x%x\n", cmd->device->host->host_no,
                       cmd->device->channel,
                       cmd->device->id,
                       cmd->device->lun, result);

                if (driver_byte(result) & DRIVER_SENSE)
                        scsi_print_sense("", cmd);
                /*
                 * Mark a single buffer as not uptodate.  Queue the remainder.
                 * We sometimes get this cruft in the event that a medium error
                 * isn't properly reported.
                 */
                block_bytes = req->hard_cur_sectors << 9;
                if (!block_bytes)
                        block_bytes = req->data_len;
                cmd = scsi_end_request(cmd, 0, block_bytes, 1);
        }
}
EXPORT_SYMBOL(scsi_io_completion);

/*
 * Function:    scsi_init_io()
 *
 * Purpose:     SCSI I/O initialize function.
 *
 * Arguments:   cmd   - Command descriptor we wish to initialize
 *
 * Returns:     0 on success
 *              BLKPREP_DEFER if the failure is retryable
 *              BLKPREP_KILL if the failure is fatal
 */
static int scsi_init_io(struct scsi_cmnd *cmd)
{
        struct request     *req = cmd->request;
        struct scatterlist *sgpnt;
        int                count;

        /*
         * if this is a rq->data based REQ_BLOCK_PC, setup for a non-sg xfer
         */
        if ((req->flags & REQ_BLOCK_PC) && !req->bio) {
                cmd->request_bufflen = req->data_len;
                cmd->request_buffer = req->data;
                req->buffer = req->data;
                cmd->use_sg = 0;
                return 0;
        }

        /*
         * we used to not use scatter-gather for single segment request,
         * but now we do (it makes highmem I/O easier to support without
         * kmapping pages)
         */
        cmd->use_sg = req->nr_phys_segments;

        /*
         * if sg table allocation fails, requeue request later.
         */
        sgpnt = scsi_alloc_sgtable(cmd, GFP_ATOMIC);
        if (unlikely(!sgpnt))
                return BLKPREP_DEFER;

        cmd->request_buffer = (char *) sgpnt;
        cmd->request_bufflen = req->nr_sectors << 9;
        if (blk_pc_request(req))
                cmd->request_bufflen = req->data_len;
        req->buffer = NULL;

        /*
         * Next, walk the list, and fill in the addresses and sizes of
         * each segment.
         */
        count = blk_rq_map_sg(req->q, req, cmd->request_buffer);

        /*
         * mapped well, send it off
         */
        if (likely(count <= cmd->use_sg)) {
                cmd->use_sg = count;
                return 0;
        }

        printk(KERN_ERR "Incorrect number of segments after building list\n");
        printk(KERN_ERR "counted %d, received %d\n", count, cmd->use_sg);
        printk(KERN_ERR "req nr_sec %lu, cur_nr_sec %u\n", req->nr_sectors,
                        req->current_nr_sectors);

        /* release the command and kill it */
        scsi_release_buffers(cmd);
        scsi_put_command(cmd);
        return BLKPREP_KILL;
}

static int scsi_prepare_flush_fn(request_queue_t *q, struct request *rq)
{
        struct scsi_device *sdev = q->queuedata;
        struct scsi_driver *drv;

        if (sdev->sdev_state == SDEV_RUNNING) {
                drv = *(struct scsi_driver **) rq->rq_disk->private_data;

                if (drv->prepare_flush)
                        return drv->prepare_flush(q, rq);
        }

        return 0;
}

static void scsi_end_flush_fn(request_queue_t *q, struct request *rq)
{
        struct scsi_device *sdev = q->queuedata;
        struct request *flush_rq = rq->end_io_data;
        struct scsi_driver *drv;

        if (flush_rq->errors) {
                printk("scsi: barrier error, disabling flush support\n");
                blk_queue_ordered(q, QUEUE_ORDERED_NONE);
        }

        if (sdev->sdev_state == SDEV_RUNNING) {
                drv = *(struct scsi_driver **) rq->rq_disk->private_data;
                drv->end_flush(q, rq);
        }
}

static int scsi_issue_flush_fn(request_queue_t *q, struct gendisk *disk,
                               sector_t *error_sector)
{
        struct scsi_device *sdev = q->queuedata;
        struct scsi_driver *drv;

        if (sdev->sdev_state != SDEV_RUNNING)
                return -ENXIO;

        drv = *(struct scsi_driver **) disk->private_data;
        if (drv->issue_flush)
                return drv->issue_flush(&sdev->sdev_gendev, error_sector);

        return -EOPNOTSUPP;
}

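/*
 * Function:    scsi_prep_fn()
 *
 * Purpose:     Prepare a request for the SCSI lower layers; installed
 *              as the block layer prep_rq_fn by scsi_alloc_queue().
 *
 * Returns:     BLKPREP_OK    - request is ready, REQ_DONTPREP is set
 *              BLKPREP_DEFER - out of resources, retry the request later
 *              BLKPREP_KILL  - request is invalid or device is unusable
 */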
static int scsi_prep_fn(struct request_queue *q, struct request *req)
{
        struct scsi_device *sdev = q->queuedata;
        struct scsi_cmnd *cmd;
        int specials_only = 0;

        /*
         * Just check to see if the device is online.  If it isn't, we
         * refuse to process any commands.  The device must be brought
         * online before trying any recovery commands
         */
        if (unlikely(!scsi_device_online(sdev))) {
                printk(KERN_ERR "scsi%d (%d:%d): rejecting I/O to offline device\n",
                       sdev->host->host_no, sdev->id, sdev->lun);
                return BLKPREP_KILL;
        }
        if (unlikely(sdev->sdev_state != SDEV_RUNNING)) {
                /* OK, we're not in a running state; don't prep
                 * user commands */
                if (sdev->sdev_state == SDEV_DEL) {
                        /* Device is fully deleted, no commands
                         * at all allowed down */
                        printk(KERN_ERR "scsi%d (%d:%d): rejecting I/O to dead device\n",
                               sdev->host->host_no, sdev->id, sdev->lun);
                        return BLKPREP_KILL;
                }
                /* OK, we only allow special commands (i.e. not
                 * user initiated ones) */
                specials_only = sdev->sdev_state;
        }

        /*
         * Find the actual device driver associated with this command.
         * The SPECIAL requests are things like character device or
         * ioctls, which did not originate from ll_rw_blk.  Note that
         * the special field is also used to indicate the cmd for
         * the remainder of a partially fulfilled request that can
         * come up when there is a medium error.  We have to treat
         * these two cases differently.  We differentiate by looking
         * at request->cmd, as this tells us the real story.
         */
        if (req->flags & REQ_SPECIAL) {
                struct scsi_request *sreq = req->special;

                if (sreq->sr_magic == SCSI_REQ_MAGIC) {
                        cmd = scsi_get_command(sreq->sr_device, GFP_ATOMIC);
                        if (unlikely(!cmd))
                                goto defer;
                        scsi_init_cmd_from_req(cmd, sreq);
                } else
                        cmd = req->special;
        } else if (req->flags & (REQ_CMD | REQ_BLOCK_PC)) {

                if (unlikely(specials_only)) {
                        if (specials_only == SDEV_QUIESCE ||
                                        specials_only == SDEV_BLOCK)
                                return BLKPREP_DEFER;

                        printk(KERN_ERR "scsi%d (%d:%d): rejecting I/O to device being removed\n",
                               sdev->host->host_no, sdev->id, sdev->lun);
                        return BLKPREP_KILL;
                }

                /*
                 * Now try and find a command block that we can use.
                 */
                if (!req->special) {
                        cmd = scsi_get_command(sdev, GFP_ATOMIC);
                        if (unlikely(!cmd))
                                goto defer;
                } else
                        cmd = req->special;

                /* pull a tag out of the request if we have one */
                cmd->tag = req->tag;
        } else {
                blk_dump_rq_flags(req, "SCSI bad req");
                return BLKPREP_KILL;
        }

        /* note the overloading of req->special.  When the tag
         * is active it always means cmd.  If the tag goes
         * back for re-queueing, it may be reset */
        req->special = cmd;
        cmd->request = req;

        /*
         * FIXME: drop the lock here because the functions below
         * expect to be called without the queue lock held.  Also,
         * previously, we dequeued the request before dropping the
         * lock.  We hope REQ_STARTED prevents anything untoward from
         * happening now.
         */
        if (req->flags & (REQ_CMD | REQ_BLOCK_PC)) {
                struct scsi_driver *drv;
                int ret;

                /*
                 * This will do a couple of things:
                 *  1) Fill in the actual SCSI command.
                 *  2) Fill in any other upper-level specific fields
                 *     (timeout).
                 *
                 * If this returns 0, it means that the request failed
                 * (reading past end of disk, reading offline device,
                 * etc).   This won't actually talk to the device, but
                 * some kinds of consistency checking may cause the
                 * request to be rejected immediately.
                 */

                /*
                 * This sets up the scatter-gather table (allocating if
                 * required).
                 */
                ret = scsi_init_io(cmd);
                if (ret)        /* BLKPREP_KILL return also releases the command */
                        return ret;

                /*
                 * Initialize the actual SCSI command for this request.
                 */
                drv = *(struct scsi_driver **)req->rq_disk->private_data;
                if (unlikely(!drv->init_command(cmd))) {
                        scsi_release_buffers(cmd);
                        scsi_put_command(cmd);
                        return BLKPREP_KILL;
                }
        }

        /*
         * The request is now prepped, no need to come back here
         */
        req->flags |= REQ_DONTPREP;
        return BLKPREP_OK;

 defer:
        /* If we defer, the elv_next_request() returns NULL, but the
         * queue must be restarted, so we plug here if no returning
         * command will automatically do that. */
        if (sdev->device_busy == 0)
                blk_plug_device(q);
        return BLKPREP_DEFER;
}

/*
 * scsi_dev_queue_ready: if we can send requests to sdev, return 1 else
 * return 0.
 *
 * Called with the queue_lock held.
 */
static inline int scsi_dev_queue_ready(struct request_queue *q,
                                  struct scsi_device *sdev)
{
        if (sdev->device_busy >= sdev->queue_depth)
                return 0;
        if (sdev->device_busy == 0 && sdev->device_blocked) {
                /*
                 * unblock after device_blocked iterates to zero
                 */
                if (--sdev->device_blocked == 0) {
                        SCSI_LOG_MLQUEUE(3,
                                printk("scsi%d (%d:%d) unblocking device at"
                                       " zero depth\n", sdev->host->host_no,
                                       sdev->id, sdev->lun));
                } else {
                        blk_plug_device(q);
                        return 0;
                }
        }
        if (sdev->device_blocked)
                return 0;

        return 1;
}

/*
 * scsi_host_queue_ready: if we can send requests to shost, return 1 else
 * return 0. We must end up running the queue again whenever 0 is
 * returned, else IO can hang.
 *
 * Called with host_lock held.
 */
static inline int scsi_host_queue_ready(struct request_queue *q,
                                   struct Scsi_Host *shost,
                                   struct scsi_device *sdev)
{
        if (test_bit(SHOST_RECOVERY, &shost->shost_state))
                return 0;
        if (shost->host_busy == 0 && shost->host_blocked) {
                /*
                 * unblock after host_blocked iterates to zero
                 */
                if (--shost->host_blocked == 0) {
                        SCSI_LOG_MLQUEUE(3,
                                printk("scsi%d unblocking host at zero depth\n",
                                        shost->host_no));
                } else {
                        blk_plug_device(q);
                        return 0;
                }
        }
        if ((shost->can_queue > 0 && shost->host_busy >= shost->can_queue) ||
            shost->host_blocked || shost->host_self_blocked) {
                if (list_empty(&sdev->starved_entry))
                        list_add_tail(&sdev->starved_entry, &shost->starved_list);
                return 0;
        }

        /* We're OK to process the command, so we can't be starved */
        if (!list_empty(&sdev->starved_entry))
                list_del_init(&sdev->starved_entry);

        return 1;
}

/*
 * Kill requests for a dead device
 */
static void scsi_kill_requests(request_queue_t *q)
{
        struct request *req;

        while ((req = elv_next_request(q)) != NULL) {
                blkdev_dequeue_request(req);
                req->flags |= REQ_QUIET;
                while (end_that_request_first(req, 0, req->nr_sectors))
                        ;
                end_that_request_last(req);
        }
}

/*
 * Function:    scsi_request_fn()
 *
 * Purpose:     Main strategy routine for SCSI.
 *
 * Arguments:   q       - Pointer to actual queue.
 *
 * Returns:     Nothing
 *
 * Lock status: IO request lock assumed to be held when called.
 */
static void scsi_request_fn(struct request_queue *q)
{
        struct scsi_device *sdev = q->queuedata;
        struct Scsi_Host *shost;
        struct scsi_cmnd *cmd;
        struct request *req;

        if (!sdev) {
                printk("scsi: killing requests for dead queue\n");
                scsi_kill_requests(q);
                return;
        }

        if (!get_device(&sdev->sdev_gendev))
                /* We must be tearing the block queue down already */
                return;

        /*
         * To start with, we keep looping until the queue is empty, or until
         * the host is no longer able to accept any more requests.
         */
        shost = sdev->host;
        while (!blk_queue_plugged(q)) {
                int rtn;
                /*
                 * get next queueable request.  We do this early to make sure
                 * that the request is fully prepared even if we cannot
                 * accept it.
                 */
                req = elv_next_request(q);
                if (!req || !scsi_dev_queue_ready(q, sdev))
                        break;

                if (unlikely(!scsi_device_online(sdev))) {
                        printk(KERN_ERR "scsi%d (%d:%d): rejecting I/O to offline device\n",
                               sdev->host->host_no, sdev->id, sdev->lun);
                        blkdev_dequeue_request(req);
                        req->flags |= REQ_QUIET;
                        while (end_that_request_first(req, 0, req->nr_sectors))
                                ;
                        end_that_request_last(req);
                        continue;
                }

                /*
                 * Remove the request from the request list.
                 */
                if (!(blk_queue_tagged(q) && !blk_queue_start_tag(q, req)))
                        blkdev_dequeue_request(req);
                sdev->device_busy++;

                spin_unlock(q->queue_lock);
                spin_lock(shost->host_lock);

                if (!scsi_host_queue_ready(q, shost, sdev))
                        goto not_ready;
                if (sdev->single_lun) {
                        if (scsi_target(sdev)->starget_sdev_user &&
                            scsi_target(sdev)->starget_sdev_user != sdev)
                                goto not_ready;
                        scsi_target(sdev)->starget_sdev_user = sdev;
                }
                shost->host_busy++;

                /*
                 * XXX(hch): This is rather suboptimal, scsi_dispatch_cmd will
                 *              take the lock again.
                 */
                spin_unlock_irq(shost->host_lock);

                cmd = req->special;
                if (unlikely(cmd == NULL)) {
                        printk(KERN_CRIT "impossible request in %s.\n"
                                         "please mail a stack trace to "
                                         "linux-scsi@vger.kernel.org",
                                         __FUNCTION__);
                        BUG();
                }

                /*
                 * Finally, initialize any error handling parameters, and set up
                 * the timers for timeouts.
                 */
                scsi_init_cmd_errh(cmd);

                /*
                 * Dispatch the command to the low-level driver.
                 */
                rtn = scsi_dispatch_cmd(cmd);
                spin_lock_irq(q->queue_lock);
                if (rtn) {
                        /* we're refusing the command; because of
                         * the way locks get dropped, we need to
                         * check here if plugging is required */
                        if (sdev->device_busy == 0)
                                blk_plug_device(q);

                        break;
                }
        }

        goto out;

 not_ready:
        spin_unlock_irq(shost->host_lock);

        /*
         * lock q, handle tag, requeue req, and decrement device_busy. We
         * must return with queue_lock held.
         *
         * Decrementing device_busy without checking it is OK, as all such
         * cases (host limits or settings) should run the queue at some
         * later time.
         */
        spin_lock_irq(q->queue_lock);
        blk_requeue_request(q, req);
        sdev->device_busy--;
        if (sdev->device_busy == 0)
                blk_plug_device(q);
 out:
        /* must be careful here...if we trigger the ->remove() function
         * we cannot be holding the q lock */
        spin_unlock_irq(q->queue_lock);
        put_device(&sdev->sdev_gendev);
        spin_lock_irq(q->queue_lock);
}

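/*
 * Work out the highest address the host can DMA to directly, for the
 * block layer bounce-buffer setup: ISA DMA hosts are confined to the
 * ISA window, IOMMU-backed platforms can reach anywhere, and otherwise
 * the host device's dma_mask (default: 32 bits) applies.
 */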
1411 u64 scsi_calculate_bounce_limit(struct Scsi_Host *shost)
1412 {
1413         struct device *host_dev;
1414         u64 bounce_limit = 0xffffffff;
1415
1416         if (shost->unchecked_isa_dma)
1417                 return BLK_BOUNCE_ISA;
1418         /*
1419          * Platforms with virtual-DMA translation
1420          * hardware have no practical limit.
1421          */
1422         if (!PCI_DMA_BUS_IS_PHYS)
1423                 return BLK_BOUNCE_ANY;
1424
1425         host_dev = scsi_get_device(shost);
1426         if (host_dev && host_dev->dma_mask)
1427                 bounce_limit = *host_dev->dma_mask;
1428
1429         return bounce_limit;
1430 }
1431 EXPORT_SYMBOL(scsi_calculate_bounce_limit);
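
/*
 * Illustrative probe fragment (hypothetical driver, not from this file):
 * the dma_mask consulted by scsi_calculate_bounce_limit() above is
 * whatever the LLD set on its parent device, so a host limited to 32-bit
 * DMA would do something like this before scsi_add_host().
 */
static int example_setup_dma(struct pci_dev *pdev)
{
        if (pci_set_dma_mask(pdev, 0xffffffffULL))
                return -EIO;
        /* scsi_calculate_bounce_limit() now yields 0xffffffff here */
        return 0;
}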
1432
1433 struct request_queue *scsi_alloc_queue(struct scsi_device *sdev)
1434 {
1435         struct Scsi_Host *shost = sdev->host;
1436         struct request_queue *q;
1437
1438         q = blk_init_queue(scsi_request_fn, NULL);
1439         if (!q)
1440                 return NULL;
1441
1442         blk_queue_prep_rq(q, scsi_prep_fn);
1443
1444         blk_queue_max_hw_segments(q, shost->sg_tablesize);
1445         blk_queue_max_phys_segments(q, SCSI_MAX_PHYS_SEGMENTS);
1446         blk_queue_max_sectors(q, shost->max_sectors);
1447         blk_queue_bounce_limit(q, scsi_calculate_bounce_limit(shost));
1448         blk_queue_segment_boundary(q, shost->dma_boundary);
1449         blk_queue_issue_flush_fn(q, scsi_issue_flush_fn);
1450
1451         /*
1452          * ordered tags are superior to flush ordering
1453          */
1454         if (shost->ordered_tag)
1455                 blk_queue_ordered(q, QUEUE_ORDERED_TAG);
1456         else if (shost->ordered_flush) {
1457                 blk_queue_ordered(q, QUEUE_ORDERED_FLUSH);
1458                 q->prepare_flush_fn = scsi_prepare_flush_fn;
1459                 q->end_flush_fn = scsi_end_flush_fn;
1460         }
1461
1462         if (!shost->use_clustering)
1463                 clear_bit(QUEUE_FLAG_CLUSTER, &q->queue_flags);
1464         return q;
1465 }
1466
1467 void scsi_free_queue(struct request_queue *q)
1468 {
1469         blk_cleanup_queue(q);
1470 }
1471
1472 /*
1473  * Function:    scsi_block_requests()
1474  *
1475  * Purpose:     Utility function used by low-level drivers to prevent further
1476  *              commands from being queued to the device.
1477  *
1478  * Arguments:   shost       - Host in question
1479  *
1480  * Returns:     Nothing
1481  *
1482  * Lock status: No locks are assumed held.
1483  *
1484  * Notes:       There is no timer nor any other means by which the requests
1485  *              get unblocked other than the low-level driver calling
1486  *              scsi_unblock_requests().
1487  */
1488 void scsi_block_requests(struct Scsi_Host *shost)
1489 {
1490         shost->host_self_blocked = 1;
1491 }
1492 EXPORT_SYMBOL(scsi_block_requests);
1493
1494 /*
1495  * Function:    scsi_unblock_requests()
1496  *
1497  * Purpose:     Utility function used by low-level drivers to allow further
1498  *              commands to be queued to the device.
1499  *
1500  * Arguments:   shost       - Host in question
1501  *
1502  * Returns:     Nothing
1503  *
1504  * Lock status: No locks are assumed held.
1505  *
1506  * Notes:       There is no timer nor any other means by which the requests
1507  *              get unblocked other than the low-level driver calling
1508  *              scsi_unblock_requests().
1509  *
1510  *              This is done as an API function so that changes to the
1511  *              internals of the scsi mid-layer won't require wholesale
1512  *              changes to drivers that use this feature.
1513  */
1514 void scsi_unblock_requests(struct Scsi_Host *shost)
1515 {
1516         shost->host_self_blocked = 0;
1517         scsi_run_host_queues(shost);
1518 }
1519 EXPORT_SYMBOL(scsi_unblock_requests);
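
/*
 * Illustrative LLD fragment (hypothetical): a driver typically brackets
 * a firmware reset with the two calls above so no new commands reach the
 * hardware; the unblock also reruns the per-device queues.
 */
static void example_reset_adapter(struct Scsi_Host *shost)
{
        scsi_block_requests(shost);
        /* ... reset and reinitialize the adapter here ... */
        scsi_unblock_requests(shost);
}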
1520
1521 int __init scsi_init_queue(void)
1522 {
1523         int i;
1524
1525         for (i = 0; i < SG_MEMPOOL_NR; i++) {
1526                 struct scsi_host_sg_pool *sgp = scsi_sg_pools + i;
1527                 int size = sgp->size * sizeof(struct scatterlist);
1528
1529                 sgp->slab = kmem_cache_create(sgp->name, size, 0,
1530                                 SLAB_HWCACHE_ALIGN, NULL, NULL);
1531                 if (!sgp->slab) {
1532                         printk(KERN_ERR "SCSI: can't init sg slab %s\n", sgp->name);
1533                         return -ENOMEM; /* mempool_create would oops on a NULL slab */
1534                 }
1535
1536                 sgp->pool = mempool_create(SG_MEMPOOL_SIZE,
1537                                 mempool_alloc_slab, mempool_free_slab,
1538                                 sgp->slab);
1539                 if (!sgp->pool) {
1540                         printk(KERN_ERR "SCSI: can't init sg mempool %s\n", sgp->name);
1541                         return -ENOMEM; /* fail init rather than leave a pool unusable */
1542                 }
1543         }
1544
1545         return 0;
1546 }
1547
1548 void scsi_exit_queue(void)
1549 {
1550         int i;
1551
1552         for (i = 0; i < SG_MEMPOOL_NR; i++) {
1553                 struct scsi_host_sg_pool *sgp = scsi_sg_pools + i;
1554                 mempool_destroy(sgp->pool);
1555                 kmem_cache_destroy(sgp->slab);
1556         }
1557 }
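
/*
 * Sketch of how a consumer picks one of the pools built above: the
 * smallest pool whose size covers the needed segment count (the real
 * selection lives with the scatterlist allocator elsewhere in this
 * file; the helper name here is hypothetical).
 */
static inline struct scsi_host_sg_pool *example_sg_pool(unsigned int nents)
{
        unsigned int i;

        for (i = 0; i < SG_MEMPOOL_NR; i++)
                if (nents <= scsi_sg_pools[i].size)
                        return &scsi_sg_pools[i];
        return NULL;    /* larger than SCSI_MAX_PHYS_SEGMENTS */
}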
1558 /**
1559  *      __scsi_mode_sense - issue a mode sense, falling back from ten to
1560  *              six bytes if necessary.
1561  *      @sreq:  SCSI request to fill in with the MODE_SENSE
1562  *      @dbd:   set if mode sense will disable block descriptors in the return
1563  *      @modepage: mode page being requested
1564  *      @buffer: request buffer (may not be smaller than eight bytes)
1565  *      @len:   length of request buffer.
1566  *      @timeout: command timeout
1567  *      @retries: number of retries before failing
1568  *      @data: returns a structure abstracting the mode header data
1569  *
1570  *      Returns the SCSI result code; zero means success, in which case
1571  *      @data is filled in and data->header_length holds the header offset
1572  *      (4 for a six-byte command, 8 for a ten-byte command).
1573  **/
1574 int
1575 __scsi_mode_sense(struct scsi_request *sreq, int dbd, int modepage,
1576                   unsigned char *buffer, int len, int timeout, int retries,
1577                   struct scsi_mode_data *data) {
1578         unsigned char cmd[12];
1579         int use_10_for_ms;
1580         int header_length;
1581
1582         memset(data, 0, sizeof(*data));
1583         memset(cmd, 0, sizeof(cmd));
1584         cmd[1] = dbd & 0x18;    /* allows DBD and LLBAA bits */
1585         cmd[2] = modepage;
1586
1587  retry:
1588         use_10_for_ms = sreq->sr_device->use_10_for_ms;
1589
1590         if (use_10_for_ms) {
1591                 if (len < 8)
1592                         len = 8;
1593
1594                 cmd[0] = MODE_SENSE_10;
1595                 cmd[8] = len;
1596                 header_length = 8;
1597         } else {
1598                 if (len < 4)
1599                         len = 4;
1600
1601                 cmd[0] = MODE_SENSE;
1602                 cmd[4] = len;
1603                 header_length = 4;
1604         }
1605
1606         sreq->sr_cmd_len = 0;
1607         memset(sreq->sr_sense_buffer, 0, sizeof(sreq->sr_sense_buffer));
1608         sreq->sr_data_direction = DMA_FROM_DEVICE;
1609
1610         memset(buffer, 0, len);
1611
1612         scsi_wait_req(sreq, cmd, buffer, len, timeout, retries);
1613
1614         /* This code looks awful: what it's doing is making sure an
1615          * ILLEGAL REQUEST sense return identifies the actual command
1616          * byte as the problem.  MODE_SENSE commands can return
1617          * ILLEGAL REQUEST if the code page isn't supported */
1618
1619         if (use_10_for_ms && !scsi_status_is_good(sreq->sr_result) &&
1620             (driver_byte(sreq->sr_result) & DRIVER_SENSE)) {
1621                 struct scsi_sense_hdr sshdr;
1622
1623                 if (scsi_request_normalize_sense(sreq, &sshdr)) {
1624                         if ((sshdr.sense_key == ILLEGAL_REQUEST) &&
1625                             (sshdr.asc == 0x20) && (sshdr.ascq == 0)) {
1626                                 /* 
1627                                  * Invalid command operation code
1628                                  */
1629                                 sreq->sr_device->use_10_for_ms = 0;
1630                                 goto retry;
1631                         }
1632                 }
1633         }
1634
1635         if (scsi_status_is_good(sreq->sr_result)) {
1636                 data->header_length = header_length;
1637                 if (use_10_for_ms) {
1638                         data->length = buffer[0]*256 + buffer[1] + 2;
1639                         data->medium_type = buffer[2];
1640                         data->device_specific = buffer[3];
1641                         data->longlba = buffer[4] & 0x01;
1642                         data->block_descriptor_length = buffer[6]*256
1643                                 + buffer[7];
1644                 } else {
1645                         data->length = buffer[0] + 1;
1646                         data->medium_type = buffer[1];
1647                         data->device_specific = buffer[2];
1648                         data->block_descriptor_length = buffer[3];
1649                 }
1650         }
1651
1652         return sreq->sr_result;
1653 }
1654 EXPORT_SYMBOL(__scsi_mode_sense);
1655
1656 /**
1657  *      scsi_mode_sense - issue a mode sense, falling back from ten to
1658  *              six bytes if necessary.
1659  *      @sdev:  scsi device to send command to.
1660  *      @dbd:   set if mode sense will disable block descriptors in the return
1661  *      @modepage: mode page being requested
1662  *      @buffer: request buffer (may not be smaller than eight bytes)
1663  *      @len:   length of request buffer.
1664  *      @timeout: command timeout
1665  *      @retries: number of retries before failing
1666  *      @data: returns a structure abstracting the mode header data
1667  *
1668  *      Returns the SCSI result code (zero on success, with @data filled
1669  *      in), or -1 if a request could not be allocated.
1670  **/
1671 int
1672 scsi_mode_sense(struct scsi_device *sdev, int dbd, int modepage,
1673                 unsigned char *buffer, int len, int timeout, int retries,
1674                 struct scsi_mode_data *data)
1675 {
1676         struct scsi_request *sreq = scsi_allocate_request(sdev, GFP_KERNEL);
1677         int ret;
1678
1679         if (!sreq)
1680                 return -1;
1681
1682         ret = __scsi_mode_sense(sreq, dbd, modepage, buffer, len,
1683                                 timeout, retries, data);
1684
1685         scsi_release_request(sreq);
1686
1687         return ret;
1688 }
1689 EXPORT_SYMBOL(scsi_mode_sense);
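
/*
 * Illustrative caller (hypothetical): fetch the caching mode page (0x08)
 * and locate it past the mode header and any block descriptors, the way
 * disk-class drivers consume this interface.
 */
static int example_read_caching_page(struct scsi_device *sdev)
{
        unsigned char buffer[64];
        struct scsi_mode_data data;
        int res, offset;

        res = scsi_mode_sense(sdev, 0, 0x08, buffer, sizeof(buffer),
                              5 * HZ, 3, &data);
        if (!scsi_status_is_good(res))
                return -EIO;

        /* the mode page follows the header and block descriptors */
        offset = data.header_length + data.block_descriptor_length;
        /* byte 2, bit 2 of the caching page is WCE (write cache enable) */
        return (buffer[offset + 2] & 0x04) ? 1 : 0;
}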
1690
1691 int
1692 scsi_test_unit_ready(struct scsi_device *sdev, int timeout, int retries)
1693 {
1694         struct scsi_request *sreq;
1695         char cmd[] = {
1696                 TEST_UNIT_READY, 0, 0, 0, 0, 0,
1697         };
1698         int result;
1699         
1700         sreq = scsi_allocate_request(sdev, GFP_KERNEL);
1701         if (!sreq)
1702                 return -ENOMEM;
1703
1704         sreq->sr_data_direction = DMA_NONE;
1705         scsi_wait_req(sreq, cmd, NULL, 0, timeout, retries);
1706
1707         if ((driver_byte(sreq->sr_result) & DRIVER_SENSE) && sdev->removable) {
1708                 struct scsi_sense_hdr sshdr;
1709
1710                 if ((scsi_request_normalize_sense(sreq, &sshdr)) &&
1711                     ((sshdr.sense_key == UNIT_ATTENTION) ||
1712                      (sshdr.sense_key == NOT_READY))) {
1713                         sdev->changed = 1;
1714                         sreq->sr_result = 0;
1715                 }
1716         }
1717         result = sreq->sr_result;
1718         scsi_release_request(sreq);
1719         return result;
1720 }
1721 EXPORT_SYMBOL(scsi_test_unit_ready);
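
/*
 * Illustrative poll (hypothetical helper): removable-media drivers call
 * this periodically; a zero result means the unit is ready, and the
 * sdev->changed side effect above records a media change.
 */
static int example_media_ready(struct scsi_device *sdev)
{
        return scsi_test_unit_ready(sdev, 5 * HZ, 3) == 0;
}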
1722
1723 /**
1724  *      scsi_device_set_state - Take the given device through the device
1725  *              state model.
1726  *      @sdev:  scsi device to change the state of.
1727  *      @state: state to change to.
1728  *
1729  *      Returns zero if successful, or an error if the requested
1730  *      transition is illegal.
1731  **/
1732 int
1733 scsi_device_set_state(struct scsi_device *sdev, enum scsi_device_state state)
1734 {
1735         enum scsi_device_state oldstate = sdev->sdev_state;
1736
1737         if (state == oldstate)
1738                 return 0;
1739
1740         switch (state) {
1741         case SDEV_CREATED:
1742                 /* There are no legal states that come back to
1743                  * created.  This is the manually initialised start
1744                  * state */
1745                 goto illegal;
1746                         
1747         case SDEV_RUNNING:
1748                 switch (oldstate) {
1749                 case SDEV_CREATED:
1750                 case SDEV_OFFLINE:
1751                 case SDEV_QUIESCE:
1752                 case SDEV_BLOCK:
1753                         break;
1754                 default:
1755                         goto illegal;
1756                 }
1757                 break;
1758
1759         case SDEV_QUIESCE:
1760                 switch (oldstate) {
1761                 case SDEV_RUNNING:
1762                 case SDEV_OFFLINE:
1763                         break;
1764                 default:
1765                         goto illegal;
1766                 }
1767                 break;
1768
1769         case SDEV_OFFLINE:
1770                 switch (oldstate) {
1771                 case SDEV_CREATED:
1772                 case SDEV_RUNNING:
1773                 case SDEV_QUIESCE:
1774                 case SDEV_BLOCK:
1775                         break;
1776                 default:
1777                         goto illegal;
1778                 }
1779                 break;
1780
1781         case SDEV_BLOCK:
1782                 switch (oldstate) {
1783                 case SDEV_CREATED:
1784                 case SDEV_RUNNING:
1785                         break;
1786                 default:
1787                         goto illegal;
1788                 }
1789                 break;
1790
1791         case SDEV_CANCEL:
1792                 switch (oldstate) {
1793                 case SDEV_CREATED:
1794                 case SDEV_RUNNING:
1795                 case SDEV_OFFLINE:
1796                 case SDEV_BLOCK:
1797                         break;
1798                 default:
1799                         goto illegal;
1800                 }
1801                 break;
1802
1803         case SDEV_DEL:
1804                 switch (oldstate) {
1805                 case SDEV_CANCEL:
1806                         break;
1807                 default:
1808                         goto illegal;
1809                 }
1810                 break;
1811
1812         }
1813         sdev->sdev_state = state;
1814         return 0;
1815
1816  illegal:
1817         SCSI_LOG_ERROR_RECOVERY(1, 
1818                                 dev_printk(KERN_ERR, &sdev->sdev_gendev,
1819                                            "Illegal state transition %s->%s\n",
1820                                            scsi_device_state_name(oldstate),
1821                                            scsi_device_state_name(state))
1822                                 );
1823         return -EINVAL;
1824 }
1825 EXPORT_SYMBOL(scsi_device_set_state);
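
/*
 * Summary of the legal transitions encoded above (old state on the
 * left, states it may move to on the right):
 *
 *   CREATED -> RUNNING, OFFLINE, BLOCK, CANCEL
 *   RUNNING -> QUIESCE, OFFLINE, BLOCK, CANCEL
 *   QUIESCE -> RUNNING, OFFLINE
 *   OFFLINE -> RUNNING, QUIESCE, CANCEL
 *   BLOCK   -> RUNNING, OFFLINE, CANCEL
 *   CANCEL  -> DEL
 *
 * Nothing transitions back to CREATED, and DEL is terminal.
 */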
1826
1827 /**
1828  *      scsi_device_quiesce - Block user issued commands.
1829  *      @sdev:  scsi device to quiesce.
1830  *
1831  *      This works by trying to transition to the SDEV_QUIESCE state
1832  *      (which must be a legal transition).  When the device is in this
1833  *      state, only special requests will be accepted, all others will
1834  *      be deferred.  Since special requests may also be requeued requests,
1835  *      a successful return doesn't guarantee the device will be 
1836  *      totally quiescent.
1837  *
1838  *      Must be called with user context, may sleep.
1839  *
1840  *      Returns zero if successful, or an error if not.
1841  **/
1842 int
1843 scsi_device_quiesce(struct scsi_device *sdev)
1844 {
1845         int err = scsi_device_set_state(sdev, SDEV_QUIESCE);
1846         if (err)
1847                 return err;
1848
1849         scsi_run_queue(sdev->request_queue);
1850         while (sdev->device_busy) {
1851                 msleep_interruptible(200);
1852                 scsi_run_queue(sdev->request_queue);
1853         }
1854         return 0;
1855 }
1856 EXPORT_SYMBOL(scsi_device_quiesce);
1857
1858 /**
1859  *      scsi_device_resume - Restart user issued commands to a quiesced device.
1860  *      @sdev:  scsi device to resume.
1861  *
1862  *      Moves the device from quiesced back to running and restarts the
1863  *      queues.
1864  *
1865  *      Must be called with user context, may sleep.
1866  **/
1867 void
1868 scsi_device_resume(struct scsi_device *sdev)
1869 {
1870         if (scsi_device_set_state(sdev, SDEV_RUNNING))
1871                 return;
1872         scsi_run_queue(sdev->request_queue);
1873 }
1874 EXPORT_SYMBOL(scsi_device_resume);
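
/*
 * Typical caller pattern (illustrative, hypothetical helper): hold off
 * user I/O, issue internal/special requests, then resume the device.
 */
static int example_internal_io(struct scsi_device *sdev)
{
        int err = scsi_device_quiesce(sdev);
        if (err)
                return err;
        /* ... issue special requests here; user commands are deferred ... */
        scsi_device_resume(sdev);
        return 0;
}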
1875
1876 static void
1877 device_quiesce_fn(struct scsi_device *sdev, void *data)
1878 {
1879         scsi_device_quiesce(sdev);
1880 }
1881
1882 void
1883 scsi_target_quiesce(struct scsi_target *starget)
1884 {
1885         starget_for_each_device(starget, NULL, device_quiesce_fn);
1886 }
1887 EXPORT_SYMBOL(scsi_target_quiesce);
1888
1889 static void
1890 device_resume_fn(struct scsi_device *sdev, void *data)
1891 {
1892         scsi_device_resume(sdev);
1893 }
1894
1895 void
1896 scsi_target_resume(struct scsi_target *starget)
1897 {
1898         starget_for_each_device(starget, NULL, device_resume_fn);
1899 }
1900 EXPORT_SYMBOL(scsi_target_resume);
1901
1902 /**
1903  * scsi_internal_device_block - internal function to put a device
1904  *                              temporarily into the SDEV_BLOCK state
1905  * @sdev:       device to block
1906  *
1907  * Block requests made by SCSI LLDs to temporarily stop all
1908  * SCSI commands on the specified device.  May be called from
1909  * interrupt or normal process context.
1910  *
1911  * Returns zero if successful, or an error if not.
1912  *
1913  * Notes:       
1914  *      This routine transitions the device to the SDEV_BLOCK state
1915  *      (which must be a legal transition).  When the device is in this
1916  *      state, all commands are deferred until the scsi lld reenables
1917  *      the device with scsi_internal_device_unblock() or device_block_tmo fires.
1918  *      This routine assumes the host_lock is held on entry.
1919  **/
1920 int
1921 scsi_internal_device_block(struct scsi_device *sdev)
1922 {
1923         request_queue_t *q = sdev->request_queue;
1924         unsigned long flags;
1925         int err = 0;
1926
1927         err = scsi_device_set_state(sdev, SDEV_BLOCK);
1928         if (err)
1929                 return err;
1930
1931         /* 
1932          * The device has transitioned to SDEV_BLOCK.  Stop the
1933          * block layer from calling the midlayer with this device's
1934          * request queue. 
1935          */
1936         spin_lock_irqsave(q->queue_lock, flags);
1937         blk_stop_queue(q);
1938         spin_unlock_irqrestore(q->queue_lock, flags);
1939
1940         return 0;
1941 }
1942 EXPORT_SYMBOL_GPL(scsi_internal_device_block);
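
/*
 * Illustrative LLD fragment (hypothetical): the routine above expects
 * host_lock to be held, so a caller reacting to a transport event would
 * look roughly like this.
 */
static void example_port_down(struct scsi_device *sdev)
{
        unsigned long flags;

        spin_lock_irqsave(sdev->host->host_lock, flags);
        scsi_internal_device_block(sdev);
        spin_unlock_irqrestore(sdev->host->host_lock, flags);
}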
1943  
1944 /**
1945  * scsi_internal_device_unblock - resume a device after a block request
1946  * @sdev:       device to resume
1947  *
1948  * Called by scsi lld's or the midlayer to restart the device queue
1949  * for the previously suspended scsi device.  Called from interrupt or
1950  * normal process context.
1951  *
1952  * Returns zero if successful, or an error if not.
1953  *
1954  * Notes:       
1955  *      This routine transitions the device to the SDEV_RUNNING state
1956  *      (which must be a legal transition) allowing the midlayer to
1957  *      goose the queue for this device.  This routine assumes the 
1958  *      host_lock is held upon entry.
1959  **/
1960 int
1961 scsi_internal_device_unblock(struct scsi_device *sdev)
1962 {
1963         request_queue_t *q = sdev->request_queue; 
1964         int err;
1965         unsigned long flags;
1966         
1967         /* 
1968          * Try to transition the scsi device to SDEV_RUNNING
1969          * and goose the device queue if successful.  
1970          */
1971         err = scsi_device_set_state(sdev, SDEV_RUNNING);
1972         if (err)
1973                 return err;
1974
1975         spin_lock_irqsave(q->queue_lock, flags);
1976         blk_start_queue(q);
1977         spin_unlock_irqrestore(q->queue_lock, flags);
1978
1979         return 0;
1980 }
1981 EXPORT_SYMBOL_GPL(scsi_internal_device_unblock);
1982
1983 static void
1984 device_block(struct scsi_device *sdev, void *data)
1985 {
1986         scsi_internal_device_block(sdev);
1987 }
1988
1989 static int
1990 target_block(struct device *dev, void *data)
1991 {
1992         if (scsi_is_target_device(dev))
1993                 starget_for_each_device(to_scsi_target(dev), NULL,
1994                                         device_block);
1995         return 0;
1996 }
1997
1998 void
1999 scsi_target_block(struct device *dev)
2000 {
2001         if (scsi_is_target_device(dev))
2002                 starget_for_each_device(to_scsi_target(dev), NULL,
2003                                         device_block);
2004         else
2005                 device_for_each_child(dev, NULL, target_block);
2006 }
2007 EXPORT_SYMBOL_GPL(scsi_target_block);
2008
2009 static void
2010 device_unblock(struct scsi_device *sdev, void *data)
2011 {
2012         scsi_internal_device_unblock(sdev);
2013 }
2014
2015 static int
2016 target_unblock(struct device *dev, void *data)
2017 {
2018         if (scsi_is_target_device(dev))
2019                 starget_for_each_device(to_scsi_target(dev), NULL,
2020                                         device_unblock);
2021         return 0;
2022 }
2023
2024 void
2025 scsi_target_unblock(struct device *dev)
2026 {
2027         if (scsi_is_target_device(dev))
2028                 starget_for_each_device(to_scsi_target(dev), NULL,
2029                                         device_unblock);
2030         else
2031                 device_for_each_child(dev, NULL, target_unblock);
2032 }
2033 EXPORT_SYMBOL_GPL(scsi_target_unblock);
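
/*
 * Illustrative transport-style usage (hypothetical): suspend every
 * device under a target while its link bounces, then release them.
 */
static void example_link_bounce(struct device *target_dev)
{
        scsi_target_block(target_dev);
        /* ... wait for the transport layer to recover the link ... */
        scsi_target_unblock(target_dev);
}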