1 /*
2  *  linux/drivers/block/ll_rw_blk.c
3  *
4  * Copyright (C) 1991, 1992 Linus Torvalds
5  * Copyright (C) 1994,      Karl Keyte: Added support for disk statistics
6  * Elevator latency, (C) 2000  Andrea Arcangeli <andrea@suse.de> SuSE
7  * Queue request tables / lock, selectable elevator, Jens Axboe <axboe@suse.de>
8  * kernel-doc documentation started by NeilBrown <neilb@cse.unsw.edu.au> - July 2000
9  * bio rewrite, highmem i/o, etc, Jens Axboe <axboe@suse.de> - May 2001
10  */
11
12 /*
13  * This handles all read/write requests to block devices
14  */
15 #include <linux/config.h>
16 #include <linux/kernel.h>
17 #include <linux/module.h>
18 #include <linux/backing-dev.h>
19 #include <linux/bio.h>
20 #include <linux/blkdev.h>
21 #include <linux/highmem.h>
22 #include <linux/mm.h>
23 #include <linux/kernel_stat.h>
24 #include <linux/string.h>
25 #include <linux/init.h>
26 #include <linux/bootmem.h>      /* for max_pfn/max_low_pfn */
27 #include <linux/completion.h>
28 #include <linux/slab.h>
29 #include <linux/swap.h>
30 #include <linux/writeback.h>
31 #include <linux/blkdev.h>
32
33 /*
34  * for max sense size
35  */
36 #include <scsi/scsi_cmnd.h>
37
38 static void blk_unplug_work(void *data);
39 static void blk_unplug_timeout(unsigned long data);
40
41 /*
42  * For the allocated request tables
43  */
44 static kmem_cache_t *request_cachep;
45
46 /*
47  * For queue allocation
48  */
49 static kmem_cache_t *requestq_cachep;
50
51 /*
52  * For io context allocations
53  */
54 static kmem_cache_t *iocontext_cachep;
55
56 static wait_queue_head_t congestion_wqh[2] = {
57                 __WAIT_QUEUE_HEAD_INITIALIZER(congestion_wqh[0]),
58                 __WAIT_QUEUE_HEAD_INITIALIZER(congestion_wqh[1])
59         };
60
61 /*
62  * Controlling structure to kblockd
63  */
64 static struct workqueue_struct *kblockd_workqueue; 
65
66 unsigned long blk_max_low_pfn, blk_max_pfn;
67
68 EXPORT_SYMBOL(blk_max_low_pfn);
69 EXPORT_SYMBOL(blk_max_pfn);
70
71 /* Amount of time in which a process may batch requests */
72 #define BLK_BATCH_TIME  (HZ/50UL)
73
74 /* Number of requests a "batching" process may submit */
75 #define BLK_BATCH_REQ   32
76
77 /*
78  * Return the threshold (number of used requests) at which the queue is
79  * considered to be congested.  It includes a little hysteresis to keep the
80  * context switch rate down.
81  */
82 static inline int queue_congestion_on_threshold(struct request_queue *q)
83 {
84         return q->nr_congestion_on;
85 }
86
87 /*
88  * The threshold at which a queue is considered to be uncongested
89  */
90 static inline int queue_congestion_off_threshold(struct request_queue *q)
91 {
92         return q->nr_congestion_off;
93 }
94
95 static void blk_queue_congestion_threshold(struct request_queue *q)
96 {
97         int nr;
98
99         nr = q->nr_requests - (q->nr_requests / 8) + 1;
100         if (nr > q->nr_requests)
101                 nr = q->nr_requests;
102         q->nr_congestion_on = nr;
103
104         nr = q->nr_requests - (q->nr_requests / 8) - (q->nr_requests / 16) - 1;
105         if (nr < 1)
106                 nr = 1;
107         q->nr_congestion_off = nr;
108 }
109
110 /*
111  * A queue has just exited congestion.  Note this in the global counter of
112  * congested queues, and wake up anyone who was waiting for requests to be
113  * put back.
114  */
115 static void clear_queue_congested(request_queue_t *q, int rw)
116 {
117         enum bdi_state bit;
118         wait_queue_head_t *wqh = &congestion_wqh[rw];
119
120         bit = (rw == WRITE) ? BDI_write_congested : BDI_read_congested;
121         clear_bit(bit, &q->backing_dev_info.state);
122         smp_mb__after_clear_bit();
123         if (waitqueue_active(wqh))
124                 wake_up(wqh);
125 }
126
127 /*
128  * A queue has just entered congestion.  Flag that in the queue's VM-visible
129  * state flags and increment the global counter of congested queues.
130  */
131 static void set_queue_congested(request_queue_t *q, int rw)
132 {
133         enum bdi_state bit;
134
135         bit = (rw == WRITE) ? BDI_write_congested : BDI_read_congested;
136         set_bit(bit, &q->backing_dev_info.state);
137 }
138
139 /**
140  * blk_get_backing_dev_info - get the address of a queue's backing_dev_info
141  * @bdev:       device
142  *
143  * Locates the passed device's request queue and returns the address of its
144  * backing_dev_info
145  *
146  * Will return NULL if the request queue cannot be located.
147  */
148 struct backing_dev_info *blk_get_backing_dev_info(struct block_device *bdev)
149 {
150         struct backing_dev_info *ret = NULL;
151         request_queue_t *q = bdev_get_queue(bdev);
152
153         if (q)
154                 ret = &q->backing_dev_info;
155         return ret;
156 }
157
158 EXPORT_SYMBOL(blk_get_backing_dev_info);
159
160 void blk_queue_activity_fn(request_queue_t *q, activity_fn *fn, void *data)
161 {
162         q->activity_fn = fn;
163         q->activity_data = data;
164 }
165
166 EXPORT_SYMBOL(blk_queue_activity_fn);
167
168 /**
169  * blk_queue_prep_rq - set a prepare_request function for queue
170  * @q:          queue
171  * @pfn:        prepare_request function
172  *
173  * It's possible for a queue to register a prepare_request callback which
174  * is invoked before the request is handed to the request_fn. The goal of
175  * the function is to prepare a request for I/O, it can be used to build a
176  * cdb from the request data for instance.
177  *
178  */
179 void blk_queue_prep_rq(request_queue_t *q, prep_rq_fn *pfn)
180 {
181         q->prep_rq_fn = pfn;
182 }
183
184 EXPORT_SYMBOL(blk_queue_prep_rq);
185
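/*
 * Example (illustrative sketch, not part of the original source): a driver
 * that builds its command block in a prepare_request callback could register
 * it as below. The names "mydev_prep_rq" and "mydev" are hypothetical, and
 * BLKPREP_OK is assumed to come from <linux/blkdev.h>.
 *
 *	static int mydev_prep_rq(request_queue_t *q, struct request *rq)
 *	{
 *		if (!(rq->flags & REQ_DONTPREP)) {
 *			/* build the cdb from the request data here */
 *			rq->flags |= REQ_DONTPREP;
 *		}
 *		return BLKPREP_OK;
 *	}
 *
 *	blk_queue_prep_rq(mydev->queue, mydev_prep_rq);
 */
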
186 /**
187  * blk_queue_merge_bvec - set a merge_bvec function for queue
188  * @q:          queue
189  * @mbfn:       merge_bvec_fn
190  *
191  * Usually queues have static limitations on the max sectors or segments that
192  * we can put in a request. Stacking drivers may have some settings that
193  * are dynamic, and thus we have to query the queue whether it is ok to
194  * add a new bio_vec to a bio at a given offset or not. If the block device
195  * has such limitations, it needs to register a merge_bvec_fn to control
196  * the size of bio's sent to it. Note that a block device *must* allow a
197  * single page to be added to an empty bio. The block device driver may want
198  * to use the bio_split() function to deal with these bio's. By default
199  * no merge_bvec_fn is defined for a queue, and only the fixed limits are
200  * honored.
201  */
202 void blk_queue_merge_bvec(request_queue_t *q, merge_bvec_fn *mbfn)
203 {
204         q->merge_bvec_fn = mbfn;
205 }
206
207 EXPORT_SYMBOL(blk_queue_merge_bvec);
208
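/*
 * Example (illustrative sketch, not part of the original source): a stacking
 * driver with a dynamic per-device boundary might register a merge_bvec_fn
 * along these lines. The "mydev_*" names are hypothetical, and the callback
 * is assumed to return how many bytes may still be added at the bio's
 * current end:
 *
 *	static int mydev_merge_bvec(request_queue_t *q, struct bio *bio,
 *				    struct bio_vec *bvec)
 *	{
 *		int max = mydev_bytes_to_boundary(q, bio->bi_sector,
 *						  bio->bi_size);
 *
 *		/* an empty bio must always be allowed at least one page */
 *		if (max <= bvec->bv_len && bio->bi_size == 0)
 *			return bvec->bv_len;
 *		return max;
 *	}
 *
 *	blk_queue_merge_bvec(mydev->queue, mydev_merge_bvec);
 */
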
209 /**
210  * blk_queue_make_request - define an alternate make_request function for a device
211  * @q:  the request queue for the device to be affected
212  * @mfn: the alternate make_request function
213  *
214  * Description:
215  *    The normal way for &struct bios to be passed to a device
216  *    driver is for them to be collected into requests on a request
217  *    queue, and then to allow the device driver to select requests
218  *    off that queue when it is ready.  This works well for many block
219  *    devices. However some block devices (typically virtual devices
220  *    such as md or lvm) do not benefit from the processing on the
221  *    request queue, and are served best by having the requests passed
222  *    directly to them.  This can be achieved by providing a function
223  *    to blk_queue_make_request().
224  *
225  * Caveat:
226  *    The driver that does this *must* be able to deal appropriately
227  *    with buffers in "highmemory". This can be accomplished by either calling
228  *    __bio_kmap_atomic() to get a temporary kernel mapping, or by calling
229  *    blk_queue_bounce() to create a buffer in normal memory.
230  **/
231 void blk_queue_make_request(request_queue_t * q, make_request_fn * mfn)
232 {
233         /*
234          * set defaults
235          */
236         q->nr_requests = BLKDEV_MAX_RQ;
237         q->max_phys_segments = MAX_PHYS_SEGMENTS;
238         q->max_hw_segments = MAX_HW_SEGMENTS;
239         q->make_request_fn = mfn;
240         q->backing_dev_info.ra_pages = (VM_MAX_READAHEAD * 1024) / PAGE_CACHE_SIZE;
241         q->backing_dev_info.state = 0;
242         q->backing_dev_info.capabilities = BDI_CAP_MAP_COPY;
243         blk_queue_max_sectors(q, MAX_SECTORS);
244         blk_queue_hardsect_size(q, 512);
245         blk_queue_dma_alignment(q, 511);
246         blk_queue_congestion_threshold(q);
247         q->nr_batching = BLK_BATCH_REQ;
248
249         q->unplug_thresh = 4;           /* hmm */
250         q->unplug_delay = (3 * HZ) / 1000;      /* 3 milliseconds */
251         if (q->unplug_delay == 0)
252                 q->unplug_delay = 1;
253
254         INIT_WORK(&q->unplug_work, blk_unplug_work, q);
255
256         q->unplug_timer.function = blk_unplug_timeout;
257         q->unplug_timer.data = (unsigned long)q;
258
259         /*
260          * by default assume old behaviour and bounce for any highmem page
261          */
262         blk_queue_bounce_limit(q, BLK_BOUNCE_HIGH);
263
264         blk_queue_activity_fn(q, NULL, NULL);
265
266         INIT_LIST_HEAD(&q->drain_list);
267 }
268
269 EXPORT_SYMBOL(blk_queue_make_request);
270
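/*
 * Example (illustrative sketch, not part of the original source): a virtual
 * device in the md/lvm style that wants bios handed straight to it would
 * typically do something like the following at setup time. The request
 * handler is hypothetical, and bio_endio() is assumed to take its usual
 * (bio, bytes_done, error) arguments:
 *
 *	static int mydev_make_request(request_queue_t *q, struct bio *bio)
 *	{
 *		/* remap or split the bio here, then complete or resubmit it */
 *		bio_endio(bio, bio->bi_size, 0);
 *		return 0;
 *	}
 *
 *	q = blk_alloc_queue(GFP_KERNEL);
 *	if (!q)
 *		return -ENOMEM;
 *	blk_queue_make_request(q, mydev_make_request);
 */
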
271 static inline void rq_init(request_queue_t *q, struct request *rq)
272 {
273         INIT_LIST_HEAD(&rq->queuelist);
274
275         rq->errors = 0;
276         rq->rq_status = RQ_ACTIVE;
277         rq->bio = rq->biotail = NULL;
278         rq->buffer = NULL;
279         rq->ref_count = 1;
280         rq->q = q;
281         rq->waiting = NULL;
282         rq->special = NULL;
283         rq->data_len = 0;
284         rq->data = NULL;
285         rq->sense = NULL;
286         rq->end_io = NULL;
287         rq->end_io_data = NULL;
288 }
289
290 /**
291  * blk_queue_ordered - does this queue support ordered writes
292  * @q:     the request queue
293  * @flag:  see below
294  *
295  * Description:
296  *   For journalled file systems, doing ordered writes on a commit
297  *   block instead of explicitly doing wait_on_buffer (which is bad
298  *   for performance) can be a big win. Block drivers supporting this
299  *   feature should call this function and indicate so.
300  *
301  **/
302 void blk_queue_ordered(request_queue_t *q, int flag)
303 {
304         switch (flag) {
305                 case QUEUE_ORDERED_NONE:
306                         if (q->flush_rq)
307                                 kmem_cache_free(request_cachep, q->flush_rq);
308                         q->flush_rq = NULL;
309                         q->ordered = flag;
310                         break;
311                 case QUEUE_ORDERED_TAG:
312                         q->ordered = flag;
313                         break;
314                 case QUEUE_ORDERED_FLUSH:
315                         q->ordered = flag;
316                         if (!q->flush_rq)
317                                 q->flush_rq = kmem_cache_alloc(request_cachep,
318                                                                 GFP_KERNEL);
319                         break;
320                 default:
321                         printk("blk_queue_ordered: bad value %d\n", flag);
322                         break;
323         }
324 }
325
326 EXPORT_SYMBOL(blk_queue_ordered);
327
328 /**
329  * blk_queue_issue_flush_fn - set function for issuing a flush
330  * @q:     the request queue
331  * @iff:   the function to be called issuing the flush
332  *
333  * Description:
334  *   If a driver supports issuing a flush command, the support is notified
335  *   to the block layer by defining it through this call.
336  *
337  **/
338 void blk_queue_issue_flush_fn(request_queue_t *q, issue_flush_fn *iff)
339 {
340         q->issue_flush_fn = iff;
341 }
342
343 EXPORT_SYMBOL(blk_queue_issue_flush_fn);
344
345 /*
346  * Cache flushing for ordered writes handling
347  */
348 static void blk_pre_flush_end_io(struct request *flush_rq)
349 {
350         struct request *rq = flush_rq->end_io_data;
351         request_queue_t *q = rq->q;
352
353         rq->flags |= REQ_BAR_PREFLUSH;
354
355         if (!flush_rq->errors)
356                 elv_requeue_request(q, rq);
357         else {
358                 q->end_flush_fn(q, flush_rq);
359                 clear_bit(QUEUE_FLAG_FLUSH, &q->queue_flags);
360                 q->request_fn(q);
361         }
362 }
363
364 static void blk_post_flush_end_io(struct request *flush_rq)
365 {
366         struct request *rq = flush_rq->end_io_data;
367         request_queue_t *q = rq->q;
368
369         rq->flags |= REQ_BAR_POSTFLUSH;
370
371         q->end_flush_fn(q, flush_rq);
372         clear_bit(QUEUE_FLAG_FLUSH, &q->queue_flags);
373         q->request_fn(q);
374 }
375
376 struct request *blk_start_pre_flush(request_queue_t *q, struct request *rq)
377 {
378         struct request *flush_rq = q->flush_rq;
379
380         BUG_ON(!blk_barrier_rq(rq));
381
382         if (test_and_set_bit(QUEUE_FLAG_FLUSH, &q->queue_flags))
383                 return NULL;
384
385         rq_init(q, flush_rq);
386         flush_rq->elevator_private = NULL;
387         flush_rq->flags = REQ_BAR_FLUSH;
388         flush_rq->rq_disk = rq->rq_disk;
389         flush_rq->rl = NULL;
390
391         /*
392          * prepare_flush returns 0 if no flush is needed, just mark both
393          * pre and post flush as done in that case
394          */
395         if (!q->prepare_flush_fn(q, flush_rq)) {
396                 rq->flags |= REQ_BAR_PREFLUSH | REQ_BAR_POSTFLUSH;
397                 clear_bit(QUEUE_FLAG_FLUSH, &q->queue_flags);
398                 return rq;
399         }
400
401         /*
402          * some drivers dequeue requests right away, some only after io
403          * completion. make sure the request is dequeued.
404          */
405         if (!list_empty(&rq->queuelist))
406                 blkdev_dequeue_request(rq);
407
408         elv_deactivate_request(q, rq);
409
410         flush_rq->end_io_data = rq;
411         flush_rq->end_io = blk_pre_flush_end_io;
412
413         __elv_add_request(q, flush_rq, ELEVATOR_INSERT_FRONT, 0);
414         return flush_rq;
415 }
416
417 static void blk_start_post_flush(request_queue_t *q, struct request *rq)
418 {
419         struct request *flush_rq = q->flush_rq;
420
421         BUG_ON(!blk_barrier_rq(rq));
422
423         rq_init(q, flush_rq);
424         flush_rq->elevator_private = NULL;
425         flush_rq->flags = REQ_BAR_FLUSH;
426         flush_rq->rq_disk = rq->rq_disk;
427         flush_rq->rl = NULL;
428
429         if (q->prepare_flush_fn(q, flush_rq)) {
430                 flush_rq->end_io_data = rq;
431                 flush_rq->end_io = blk_post_flush_end_io;
432
433                 __elv_add_request(q, flush_rq, ELEVATOR_INSERT_FRONT, 0);
434                 q->request_fn(q);
435         }
436 }
437
438 static inline int blk_check_end_barrier(request_queue_t *q, struct request *rq,
439                                         int sectors)
440 {
441         if (sectors > rq->nr_sectors)
442                 sectors = rq->nr_sectors;
443
444         rq->nr_sectors -= sectors;
445         return rq->nr_sectors;
446 }
447
448 static int __blk_complete_barrier_rq(request_queue_t *q, struct request *rq,
449                                      int sectors, int queue_locked)
450 {
451         if (q->ordered != QUEUE_ORDERED_FLUSH)
452                 return 0;
453         if (!blk_fs_request(rq) || !blk_barrier_rq(rq))
454                 return 0;
455         if (blk_barrier_postflush(rq))
456                 return 0;
457
458         if (!blk_check_end_barrier(q, rq, sectors)) {
459                 unsigned long flags = 0;
460
461                 if (!queue_locked)
462                         spin_lock_irqsave(q->queue_lock, flags);
463
464                 blk_start_post_flush(q, rq);
465
466                 if (!queue_locked)
467                         spin_unlock_irqrestore(q->queue_lock, flags);
468         }
469
470         return 1;
471 }
472
473 /**
474  * blk_complete_barrier_rq - complete possible barrier request
475  * @q:  the request queue for the device
476  * @rq:  the request
477  * @sectors:  number of sectors to complete
478  *
479  * Description:
480  *   Used in driver end_io handling to determine whether to postpone
481  *   completion of a barrier request until a post flush has been done. This
482  *   is the unlocked variant, used if the caller doesn't already hold the
483  *   queue lock.
484  **/
485 int blk_complete_barrier_rq(request_queue_t *q, struct request *rq, int sectors)
486 {
487         return __blk_complete_barrier_rq(q, rq, sectors, 0);
488 }
489 EXPORT_SYMBOL(blk_complete_barrier_rq);
490
491 /**
492  * blk_complete_barrier_rq_locked - complete possible barrier request
493  * @q:  the request queue for the device
494  * @rq:  the request
495  * @sectors:  number of sectors to complete
496  *
497  * Description:
498  *   See blk_complete_barrier_rq(). This variant must be used if the caller
499  *   holds the queue lock.
500  **/
501 int blk_complete_barrier_rq_locked(request_queue_t *q, struct request *rq,
502                                    int sectors)
503 {
504         return __blk_complete_barrier_rq(q, rq, sectors, 1);
505 }
506 EXPORT_SYMBOL(blk_complete_barrier_rq_locked);
507
508 /**
509  * blk_queue_bounce_limit - set bounce buffer limit for queue
510  * @q:  the request queue for the device
511  * @dma_addr:   bus address limit
512  *
513  * Description:
514  *    Different hardware can have different requirements as to what pages
515  *    it can do I/O directly to. A low level driver can call
516  *    blk_queue_bounce_limit to have lower memory pages allocated as bounce
517  *    buffers for doing I/O to pages residing above @dma_addr. By default
518  *    the block layer sets this to the highest numbered "low" memory page.
519  **/
520 void blk_queue_bounce_limit(request_queue_t *q, u64 dma_addr)
521 {
522         unsigned long bounce_pfn = dma_addr >> PAGE_SHIFT;
523
524         /*
525          * set appropriate bounce gfp mask -- unfortunately we don't have a
526          * full 4GB zone, so we have to resort to low memory for any bounces.
527          * ISA has its own < 16MB zone.
528          */
529         if (bounce_pfn < blk_max_low_pfn) {
530                 BUG_ON(dma_addr < BLK_BOUNCE_ISA);
531                 init_emergency_isa_pool();
532                 q->bounce_gfp = GFP_NOIO | GFP_DMA;
533         } else
534                 q->bounce_gfp = GFP_NOIO;
535
536         q->bounce_pfn = bounce_pfn;
537 }
538
539 EXPORT_SYMBOL(blk_queue_bounce_limit);
540
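/*
 * Example (illustrative sketch, not part of the original source): a PCI
 * driver whose hardware can only address 32 bits of memory would usually
 * pair its DMA mask setup with a matching bounce limit. pci_set_dma_mask()
 * and its error handling live outside this file and are shown only for
 * context:
 *
 *	if (!pci_set_dma_mask(pdev, 0xffffffffULL))
 *		blk_queue_bounce_limit(q, 0xffffffffULL);
 *	else
 *		blk_queue_bounce_limit(q, BLK_BOUNCE_HIGH);
 */
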
541 /**
542  * blk_queue_max_sectors - set max sectors for a request for this queue
543  * @q:  the request queue for the device
544  * @max_sectors:  max sectors in the usual 512b unit
545  *
546  * Description:
547  *    Enables a low level driver to set an upper limit on the size of
548  *    received requests.
549  **/
550 void blk_queue_max_sectors(request_queue_t *q, unsigned short max_sectors)
551 {
552         if ((max_sectors << 9) < PAGE_CACHE_SIZE) {
553                 max_sectors = 1 << (PAGE_CACHE_SHIFT - 9);
554                 printk("%s: set to minimum %d\n", __FUNCTION__, max_sectors);
555         }
556
557         q->max_sectors = q->max_hw_sectors = max_sectors;
558 }
559
560 EXPORT_SYMBOL(blk_queue_max_sectors);
561
562 /**
563  * blk_queue_max_phys_segments - set max phys segments for a request for this queue
564  * @q:  the request queue for the device
565  * @max_segments:  max number of segments
566  *
567  * Description:
568  *    Enables a low level driver to set an upper limit on the number of
569  *    physical data segments in a request.  This would be the largest sized
570  *    scatter list the driver could handle.
571  **/
572 void blk_queue_max_phys_segments(request_queue_t *q, unsigned short max_segments)
573 {
574         if (!max_segments) {
575                 max_segments = 1;
576                 printk("%s: set to minimum %d\n", __FUNCTION__, max_segments);
577         }
578
579         q->max_phys_segments = max_segments;
580 }
581
582 EXPORT_SYMBOL(blk_queue_max_phys_segments);
583
584 /**
585  * blk_queue_max_hw_segments - set max hw segments for a request for this queue
586  * @q:  the request queue for the device
587  * @max_segments:  max number of segments
588  *
589  * Description:
590  *    Enables a low level driver to set an upper limit on the number of
591  *    hw data segments in a request.  This would be the largest number of
592  *    address/length pairs the host adapter can actually give at once
593  *    to the device.
594  **/
595 void blk_queue_max_hw_segments(request_queue_t *q, unsigned short max_segments)
596 {
597         if (!max_segments) {
598                 max_segments = 1;
599                 printk("%s: set to minimum %d\n", __FUNCTION__, max_segments);
600         }
601
602         q->max_hw_segments = max_segments;
603 }
604
605 EXPORT_SYMBOL(blk_queue_max_hw_segments);
606
607 /**
608  * blk_queue_max_segment_size - set max segment size for blk_rq_map_sg
609  * @q:  the request queue for the device
610  * @max_size:  max size of segment in bytes
611  *
612  * Description:
613  *    Enables a low level driver to set an upper limit on the size of a
614  *    coalesced segment
615  **/
616 void blk_queue_max_segment_size(request_queue_t *q, unsigned int max_size)
617 {
618         if (max_size < PAGE_CACHE_SIZE) {
619                 max_size = PAGE_CACHE_SIZE;
620                 printk("%s: set to minimum %d\n", __FUNCTION__, max_size);
621         }
622
623         q->max_segment_size = max_size;
624 }
625
626 EXPORT_SYMBOL(blk_queue_max_segment_size);
627
628 /**
629  * blk_queue_hardsect_size - set hardware sector size for the queue
630  * @q:  the request queue for the device
631  * @size:  the hardware sector size, in bytes
632  *
633  * Description:
634  *   This should typically be set to the lowest possible sector size
635  *   that the hardware can operate on (possibly without resorting to
636  *   internal read-modify-write operations). Usually the default
637  *   of 512 covers most hardware.
638  **/
639 void blk_queue_hardsect_size(request_queue_t *q, unsigned short size)
640 {
641         q->hardsect_size = size;
642 }
643
644 EXPORT_SYMBOL(blk_queue_hardsect_size);
645
646 /*
647  * Returns the minimum that is _not_ zero, unless both are zero.
648  */
649 #define min_not_zero(l, r) ((l) == 0 ? (r) : ((r) == 0 ? (l) : min((l), (r))))
650
651 /**
652  * blk_queue_stack_limits - inherit underlying queue limits for stacked drivers
653  * @t:  the stacking driver (top)
654  * @b:  the underlying device (bottom)
655  **/
656 void blk_queue_stack_limits(request_queue_t *t, request_queue_t *b)
657 {
658         /* zero is "infinity" */
659         t->max_sectors = t->max_hw_sectors =
660                 min_not_zero(t->max_sectors,b->max_sectors);
661
662         t->max_phys_segments = min(t->max_phys_segments,b->max_phys_segments);
663         t->max_hw_segments = min(t->max_hw_segments,b->max_hw_segments);
664         t->max_segment_size = min(t->max_segment_size,b->max_segment_size);
665         t->hardsect_size = max(t->hardsect_size,b->hardsect_size);
666 }
667
668 EXPORT_SYMBOL(blk_queue_stack_limits);
669
670 /**
671  * blk_queue_segment_boundary - set boundary rules for segment merging
672  * @q:  the request queue for the device
673  * @mask:  the memory boundary mask
674  **/
675 void blk_queue_segment_boundary(request_queue_t *q, unsigned long mask)
676 {
677         if (mask < PAGE_CACHE_SIZE - 1) {
678                 mask = PAGE_CACHE_SIZE - 1;
679                 printk("%s: set to minimum %lx\n", __FUNCTION__, mask);
680         }
681
682         q->seg_boundary_mask = mask;
683 }
684
685 EXPORT_SYMBOL(blk_queue_segment_boundary);
686
687 /**
688  * blk_queue_dma_alignment - set dma length and memory alignment
689  * @q:     the request queue for the device
690  * @mask:  alignment mask
691  *
692  * description:
693  *    set required memory and length alignment for direct dma transactions.
694  *    this is used when building direct io requests for the queue.
695  *
696  **/
697 void blk_queue_dma_alignment(request_queue_t *q, int mask)
698 {
699         q->dma_alignment = mask;
700 }
701
702 EXPORT_SYMBOL(blk_queue_dma_alignment);
703
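/*
 * Example (illustrative sketch, not part of the original source): taken
 * together, the limit setters above are normally called in one place while
 * a driver sets up its queue. The values below are arbitrary and only meant
 * to show the calls:
 *
 *	blk_queue_max_sectors(q, 128);
 *	blk_queue_max_phys_segments(q, 64);
 *	blk_queue_max_hw_segments(q, 64);
 *	blk_queue_max_segment_size(q, 65536);
 *	blk_queue_hardsect_size(q, 512);
 *	blk_queue_segment_boundary(q, 0xffffffff);
 *	blk_queue_dma_alignment(q, 511);
 */
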
704 /**
705  * blk_queue_find_tag - find a request by its tag and queue
706  *
707  * @q:   The request queue for the device
708  * @tag: The tag of the request
709  *
710  * Notes:
711  *    Should be used when a device returns a tag and you want to match
712  *    it with a request.
713  *
714  *    no locks need be held.
715  **/
716 struct request *blk_queue_find_tag(request_queue_t *q, int tag)
717 {
718         struct blk_queue_tag *bqt = q->queue_tags;
719
720         if (unlikely(bqt == NULL || tag >= bqt->real_max_depth))
721                 return NULL;
722
723         return bqt->tag_index[tag];
724 }
725
726 EXPORT_SYMBOL(blk_queue_find_tag);
727
728 /**
729  * __blk_queue_free_tags - release tag maintenance info
730  * @q:  the request queue for the device
731  *
732  *  Notes:
733  *    blk_cleanup_queue() will take care of calling this function, if tagging
734  *    has been used. So there's no need to call this directly.
735  **/
736 static void __blk_queue_free_tags(request_queue_t *q)
737 {
738         struct blk_queue_tag *bqt = q->queue_tags;
739
740         if (!bqt)
741                 return;
742
743         if (atomic_dec_and_test(&bqt->refcnt)) {
744                 BUG_ON(bqt->busy);
745                 BUG_ON(!list_empty(&bqt->busy_list));
746
747                 kfree(bqt->tag_index);
748                 bqt->tag_index = NULL;
749
750                 kfree(bqt->tag_map);
751                 bqt->tag_map = NULL;
752
753                 kfree(bqt);
754         }
755
756         q->queue_tags = NULL;
757         q->queue_flags &= ~(1 << QUEUE_FLAG_QUEUED);
758 }
759
760 /**
761  * blk_queue_free_tags - release tag maintenance info
762  * @q:  the request queue for the device
763  *
764  *  Notes:
765  *      This is used to disable tagged queuing on a device, yet leave the
766  *      queue in function.
767  **/
768 void blk_queue_free_tags(request_queue_t *q)
769 {
770         clear_bit(QUEUE_FLAG_QUEUED, &q->queue_flags);
771 }
772
773 EXPORT_SYMBOL(blk_queue_free_tags);
774
775 static int
776 init_tag_map(request_queue_t *q, struct blk_queue_tag *tags, int depth)
777 {
778         int bits, i;
779         struct request **tag_index;
780         unsigned long *tag_map;
781
782         if (depth > q->nr_requests * 2) {
783                 depth = q->nr_requests * 2;
784                 printk(KERN_ERR "%s: adjusted depth to %d\n",
785                                 __FUNCTION__, depth);
786         }
787
788         tag_index = kmalloc(depth * sizeof(struct request *), GFP_ATOMIC);
789         if (!tag_index)
790                 goto fail;
791
792         bits = (depth / BLK_TAGS_PER_LONG) + 1;
793         tag_map = kmalloc(bits * sizeof(unsigned long), GFP_ATOMIC);
794         if (!tag_map)
795                 goto fail;
796
797         memset(tag_index, 0, depth * sizeof(struct request *));
798         memset(tag_map, 0, bits * sizeof(unsigned long));
799         tags->max_depth = depth;
800         tags->real_max_depth = bits * BITS_PER_LONG;
801         tags->tag_index = tag_index;
802         tags->tag_map = tag_map;
803
804         /*
805          * set the upper bits if the depth isn't a multiple of the word size
806          */
807         for (i = depth; i < bits * BLK_TAGS_PER_LONG; i++)
808                 __set_bit(i, tag_map);
809
810         return 0;
811 fail:
812         kfree(tag_index);
813         return -ENOMEM;
814 }
815
816 /**
817  * blk_queue_init_tags - initialize the queue tag info
818  * @q:  the request queue for the device
819  * @depth:  the maximum queue depth supported
820  * @tags: the tag to use
821  **/
822 int blk_queue_init_tags(request_queue_t *q, int depth,
823                         struct blk_queue_tag *tags)
824 {
825         int rc;
826
827         BUG_ON(tags && q->queue_tags && tags != q->queue_tags);
828
829         if (!tags && !q->queue_tags) {
830                 tags = kmalloc(sizeof(struct blk_queue_tag), GFP_ATOMIC);
831                 if (!tags)
832                         goto fail;
833
834                 if (init_tag_map(q, tags, depth))
835                         goto fail;
836
837                 INIT_LIST_HEAD(&tags->busy_list);
838                 tags->busy = 0;
839                 atomic_set(&tags->refcnt, 1);
840         } else if (q->queue_tags) {
841                 if ((rc = blk_queue_resize_tags(q, depth)))
842                         return rc;
843                 set_bit(QUEUE_FLAG_QUEUED, &q->queue_flags);
844                 return 0;
845         } else
846                 atomic_inc(&tags->refcnt);
847
848         /*
849          * assign it, all done
850          */
851         q->queue_tags = tags;
852         q->queue_flags |= (1 << QUEUE_FLAG_QUEUED);
853         return 0;
854 fail:
855         kfree(tags);
856         return -ENOMEM;
857 }
858
859 EXPORT_SYMBOL(blk_queue_init_tags);
860
861 /**
862  * blk_queue_resize_tags - change the queueing depth
863  * @q:  the request queue for the device
864  * @new_depth: the new max command queueing depth
865  *
866  *  Notes:
867  *    Must be called with the queue lock held.
868  **/
869 int blk_queue_resize_tags(request_queue_t *q, int new_depth)
870 {
871         struct blk_queue_tag *bqt = q->queue_tags;
872         struct request **tag_index;
873         unsigned long *tag_map;
874         int bits, max_depth;
875
876         if (!bqt)
877                 return -ENXIO;
878
879         /*
880          * don't bother sizing down
881          */
882         if (new_depth <= bqt->real_max_depth) {
883                 bqt->max_depth = new_depth;
884                 return 0;
885         }
886
887         /*
888          * save the old state info, so we can copy it back
889          */
890         tag_index = bqt->tag_index;
891         tag_map = bqt->tag_map;
892         max_depth = bqt->real_max_depth;
893
894         if (init_tag_map(q, bqt, new_depth))
895                 return -ENOMEM;
896
897         memcpy(bqt->tag_index, tag_index, max_depth * sizeof(struct request *));
898         bits = max_depth / BLK_TAGS_PER_LONG;
899         memcpy(bqt->tag_map, tag_map, bits * sizeof(unsigned long));
900
901         kfree(tag_index);
902         kfree(tag_map);
903         return 0;
904 }
905
906 EXPORT_SYMBOL(blk_queue_resize_tags);
907
908 /**
909  * blk_queue_end_tag - end tag operations for a request
910  * @q:  the request queue for the device
911  * @rq: the request that has completed
912  *
913  *  Description:
914  *    Typically called when end_that_request_first() returns 0, meaning
915  *    all transfers have been done for a request. It's important to call
916  *    this function before end_that_request_last(), as that will put the
917  *    request back on the free list thus corrupting the internal tag list.
918  *
919  *  Notes:
920  *   queue lock must be held.
921  **/
922 void blk_queue_end_tag(request_queue_t *q, struct request *rq)
923 {
924         struct blk_queue_tag *bqt = q->queue_tags;
925         int tag = rq->tag;
926
927         BUG_ON(tag == -1);
928
929         if (unlikely(tag >= bqt->real_max_depth))
930                 return;
931
932         if (unlikely(!__test_and_clear_bit(tag, bqt->tag_map))) {
933                 printk("attempt to clear non-busy tag (%d)\n", tag);
934                 return;
935         }
936
937         list_del_init(&rq->queuelist);
938         rq->flags &= ~REQ_QUEUED;
939         rq->tag = -1;
940
941         if (unlikely(bqt->tag_index[tag] == NULL))
942                 printk("tag %d is missing\n", tag);
943
944         bqt->tag_index[tag] = NULL;
945         bqt->busy--;
946 }
947
948 EXPORT_SYMBOL(blk_queue_end_tag);
949
950 /**
951  * blk_queue_start_tag - find a free tag and assign it
952  * @q:  the request queue for the device
953  * @rq:  the block request that needs tagging
954  *
955  *  Description:
956  *    This can either be used as a stand-alone helper, or possibly be
957  *    assigned as the queue &prep_rq_fn (in which case &struct request
958  *    automagically gets a tag assigned). Note that this function
959  *    assumes that any type of request can be queued! if this is not
960  *    true for your device, you must check the request type before
961  *    calling this function.  The request will also be removed from
962  *    the request queue, so it's the drivers responsibility to readd
963  *    it if it should need to be restarted for some reason.
964  *
965  *  Notes:
966  *   queue lock must be held.
967  **/
968 int blk_queue_start_tag(request_queue_t *q, struct request *rq)
969 {
970         struct blk_queue_tag *bqt = q->queue_tags;
971         int tag;
972
973         if (unlikely((rq->flags & REQ_QUEUED))) {
974                 printk(KERN_ERR 
975                        "request %p for device [%s] already tagged %d",
976                        rq, rq->rq_disk ? rq->rq_disk->disk_name : "?", rq->tag);
977                 BUG();
978         }
979
980         tag = find_first_zero_bit(bqt->tag_map, bqt->max_depth);
981         if (tag >= bqt->max_depth)
982                 return 1;
983
984         __set_bit(tag, bqt->tag_map);
985
986         rq->flags |= REQ_QUEUED;
987         rq->tag = tag;
988         bqt->tag_index[tag] = rq;
989         blkdev_dequeue_request(rq);
990         list_add(&rq->queuelist, &bqt->busy_list);
991         bqt->busy++;
992         return 0;
993 }
994
995 EXPORT_SYMBOL(blk_queue_start_tag);
996
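/*
 * Example (illustrative sketch, not part of the original source): a tagged
 * queueing driver typically sets up the tag map once at init time and then
 * brackets each command with start/end tag calls from its request_fn and
 * completion handler. The "mydev_*" helper is hypothetical, and both tag
 * calls run under the queue lock as required above:
 *
 *	// init time, with a depth matching what the hardware supports
 *	if (blk_queue_init_tags(q, 64, NULL))
 *		return -ENOMEM;
 *
 *	// in the request_fn, before issuing rq to the hardware
 *	if (blk_queue_start_tag(q, rq)) {
 *		// no tag free: stop the queue until a completion restarts it
 *		blk_stop_queue(q);
 *		return;
 *	}
 *	mydev_issue_command(rq);
 *
 *	// in the completion path, before end_that_request_last()
 *	blk_queue_end_tag(q, rq);
 */
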
997 /**
998  * blk_queue_invalidate_tags - invalidate all pending tags
999  * @q:  the request queue for the device
1000  *
1001  *  Description:
1002  *   Hardware conditions may dictate a need to stop all pending requests.
1003  *   In this case, we will safely clear the block side of the tag queue and
1004  *   re-add all requests to the request queue in the right order.
1005  *
1006  *  Notes:
1007  *   queue lock must be held.
1008  **/
1009 void blk_queue_invalidate_tags(request_queue_t *q)
1010 {
1011         struct blk_queue_tag *bqt = q->queue_tags;
1012         struct list_head *tmp, *n;
1013         struct request *rq;
1014
1015         list_for_each_safe(tmp, n, &bqt->busy_list) {
1016                 rq = list_entry_rq(tmp);
1017
1018                 if (rq->tag == -1) {
1019                         printk("bad tag found on list\n");
1020                         list_del_init(&rq->queuelist);
1021                         rq->flags &= ~REQ_QUEUED;
1022                 } else
1023                         blk_queue_end_tag(q, rq);
1024
1025                 rq->flags &= ~REQ_STARTED;
1026                 __elv_add_request(q, rq, ELEVATOR_INSERT_BACK, 0);
1027         }
1028 }
1029
1030 EXPORT_SYMBOL(blk_queue_invalidate_tags);
1031
1032 static char *rq_flags[] = {
1033         "REQ_RW",
1034         "REQ_FAILFAST",
1035         "REQ_SOFTBARRIER",
1036         "REQ_HARDBARRIER",
1037         "REQ_CMD",
1038         "REQ_NOMERGE",
1039         "REQ_STARTED",
1040         "REQ_DONTPREP",
1041         "REQ_QUEUED",
1042         "REQ_PC",
1043         "REQ_BLOCK_PC",
1044         "REQ_SENSE",
1045         "REQ_FAILED",
1046         "REQ_QUIET",
1047         "REQ_SPECIAL",
1048         "REQ_DRIVE_CMD",
1049         "REQ_DRIVE_TASK",
1050         "REQ_DRIVE_TASKFILE",
1051         "REQ_PREEMPT",
1052         "REQ_PM_SUSPEND",
1053         "REQ_PM_RESUME",
1054         "REQ_PM_SHUTDOWN",
1055 };
1056
1057 void blk_dump_rq_flags(struct request *rq, char *msg)
1058 {
1059         int bit;
1060
1061         printk("%s: dev %s: flags = ", msg,
1062                 rq->rq_disk ? rq->rq_disk->disk_name : "?");
1063         bit = 0;
1064         do {
1065                 if (rq->flags & (1 << bit))
1066                         printk("%s ", rq_flags[bit]);
1067                 bit++;
1068         } while (bit < __REQ_NR_BITS);
1069
1070         printk("\nsector %llu, nr/cnr %lu/%u\n", (unsigned long long)rq->sector,
1071                                                        rq->nr_sectors,
1072                                                        rq->current_nr_sectors);
1073         printk("bio %p, biotail %p, buffer %p, data %p, len %u\n", rq->bio, rq->biotail, rq->buffer, rq->data, rq->data_len);
1074
1075         if (rq->flags & (REQ_BLOCK_PC | REQ_PC)) {
1076                 printk("cdb: ");
1077                 for (bit = 0; bit < sizeof(rq->cmd); bit++)
1078                         printk("%02x ", rq->cmd[bit]);
1079                 printk("\n");
1080         }
1081 }
1082
1083 EXPORT_SYMBOL(blk_dump_rq_flags);
1084
1085 void blk_recount_segments(request_queue_t *q, struct bio *bio)
1086 {
1087         struct bio_vec *bv, *bvprv = NULL;
1088         int i, nr_phys_segs, nr_hw_segs, seg_size, hw_seg_size, cluster;
1089         int high, highprv = 1;
1090
1091         if (unlikely(!bio->bi_io_vec))
1092                 return;
1093
1094         cluster = q->queue_flags & (1 << QUEUE_FLAG_CLUSTER);
1095         hw_seg_size = seg_size = nr_phys_segs = nr_hw_segs = 0;
1096         bio_for_each_segment(bv, bio, i) {
1097                 /*
1098                  * the trick here is making sure that a high page is never
1099                  * considered part of another segment, since that might
1100                  * change with the bounce page.
1101                  */
1102                 high = page_to_pfn(bv->bv_page) >= q->bounce_pfn;
1103                 if (high || highprv)
1104                         goto new_hw_segment;
1105                 if (cluster) {
1106                         if (seg_size + bv->bv_len > q->max_segment_size)
1107                                 goto new_segment;
1108                         if (!BIOVEC_PHYS_MERGEABLE(bvprv, bv))
1109                                 goto new_segment;
1110                         if (!BIOVEC_SEG_BOUNDARY(q, bvprv, bv))
1111                                 goto new_segment;
1112                         if (BIOVEC_VIRT_OVERSIZE(hw_seg_size + bv->bv_len))
1113                                 goto new_hw_segment;
1114
1115                         seg_size += bv->bv_len;
1116                         hw_seg_size += bv->bv_len;
1117                         bvprv = bv;
1118                         continue;
1119                 }
1120 new_segment:
1121                 if (BIOVEC_VIRT_MERGEABLE(bvprv, bv) &&
1122                     !BIOVEC_VIRT_OVERSIZE(hw_seg_size + bv->bv_len)) {
1123                         hw_seg_size += bv->bv_len;
1124                 } else {
1125 new_hw_segment:
1126                         if (hw_seg_size > bio->bi_hw_front_size)
1127                                 bio->bi_hw_front_size = hw_seg_size;
1128                         hw_seg_size = BIOVEC_VIRT_START_SIZE(bv) + bv->bv_len;
1129                         nr_hw_segs++;
1130                 }
1131
1132                 nr_phys_segs++;
1133                 bvprv = bv;
1134                 seg_size = bv->bv_len;
1135                 highprv = high;
1136         }
1137         if (hw_seg_size > bio->bi_hw_back_size)
1138                 bio->bi_hw_back_size = hw_seg_size;
1139         if (nr_hw_segs == 1 && hw_seg_size > bio->bi_hw_front_size)
1140                 bio->bi_hw_front_size = hw_seg_size;
1141         bio->bi_phys_segments = nr_phys_segs;
1142         bio->bi_hw_segments = nr_hw_segs;
1143         bio->bi_flags |= (1 << BIO_SEG_VALID);
1144 }
1145
1146
1147 int blk_phys_contig_segment(request_queue_t *q, struct bio *bio,
1148                                    struct bio *nxt)
1149 {
1150         if (!(q->queue_flags & (1 << QUEUE_FLAG_CLUSTER)))
1151                 return 0;
1152
1153         if (!BIOVEC_PHYS_MERGEABLE(__BVEC_END(bio), __BVEC_START(nxt)))
1154                 return 0;
1155         if (bio->bi_size + nxt->bi_size > q->max_segment_size)
1156                 return 0;
1157
1158         /*
1159  * bio and nxt are contiguous in memory, check if the queue allows
1160          * these two to be merged into one
1161          */
1162         if (BIO_SEG_BOUNDARY(q, bio, nxt))
1163                 return 1;
1164
1165         return 0;
1166 }
1167
1168 EXPORT_SYMBOL(blk_phys_contig_segment);
1169
1170 int blk_hw_contig_segment(request_queue_t *q, struct bio *bio,
1171                                  struct bio *nxt)
1172 {
1173         if (unlikely(!bio_flagged(bio, BIO_SEG_VALID)))
1174                 blk_recount_segments(q, bio);
1175         if (unlikely(!bio_flagged(nxt, BIO_SEG_VALID)))
1176                 blk_recount_segments(q, nxt);
1177         if (!BIOVEC_VIRT_MERGEABLE(__BVEC_END(bio), __BVEC_START(nxt)) ||
1178             BIOVEC_VIRT_OVERSIZE(bio->bi_hw_front_size + bio->bi_hw_back_size))
1179                 return 0;
1180         if (bio->bi_size + nxt->bi_size > q->max_segment_size)
1181                 return 0;
1182
1183         return 1;
1184 }
1185
1186 EXPORT_SYMBOL(blk_hw_contig_segment);
1187
1188 /*
1189  * map a request to scatterlist, return number of sg entries setup. Caller
1190  * must make sure sg can hold rq->nr_phys_segments entries
1191  */
1192 int blk_rq_map_sg(request_queue_t *q, struct request *rq, struct scatterlist *sg)
1193 {
1194         struct bio_vec *bvec, *bvprv;
1195         struct bio *bio;
1196         int nsegs, i, cluster;
1197
1198         nsegs = 0;
1199         cluster = q->queue_flags & (1 << QUEUE_FLAG_CLUSTER);
1200
1201         /*
1202          * for each bio in rq
1203          */
1204         bvprv = NULL;
1205         rq_for_each_bio(bio, rq) {
1206                 /*
1207                  * for each segment in bio
1208                  */
1209                 bio_for_each_segment(bvec, bio, i) {
1210                         int nbytes = bvec->bv_len;
1211
1212                         if (bvprv && cluster) {
1213                                 if (sg[nsegs - 1].length + nbytes > q->max_segment_size)
1214                                         goto new_segment;
1215
1216                                 if (!BIOVEC_PHYS_MERGEABLE(bvprv, bvec))
1217                                         goto new_segment;
1218                                 if (!BIOVEC_SEG_BOUNDARY(q, bvprv, bvec))
1219                                         goto new_segment;
1220
1221                                 sg[nsegs - 1].length += nbytes;
1222                         } else {
1223 new_segment:
1224                                 memset(&sg[nsegs],0,sizeof(struct scatterlist));
1225                                 sg[nsegs].page = bvec->bv_page;
1226                                 sg[nsegs].length = nbytes;
1227                                 sg[nsegs].offset = bvec->bv_offset;
1228
1229                                 nsegs++;
1230                         }
1231                         bvprv = bvec;
1232                 } /* segments in bio */
1233         } /* bios in rq */
1234
1235         return nsegs;
1236 }
1237
1238 EXPORT_SYMBOL(blk_rq_map_sg);
1239
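/*
 * Example (illustrative sketch, not part of the original source): a driver
 * building a scatter-gather table for a request sizes the table to the
 * queue's segment limit and lets blk_rq_map_sg() fill it in.
 * MYDEV_MAX_SEGMENTS is a hypothetical constant that must be at least
 * rq->nr_phys_segments:
 *
 *	struct scatterlist sg[MYDEV_MAX_SEGMENTS];
 *	int nsegs;
 *
 *	nsegs = blk_rq_map_sg(q, rq, sg);
 *	// hand the first nsegs entries of sg[] to the hardware/DMA engine
 */
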
1240 /*
1241  * the standard queue merge functions, can be overridden with device
1242  * specific ones if so desired
1243  */
1244
1245 static inline int ll_new_mergeable(request_queue_t *q,
1246                                    struct request *req,
1247                                    struct bio *bio)
1248 {
1249         int nr_phys_segs = bio_phys_segments(q, bio);
1250
1251         if (req->nr_phys_segments + nr_phys_segs > q->max_phys_segments) {
1252                 req->flags |= REQ_NOMERGE;
1253                 if (req == q->last_merge)
1254                         q->last_merge = NULL;
1255                 return 0;
1256         }
1257
1258         /*
1259          * A hw segment is just getting larger, bump just the phys
1260          * counter.
1261          */
1262         req->nr_phys_segments += nr_phys_segs;
1263         return 1;
1264 }
1265
1266 static inline int ll_new_hw_segment(request_queue_t *q,
1267                                     struct request *req,
1268                                     struct bio *bio)
1269 {
1270         int nr_hw_segs = bio_hw_segments(q, bio);
1271         int nr_phys_segs = bio_phys_segments(q, bio);
1272
1273         if (req->nr_hw_segments + nr_hw_segs > q->max_hw_segments
1274             || req->nr_phys_segments + nr_phys_segs > q->max_phys_segments) {
1275                 req->flags |= REQ_NOMERGE;
1276                 if (req == q->last_merge)
1277                         q->last_merge = NULL;
1278                 return 0;
1279         }
1280
1281         /*
1282          * This will form the start of a new hw segment.  Bump both
1283          * counters.
1284          */
1285         req->nr_hw_segments += nr_hw_segs;
1286         req->nr_phys_segments += nr_phys_segs;
1287         return 1;
1288 }
1289
1290 static int ll_back_merge_fn(request_queue_t *q, struct request *req, 
1291                             struct bio *bio)
1292 {
1293         int len;
1294
1295         if (req->nr_sectors + bio_sectors(bio) > q->max_sectors) {
1296                 req->flags |= REQ_NOMERGE;
1297                 if (req == q->last_merge)
1298                         q->last_merge = NULL;
1299                 return 0;
1300         }
1301         if (unlikely(!bio_flagged(req->biotail, BIO_SEG_VALID)))
1302                 blk_recount_segments(q, req->biotail);
1303         if (unlikely(!bio_flagged(bio, BIO_SEG_VALID)))
1304                 blk_recount_segments(q, bio);
1305         len = req->biotail->bi_hw_back_size + bio->bi_hw_front_size;
1306         if (BIOVEC_VIRT_MERGEABLE(__BVEC_END(req->biotail), __BVEC_START(bio)) &&
1307             !BIOVEC_VIRT_OVERSIZE(len)) {
1308                 int mergeable =  ll_new_mergeable(q, req, bio);
1309
1310                 if (mergeable) {
1311                         if (req->nr_hw_segments == 1)
1312                                 req->bio->bi_hw_front_size = len;
1313                         if (bio->bi_hw_segments == 1)
1314                                 bio->bi_hw_back_size = len;
1315                 }
1316                 return mergeable;
1317         }
1318
1319         return ll_new_hw_segment(q, req, bio);
1320 }
1321
1322 static int ll_front_merge_fn(request_queue_t *q, struct request *req, 
1323                              struct bio *bio)
1324 {
1325         int len;
1326
1327         if (req->nr_sectors + bio_sectors(bio) > q->max_sectors) {
1328                 req->flags |= REQ_NOMERGE;
1329                 if (req == q->last_merge)
1330                         q->last_merge = NULL;
1331                 return 0;
1332         }
1333         len = bio->bi_hw_back_size + req->bio->bi_hw_front_size;
1334         if (unlikely(!bio_flagged(bio, BIO_SEG_VALID)))
1335                 blk_recount_segments(q, bio);
1336         if (unlikely(!bio_flagged(req->bio, BIO_SEG_VALID)))
1337                 blk_recount_segments(q, req->bio);
1338         if (BIOVEC_VIRT_MERGEABLE(__BVEC_END(bio), __BVEC_START(req->bio)) &&
1339             !BIOVEC_VIRT_OVERSIZE(len)) {
1340                 int mergeable =  ll_new_mergeable(q, req, bio);
1341
1342                 if (mergeable) {
1343                         if (bio->bi_hw_segments == 1)
1344                                 bio->bi_hw_front_size = len;
1345                         if (req->nr_hw_segments == 1)
1346                                 req->biotail->bi_hw_back_size = len;
1347                 }
1348                 return mergeable;
1349         }
1350
1351         return ll_new_hw_segment(q, req, bio);
1352 }
1353
1354 static int ll_merge_requests_fn(request_queue_t *q, struct request *req,
1355                                 struct request *next)
1356 {
1357         int total_phys_segments = req->nr_phys_segments +next->nr_phys_segments;
1358         int total_hw_segments = req->nr_hw_segments + next->nr_hw_segments;
1359
1360         /*
1361  * First check whether either of the requests is a re-queued
1362  * request; we can't merge them if so.
1363          */
1364         if (req->special || next->special)
1365                 return 0;
1366
1367         /*
1368  * Will it become too large?
1369          */
1370         if ((req->nr_sectors + next->nr_sectors) > q->max_sectors)
1371                 return 0;
1372
1373         total_phys_segments = req->nr_phys_segments + next->nr_phys_segments;
1374         if (blk_phys_contig_segment(q, req->biotail, next->bio))
1375                 total_phys_segments--;
1376
1377         if (total_phys_segments > q->max_phys_segments)
1378                 return 0;
1379
1380         total_hw_segments = req->nr_hw_segments + next->nr_hw_segments;
1381         if (blk_hw_contig_segment(q, req->biotail, next->bio)) {
1382                 int len = req->biotail->bi_hw_back_size + next->bio->bi_hw_front_size;
1383                 /*
1384                  * propagate the combined length to the end of the requests
1385                  */
1386                 if (req->nr_hw_segments == 1)
1387                         req->bio->bi_hw_front_size = len;
1388                 if (next->nr_hw_segments == 1)
1389                         next->biotail->bi_hw_back_size = len;
1390                 total_hw_segments--;
1391         }
1392
1393         if (total_hw_segments > q->max_hw_segments)
1394                 return 0;
1395
1396         /* Merge is OK... */
1397         req->nr_phys_segments = total_phys_segments;
1398         req->nr_hw_segments = total_hw_segments;
1399         return 1;
1400 }
1401
1402 /*
1403  * "plug" the device if there are no outstanding requests: this will
1404  * force the transfer to start only after we have put all the requests
1405  * on the list.
1406  *
1407  * This is called with interrupts off and no requests on the queue and
1408  * with the queue lock held.
1409  */
1410 void blk_plug_device(request_queue_t *q)
1411 {
1412         WARN_ON(!irqs_disabled());
1413
1414         /*
1415          * don't plug a stopped queue, it must be paired with blk_start_queue()
1416          * which will restart the queueing
1417          */
1418         if (test_bit(QUEUE_FLAG_STOPPED, &q->queue_flags))
1419                 return;
1420
1421         if (!test_and_set_bit(QUEUE_FLAG_PLUGGED, &q->queue_flags))
1422                 mod_timer(&q->unplug_timer, jiffies + q->unplug_delay);
1423 }
1424
1425 EXPORT_SYMBOL(blk_plug_device);
1426
1427 /*
1428  * remove the queue from the plugged list, if present. called with
1429  * queue lock held and interrupts disabled.
1430  */
1431 int blk_remove_plug(request_queue_t *q)
1432 {
1433         WARN_ON(!irqs_disabled());
1434
1435         if (!test_and_clear_bit(QUEUE_FLAG_PLUGGED, &q->queue_flags))
1436                 return 0;
1437
1438         del_timer(&q->unplug_timer);
1439         return 1;
1440 }
1441
1442 EXPORT_SYMBOL(blk_remove_plug);
1443
1444 /*
1445  * remove the plug and let it rip..
1446  */
1447 void __generic_unplug_device(request_queue_t *q)
1448 {
1449         if (test_bit(QUEUE_FLAG_STOPPED, &q->queue_flags))
1450                 return;
1451
1452         if (!blk_remove_plug(q))
1453                 return;
1454
1455         /*
1456          * was plugged, fire request_fn if queue has stuff to do
1457          */
1458         if (elv_next_request(q))
1459                 q->request_fn(q);
1460 }
1461 EXPORT_SYMBOL(__generic_unplug_device);
1462
1463 /**
1464  * generic_unplug_device - fire a request queue
1465  * @q:    The &request_queue_t in question
1466  *
1467  * Description:
1468  *   Linux uses plugging to build bigger requests queues before letting
1469  *   the device have at them. If a queue is plugged, the I/O scheduler
1470  *   is still adding and merging requests on the queue. Once the queue
1471  *   gets unplugged, the request_fn defined for the queue is invoked and
1472  *   transfers started.
1473  **/
1474 void generic_unplug_device(request_queue_t *q)
1475 {
1476         spin_lock_irq(q->queue_lock);
1477         __generic_unplug_device(q);
1478         spin_unlock_irq(q->queue_lock);
1479 }
1480 EXPORT_SYMBOL(generic_unplug_device);
1481
1482 static void blk_backing_dev_unplug(struct backing_dev_info *bdi,
1483                                    struct page *page)
1484 {
1485         request_queue_t *q = bdi->unplug_io_data;
1486
1487         /*
1488          * devices don't necessarily have an ->unplug_fn defined
1489          */
1490         if (q->unplug_fn)
1491                 q->unplug_fn(q);
1492 }
1493
1494 static void blk_unplug_work(void *data)
1495 {
1496         request_queue_t *q = data;
1497
1498         q->unplug_fn(q);
1499 }
1500
1501 static void blk_unplug_timeout(unsigned long data)
1502 {
1503         request_queue_t *q = (request_queue_t *)data;
1504
1505         kblockd_schedule_work(&q->unplug_work);
1506 }
1507
1508 /**
1509  * blk_start_queue - restart a previously stopped queue
1510  * @q:    The &request_queue_t in question
1511  *
1512  * Description:
1513  *   blk_start_queue() will clear the stop flag on the queue, and call
1514  *   the request_fn for the queue if it was in a stopped state when
1515  *   entered. Also see blk_stop_queue(). Queue lock must be held.
1516  **/
1517 void blk_start_queue(request_queue_t *q)
1518 {
1519         clear_bit(QUEUE_FLAG_STOPPED, &q->queue_flags);
1520
1521         /*
1522          * one level of recursion is ok and is much faster than kicking
1523          * the unplug handling
1524          */
1525         if (!test_and_set_bit(QUEUE_FLAG_REENTER, &q->queue_flags)) {
1526                 q->request_fn(q);
1527                 clear_bit(QUEUE_FLAG_REENTER, &q->queue_flags);
1528         } else {
1529                 blk_plug_device(q);
1530                 kblockd_schedule_work(&q->unplug_work);
1531         }
1532 }
1533
1534 EXPORT_SYMBOL(blk_start_queue);
1535
1536 /**
1537  * blk_stop_queue - stop a queue
1538  * @q:    The &request_queue_t in question
1539  *
1540  * Description:
1541  *   The Linux block layer assumes that a block driver will consume all
1542  *   entries on the request queue when the request_fn strategy is called.
1543  *   Often this will not happen, because of hardware limitations (queue
1544  *   depth settings). If a device driver gets a 'queue full' response,
1545  *   or if it simply chooses not to queue more I/O at one point, it can
1546  *   call this function to prevent the request_fn from being called until
1547  *   the driver has signalled it's ready to go again. This happens by calling
1548  *   blk_start_queue() to restart queue operations. Queue lock must be held.
1549  **/
1550 void blk_stop_queue(request_queue_t *q)
1551 {
1552         blk_remove_plug(q);
1553         set_bit(QUEUE_FLAG_STOPPED, &q->queue_flags);
1554 }
1555 EXPORT_SYMBOL(blk_stop_queue);
1556
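/*
 * Example (illustrative sketch, not part of the original source): the usual
 * pattern is to stop the queue from the request_fn when the hardware reports
 * "queue full" and to restart it from the completion handler once room frees
 * up. The "mydev_*" helper is hypothetical; blk_stop_queue() runs under the
 * queue lock already held in the request_fn, while the completion handler
 * takes it explicitly:
 *
 *	// in the request_fn
 *	if (mydev_hw_queue_full(dev)) {
 *		blk_stop_queue(q);
 *		return;
 *	}
 *
 *	// in the interrupt/completion handler
 *	spin_lock_irqsave(q->queue_lock, flags);
 *	blk_start_queue(q);
 *	spin_unlock_irqrestore(q->queue_lock, flags);
 */
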
1557 /**
1558  * blk_sync_queue - cancel any pending callbacks on a queue
1559  * @q: the queue
1560  *
1561  * Description:
1562  *     The block layer may perform asynchronous callback activity
1563  *     on a queue, such as calling the unplug function after a timeout.
1564  *     A block device may call blk_sync_queue to ensure that any
1565  *     such activity is cancelled, thus allowing it to release resources
1566  *     the callbacks might use. The caller must already have made sure
1567  *     that its ->make_request_fn will not re-add plugging prior to calling
1568  *     this function.
1569  *
1570  */
1571 void blk_sync_queue(struct request_queue *q)
1572 {
1573         del_timer_sync(&q->unplug_timer);
1574         kblockd_flush();
1575 }
1576 EXPORT_SYMBOL(blk_sync_queue);
1577
1578 /**
1579  * blk_run_queue - run a single device queue
1580  * @q:  The queue to run
1581  */
1582 void blk_run_queue(struct request_queue *q)
1583 {
1584         unsigned long flags;
1585
1586         spin_lock_irqsave(q->queue_lock, flags);
1587         blk_remove_plug(q);
1588         if (!elv_queue_empty(q))
1589                 q->request_fn(q);
1590         spin_unlock_irqrestore(q->queue_lock, flags);
1591 }
1592 EXPORT_SYMBOL(blk_run_queue);
1593
1594 /**
1595  * blk_cleanup_queue: - release a &request_queue_t when it is no longer needed
1596  * @q:    the request queue to be released
1597  *
1598  * Description:
1599  *     blk_cleanup_queue is the pair to blk_init_queue() or
1600  *     blk_queue_make_request().  It should be called when a request queue is
1601  *     being released; typically when a block device is being de-registered.
1602  *     Currently, its primary task is to free all the &struct request
1603  *     structures that were allocated to the queue and the queue itself.
1604  *
1605  * Caveat:
1606  *     Hopefully the low level driver will have finished any
1607  *     outstanding requests first...
1608  **/
1609 void blk_cleanup_queue(request_queue_t * q)
1610 {
1611         struct request_list *rl = &q->rq;
1612
1613         if (!atomic_dec_and_test(&q->refcnt))
1614                 return;
1615
1616         if (q->elevator)
1617                 elevator_exit(q->elevator);
1618
1619         blk_sync_queue(q);
1620
1621         if (rl->rq_pool)
1622                 mempool_destroy(rl->rq_pool);
1623
1624         if (q->queue_tags)
1625                 __blk_queue_free_tags(q);
1626
1627         blk_queue_ordered(q, QUEUE_ORDERED_NONE);
1628
1629         kmem_cache_free(requestq_cachep, q);
1630 }
1631
1632 EXPORT_SYMBOL(blk_cleanup_queue);
1633
1634 static int blk_init_free_list(request_queue_t *q)
1635 {
1636         struct request_list *rl = &q->rq;
1637
1638         rl->count[READ] = rl->count[WRITE] = 0;
1639         rl->starved[READ] = rl->starved[WRITE] = 0;
1640         init_waitqueue_head(&rl->wait[READ]);
1641         init_waitqueue_head(&rl->wait[WRITE]);
1642         init_waitqueue_head(&rl->drain);
1643
1644         rl->rq_pool = mempool_create_node(BLKDEV_MIN_RQ, mempool_alloc_slab,
1645                                 mempool_free_slab, request_cachep, q->node);
1646
1647         if (!rl->rq_pool)
1648                 return -ENOMEM;
1649
1650         return 0;
1651 }
1652
1653 static int __make_request(request_queue_t *, struct bio *);
1654
1655 request_queue_t *blk_alloc_queue(int gfp_mask)
1656 {
1657         return blk_alloc_queue_node(gfp_mask, -1);
1658 }
1659 EXPORT_SYMBOL(blk_alloc_queue);
1660
1661 request_queue_t *blk_alloc_queue_node(int gfp_mask, int node_id)
1662 {
1663         request_queue_t *q;
1664
1665         q = kmem_cache_alloc_node(requestq_cachep, gfp_mask, node_id);
1666         if (!q)
1667                 return NULL;
1668
1669         memset(q, 0, sizeof(*q));
1670         init_timer(&q->unplug_timer);
1671         atomic_set(&q->refcnt, 1);
1672
1673         q->backing_dev_info.unplug_io_fn = blk_backing_dev_unplug;
1674         q->backing_dev_info.unplug_io_data = q;
1675
1676         return q;
1677 }
1678 EXPORT_SYMBOL(blk_alloc_queue_node);
1679
1680 /**
1681  * blk_init_queue  - prepare a request queue for use with a block device
1682  * @rfn:  The function to be called to process requests that have been
1683  *        placed on the queue.
1684  * @lock: Request queue spin lock
1685  *
1686  * Description:
1687  *    If a block device wishes to use the standard request handling procedures,
1688  *    which sorts requests and coalesces adjacent requests, then it must
1689  *    call blk_init_queue().  The function @rfn will be called when there
1690  *    are requests on the queue that need to be processed.  If the device
1691  *    supports plugging, then @rfn may not be called immediately when requests
1692  *    are available on the queue, but may be called at some time later instead.
1693  *    Plugged queues are generally unplugged when a buffer belonging to one
1694  *    of the requests on the queue is needed, or due to memory pressure.
1695  *
1696  *    @rfn is not required, or even expected, to remove all requests off the
1697  *    queue, but only as many as it can handle at a time.  If it does leave
1698  *    requests on the queue, it is responsible for arranging that the requests
1699  *    get dealt with eventually.
1700  *
1701  *    The queue spin lock must be held while manipulating the requests on the
1702  *    request queue.
1703  *
1704  *    Function returns a pointer to the initialized request queue, or NULL if
1705  *    it didn't succeed.
1706  *
1707  * Note:
1708  *    blk_init_queue() must be paired with a blk_cleanup_queue() call
1709  *    when the block device is deactivated (such as at module unload).
1710  **/
1711
1712 request_queue_t *blk_init_queue(request_fn_proc *rfn, spinlock_t *lock)
1713 {
1714         return blk_init_queue_node(rfn, lock, -1);
1715 }
1716 EXPORT_SYMBOL(blk_init_queue);
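
/*
 * Illustrative sketch of the pairing described above (hypothetical driver,
 * not from this file): blk_init_queue() at module init, blk_cleanup_queue()
 * at module unload.  "my_request_fn" and "my_lock" are the driver's own.
 *
 *	static spinlock_t my_lock;
 *	static request_queue_t *my_queue;
 *
 *	static int __init my_init(void)
 *	{
 *		spin_lock_init(&my_lock);
 *		my_queue = blk_init_queue(my_request_fn, &my_lock);
 *		if (!my_queue)
 *			return -ENOMEM;
 *		return 0;
 *	}
 *
 *	static void __exit my_exit(void)
 *	{
 *		blk_cleanup_queue(my_queue);
 *	}
 */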
1717
1718 request_queue_t *
1719 blk_init_queue_node(request_fn_proc *rfn, spinlock_t *lock, int node_id)
1720 {
1721         request_queue_t *q = blk_alloc_queue_node(GFP_KERNEL, node_id);
1722
1723         if (!q)
1724                 return NULL;
1725
1726         q->node = node_id;
1727         if (blk_init_free_list(q))
1728                 goto out_init;
1729
1730         /*
1731          * if caller didn't supply a lock, they get per-queue locking with
1732          * our embedded lock
1733          */
1734         if (!lock) {
1735                 spin_lock_init(&q->__queue_lock);
1736                 lock = &q->__queue_lock;
1737         }
1738
1739         q->request_fn           = rfn;
1740         q->back_merge_fn        = ll_back_merge_fn;
1741         q->front_merge_fn       = ll_front_merge_fn;
1742         q->merge_requests_fn    = ll_merge_requests_fn;
1743         q->prep_rq_fn           = NULL;
1744         q->unplug_fn            = generic_unplug_device;
1745         q->queue_flags          = (1 << QUEUE_FLAG_CLUSTER);
1746         q->queue_lock           = lock;
1747
1748         blk_queue_segment_boundary(q, 0xffffffff);
1749
1750         blk_queue_make_request(q, __make_request);
1751         blk_queue_max_segment_size(q, MAX_SEGMENT_SIZE);
1752
1753         blk_queue_max_hw_segments(q, MAX_HW_SEGMENTS);
1754         blk_queue_max_phys_segments(q, MAX_PHYS_SEGMENTS);
1755
1756         /*
1757          * all done
1758          */
1759         if (!elevator_init(q, NULL)) {
1760                 blk_queue_congestion_threshold(q);
1761                 return q;
1762         }
1763
1764         blk_cleanup_queue(q);
             /*
              * blk_cleanup_queue() dropped the last reference and freed the
              * queue, so don't fall through and free it a second time
              */
             return NULL;
1765 out_init:
1766         kmem_cache_free(requestq_cachep, q);
1767         return NULL;
1768 }
1769 EXPORT_SYMBOL(blk_init_queue_node);
1770
1771 int blk_get_queue(request_queue_t *q)
1772 {
1773         if (!test_bit(QUEUE_FLAG_DEAD, &q->queue_flags)) {
1774                 atomic_inc(&q->refcnt);
1775                 return 0;
1776         }
1777
1778         return 1;
1779 }
1780
1781 EXPORT_SYMBOL(blk_get_queue);
1782
1783 static inline void blk_free_request(request_queue_t *q, struct request *rq)
1784 {
1785         elv_put_request(q, rq);
1786         mempool_free(rq, q->rq.rq_pool);
1787 }
1788
1789 static inline struct request *blk_alloc_request(request_queue_t *q, int rw,
1790                                                 int gfp_mask)
1791 {
1792         struct request *rq = mempool_alloc(q->rq.rq_pool, gfp_mask);
1793
1794         if (!rq)
1795                 return NULL;
1796
1797         /*
1798          * first three bits are identical in rq->flags and bio->bi_rw,
1799          * see bio.h and blkdev.h
1800          */
1801         rq->flags = rw;
1802
1803         if (!elv_set_request(q, rq, gfp_mask))
1804                 return rq;
1805
1806         mempool_free(rq, q->rq.rq_pool);
1807         return NULL;
1808 }
1809
1810 /*
1811  * ioc_batching returns true if the ioc is a valid batching io context and
1812  * should be given priority access to a request.
1813  */
1814 static inline int ioc_batching(request_queue_t *q, struct io_context *ioc)
1815 {
1816         if (!ioc)
1817                 return 0;
1818
1819         /*
1820          * Make sure the process is able to allocate at least 1 request
1821          * even if the batch times out, otherwise we could theoretically
1822          * lose wakeups.
1823          */
1824         return ioc->nr_batch_requests == q->nr_batching ||
1825                 (ioc->nr_batch_requests > 0
1826                 && time_before(jiffies, ioc->last_waited + BLK_BATCH_TIME));
1827 }
1828
1829 /*
1830  * ioc_set_batching sets ioc to be a new "batcher" if it is not one. This
1831  * will cause the process to be a "batcher" on all queues in the system. This
1832  * is the behaviour we want though - once it gets a wakeup it should be given
1833  * a nice run.
1834  */
1835 void ioc_set_batching(request_queue_t *q, struct io_context *ioc)
1836 {
1837         if (!ioc || ioc_batching(q, ioc))
1838                 return;
1839
1840         ioc->nr_batch_requests = q->nr_batching;
1841         ioc->last_waited = jiffies;
1842 }
1843
1844 static void __freed_request(request_queue_t *q, int rw)
1845 {
1846         struct request_list *rl = &q->rq;
1847
1848         if (rl->count[rw] < queue_congestion_off_threshold(q))
1849                 clear_queue_congested(q, rw);
1850
1851         if (rl->count[rw] + 1 <= q->nr_requests) {
1852                 smp_mb();
1853                 if (waitqueue_active(&rl->wait[rw]))
1854                         wake_up(&rl->wait[rw]);
1855
1856                 blk_clear_queue_full(q, rw);
1857         }
1858 }
1859
1860 /*
1861  * A request has just been released.  Account for it, update the full and
1862  * congestion status, wake up any waiters.   Called under q->queue_lock.
1863  */
1864 static void freed_request(request_queue_t *q, int rw)
1865 {
1866         struct request_list *rl = &q->rq;
1867
1868         rl->count[rw]--;
1869
1870         __freed_request(q, rw);
1871
1872         if (unlikely(rl->starved[rw ^ 1]))
1873                 __freed_request(q, rw ^ 1);
1874
1875         if (!rl->count[READ] && !rl->count[WRITE]) {
1876                 smp_mb();
1877                 if (unlikely(waitqueue_active(&rl->drain)))
1878                         wake_up(&rl->drain);
1879         }
1880 }
1881
1882 #define blkdev_free_rq(list) list_entry((list)->next, struct request, queuelist)
1883 /*
1884  * Get a free request, queue_lock must not be held
1885  */
1886 static struct request *get_request(request_queue_t *q, int rw, int gfp_mask)
1887 {
1888         struct request *rq = NULL;
1889         struct request_list *rl = &q->rq;
1890         struct io_context *ioc = get_io_context(gfp_mask);
1891
1892         if (unlikely(test_bit(QUEUE_FLAG_DRAIN, &q->queue_flags)))
1893                 goto out;
1894
1895         spin_lock_irq(q->queue_lock);
1896         if (rl->count[rw]+1 >= q->nr_requests) {
1897                 /*
1898                  * The queue will fill after this allocation, so set it as
1899                  * full, and mark this process as "batching". This process
1900                  * will be allowed to complete a batch of requests, others
1901                  * will be blocked.
1902                  */
1903                 if (!blk_queue_full(q, rw)) {
1904                         ioc_set_batching(q, ioc);
1905                         blk_set_queue_full(q, rw);
1906                 }
1907         }
1908
1909         switch (elv_may_queue(q, rw)) {
1910                 case ELV_MQUEUE_NO:
1911                         goto rq_starved;
1912                 case ELV_MQUEUE_MAY:
1913                         break;
1914                 case ELV_MQUEUE_MUST:
1915                         goto get_rq;
1916         }
1917
1918         if (blk_queue_full(q, rw) && !ioc_batching(q, ioc)) {
1919                 /*
1920                  * The queue is full and the allocating process is not a
1921                  * "batcher", and not exempted by the IO scheduler
1922                  */
1923                 spin_unlock_irq(q->queue_lock);
1924                 goto out;
1925         }
1926
1927 get_rq:
1928         rl->count[rw]++;
1929         rl->starved[rw] = 0;
1930         if (rl->count[rw] >= queue_congestion_on_threshold(q))
1931                 set_queue_congested(q, rw);
1932         spin_unlock_irq(q->queue_lock);
1933
1934         rq = blk_alloc_request(q, rw, gfp_mask);
1935         if (!rq) {
1936                 /*
1937                  * Allocation failed presumably due to memory. Undo anything
1938                  * we might have messed up.
1939                  *
1940                  * Allocating task should really be put onto the front of the
1941                  * wait queue, but this is pretty rare.
1942                  */
1943                 spin_lock_irq(q->queue_lock);
1944                 freed_request(q, rw);
1945
1946                 /*
1947                  * in the very unlikely event that allocation failed and no
1948                  * requests for this direction were pending, mark us starved
1949                  * so that freeing of a request in the other direction will
1950                  * notice us. Another possible fix would be to split the
1951                  * rq mempool into READ and WRITE
1952                  */
1953 rq_starved:
1954                 if (unlikely(rl->count[rw] == 0))
1955                         rl->starved[rw] = 1;
1956
1957                 spin_unlock_irq(q->queue_lock);
1958                 goto out;
1959         }
1960
1961         if (ioc_batching(q, ioc))
1962                 ioc->nr_batch_requests--;
1963         
1964         rq_init(q, rq);
1965         rq->rl = rl;
1966 out:
1967         put_io_context(ioc);
1968         return rq;
1969 }
1970
1971 /*
1972  * No available requests for this queue, unplug the device and wait for some
1973  * requests to become available.
1974  */
1975 static struct request *get_request_wait(request_queue_t *q, int rw)
1976 {
1977         DEFINE_WAIT(wait);
1978         struct request *rq;
1979
1980         generic_unplug_device(q);
1981         do {
1982                 struct request_list *rl = &q->rq;
1983
1984                 prepare_to_wait_exclusive(&rl->wait[rw], &wait,
1985                                 TASK_UNINTERRUPTIBLE);
1986
1987                 rq = get_request(q, rw, GFP_NOIO);
1988
1989                 if (!rq) {
1990                         struct io_context *ioc;
1991
1992                         io_schedule();
1993
1994                         /*
1995                          * After sleeping, we become a "batching" process and
1996                          * will be able to allocate at least one request, and
1997                          * up to a big batch of them for a small period of time.
1998                          * See ioc_batching, ioc_set_batching
1999                          */
2000                         ioc = get_io_context(GFP_NOIO);
2001                         ioc_set_batching(q, ioc);
2002                         put_io_context(ioc);
2003                 }
2004                 finish_wait(&rl->wait[rw], &wait);
2005         } while (!rq);
2006
2007         return rq;
2008 }
2009
2010 struct request *blk_get_request(request_queue_t *q, int rw, int gfp_mask)
2011 {
2012         struct request *rq;
2013
2014         BUG_ON(rw != READ && rw != WRITE);
2015
2016         if (gfp_mask & __GFP_WAIT)
2017                 rq = get_request_wait(q, rw);
2018         else
2019                 rq = get_request(q, rw, gfp_mask);
2020
2021         return rq;
2022 }
2023
2024 EXPORT_SYMBOL(blk_get_request);
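
/*
 * Illustrative sketch (not from this file): allocating a request for a
 * driver-private command.  With __GFP_WAIT the call may sleep until a
 * request becomes free; with GFP_ATOMIC it returns NULL instead of
 * sleeping, so callers in atomic context must handle that.
 *
 *	struct request *rq = blk_get_request(q, WRITE, __GFP_WAIT);
 *
 *	... fill in rq ...
 *	blk_put_request(rq);		(when done with it)
 */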
2025
2026 /**
2027  * blk_requeue_request - put a request back on queue
2028  * @q:          request queue where request should be inserted
2029  * @rq:         request to be inserted
2030  *
2031  * Description:
2032  *    Drivers often keep queueing requests until the hardware cannot accept
2033  *    more; when that condition happens we need to put the request back
2034  *    on the queue. Must be called with queue lock held.
2035  */
2036 void blk_requeue_request(request_queue_t *q, struct request *rq)
2037 {
2038         if (blk_rq_tagged(rq))
2039                 blk_queue_end_tag(q, rq);
2040
2041         elv_requeue_request(q, rq);
2042 }
2043
2044 EXPORT_SYMBOL(blk_requeue_request);
2045
2046 /**
2047  * blk_insert_request - insert a special request in to a request queue
2048  * @q:          request queue where request should be inserted
2049  * @rq:         request to be inserted
2050  * @at_head:    insert request at head or tail of queue
2051  * @data:       private data
2052  *
2053  * Description:
2054  *    Many block devices need to execute commands asynchronously, so they don't
2055  *    block the whole kernel from preemption during request execution.  This is
2056  *    accomplished normally by inserting artificial requests tagged as
2057  *    REQ_SPECIAL in to the corresponding request queue, and letting them be
2058  *    scheduled for actual execution by the request queue.
2059  *
2060  *    We have the option of inserting at the head or the tail of the queue.
2061  *    Typically we use the tail for new ioctls and so forth.  We use the head
2062  *    of the queue for things like a QUEUE_FULL message from a device, or a
2063  *    host that is unable to accept a particular command.
2064  */
2065 void blk_insert_request(request_queue_t *q, struct request *rq,
2066                         int at_head, void *data)
2067 {
2068         int where = at_head ? ELEVATOR_INSERT_FRONT : ELEVATOR_INSERT_BACK;
2069         unsigned long flags;
2070
2071         /*
2072          * tell I/O scheduler that this isn't a regular read/write (ie it
2073          * must not attempt merges on this) and that it acts as a soft
2074          * barrier
2075          */
2076         rq->flags |= REQ_SPECIAL | REQ_SOFTBARRIER;
2077
2078         rq->special = data;
2079
2080         spin_lock_irqsave(q->queue_lock, flags);
2081
2082         /*
2083          * If command is tagged, release the tag
2084          */
2085         if (blk_rq_tagged(rq))
2086                 blk_queue_end_tag(q, rq);
2087
2088         drive_stat_acct(rq, rq->nr_sectors, 1);
2089         __elv_add_request(q, rq, where, 0);
2090
2091         if (blk_queue_plugged(q))
2092                 __generic_unplug_device(q);
2093         else
2094                 q->request_fn(q);
2095         spin_unlock_irqrestore(q->queue_lock, flags);
2096 }
2097
2098 EXPORT_SYMBOL(blk_insert_request);
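
/*
 * Illustrative sketch (not from this file): queueing a driver-private
 * command as described above.  "my_cmd" is a hypothetical per-command
 * structure; it is handed back to the driver through rq->special when
 * the request reaches the request_fn.
 *
 *	rq = blk_get_request(q, WRITE, __GFP_WAIT);
 *	blk_insert_request(q, rq, 0, my_cmd);	(0 = insert at the tail)
 */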
2099
2100 /**
2101  * blk_rq_map_user - map user data to a request, for REQ_BLOCK_PC usage
2102  * @q:          request queue where request should be inserted
2103  * @rw:         READ or WRITE data
2104  * @ubuf:       the user buffer
2105  * @len:        length of user data
2106  *
2107  * Description:
2108  *    Data will be mapped directly for zero copy io, if possible. Otherwise
2109  *    a kernel bounce buffer is used.
2110  *
2111  *    A matching blk_rq_unmap_user() must be issued at the end of io, while
2112  *    still in process context.
2113  *
2114  *    Note: The mapped bio may need to be bounced through blk_queue_bounce()
2115  *    before being submitted to the device, as pages mapped may be out of
2116  *    reach. It is the caller's responsibility to make sure this happens. The
2117  *    original bio must be passed back in to blk_rq_unmap_user() for proper
2118  *    unmapping.
2119  */
2120 struct request *blk_rq_map_user(request_queue_t *q, int rw, void __user *ubuf,
2121                                 unsigned int len)
2122 {
2123         unsigned long uaddr;
2124         struct request *rq;
2125         struct bio *bio;
2126
2127         if (len > (q->max_sectors << 9))
2128                 return ERR_PTR(-EINVAL);
2129         if ((!len && ubuf) || (len && !ubuf))
2130                 return ERR_PTR(-EINVAL);
2131
2132         rq = blk_get_request(q, rw, __GFP_WAIT);
2133         if (!rq)
2134                 return ERR_PTR(-ENOMEM);
2135
2136         /*
2137          * if alignment requirement is satisfied, map in user pages for
2138          * direct dma. else, set up kernel bounce buffers
2139          */
2140         uaddr = (unsigned long) ubuf;
2141         if (!(uaddr & queue_dma_alignment(q)) && !(len & queue_dma_alignment(q)))
2142                 bio = bio_map_user(q, NULL, uaddr, len, rw == READ);
2143         else
2144                 bio = bio_copy_user(q, uaddr, len, rw == READ);
2145
2146         if (!IS_ERR(bio)) {
2147                 rq->bio = rq->biotail = bio;
2148                 blk_rq_bio_prep(q, rq, bio);
2149
2150                 rq->buffer = rq->data = NULL;
2151                 rq->data_len = len;
2152                 return rq;
2153         }
2154
2155         /*
2156          * bio is the err-ptr
2157          */
2158         blk_put_request(rq);
2159         return (struct request *) bio;
2160 }
2161
2162 EXPORT_SYMBOL(blk_rq_map_user);
2163
2164 /**
2165  * blk_rq_unmap_user - unmap a request with user data
2166  * @rq:         request to be unmapped
2167  * @bio:        bio for the request
2168  * @ulen:       length of user buffer
2169  *
2170  * Description:
2171  *    Unmap a request previously mapped by blk_rq_map_user().
2172  */
2173 int blk_rq_unmap_user(struct request *rq, struct bio *bio, unsigned int ulen)
2174 {
2175         int ret = 0;
2176
2177         if (bio) {
2178                 if (bio_flagged(bio, BIO_USER_MAPPED))
2179                         bio_unmap_user(bio);
2180                 else
2181                         ret = bio_uncopy_user(bio);
2182         }
2183
2184         blk_put_request(rq);
2185         return ret;
2186 }
2187
2188 EXPORT_SYMBOL(blk_rq_unmap_user);
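
/*
 * Illustrative sketch (not from this file) of the map/execute/unmap cycle
 * described above, e.g. for a passthrough ioctl.  "ubuf", "len", "disk"
 * and the command bytes are supplied by the hypothetical caller; the
 * original bio is saved before execution so it can be handed back to
 * blk_rq_unmap_user().
 *
 *	struct request *rq;
 *	struct bio *bio;
 *	int err;
 *
 *	rq = blk_rq_map_user(q, READ, ubuf, len);
 *	if (IS_ERR(rq))
 *		return PTR_ERR(rq);
 *
 *	bio = rq->bio;
 *	rq->flags |= REQ_BLOCK_PC;
 *	... fill in rq->cmd / rq->cmd_len / rq->timeout ...
 *
 *	err = blk_execute_rq(q, disk, rq);
 *	blk_rq_unmap_user(rq, bio, len);	(also drops the request)
 *	return err;
 */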
2189
2190 /**
2191  * blk_execute_rq - insert a request into queue for execution
2192  * @q:          queue to insert the request in
2193  * @bd_disk:    matching gendisk
2194  * @rq:         request to insert
2195  *
2196  * Description:
2197  *    Insert a fully prepared request at the back of the io scheduler queue
2198  *    for execution.
2199  */
2200 int blk_execute_rq(request_queue_t *q, struct gendisk *bd_disk,
2201                    struct request *rq)
2202 {
2203         DECLARE_COMPLETION(wait);
2204         char sense[SCSI_SENSE_BUFFERSIZE];
2205         int err = 0;
2206
2207         rq->rq_disk = bd_disk;
2208
2209         /*
2210          * we need an extra reference to the request, so we can look at
2211          * it after io completion
2212          */
2213         rq->ref_count++;
2214
2215         if (!rq->sense) {
2216                 memset(sense, 0, sizeof(sense));
2217                 rq->sense = sense;
2218                 rq->sense_len = 0;
2219         }
2220
2221         rq->flags |= REQ_NOMERGE;
2222         rq->waiting = &wait;
2223         rq->end_io = blk_end_sync_rq;
2224         elv_add_request(q, rq, ELEVATOR_INSERT_BACK, 1);
2225         generic_unplug_device(q);
2226         wait_for_completion(&wait);
2227         rq->waiting = NULL;
2228
2229         if (rq->errors)
2230                 err = -EIO;
2231
2232         return err;
2233 }
2234
2235 EXPORT_SYMBOL(blk_execute_rq);
2236
2237 /**
2238  * blkdev_issue_flush - queue a flush
2239  * @bdev:       blockdev to issue flush for
2240  * @error_sector:       error sector
2241  *
2242  * Description:
2243  *    Issue a flush for the block device in question. Caller can supply
2244  *    room for storing the error offset in case of a flush error, if they
2245  *    wish to.  Caller must run wait_for_completion() on its own.
2246  */
2247 int blkdev_issue_flush(struct block_device *bdev, sector_t *error_sector)
2248 {
2249         request_queue_t *q;
2250
2251         if (bdev->bd_disk == NULL)
2252                 return -ENXIO;
2253
2254         q = bdev_get_queue(bdev);
2255         if (!q)
2256                 return -ENXIO;
2257         if (!q->issue_flush_fn)
2258                 return -EOPNOTSUPP;
2259
2260         return q->issue_flush_fn(q, bdev->bd_disk, error_sector);
2261 }
2262
2263 EXPORT_SYMBOL(blkdev_issue_flush);
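
/*
 * Illustrative sketch (not from this file): a filesystem flushing the
 * device write cache after writing its commit block.  "bdev" is the
 * hypothetical caller's block device.
 *
 *	sector_t error_sector;
 *	int err = blkdev_issue_flush(bdev, &error_sector);
 *
 *	if (err == -EOPNOTSUPP)
 *		... the queue has no issue_flush_fn; nothing to flush ...
 */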
2264
2265 /**
2266  * blkdev_scsi_issue_flush_fn - issue flush for SCSI devices
2267  * @q:          device queue
2268  * @disk:       gendisk
2269  * @error_sector:       error offset
2270  *
2271  * Description:
2272  *    Devices understanding the SCSI command set can use this function as
2273  *    a helper for issuing a cache flush. Note: driver is required to store
2274  *    the error offset (in case of error flushing) in ->sector of struct
2275  *    request.
2276  */
2277 int blkdev_scsi_issue_flush_fn(request_queue_t *q, struct gendisk *disk,
2278                                sector_t *error_sector)
2279 {
2280         struct request *rq = blk_get_request(q, WRITE, __GFP_WAIT);
2281         int ret;
2282
2283         rq->flags |= REQ_BLOCK_PC | REQ_SOFTBARRIER;
2284         rq->sector = 0;
2285         memset(rq->cmd, 0, sizeof(rq->cmd));
2286         rq->cmd[0] = 0x35;
2287         rq->cmd_len = 12;
2288         rq->data = NULL;
2289         rq->data_len = 0;
2290         rq->timeout = 60 * HZ;
2291
2292         ret = blk_execute_rq(q, disk, rq);
2293
2294         if (ret && error_sector)
2295                 *error_sector = rq->sector;
2296
2297         blk_put_request(rq);
2298         return ret;
2299 }
2300
2301 EXPORT_SYMBOL(blkdev_scsi_issue_flush_fn);
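
/*
 * Illustrative sketch (not from this file): a driver for a device that
 * speaks the SCSI command set can simply point its queue at this helper
 * while setting the queue up.
 *
 *	q->issue_flush_fn = blkdev_scsi_issue_flush_fn;
 */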
2302
2303 void drive_stat_acct(struct request *rq, int nr_sectors, int new_io)
2304 {
2305         int rw = rq_data_dir(rq);
2306
2307         if (!blk_fs_request(rq) || !rq->rq_disk)
2308                 return;
2309
2310         if (rw == READ) {
2311                 __disk_stat_add(rq->rq_disk, read_sectors, nr_sectors);
2312                 if (!new_io)
2313                         __disk_stat_inc(rq->rq_disk, read_merges);
2314         } else if (rw == WRITE) {
2315                 __disk_stat_add(rq->rq_disk, write_sectors, nr_sectors);
2316                 if (!new_io)
2317                         __disk_stat_inc(rq->rq_disk, write_merges);
2318         }
2319         if (new_io) {
2320                 disk_round_stats(rq->rq_disk);
2321                 rq->rq_disk->in_flight++;
2322         }
2323 }
2324
2325 /*
2326  * add-request adds a request to the linked list.
2327  * queue lock is held and interrupts disabled, as we muck with the
2328  * request queue list.
2329  */
2330 static inline void add_request(request_queue_t * q, struct request * req)
2331 {
2332         drive_stat_acct(req, req->nr_sectors, 1);
2333
2334         if (q->activity_fn)
2335                 q->activity_fn(q->activity_data, rq_data_dir(req));
2336
2337         /*
2338          * elevator indicated where it wants this request to be
2339          * inserted at elevator_merge time
2340          */
2341         __elv_add_request(q, req, ELEVATOR_INSERT_SORT, 0);
2342 }
2343  
2344 /*
2345  * disk_round_stats()   - Round off the performance stats on a struct
2346  * disk_stats.
2347  *
2348  * The average IO queue length and utilisation statistics are maintained
2349  * by observing the current state of the queue length and the amount of
2350  * time it has been in that state.
2351  *
2352  * Normally, that accounting is done on IO completion, but that can result
2353  * in more than a second's worth of IO being accounted for within any one
2354  * second, leading to >100% utilisation.  To deal with that, we call this
2355  * function to do a round-off before returning the results when reading
2356  * /proc/diskstats.  This accounts immediately for all queue usage up to
2357  * the current jiffies and restarts the counters again.
2358  */
2359 void disk_round_stats(struct gendisk *disk)
2360 {
2361         unsigned long now = jiffies;
2362
2363         __disk_stat_add(disk, time_in_queue,
2364                         disk->in_flight * (now - disk->stamp));
2365         disk->stamp = now;
2366
2367         if (disk->in_flight)
2368                 __disk_stat_add(disk, io_ticks, (now - disk->stamp_idle));
2369         disk->stamp_idle = now;
2370 }
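
/*
 * Worked example of the accounting above (illustrative numbers only): if
 * the last round-off was 4 jiffies ago and 3 requests have been in flight
 * the whole time, this call adds 3 * 4 = 12 to time_in_queue and 4 to
 * io_ticks, then restamps, so a later completion only accounts for the
 * jiffies that elapse after this point.
 */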
2371
2372 /*
2373  * queue lock must be held
2374  */
2375 static void __blk_put_request(request_queue_t *q, struct request *req)
2376 {
2377         struct request_list *rl = req->rl;
2378
2379         if (unlikely(!q))
2380                 return;
2381         if (unlikely(--req->ref_count))
2382                 return;
2383
2384         req->rq_status = RQ_INACTIVE;
2385         req->q = NULL;
2386         req->rl = NULL;
2387
2388         /*
2389          * Request may not have originated from ll_rw_blk. If not,
2390          * it didn't come out of our reserved rq pools
2391          */
2392         if (rl) {
2393                 int rw = rq_data_dir(req);
2394
2395                 elv_completed_request(q, req);
2396
2397                 BUG_ON(!list_empty(&req->queuelist));
2398
2399                 blk_free_request(q, req);
2400                 freed_request(q, rw);
2401         }
2402 }
2403
2404 void blk_put_request(struct request *req)
2405 {
2406         /*
2407          * if req->rl isn't set, this request didn't originate from the
2408          * block layer, so it's safe to just disregard it
2409          */
2410         if (req->rl) {
2411                 unsigned long flags;
2412                 request_queue_t *q = req->q;
2413
2414                 spin_lock_irqsave(q->queue_lock, flags);
2415                 __blk_put_request(q, req);
2416                 spin_unlock_irqrestore(q->queue_lock, flags);
2417         }
2418 }
2419
2420 EXPORT_SYMBOL(blk_put_request);
2421
2422 /**
2423  * blk_end_sync_rq - executes a completion event on a request
2424  * @rq: request to complete
2425  */
2426 void blk_end_sync_rq(struct request *rq)
2427 {
2428         struct completion *waiting = rq->waiting;
2429
2430         rq->waiting = NULL;
2431         __blk_put_request(rq->q, rq);
2432
2433         /*
2434          * complete last, if this is a stack request the process (and thus
2435          * the rq pointer) could be invalid right after this complete()
2436          */
2437         complete(waiting);
2438 }
2439 EXPORT_SYMBOL(blk_end_sync_rq);
2440
2441 /**
2442  * blk_congestion_wait - wait for a queue to become uncongested
2443  * @rw: READ or WRITE
2444  * @timeout: timeout in jiffies
2445  *
2446  * Waits for up to @timeout jiffies for a queue (any queue) to exit congestion.
2447  * If no queues are congested then just wait for the next request to be
2448  * returned.
2449  */
2450 long blk_congestion_wait(int rw, long timeout)
2451 {
2452         long ret;
2453         DEFINE_WAIT(wait);
2454         wait_queue_head_t *wqh = &congestion_wqh[rw];
2455
2456         prepare_to_wait(wqh, &wait, TASK_UNINTERRUPTIBLE);
2457         ret = io_schedule_timeout(timeout);
2458         finish_wait(wqh, &wait);
2459         return ret;
2460 }
2461
2462 EXPORT_SYMBOL(blk_congestion_wait);
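
/*
 * Illustrative sketch (not from this file): throttling a writer as
 * described above, waiting up to a tenth of a second for some write
 * queue to exit congestion before trying again.  The helpers are
 * hypothetical.
 *
 *	while (my_too_much_dirty_data()) {
 *		my_start_some_writeback();
 *		blk_congestion_wait(WRITE, HZ/10);
 *	}
 */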
2463
2464 /*
2465  * Has to be called with the request spinlock acquired
2466  */
2467 static int attempt_merge(request_queue_t *q, struct request *req,
2468                           struct request *next)
2469 {
2470         if (!rq_mergeable(req) || !rq_mergeable(next))
2471                 return 0;
2472
2473         /*
2474          * not contiguous
2475          */
2476         if (req->sector + req->nr_sectors != next->sector)
2477                 return 0;
2478
2479         if (rq_data_dir(req) != rq_data_dir(next)
2480             || req->rq_disk != next->rq_disk
2481             || next->waiting || next->special)
2482                 return 0;
2483
2484         /*
2485          * If we are allowed to merge, then append bio list
2486          * from next to rq and release next. merge_requests_fn
2487          * will have updated segment counts, update sector
2488          * counts here.
2489          */
2490         if (!q->merge_requests_fn(q, req, next))
2491                 return 0;
2492
2493         /*
2494          * At this point we have either done a back merge
2495          * or front merge. We need the smaller start_time of
2496          * the merged requests to be the current request
2497          * for accounting purposes.
2498          */
2499         if (time_after(req->start_time, next->start_time))
2500                 req->start_time = next->start_time;
2501
2502         req->biotail->bi_next = next->bio;
2503         req->biotail = next->biotail;
2504
2505         req->nr_sectors = req->hard_nr_sectors += next->hard_nr_sectors;
2506
2507         elv_merge_requests(q, req, next);
2508
2509         if (req->rq_disk) {
2510                 disk_round_stats(req->rq_disk);
2511                 req->rq_disk->in_flight--;
2512         }
2513
2514         __blk_put_request(q, next);
2515         return 1;
2516 }
2517
2518 static inline int attempt_back_merge(request_queue_t *q, struct request *rq)
2519 {
2520         struct request *next = elv_latter_request(q, rq);
2521
2522         if (next)
2523                 return attempt_merge(q, rq, next);
2524
2525         return 0;
2526 }
2527
2528 static inline int attempt_front_merge(request_queue_t *q, struct request *rq)
2529 {
2530         struct request *prev = elv_former_request(q, rq);
2531
2532         if (prev)
2533                 return attempt_merge(q, prev, rq);
2534
2535         return 0;
2536 }
2537
2538 /**
2539  * blk_attempt_remerge  - attempt to remerge active head with next request
2540  * @q:    The &request_queue_t belonging to the device
2541  * @rq:   The head request (usually)
2542  *
2543  * Description:
2544  *    For head-active devices, the queue can easily be unplugged so quickly
2545  *    that proper merging is not done on the front request. This may hurt
2546  *    performance greatly for some devices. The block layer cannot safely
2547  *    do merging on that first request for these queues, but the driver can
2548  *    call this function and make it happen anyway. Only the driver knows
2549  *    when it is safe to do so.
2550  **/
2551 void blk_attempt_remerge(request_queue_t *q, struct request *rq)
2552 {
2553         unsigned long flags;
2554
2555         spin_lock_irqsave(q->queue_lock, flags);
2556         attempt_back_merge(q, rq);
2557         spin_unlock_irqrestore(q->queue_lock, flags);
2558 }
2559
2560 EXPORT_SYMBOL(blk_attempt_remerge);
2561
2562 /*
2563  * Non-locking blk_attempt_remerge variant.
2564  */
2565 void __blk_attempt_remerge(request_queue_t *q, struct request *rq)
2566 {
2567         attempt_back_merge(q, rq);
2568 }
2569
2570 EXPORT_SYMBOL(__blk_attempt_remerge);
2571
2572 static int __make_request(request_queue_t *q, struct bio *bio)
2573 {
2574         struct request *req, *freereq = NULL;
2575         int el_ret, rw, nr_sectors, cur_nr_sectors, barrier, err, sync;
2576         sector_t sector;
2577
2578         sector = bio->bi_sector;
2579         nr_sectors = bio_sectors(bio);
2580         cur_nr_sectors = bio_cur_sectors(bio);
2581
2582         rw = bio_data_dir(bio);
2583         sync = bio_sync(bio);
2584
2585         /*
2586          * low level driver can indicate that it wants pages above a
2587          * certain limit bounced to low memory (ie for highmem, or even
2588          * ISA dma in theory)
2589          */
2590         blk_queue_bounce(q, &bio);
2591
2592         spin_lock_prefetch(q->queue_lock);
2593
2594         barrier = bio_barrier(bio);
2595         if (barrier && (q->ordered == QUEUE_ORDERED_NONE)) {
2596                 err = -EOPNOTSUPP;
2597                 goto end_io;
2598         }
2599
2600 again:
2601         spin_lock_irq(q->queue_lock);
2602
2603         if (elv_queue_empty(q)) {
2604                 blk_plug_device(q);
2605                 goto get_rq;
2606         }
2607         if (barrier)
2608                 goto get_rq;
2609
2610         el_ret = elv_merge(q, &req, bio);
2611         switch (el_ret) {
2612                 case ELEVATOR_BACK_MERGE:
2613                         BUG_ON(!rq_mergeable(req));
2614
2615                         if (!q->back_merge_fn(q, req, bio))
2616                                 break;
2617
2618                         req->biotail->bi_next = bio;
2619                         req->biotail = bio;
2620                         req->nr_sectors = req->hard_nr_sectors += nr_sectors;
2621                         drive_stat_acct(req, nr_sectors, 0);
2622                         if (!attempt_back_merge(q, req))
2623                                 elv_merged_request(q, req);
2624                         goto out;
2625
2626                 case ELEVATOR_FRONT_MERGE:
2627                         BUG_ON(!rq_mergeable(req));
2628
2629                         if (!q->front_merge_fn(q, req, bio))
2630                                 break;
2631
2632                         bio->bi_next = req->bio;
2633                         req->bio = bio;
2634
2635                         /*
2636                          * may not be valid. If the low level driver said
2637                          * it didn't need a bounce buffer then it better
2638                          * not touch req->buffer either...
2639                          */
2640                         req->buffer = bio_data(bio);
2641                         req->current_nr_sectors = cur_nr_sectors;
2642                         req->hard_cur_sectors = cur_nr_sectors;
2643                         req->sector = req->hard_sector = sector;
2644                         req->nr_sectors = req->hard_nr_sectors += nr_sectors;
2645                         drive_stat_acct(req, nr_sectors, 0);
2646                         if (!attempt_front_merge(q, req))
2647                                 elv_merged_request(q, req);
2648                         goto out;
2649
2650                 /*
2651                  * elevator says don't/can't merge. get new request
2652                  */
2653                 case ELEVATOR_NO_MERGE:
2654                         break;
2655
2656                 default:
2657                         printk("elevator returned crap (%d)\n", el_ret);
2658                         BUG();
2659         }
2660
2661         /*
2662          * Grab a free request from the freelist - if that is empty, check
2663          * if we are doing read ahead and abort instead of blocking for
2664          * a free slot.
2665          */
2666 get_rq:
2667         if (freereq) {
2668                 req = freereq;
2669                 freereq = NULL;
2670         } else {
2671                 spin_unlock_irq(q->queue_lock);
2672                 if ((freereq = get_request(q, rw, GFP_ATOMIC)) == NULL) {
2673                         /*
2674                          * READA bit set
2675                          */
2676                         err = -EWOULDBLOCK;
2677                         if (bio_rw_ahead(bio))
2678                                 goto end_io;
2679         
2680                         freereq = get_request_wait(q, rw);
2681                 }
2682                 goto again;
2683         }
2684
2685         req->flags |= REQ_CMD;
2686
2687         /*
2688          * inherit FAILFAST from bio (for read-ahead, and explicit FAILFAST)
2689          */
2690         if (bio_rw_ahead(bio) || bio_failfast(bio))
2691                 req->flags |= REQ_FAILFAST;
2692
2693         /*
2694          * REQ_BARRIER implies no merging, but lets make it explicit
2695          */
2696         if (barrier)
2697                 req->flags |= (REQ_HARDBARRIER | REQ_NOMERGE);
2698
2699         req->errors = 0;
2700         req->hard_sector = req->sector = sector;
2701         req->hard_nr_sectors = req->nr_sectors = nr_sectors;
2702         req->current_nr_sectors = req->hard_cur_sectors = cur_nr_sectors;
2703         req->nr_phys_segments = bio_phys_segments(q, bio);
2704         req->nr_hw_segments = bio_hw_segments(q, bio);
2705         req->buffer = bio_data(bio);    /* see ->buffer comment above */
2706         req->waiting = NULL;
2707         req->bio = req->biotail = bio;
2708         req->rq_disk = bio->bi_bdev->bd_disk;
2709         req->start_time = jiffies;
2710
2711         add_request(q, req);
2712 out:
2713         if (freereq)
2714                 __blk_put_request(q, freereq);
2715         if (sync)
2716                 __generic_unplug_device(q);
2717
2718         spin_unlock_irq(q->queue_lock);
2719         return 0;
2720
2721 end_io:
2722         bio_endio(bio, nr_sectors << 9, err);
2723         return 0;
2724 }
2725
2726 /*
2727  * If bio->bi_bdev is a partition, remap the location
2728  */
2729 static inline void blk_partition_remap(struct bio *bio)
2730 {
2731         struct block_device *bdev = bio->bi_bdev;
2732
2733         if (bdev != bdev->bd_contains) {
2734                 struct hd_struct *p = bdev->bd_part;
2735
2736                 switch (bio->bi_rw) {
2737                 case READ:
2738                         p->read_sectors += bio_sectors(bio);
2739                         p->reads++;
2740                         break;
2741                 case WRITE:
2742                         p->write_sectors += bio_sectors(bio);
2743                         p->writes++;
2744                         break;
2745                 }
2746                 bio->bi_sector += p->start_sect;
2747                 bio->bi_bdev = bdev->bd_contains;
2748         }
2749 }
2750
2751 void blk_finish_queue_drain(request_queue_t *q)
2752 {
2753         struct request_list *rl = &q->rq;
2754         struct request *rq;
2755
2756         spin_lock_irq(q->queue_lock);
2757         clear_bit(QUEUE_FLAG_DRAIN, &q->queue_flags);
2758
2759         while (!list_empty(&q->drain_list)) {
2760                 rq = list_entry_rq(q->drain_list.next);
2761
2762                 list_del_init(&rq->queuelist);
2763                 __elv_add_request(q, rq, ELEVATOR_INSERT_BACK, 1);
2764         }
2765
2766         spin_unlock_irq(q->queue_lock);
2767
2768         wake_up(&rl->wait[0]);
2769         wake_up(&rl->wait[1]);
2770         wake_up(&rl->drain);
2771 }
2772
2773 static int wait_drain(request_queue_t *q, struct request_list *rl, int dispatch)
2774 {
2775         int wait = rl->count[READ] + rl->count[WRITE];
2776
2777         if (dispatch)
2778                 wait += !list_empty(&q->queue_head);
2779
2780         return wait;
2781 }
2782
2783 /*
2784  * We rely on the fact that only requests allocated through blk_alloc_request()
2785  * have io scheduler private data structures associated with them. Any other
2786  * type of request (allocated on stack or through kmalloc()) should not go
2787  * to the io scheduler core, but be attached to the queue head instead.
2788  */
2789 void blk_wait_queue_drained(request_queue_t *q, int wait_dispatch)
2790 {
2791         struct request_list *rl = &q->rq;
2792         DEFINE_WAIT(wait);
2793
2794         spin_lock_irq(q->queue_lock);
2795         set_bit(QUEUE_FLAG_DRAIN, &q->queue_flags);
2796
2797         while (wait_drain(q, rl, wait_dispatch)) {
2798                 prepare_to_wait(&rl->drain, &wait, TASK_UNINTERRUPTIBLE);
2799
2800                 if (wait_drain(q, rl, wait_dispatch)) {
2801                         __generic_unplug_device(q);
2802                         spin_unlock_irq(q->queue_lock);
2803                         io_schedule();
2804                         spin_lock_irq(q->queue_lock);
2805                 }
2806
2807                 finish_wait(&rl->drain, &wait);
2808         }
2809
2810         spin_unlock_irq(q->queue_lock);
2811 }
2812
2813 /*
2814  * block waiting for the io scheduler being started again.
2815  */
2816 static inline void block_wait_queue_running(request_queue_t *q)
2817 {
2818         DEFINE_WAIT(wait);
2819
2820         while (test_bit(QUEUE_FLAG_DRAIN, &q->queue_flags)) {
2821                 struct request_list *rl = &q->rq;
2822
2823                 prepare_to_wait_exclusive(&rl->drain, &wait,
2824                                 TASK_UNINTERRUPTIBLE);
2825
2826                 /*
2827                  * re-check the condition. avoids using prepare_to_wait()
2828                  * in the fast path (queue is running)
2829                  */
2830                 if (test_bit(QUEUE_FLAG_DRAIN, &q->queue_flags))
2831                         io_schedule();
2832
2833                 finish_wait(&rl->drain, &wait);
2834         }
2835 }
2836
2837 static void handle_bad_sector(struct bio *bio)
2838 {
2839         char b[BDEVNAME_SIZE];
2840
2841         printk(KERN_INFO "attempt to access beyond end of device\n");
2842         printk(KERN_INFO "%s: rw=%ld, want=%Lu, limit=%Lu\n",
2843                         bdevname(bio->bi_bdev, b),
2844                         bio->bi_rw,
2845                         (unsigned long long)bio->bi_sector + bio_sectors(bio),
2846                         (long long)(bio->bi_bdev->bd_inode->i_size >> 9));
2847
2848         set_bit(BIO_EOF, &bio->bi_flags);
2849 }
2850
2851 /**
2852  * generic_make_request - hand a buffer to its device driver for I/O
2853  * @bio:  The bio describing the location in memory and on the device.
2854  *
2855  * generic_make_request() is used to make I/O requests of block
2856  * devices. It is passed a &struct bio, which describes the I/O that needs
2857  * to be done.
2858  *
2859  * generic_make_request() does not return any status.  The
2860  * success/failure status of the request, along with notification of
2861  * completion, is delivered asynchronously through the bio->bi_end_io
2862  * function described (one day) elsewhere.
2863  *
2864  * The caller of generic_make_request must make sure that bi_io_vec
2865  * is set to describe the memory buffer, that bi_bdev and bi_sector are
2866  * set to describe the device address, and that the
2867  * bi_end_io and optionally bi_private are set to describe how
2868  * completion notification should be signaled.
2869  *
2870  * generic_make_request and the drivers it calls may use bi_next if this
2871  * bio happens to be merged with someone else, and may change bi_bdev and
2872  * bi_sector for remaps as it sees fit.  So the values of these fields
2873  * should NOT be depended on after the call to generic_make_request.
2874  */
2875 void generic_make_request(struct bio *bio)
2876 {
2877         request_queue_t *q;
2878         sector_t maxsector;
2879         int ret, nr_sectors = bio_sectors(bio);
2880
2881         might_sleep();
2882         /* Test device or partition size, when known. */
2883         maxsector = bio->bi_bdev->bd_inode->i_size >> 9;
2884         if (maxsector) {
2885                 sector_t sector = bio->bi_sector;
2886
2887                 if (maxsector < nr_sectors || maxsector - nr_sectors < sector) {
2888                         /*
2889                          * This may well happen - the kernel calls bread()
2890                          * without checking the size of the device, e.g., when
2891                          * mounting a device.
2892                          */
2893                         handle_bad_sector(bio);
2894                         goto end_io;
2895                 }
2896         }
2897
2898         /*
2899          * Resolve the mapping until finished. (drivers are
2900          * still free to implement/resolve their own stacking
2901          * by explicitly returning 0)
2902          *
2903          * NOTE: we don't repeat the blk_size check for each new device.
2904          * Stacking drivers are expected to know what they are doing.
2905          */
2906         do {
2907                 char b[BDEVNAME_SIZE];
2908
2909                 q = bdev_get_queue(bio->bi_bdev);
2910                 if (!q) {
2911                         printk(KERN_ERR
2912                                "generic_make_request: Trying to access "
2913                                 "nonexistent block-device %s (%Lu)\n",
2914                                 bdevname(bio->bi_bdev, b),
2915                                 (long long) bio->bi_sector);
2916 end_io:
2917                         bio_endio(bio, bio->bi_size, -EIO);
2918                         break;
2919                 }
2920
2921                 if (unlikely(bio_sectors(bio) > q->max_hw_sectors)) {
2922                         printk("bio too big device %s (%u > %u)\n", 
2923                                 bdevname(bio->bi_bdev, b),
2924                                 bio_sectors(bio),
2925                                 q->max_hw_sectors);
2926                         goto end_io;
2927                 }
2928
2929                 if (test_bit(QUEUE_FLAG_DEAD, &q->queue_flags))
2930                         goto end_io;
2931
2932                 block_wait_queue_running(q);
2933
2934                 /*
2935                  * If this device has partitions, remap block n
2936                  * of partition p to block n+start(p) of the disk.
2937                  */
2938                 blk_partition_remap(bio);
2939
2940                 ret = q->make_request_fn(q, bio);
2941         } while (ret);
2942 }
2943
2944 EXPORT_SYMBOL(generic_make_request);
2945
2946 /**
2947  * submit_bio - submit a bio to the block device layer for I/O
2948  * @rw: whether to %READ or %WRITE, or maybe to %READA (read ahead)
2949  * @bio: The &struct bio which describes the I/O
2950  *
2951  * submit_bio() is very similar in purpose to generic_make_request(), and
2952  * uses that function to do most of the work. Both are fairly rough
2953  * interfaces, @bio must be presetup and ready for I/O.
2954  *
2955  */
2956 void submit_bio(int rw, struct bio *bio)
2957 {
2958         int count = bio_sectors(bio);
2959
2960         BIO_BUG_ON(!bio->bi_size);
2961         BIO_BUG_ON(!bio->bi_io_vec);
2962         bio->bi_rw = rw;
2963         if (rw & WRITE)
2964                 mod_page_state(pgpgout, count);
2965         else
2966                 mod_page_state(pgpgin, count);
2967
2968         if (unlikely(block_dump)) {
2969                 char b[BDEVNAME_SIZE];
2970                 printk(KERN_DEBUG "%s(%d): %s block %Lu on %s\n",
2971                         current->comm, current->pid,
2972                         (rw & WRITE) ? "WRITE" : "READ",
2973                         (unsigned long long)bio->bi_sector,
2974                         bdevname(bio->bi_bdev,b));
2975         }
2976
2977         generic_make_request(bio);
2978 }
2979
2980 EXPORT_SYMBOL(submit_bio);
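
/*
 * Illustrative sketch (not from this file): submitting a single-page read.
 * bio_alloc()/bio_add_page() are assumed from the bio layer; "my_end_io"
 * and "my_private" are hypothetical, and my_end_io() is where completion
 * is signalled, as described for generic_make_request() above.
 *
 *	struct bio *bio = bio_alloc(GFP_NOIO, 1);
 *
 *	bio->bi_bdev = bdev;
 *	bio->bi_sector = sector;
 *	bio->bi_end_io = my_end_io;
 *	bio->bi_private = my_private;
 *	bio_add_page(bio, page, PAGE_SIZE, 0);
 *	submit_bio(READ, bio);
 */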
2981
2982 void blk_recalc_rq_segments(struct request *rq)
2983 {
2984         struct bio *bio, *prevbio = NULL;
2985         int nr_phys_segs, nr_hw_segs;
2986         unsigned int phys_size, hw_size;
2987         request_queue_t *q = rq->q;
2988
2989         if (!rq->bio)
2990                 return;
2991
2992         phys_size = hw_size = nr_phys_segs = nr_hw_segs = 0;
2993         rq_for_each_bio(bio, rq) {
2994                 /* Force bio hw/phys segs to be recalculated. */
2995                 bio->bi_flags &= ~(1 << BIO_SEG_VALID);
2996
2997                 nr_phys_segs += bio_phys_segments(q, bio);
2998                 nr_hw_segs += bio_hw_segments(q, bio);
2999                 if (prevbio) {
3000                         int pseg = phys_size + prevbio->bi_size + bio->bi_size;
3001                         int hseg = hw_size + prevbio->bi_size + bio->bi_size;
3002
3003                         if (blk_phys_contig_segment(q, prevbio, bio) &&
3004                             pseg <= q->max_segment_size) {
3005                                 nr_phys_segs--;
3006                                 phys_size += prevbio->bi_size + bio->bi_size;
3007                         } else
3008                                 phys_size = 0;
3009
3010                         if (blk_hw_contig_segment(q, prevbio, bio) &&
3011                             hseg <= q->max_segment_size) {
3012                                 nr_hw_segs--;
3013                                 hw_size += prevbio->bi_size + bio->bi_size;
3014                         } else
3015                                 hw_size = 0;
3016                 }
3017                 prevbio = bio;
3018         }
3019
3020         rq->nr_phys_segments = nr_phys_segs;
3021         rq->nr_hw_segments = nr_hw_segs;
3022 }
3023
3024 void blk_recalc_rq_sectors(struct request *rq, int nsect)
3025 {
3026         if (blk_fs_request(rq)) {
3027                 rq->hard_sector += nsect;
3028                 rq->hard_nr_sectors -= nsect;
3029
3030                 /*
3031                  * Move the I/O submission pointers ahead if required.
3032                  */
3033                 if ((rq->nr_sectors >= rq->hard_nr_sectors) &&
3034                     (rq->sector <= rq->hard_sector)) {
3035                         rq->sector = rq->hard_sector;
3036                         rq->nr_sectors = rq->hard_nr_sectors;
3037                         rq->hard_cur_sectors = bio_cur_sectors(rq->bio);
3038                         rq->current_nr_sectors = rq->hard_cur_sectors;
3039                         rq->buffer = bio_data(rq->bio);
3040                 }
3041
3042                 /*
3043                  * if total number of sectors is less than the first segment
3044                  * size, something has gone terribly wrong
3045                  */
3046                 if (rq->nr_sectors < rq->current_nr_sectors) {
3047                         printk("blk: request botched\n");
3048                         rq->nr_sectors = rq->current_nr_sectors;
3049                 }
3050         }
3051 }
3052
3053 static int __end_that_request_first(struct request *req, int uptodate,
3054                                     int nr_bytes)
3055 {
3056         int total_bytes, bio_nbytes, error, next_idx = 0;
3057         struct bio *bio;
3058
3059         /*
3060          * extend uptodate bool to allow < 0 value to be direct io error
3061          */
3062         error = 0;
3063         if (end_io_error(uptodate))
3064                 error = !uptodate ? -EIO : uptodate;
3065
3066         /*
3067          * for a REQ_BLOCK_PC request, we want to carry any eventual
3068          * sense key with us all the way through
3069          */
3070         if (!blk_pc_request(req))
3071                 req->errors = 0;
3072
3073         if (!uptodate) {
3074                 if (blk_fs_request(req) && !(req->flags & REQ_QUIET))
3075                         printk("end_request: I/O error, dev %s, sector %llu\n",
3076                                 req->rq_disk ? req->rq_disk->disk_name : "?",
3077                                 (unsigned long long)req->sector);
3078         }
3079
3080         total_bytes = bio_nbytes = 0;
3081         while ((bio = req->bio) != NULL) {
3082                 int nbytes;
3083
3084                 if (nr_bytes >= bio->bi_size) {
3085                         req->bio = bio->bi_next;
3086                         nbytes = bio->bi_size;
3087                         bio_endio(bio, nbytes, error);
3088                         next_idx = 0;
3089                         bio_nbytes = 0;
3090                 } else {
3091                         int idx = bio->bi_idx + next_idx;
3092
3093                         if (unlikely(bio->bi_idx >= bio->bi_vcnt)) {
3094                                 blk_dump_rq_flags(req, "__end_that");
3095                                 printk("%s: bio idx %d >= vcnt %d\n",
3096                                                 __FUNCTION__,
3097                                                 bio->bi_idx, bio->bi_vcnt);
3098                                 break;
3099                         }
3100
3101                         nbytes = bio_iovec_idx(bio, idx)->bv_len;
3102                         BIO_BUG_ON(nbytes > bio->bi_size);
3103
3104                         /*
3105                          * not a complete bvec done
3106                          */
3107                         if (unlikely(nbytes > nr_bytes)) {
3108                                 bio_nbytes += nr_bytes;
3109                                 total_bytes += nr_bytes;
3110                                 break;
3111                         }
3112
3113                         /*
3114                          * advance to the next vector
3115                          */
3116                         next_idx++;
3117                         bio_nbytes += nbytes;
3118                 }
3119
3120                 total_bytes += nbytes;
3121                 nr_bytes -= nbytes;
3122
3123                 if ((bio = req->bio)) {
3124                         /*
3125                          * end more in this run, or just return 'not-done'
3126                          */
3127                         if (unlikely(nr_bytes <= 0))
3128                                 break;
3129                 }
3130         }
3131
3132         /*
3133          * completely done
3134          */
3135         if (!req->bio)
3136                 return 0;
3137
3138         /*
3139          * if the request wasn't completed, update state
3140          */
3141         if (bio_nbytes) {
3142                 bio_endio(bio, bio_nbytes, error);
3143                 bio->bi_idx += next_idx;
3144                 bio_iovec(bio)->bv_offset += nr_bytes;
3145                 bio_iovec(bio)->bv_len -= nr_bytes;
3146         }
3147
3148         blk_recalc_rq_sectors(req, total_bytes >> 9);
3149         blk_recalc_rq_segments(req);
3150         return 1;
3151 }
3152
3153 /**
3154  * end_that_request_first - end I/O on a request
3155  * @req:      the request being processed
3156  * @uptodate: 1 for success, 0 for I/O error, < 0 for specific error
3157  * @nr_sectors: number of sectors to end I/O on
3158  *
3159  * Description:
3160  *     Ends I/O on a number of sectors attached to @req, and sets it up
3161  *     for the next range of segments (if any) in the cluster.
3162  *
3163  * Return:
3164  *     0 - we are done with this request, call end_that_request_last()
3165  *     1 - still buffers pending for this request
3166  **/
3167 int end_that_request_first(struct request *req, int uptodate, int nr_sectors)
3168 {
3169         return __end_that_request_first(req, uptodate, nr_sectors << 9);
3170 }
3171
3172 EXPORT_SYMBOL(end_that_request_first);
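
/*
 * Usage sketch (not part of this file; driver-side variables assumed): a
 * typical completion path in a driver's interrupt handler, run with the
 * queue lock held, finishing 'nsect' sectors of the current request:
 *
 *	if (!end_that_request_first(rq, uptodate, nsect)) {
 *		blkdev_dequeue_request(rq);
 *		end_that_request_last(rq);
 *	}
 *
 * end_request() below wraps this pattern for the common case of
 * completing req->hard_cur_sectors.
 */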
3173
3174 /**
3175  * end_that_request_chunk - end I/O on a request
3176  * @req:      the request being processed
3177  * @uptodate: 1 for success, 0 for I/O error, < 0 for specific error
3178  * @nr_bytes: number of bytes to complete
3179  *
3180  * Description:
3181  *     Ends I/O on a number of bytes attached to @req, and sets it up
3182  *     for the next range of segments (if any). Like end_that_request_first(),
3183  *     but deals with bytes instead of sectors.
3184  *
3185  * Return:
3186  *     0 - we are done with this request, call end_that_request_last()
3187  *     1 - still buffers pending for this request
3188  **/
3189 int end_that_request_chunk(struct request *req, int uptodate, int nr_bytes)
3190 {
3191         return __end_that_request_first(req, uptodate, nr_bytes);
3192 }
3193
3194 EXPORT_SYMBOL(end_that_request_chunk);
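
/*
 * Usage sketch (hypothetical driver variables): byte-granular completion,
 * e.g. when the hardware reports a residual byte count for a transfer:
 *
 *	unsigned int done = total_len - resid;
 *
 *	if (!end_that_request_chunk(rq, uptodate, done)) {
 *		blkdev_dequeue_request(rq);
 *		end_that_request_last(rq);
 *	}
 */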
3195
3196 /*
3197  * queue lock must be held
3198  */
3199 void end_that_request_last(struct request *req)
3200 {
3201         struct gendisk *disk = req->rq_disk;
3202
3203         if (unlikely(laptop_mode) && blk_fs_request(req))
3204                 laptop_io_completion();
3205
3206         if (disk && blk_fs_request(req)) {
3207                 unsigned long duration = jiffies - req->start_time;
3208                 switch (rq_data_dir(req)) {
3209                     case WRITE:
3210                         __disk_stat_inc(disk, writes);
3211                         __disk_stat_add(disk, write_ticks, duration);
3212                         break;
3213                     case READ:
3214                         __disk_stat_inc(disk, reads);
3215                         __disk_stat_add(disk, read_ticks, duration);
3216                         break;
3217                 }
3218                 disk_round_stats(disk);
3219                 disk->in_flight--;
3220         }
3221         if (req->end_io)
3222                 req->end_io(req);
3223         else
3224                 __blk_put_request(req->q, req);
3225 }
3226
3227 EXPORT_SYMBOL(end_that_request_last);
3228
3229 void end_request(struct request *req, int uptodate)
3230 {
3231         if (!end_that_request_first(req, uptodate, req->hard_cur_sectors)) {
3232                 add_disk_randomness(req->rq_disk);
3233                 blkdev_dequeue_request(req);
3234                 end_that_request_last(req);
3235         }
3236 }
3237
3238 EXPORT_SYMBOL(end_request);
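
/*
 * Usage sketch ('do_transfer' is a hypothetical helper): the classic
 * one-buffer-at-a-time driver loop built on end_request():
 *
 *	while ((rq = elv_next_request(q)) != NULL) {
 *		int err = do_transfer(rq);
 *		end_request(rq, err == 0);
 *	}
 */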
3239
3240 void blk_rq_bio_prep(request_queue_t *q, struct request *rq, struct bio *bio)
3241 {
3242         /* first three bits are identical in rq->flags and bio->bi_rw */
3243         rq->flags |= (bio->bi_rw & 7);
3244
3245         rq->nr_phys_segments = bio_phys_segments(q, bio);
3246         rq->nr_hw_segments = bio_hw_segments(q, bio);
3247         rq->current_nr_sectors = bio_cur_sectors(bio);
3248         rq->hard_cur_sectors = rq->current_nr_sectors;
3249         rq->hard_nr_sectors = rq->nr_sectors = bio_sectors(bio);
3250         rq->buffer = bio_data(bio);
3251
3252         rq->bio = rq->biotail = bio;
3253 }
3254
3255 EXPORT_SYMBOL(blk_rq_bio_prep);
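
/*
 * Sketch (an assumed caller, not the only way this is used): code that has
 * built a bio and wants a request wrapped around it might pair this with
 * blk_get_request():
 *
 *	rq = blk_get_request(q, rw, GFP_KERNEL);
 *	blk_rq_bio_prep(q, rq, bio);
 */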
3256
3257 int kblockd_schedule_work(struct work_struct *work)
3258 {
3259         return queue_work(kblockd_workqueue, work);
3260 }
3261
3262 EXPORT_SYMBOL(kblockd_schedule_work);
3263
3264 void kblockd_flush(void)
3265 {
3266         flush_workqueue(kblockd_workqueue);
3267 }
3268 EXPORT_SYMBOL(kblockd_flush);
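
/*
 * Usage sketch (names hypothetical): deferring block-layer work to the
 * kblockd thread instead of doing it in interrupt context, using the
 * three-argument INIT_WORK() of this kernel generation:
 *
 *	static void my_deferred_fn(void *data)
 *	{
 *		...
 *	}
 *
 *	INIT_WORK(&my_work, my_deferred_fn, my_data);
 *	kblockd_schedule_work(&my_work);
 *
 * kblockd_flush() then waits for any queued work to finish, e.g. before
 * tearing a queue down.
 */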
3269
3270 int __init blk_dev_init(void)
3271 {
3272         kblockd_workqueue = create_workqueue("kblockd");
3273         if (!kblockd_workqueue)
3274                 panic("Failed to create kblockd\n");
3275
3276         request_cachep = kmem_cache_create("blkdev_requests",
3277                         sizeof(struct request), 0, SLAB_PANIC, NULL, NULL);
3278
3279         requestq_cachep = kmem_cache_create("blkdev_queue",
3280                         sizeof(request_queue_t), 0, SLAB_PANIC, NULL, NULL);
3281
3282         iocontext_cachep = kmem_cache_create("blkdev_ioc",
3283                         sizeof(struct io_context), 0, SLAB_PANIC, NULL, NULL);
3284
3285         blk_max_low_pfn = max_low_pfn;
3286         blk_max_pfn = max_pfn;
3287
3288         return 0;
3289 }
3290
3291 /*
3292  * IO Context helper functions
3293  */
3294 void put_io_context(struct io_context *ioc)
3295 {
3296         if (ioc == NULL)
3297                 return;
3298
3299         BUG_ON(atomic_read(&ioc->refcount) == 0);
3300
3301         if (atomic_dec_and_test(&ioc->refcount)) {
3302                 if (ioc->aic && ioc->aic->dtor)
3303                         ioc->aic->dtor(ioc->aic);
3304                 if (ioc->cic && ioc->cic->dtor)
3305                         ioc->cic->dtor(ioc->cic);
3306
3307                 kmem_cache_free(iocontext_cachep, ioc);
3308         }
3309 }
3310 EXPORT_SYMBOL(put_io_context);
3311
3312 /* Called by the exiting task */
3313 void exit_io_context(void)
3314 {
3315         unsigned long flags;
3316         struct io_context *ioc;
3317
3318         local_irq_save(flags);
3319         ioc = current->io_context;
3320         current->io_context = NULL;
3321         local_irq_restore(flags);
3322
3323         if (ioc->aic && ioc->aic->exit)
3324                 ioc->aic->exit(ioc->aic);
3325         if (ioc->cic && ioc->cic->exit)
3326                 ioc->cic->exit(ioc->cic);
3327
3328         put_io_context(ioc);
3329 }
3330
3331 /*
3332  * If the current task has no IO context then create one and initialise it.
3333  * If it does have a context, take a ref on it.
3334  *
3335  * This is always called in the context of the task which submitted the I/O.
3336  * But weird things happen, so we disable local interrupts to ensure exclusive
3337  * access to *current.
3338  */
3339 struct io_context *get_io_context(int gfp_flags)
3340 {
3341         struct task_struct *tsk = current;
3342         unsigned long flags;
3343         struct io_context *ret;
3344
3345         local_irq_save(flags);
3346         ret = tsk->io_context;
3347         if (ret)
3348                 goto out;
3349
3350         local_irq_restore(flags);
3351
3352         ret = kmem_cache_alloc(iocontext_cachep, gfp_flags);
3353         if (ret) {
3354                 atomic_set(&ret->refcount, 1);
3355                 ret->pid = tsk->pid;
3356                 ret->last_waited = jiffies; /* doesn't matter while... */
3357                 ret->nr_batch_requests = 0; /* ...this stays 0 */
3358                 ret->aic = NULL;
3359                 ret->cic = NULL;
3360                 spin_lock_init(&ret->lock);
3361
3362                 local_irq_save(flags);
3363
3364                 /*
3365                  * very unlikely: someone raced with us setting up the task's io
3366                  * context. if so, free ours and just take a reference on theirs.
3367                  */
3368                 if (!tsk->io_context)
3369                         tsk->io_context = ret;
3370                 else {
3371                         kmem_cache_free(iocontext_cachep, ret);
3372                         ret = tsk->io_context;
3373                 }
3374
3375 out:
3376                 atomic_inc(&ret->refcount);
3377                 local_irq_restore(flags);
3378         }
3379
3380         return ret;
3381 }
3382 EXPORT_SYMBOL(get_io_context);
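
/*
 * Usage sketch: callers such as the io schedulers take a reference here
 * and drop it with put_io_context() when done (GFP_ATOMIC is just an
 * example of a mask a submit-path caller might pass):
 *
 *	struct io_context *ioc = get_io_context(GFP_ATOMIC);
 *	if (ioc) {
 *		...
 *		put_io_context(ioc);
 *	}
 */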
3383
3384 void copy_io_context(struct io_context **pdst, struct io_context **psrc)
3385 {
3386         struct io_context *src = *psrc;
3387         struct io_context *dst = *pdst;
3388
3389         if (src) {
3390                 BUG_ON(atomic_read(&src->refcount) == 0);
3391                 atomic_inc(&src->refcount);
3392                 put_io_context(dst);
3393                 *pdst = src;
3394         }
3395 }
3396 EXPORT_SYMBOL(copy_io_context);
3397
3398 void swap_io_context(struct io_context **ioc1, struct io_context **ioc2)
3399 {
3400         struct io_context *temp;
3401         temp = *ioc1;
3402         *ioc1 = *ioc2;
3403         *ioc2 = temp;
3404 }
3405 EXPORT_SYMBOL(swap_io_context);
3406
3407 /*
3408  * sysfs parts below
3409  */
3410 struct queue_sysfs_entry {
3411         struct attribute attr;
3412         ssize_t (*show)(struct request_queue *, char *);
3413         ssize_t (*store)(struct request_queue *, const char *, size_t);
3414 };
3415
3416 static ssize_t
3417 queue_var_show(unsigned int var, char *page)
3418 {
3419         return sprintf(page, "%d\n", var);
3420 }
3421
3422 static ssize_t
3423 queue_var_store(unsigned long *var, const char *page, size_t count)
3424 {
3425         char *p = (char *) page;
3426
3427         *var = simple_strtoul(p, &p, 10);
3428         return count;
3429 }
3430
3431 static ssize_t queue_requests_show(struct request_queue *q, char *page)
3432 {
3433         return queue_var_show(q->nr_requests, (page));
3434 }
3435
3436 static ssize_t
3437 queue_requests_store(struct request_queue *q, const char *page, size_t count)
3438 {
3439         struct request_list *rl = &q->rq;
3440
3441         int ret = queue_var_store(&q->nr_requests, page, count);
3442         if (q->nr_requests < BLKDEV_MIN_RQ)
3443                 q->nr_requests = BLKDEV_MIN_RQ;
3444         blk_queue_congestion_threshold(q);
3445
3446         if (rl->count[READ] >= queue_congestion_on_threshold(q))
3447                 set_queue_congested(q, READ);
3448         else if (rl->count[READ] < queue_congestion_off_threshold(q))
3449                 clear_queue_congested(q, READ);
3450
3451         if (rl->count[WRITE] >= queue_congestion_on_threshold(q))
3452                 set_queue_congested(q, WRITE);
3453         else if (rl->count[WRITE] < queue_congestion_off_threshold(q))
3454                 clear_queue_congested(q, WRITE);
3455
3456         if (rl->count[READ] >= q->nr_requests) {
3457                 blk_set_queue_full(q, READ);
3458         } else if (rl->count[READ]+1 <= q->nr_requests) {
3459                 blk_clear_queue_full(q, READ);
3460                 wake_up(&rl->wait[READ]);
3461         }
3462
3463         if (rl->count[WRITE] >= q->nr_requests) {
3464                 blk_set_queue_full(q, WRITE);
3465         } else if (rl->count[WRITE]+1 <= q->nr_requests) {
3466                 blk_clear_queue_full(q, WRITE);
3467                 wake_up(&rl->wait[WRITE]);
3468         }
3469         return ret;
3470 }
3471
3472 static ssize_t queue_ra_show(struct request_queue *q, char *page)
3473 {
3474         int ra_kb = q->backing_dev_info.ra_pages << (PAGE_CACHE_SHIFT - 10);
3475
3476         return queue_var_show(ra_kb, (page));
3477 }
3478
3479 static ssize_t
3480 queue_ra_store(struct request_queue *q, const char *page, size_t count)
3481 {
3482         unsigned long ra_kb;
3483         ssize_t ret = queue_var_store(&ra_kb, page, count);
3484
3485         spin_lock_irq(q->queue_lock);
3486         if (ra_kb > (q->max_sectors >> 1))
3487                 ra_kb = (q->max_sectors >> 1);
3488
3489         q->backing_dev_info.ra_pages = ra_kb >> (PAGE_CACHE_SHIFT - 10);
3490         spin_unlock_irq(q->queue_lock);
3491
3492         return ret;
3493 }
3494
3495 static ssize_t queue_max_sectors_show(struct request_queue *q, char *page)
3496 {
3497         int max_sectors_kb = q->max_sectors >> 1;
3498
3499         return queue_var_show(max_sectors_kb, (page));
3500 }
3501
3502 static ssize_t
3503 queue_max_sectors_store(struct request_queue *q, const char *page, size_t count)
3504 {
3505         unsigned long max_sectors_kb,
3506                         max_hw_sectors_kb = q->max_hw_sectors >> 1,
3507                         page_kb = 1 << (PAGE_CACHE_SHIFT - 10);
3508         ssize_t ret = queue_var_store(&max_sectors_kb, page, count);
3509         int ra_kb;
3510
3511         if (max_sectors_kb > max_hw_sectors_kb || max_sectors_kb < page_kb)
3512                 return -EINVAL;
3513         /*
3514          * Take the queue lock to update the readahead and max_sectors
3515          * values synchronously:
3516          */
3517         spin_lock_irq(q->queue_lock);
3518         /*
3519          * Trim readahead window as well, if necessary:
3520          */
3521         ra_kb = q->backing_dev_info.ra_pages << (PAGE_CACHE_SHIFT - 10);
3522         if (ra_kb > max_sectors_kb)
3523                 q->backing_dev_info.ra_pages =
3524                                 max_sectors_kb >> (PAGE_CACHE_SHIFT - 10);
3525
3526         q->max_sectors = max_sectors_kb << 1;
3527         spin_unlock_irq(q->queue_lock);
3528
3529         return ret;
3530 }
3531
3532 static ssize_t queue_max_hw_sectors_show(struct request_queue *q, char *page)
3533 {
3534         int max_hw_sectors_kb = q->max_hw_sectors >> 1;
3535
3536         return queue_var_show(max_hw_sectors_kb, (page));
3537 }
3538
3539
3540 static struct queue_sysfs_entry queue_requests_entry = {
3541         .attr = {.name = "nr_requests", .mode = S_IRUGO | S_IWUSR },
3542         .show = queue_requests_show,
3543         .store = queue_requests_store,
3544 };
3545
3546 static struct queue_sysfs_entry queue_ra_entry = {
3547         .attr = {.name = "read_ahead_kb", .mode = S_IRUGO | S_IWUSR },
3548         .show = queue_ra_show,
3549         .store = queue_ra_store,
3550 };
3551
3552 static struct queue_sysfs_entry queue_max_sectors_entry = {
3553         .attr = {.name = "max_sectors_kb", .mode = S_IRUGO | S_IWUSR },
3554         .show = queue_max_sectors_show,
3555         .store = queue_max_sectors_store,
3556 };
3557
3558 static struct queue_sysfs_entry queue_max_hw_sectors_entry = {
3559         .attr = {.name = "max_hw_sectors_kb", .mode = S_IRUGO },
3560         .show = queue_max_hw_sectors_show,
3561 };
3562
3563 static struct queue_sysfs_entry queue_iosched_entry = {
3564         .attr = {.name = "scheduler", .mode = S_IRUGO | S_IWUSR },
3565         .show = elv_iosched_show,
3566         .store = elv_iosched_store,
3567 };
3568
3569 static struct attribute *default_attrs[] = {
3570         &queue_requests_entry.attr,
3571         &queue_ra_entry.attr,
3572         &queue_max_hw_sectors_entry.attr,
3573         &queue_max_sectors_entry.attr,
3574         &queue_iosched_entry.attr,
3575         NULL,
3576 };
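
/*
 * With the ktype below, these attributes appear under
 * /sys/block/<disk>/queue/ (nr_requests, read_ahead_kb, max_sectors_kb,
 * max_hw_sectors_kb, scheduler); reads and writes are routed through
 * queue_attr_show() and queue_attr_store() to the per-entry handlers.
 */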
3577
3578 #define to_queue(atr) container_of((atr), struct queue_sysfs_entry, attr)
3579
3580 static ssize_t
3581 queue_attr_show(struct kobject *kobj, struct attribute *attr, char *page)
3582 {
3583         struct queue_sysfs_entry *entry = to_queue(attr);
3584         struct request_queue *q;
3585
3586         q = container_of(kobj, struct request_queue, kobj);
3587         if (!entry->show)
3588                 return -EIO;
3589
3590         return entry->show(q, page);
3591 }
3592
3593 static ssize_t
3594 queue_attr_store(struct kobject *kobj, struct attribute *attr,
3595                     const char *page, size_t length)
3596 {
3597         struct queue_sysfs_entry *entry = to_queue(attr);
3598         struct request_queue *q;
3599
3600         q = container_of(kobj, struct request_queue, kobj);
3601         if (!entry->store)
3602                 return -EIO;
3603
3604         return entry->store(q, page, length);
3605 }
3606
3607 static struct sysfs_ops queue_sysfs_ops = {
3608         .show   = queue_attr_show,
3609         .store  = queue_attr_store,
3610 };
3611
3612 struct kobj_type queue_ktype = {
3613         .sysfs_ops      = &queue_sysfs_ops,
3614         .default_attrs  = default_attrs,
3615 };
3616
3617 int blk_register_queue(struct gendisk *disk)
3618 {
3619         int ret;
3620
3621         request_queue_t *q = disk->queue;
3622
3623         if (!q || !q->request_fn)
3624                 return -ENXIO;
3625
3626         q->kobj.parent = kobject_get(&disk->kobj);
3627         if (!q->kobj.parent)
3628                 return -EBUSY;
3629
3630         snprintf(q->kobj.name, KOBJ_NAME_LEN, "%s", "queue");
3631         q->kobj.ktype = &queue_ktype;
3632
3633         ret = kobject_register(&q->kobj);
3634         if (ret < 0)
3635                 return ret;
3636
3637         ret = elv_register_queue(q);
3638         if (ret) {
3639                 kobject_unregister(&q->kobj);
3640                 return ret;
3641         }
3642
3643         return 0;
3644 }
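
/*
 * Usage sketch (driver side, not part of this file): blk_register_queue()
 * is normally reached via add_disk() once the queue is attached to the
 * gendisk ('my_request_fn' and 'my_lock' are placeholders):
 *
 *	disk->queue = blk_init_queue(my_request_fn, &my_lock);
 *	add_disk(disk);
 *
 * blk_unregister_queue() below is likewise driven from the del_gendisk()
 * path.
 */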
3645
3646 void blk_unregister_queue(struct gendisk *disk)
3647 {
3648         request_queue_t *q = disk->queue;
3649
3650         if (q && q->request_fn) {
3651                 elv_unregister_queue(q);
3652
3653                 kobject_unregister(&q->kobj);
3654                 kobject_put(&disk->kobj);
3655         }
3656 }