[PATCH] blk: __make_request efficiency
author     Nick Piggin <nickpiggin@yahoo.com.au>
           Wed, 29 Jun 2005 03:45:13 +0000 (20:45 -0700)
committer  Linus Torvalds <torvalds@ppc970.osdl.org>
           Wed, 29 Jun 2005 04:20:34 +0000 (21:20 -0700)
When the elevator cannot merge the request, don't retake the queue lock and
retry the merge after allocating a new request.

Instead, assume that the chance of a merge remains slim, and now that we've
done most of the work of allocating a request we may as well just go with it.
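
For illustration, a condensed sketch of the control-flow change in __make_request()
(simplified from the hunks below, not the literal kernel code):

        /* Old flow (simplified): if the elevator declines, drop the lock,
         * allocate a request, then jump back and run the merge path again.
         */
again:
        spin_lock_irq(q->queue_lock);
        if (elv_merge(q, &req, bio) != ELEVATOR_NO_MERGE)
                goto out;                       /* merged into an existing request */
        if (freereq) {
                req = freereq;                  /* use the request allocated last pass */
        } else {
                spin_unlock_irq(q->queue_lock);
                freereq = get_request(q, rw, bio, GFP_ATOMIC);
                if (!freereq)
                        freereq = get_request_wait(q, rw, bio);
                goto again;                     /* retake the lock, retry the merge */
        }

        /* New flow (simplified): once the elevator declines, commit to the
         * request we are about to allocate; no second merge attempt.
         */
        spin_lock_irq(q->queue_lock);
        if (elv_merge(q, &req, bio) != ELEVATOR_NO_MERGE)
                goto out;
        spin_unlock_irq(q->queue_lock);
        req = get_request_wait(q, rw, bio);     /* may sleep, cannot fail */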

Also get rid of the GFP_ATOMIC allocation: we've got working mempools for the
block layer now, so let's save atomic memory for things like networking.
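
A mempool-backed allocation with a blocking mask can wait for an element to come
back to the pool instead of failing, so it needs neither atomic memory nor a NULL
check in the caller.  A minimal sketch of the difference, assuming the request
mempool the block layer already sets up (q->rq.rq_pool in this era's ll_rw_blk.c):

        struct request *rq;

        rq = mempool_alloc(q->rq.rq_pool, GFP_ATOMIC);  /* can return NULL under memory pressure */

        rq = mempool_alloc(q->rq.rq_pool, GFP_NOIO);    /* may sleep and fall back on the pool's
                                                           reserved elements; does not return NULL */

(get_request() can still return NULL when the queue's request limit is reached,
which is why get_request_wait() loops; the mempool only removes the
memory-pressure failure mode.)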

Lastly, in get_request_wait, do an initial get_request call before going into
the waitqueue.  This is reported to help efficiency.
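
The common, uncontended case then skips the prepare_to_wait_exclusive()/finish_wait()
round trip entirely.  Condensed from the first hunk below:

        rq = get_request(q, rw, bio, GFP_NOIO);         /* optimistic first attempt */
        while (!rq) {
                DEFINE_WAIT(wait);
                struct request_list *rl = &q->rq;

                prepare_to_wait_exclusive(&rl->wait[rw], &wait,
                                TASK_UNINTERRUPTIBLE);
                rq = get_request(q, rw, bio, GFP_NOIO);
                if (!rq)
                        io_schedule();                  /* sleep until a request is freed */
                finish_wait(&rl->wait[rw], &wait);
        }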

Signed-off-by: Nick Piggin <nickpiggin@yahoo.com.au>
Cc: Jens Axboe <axboe@suse.de>
Signed-off-by: Andrew Morton <akpm@osdl.org>
Signed-off-by: Linus Torvalds <torvalds@osdl.org>
diff --git a/drivers/block/ll_rw_blk.c b/drivers/block/ll_rw_blk.c
index 6c98cf04271407163e6c201922a20675b9b6ebd8..67431f28015473a434b554622d07d365d834b7c8 100644
--- a/drivers/block/ll_rw_blk.c
+++ b/drivers/block/ll_rw_blk.c
@@ -1971,10 +1971,11 @@ out:
 static struct request *get_request_wait(request_queue_t *q, int rw,
                                        struct bio *bio)
 {
-       DEFINE_WAIT(wait);
        struct request *rq;
 
-       do {
+       rq = get_request(q, rw, bio, GFP_NOIO);
+       while (!rq) {
+               DEFINE_WAIT(wait);
                struct request_list *rl = &q->rq;
 
                prepare_to_wait_exclusive(&rl->wait[rw], &wait,
@@ -1999,7 +2000,7 @@ static struct request *get_request_wait(request_queue_t *q, int rw,
                        put_io_context(ioc);
                }
                finish_wait(&rl->wait[rw], &wait);
-       } while (!rq);
+       }
 
        return rq;
 }
@@ -2521,7 +2522,7 @@ EXPORT_SYMBOL(blk_attempt_remerge);
 
 static int __make_request(request_queue_t *q, struct bio *bio)
 {
-       struct request *req, *freereq = NULL;
+       struct request *req;
        int el_ret, rw, nr_sectors, cur_nr_sectors, barrier, err, sync;
        unsigned short prio;
        sector_t sector;
@@ -2549,14 +2550,9 @@ static int __make_request(request_queue_t *q, struct bio *bio)
                goto end_io;
        }
 
-again:
        spin_lock_irq(q->queue_lock);
 
-       if (elv_queue_empty(q)) {
-               blk_plug_device(q);
-               goto get_rq;
-       }
-       if (barrier)
+       if (unlikely(barrier) || elv_queue_empty(q))
                goto get_rq;
 
        el_ret = elv_merge(q, &req, bio);
@@ -2601,40 +2597,23 @@ again:
                                elv_merged_request(q, req);
                        goto out;
 
-               /*
-                * elevator says don't/can't merge. get new request
-                */
-               case ELEVATOR_NO_MERGE:
-                       break;
-
+               /* ELV_NO_MERGE: elevator says don't/can't merge. */
                default:
-                       printk("elevator returned crap (%d)\n", el_ret);
-                       BUG();
+                       ;
        }
 
+get_rq:
        /*
-        * Grab a free request from the freelist - if that is empty, check
-        * if we are doing read ahead and abort instead of blocking for
-        * a free slot.
+        * Grab a free request. This might sleep but cannot fail.
+        */
+       spin_unlock_irq(q->queue_lock);
+       req = get_request_wait(q, rw, bio);
+       /*
+        * After dropping the lock and possibly sleeping here, our request
+        * may now be mergeable after it had proven unmergeable (above).
+        * We don't worry about that case for efficiency. It won't happen
+        * often, and the elevators are able to handle it.
         */
-get_rq:
-       if (freereq) {
-               req = freereq;
-               freereq = NULL;
-       } else {
-               spin_unlock_irq(q->queue_lock);
-               if ((freereq = get_request(q, rw, bio, GFP_ATOMIC)) == NULL) {
-                       /*
-                        * READA bit set
-                        */
-                       err = -EWOULDBLOCK;
-                       if (bio_rw_ahead(bio))
-                               goto end_io;
-       
-                       freereq = get_request_wait(q, rw, bio);
-               }
-               goto again;
-       }
 
        req->flags |= REQ_CMD;
 
@@ -2663,10 +2642,11 @@ get_rq:
        req->rq_disk = bio->bi_bdev->bd_disk;
        req->start_time = jiffies;
 
+       spin_lock_irq(q->queue_lock);
+       if (elv_queue_empty(q))
+               blk_plug_device(q);
        add_request(q, req);
 out:
-       if (freereq)
-               __blk_put_request(q, freereq);
        if (sync)
                __generic_unplug_device(q);