diff --git a/block/cfq-iosched.c b/block/cfq-iosched.c
index e859b4966e4ccddeaa361177cf57dba568aa3657..9755a3cfad26e7f3ed50e2dbff15adaec15c4470 100644
--- a/block/cfq-iosched.c
+++ b/block/cfq-iosched.c
@@ -9,7 +9,6 @@
 #include <linux/module.h>
 #include <linux/blkdev.h>
 #include <linux/elevator.h>
-#include <linux/hash.h>
 #include <linux/rbtree.h>
 #include <linux/ioprio.h>
 
@@ -38,14 +37,6 @@ static int cfq_slice_idle = HZ / 125;
 
 #define CFQ_SLICE_SCALE                (5)
 
-#define CFQ_KEY_ASYNC          (0)
-
-/*
- * for the hash of cfqq inside the cfqd
- */
-#define CFQ_QHASH_SHIFT                6
-#define CFQ_QHASH_ENTRIES      (1 << CFQ_QHASH_SHIFT)
-
 #define RQ_CIC(rq)             ((struct cfq_io_context*)(rq)->elevator_private)
 #define RQ_CFQQ(rq)            ((rq)->elevator_private2)
 
@@ -62,8 +53,6 @@ static struct completion *ioc_gone;
 #define ASYNC                  (0)
 #define SYNC                   (1)
 
-#define cfq_cfqq_sync(cfqq)    ((cfqq)->key != CFQ_KEY_ASYNC)
-
 #define sample_valid(samples)  ((samples) > 80)
 
 /*
@@ -90,11 +79,6 @@ struct cfq_data {
        struct cfq_rb_root service_tree;
        unsigned int busy_queues;
 
-       /*
-        * cfqq lookup hash
-        */
-       struct hlist_head *cfq_hash;
-
        int rq_in_driver;
        int sync_flight;
        int hw_tag;
@@ -108,6 +92,8 @@ struct cfq_data {
        struct cfq_queue *active_queue;
        struct cfq_io_context *active_cic;
 
+       struct cfq_queue *async_cfqq[IOPRIO_BE_NR];
+
        struct timer_list idle_class_timer;
 
        sector_t last_position;
@@ -138,10 +124,6 @@ struct cfq_queue {
        atomic_t ref;
        /* parent cfq_data */
        struct cfq_data *cfqd;
-       /* cfqq lookup hash */
-       struct hlist_node cfq_hash;
-       /* hash key */
-       unsigned int key;
        /* service_tree member */
        struct rb_node rb_node;
        /* service_tree key */
@@ -186,6 +168,7 @@ enum cfqq_state_flags {
        CFQ_CFQQ_FLAG_prio_changed,     /* task priority has changed */
        CFQ_CFQQ_FLAG_queue_new,        /* queue never been serviced */
        CFQ_CFQQ_FLAG_slice_new,        /* no requests dispatched in slice */
+       CFQ_CFQQ_FLAG_sync,             /* synchronous queue */
 };
 
 #define CFQ_CFQQ_FNS(name)                                             \
@@ -212,11 +195,38 @@ CFQ_CFQQ_FNS(idle_window);
 CFQ_CFQQ_FNS(prio_changed);
 CFQ_CFQQ_FNS(queue_new);
 CFQ_CFQQ_FNS(slice_new);
+CFQ_CFQQ_FNS(sync);
 #undef CFQ_CFQQ_FNS
 
-static struct cfq_queue *cfq_find_cfq_hash(struct cfq_data *, unsigned int, unsigned short);
 static void cfq_dispatch_insert(request_queue_t *, struct request *);
-static struct cfq_queue *cfq_get_queue(struct cfq_data *, unsigned int, struct task_struct *, gfp_t);
+static struct cfq_queue *cfq_get_queue(struct cfq_data *, int,
+                                      struct task_struct *, gfp_t);
+static struct cfq_io_context *cfq_cic_rb_lookup(struct cfq_data *,
+                                               struct io_context *);
+
+static inline struct cfq_queue *cic_to_cfqq(struct cfq_io_context *cic,
+                                           int is_sync)
+{
+       return cic->cfqq[!!is_sync];
+}
+
+static inline void cic_set_cfqq(struct cfq_io_context *cic,
+                               struct cfq_queue *cfqq, int is_sync)
+{
+       cic->cfqq[!!is_sync] = cfqq;
+}
+
+/*
+ * We regard a request as SYNC if it is either a read or has the SYNC bit
+ * set (in which case it could also be a direct WRITE).
+ */
+static inline int cfq_bio_sync(struct bio *bio)
+{
+       if (bio_data_dir(bio) == READ || bio_sync(bio))
+               return 1;
+
+       return 0;
+}
 
 /*
  * scheduler run of queue, if there are requests pending and no one in the
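The helpers above replace the old pid-keyed hash lookup: each cfq_io_context now carries two queue pointers, cic->cfqq[0] for async and cic->cfqq[1] for sync I/O, and cfq_bio_sync() picks the slot by treating reads and explicitly-sync writes as synchronous. A minimal user-space sketch of that classification and two-slot lookup (toy types only; the kernel's bio flags are modeled, not reproduced):

    #include <stdio.h>

    /* Toy stand-ins for the kernel structures; illustrative only. */
    struct toy_queue { const char *name; };

    struct toy_io_context {
        /* index 0 = async queue, index 1 = sync queue, as in cic->cfqq[] */
        struct toy_queue *cfqq[2];
    };

    enum { TOY_WRITE = 1, TOY_SYNC = 2 };   /* models bio_data_dir()/bio_sync() */

    /* a request counts as sync if it is a read or an explicitly sync write */
    static int toy_bio_sync(int bio_flags)
    {
        return !(bio_flags & TOY_WRITE) || (bio_flags & TOY_SYNC);
    }

    static struct toy_queue *cic_to_cfqq(struct toy_io_context *cic, int is_sync)
    {
        return cic->cfqq[!!is_sync];        /* !! folds any nonzero value to 1 */
    }

    int main(void)
    {
        struct toy_queue async_q = { "async" }, sync_q = { "sync" };
        struct toy_io_context cic = { { &async_q, &sync_q } };

        printf("read        -> %s queue\n",
               cic_to_cfqq(&cic, toy_bio_sync(0))->name);
        printf("plain write -> %s queue\n",
               cic_to_cfqq(&cic, toy_bio_sync(TOY_WRITE))->name);
        printf("sync write  -> %s queue\n",
               cic_to_cfqq(&cic, toy_bio_sync(TOY_WRITE | TOY_SYNC))->name);
        return 0;
    }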
@@ -235,17 +245,6 @@ static int cfq_queue_empty(request_queue_t *q)
        return !cfqd->busy_queues;
 }
 
-static inline pid_t cfq_queue_pid(struct task_struct *task, int rw, int is_sync)
-{
-       /*
-        * Use the per-process queue for read requests and synchronous writes
-        */
-       if (!(rw & REQ_RW) || is_sync)
-               return task->pid;
-
-       return CFQ_KEY_ASYNC;
-}
-
 /*
  * Scale schedule slice based on io priority. Use the sync time slice only
  * if a queue is marked sync and has sync io queued. A sync queue with async
@@ -608,10 +607,14 @@ static struct request *
 cfq_find_rq_fmerge(struct cfq_data *cfqd, struct bio *bio)
 {
        struct task_struct *tsk = current;
-       pid_t key = cfq_queue_pid(tsk, bio_data_dir(bio), bio_sync(bio));
+       struct cfq_io_context *cic;
        struct cfq_queue *cfqq;
 
-       cfqq = cfq_find_cfq_hash(cfqd, key, tsk->ioprio);
+       cic = cfq_cic_rb_lookup(cfqd, tsk->io_context);
+       if (!cic)
+               return NULL;
+
+       cfqq = cic_to_cfqq(cic, cfq_bio_sync(bio));
        if (cfqq) {
                sector_t sector = bio->bi_sector + bio_sectors(bio);
 
@@ -705,23 +708,24 @@ static int cfq_allow_merge(request_queue_t *q, struct request *rq,
                           struct bio *bio)
 {
        struct cfq_data *cfqd = q->elevator->elevator_data;
-       const int rw = bio_data_dir(bio);
+       struct cfq_io_context *cic;
        struct cfq_queue *cfqq;
-       pid_t key;
 
        /*
         * Disallow merge of a sync bio into an async request.
         */
-       if ((bio_data_dir(bio) == READ || bio_sync(bio)) && !rq_is_sync(rq))
+       if (cfq_bio_sync(bio) && !rq_is_sync(rq))
                return 0;
 
        /*
         * Lookup the cfqq that this bio will be queued with. Allow
         * merge only if rq is queued there.
         */
-       key = cfq_queue_pid(current, rw, bio_sync(bio));
-       cfqq = cfq_find_cfq_hash(cfqd, key, current->ioprio);
+       cic = cfq_cic_rb_lookup(cfqd, current->io_context);
+       if (!cic)
+               return 0;
 
+       cfqq = cic_to_cfqq(cic, cfq_bio_sync(bio));
        if (cfqq == RQ_CFQQ(rq))
                return 1;
 
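The merge path now decides purely through the submitter's io_context: a sync bio never merges into an async request, and otherwise the merge is allowed only when the bio maps to the very cfqq the request already sits on. A compilable sketch of the predicate, assuming nothing beyond the C standard headers:

    #include <stdbool.h>
    #include <stddef.h>

    struct queue;   /* opaque; only pointer identity matters here */

    /* models the two checks in cfq_allow_merge(); not kernel code */
    static bool allow_merge(bool bio_is_sync, bool rq_is_sync,
                            const struct queue *bio_cfqq,
                            const struct queue *rq_cfqq)
    {
        /* never merge a sync bio into an async request */
        if (bio_is_sync && !rq_is_sync)
            return false;
        /* NULL bio_cfqq models the "no cic yet" early return 0 */
        return bio_cfqq != NULL && bio_cfqq == rq_cfqq;
    }

The second check also keeps async bios out of sync queues in practice: an async bio resolves to the async cfqq, which can never equal a sync request's queue.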
@@ -1154,43 +1158,17 @@ static void cfq_put_queue(struct cfq_queue *cfqq)
                cfq_schedule_dispatch(cfqd);
        }
 
-       /*
-        * it's on the empty list and still hashed
-        */
-       hlist_del(&cfqq->cfq_hash);
        kmem_cache_free(cfq_pool, cfqq);
 }
 
-static struct cfq_queue *
-__cfq_find_cfq_hash(struct cfq_data *cfqd, unsigned int key, unsigned int prio,
-                   const int hashval)
-{
-       struct hlist_head *hash_list = &cfqd->cfq_hash[hashval];
-       struct hlist_node *entry;
-       struct cfq_queue *__cfqq;
-
-       hlist_for_each_entry(__cfqq, entry, hash_list, cfq_hash) {
-               const unsigned short __p = IOPRIO_PRIO_VALUE(__cfqq->org_ioprio_class, __cfqq->org_ioprio);
-
-               if (__cfqq->key == key && (__p == prio || !prio))
-                       return __cfqq;
-       }
-
-       return NULL;
-}
-
-static struct cfq_queue *
-cfq_find_cfq_hash(struct cfq_data *cfqd, unsigned int key, unsigned short prio)
-{
-       return __cfq_find_cfq_hash(cfqd, key, prio, hash_long(key, CFQ_QHASH_SHIFT));
-}
-
 static void cfq_free_io_context(struct io_context *ioc)
 {
        struct cfq_io_context *__cic;
        struct rb_node *n;
        int freed = 0;
 
+       ioc->ioc_data = NULL;
+
        while ((n = rb_first(&ioc->cic_root)) != NULL) {
                __cic = rb_entry(n, struct cfq_io_context, rb_node);
                rb_erase(&__cic->rb_node, &ioc->cic_root);
@@ -1254,10 +1232,11 @@ static void cfq_exit_io_context(struct io_context *ioc)
        struct cfq_io_context *__cic;
        struct rb_node *n;
 
+       ioc->ioc_data = NULL;
+
        /*
         * put the reference this task is holding to the various queues
         */
-
        n = rb_first(&ioc->cic_root);
        while (n != NULL) {
                __cic = rb_entry(n, struct cfq_io_context, rb_node);
@@ -1272,9 +1251,9 @@ cfq_alloc_io_context(struct cfq_data *cfqd, gfp_t gfp_mask)
 {
        struct cfq_io_context *cic;
 
-       cic = kmem_cache_alloc_node(cfq_ioc_pool, gfp_mask, cfqd->queue->node);
+       cic = kmem_cache_alloc_node(cfq_ioc_pool, gfp_mask | __GFP_ZERO,
+                                                       cfqd->queue->node);
        if (cic) {
-               memset(cic, 0, sizeof(*cic));
                cic->last_end_request = jiffies;
                INIT_LIST_HEAD(&cic->queue_list);
                cic->dtor = cfq_free_io_context;
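Here the alloc-then-memset pattern folds into the allocation itself: __GFP_ZERO asks the slab allocator for pre-zeroed memory. A user-space analogue of the before/after, with calloc() standing in for the __GFP_ZERO flag:

    #include <stdlib.h>
    #include <string.h>

    struct toy_cic { long last_end_request; void *key; };

    /* before: allocate, then clear by hand */
    static struct toy_cic *alloc_cic_memset(void)
    {
        struct toy_cic *cic = malloc(sizeof(*cic));

        if (cic)
            memset(cic, 0, sizeof(*cic));
        return cic;
    }

    /* after: ask the allocator for zeroed memory up front; calloc() plays
     * the role __GFP_ZERO plays for kmem_cache_alloc_node() */
    static struct toy_cic *alloc_cic_zeroed(void)
    {
        return calloc(1, sizeof(struct toy_cic));
    }

The same substitution appears further down for the cfq_pool allocations and for cfqd in cfq_init_queue(), dropping two more memset() calls.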
@@ -1342,7 +1321,7 @@ static inline void changed_ioprio(struct cfq_io_context *cic)
        cfqq = cic->cfqq[ASYNC];
        if (cfqq) {
                struct cfq_queue *new_cfqq;
-               new_cfqq = cfq_get_queue(cfqd, CFQ_KEY_ASYNC, cic->ioc->task,
+               new_cfqq = cfq_get_queue(cfqd, ASYNC, cic->ioc->task,
                                         GFP_ATOMIC);
                if (new_cfqq) {
                        cic->cfqq[ASYNC] = new_cfqq;
@@ -1374,16 +1353,16 @@ static void cfq_ioc_set_ioprio(struct io_context *ioc)
 }
 
 static struct cfq_queue *
-cfq_get_queue(struct cfq_data *cfqd, unsigned int key, struct task_struct *tsk,
-             gfp_t gfp_mask)
+cfq_find_alloc_queue(struct cfq_data *cfqd, int is_sync,
+                    struct task_struct *tsk, gfp_t gfp_mask)
 {
-       const int hashval = hash_long(key, CFQ_QHASH_SHIFT);
        struct cfq_queue *cfqq, *new_cfqq = NULL;
-       unsigned short ioprio;
+       struct cfq_io_context *cic;
 
 retry:
-       ioprio = tsk->ioprio;
-       cfqq = __cfq_find_cfq_hash(cfqd, key, ioprio, hashval);
+       cic = cfq_cic_rb_lookup(cfqd, tsk->io_context);
+       /* cic always exists here */
+       cfqq = cic_to_cfqq(cic, is_sync);
 
        if (!cfqq) {
                if (new_cfqq) {
@@ -1397,43 +1376,68 @@ retry:
                         * free memory.
                         */
                        spin_unlock_irq(cfqd->queue->queue_lock);
-                       new_cfqq = kmem_cache_alloc_node(cfq_pool, gfp_mask|__GFP_NOFAIL, cfqd->queue->node);
+                       new_cfqq = kmem_cache_alloc_node(cfq_pool,
+                                       gfp_mask | __GFP_NOFAIL | __GFP_ZERO,
+                                       cfqd->queue->node);
                        spin_lock_irq(cfqd->queue->queue_lock);
                        goto retry;
                } else {
-                       cfqq = kmem_cache_alloc_node(cfq_pool, gfp_mask, cfqd->queue->node);
+                       cfqq = kmem_cache_alloc_node(cfq_pool,
+                                       gfp_mask | __GFP_ZERO,
+                                       cfqd->queue->node);
                        if (!cfqq)
                                goto out;
                }
 
-               memset(cfqq, 0, sizeof(*cfqq));
-
-               INIT_HLIST_NODE(&cfqq->cfq_hash);
                RB_CLEAR_NODE(&cfqq->rb_node);
                INIT_LIST_HEAD(&cfqq->fifo);
 
-               cfqq->key = key;
-               hlist_add_head(&cfqq->cfq_hash, &cfqd->cfq_hash[hashval]);
                atomic_set(&cfqq->ref, 0);
                cfqq->cfqd = cfqd;
 
-               if (key != CFQ_KEY_ASYNC)
+               if (is_sync) {
                        cfq_mark_cfqq_idle_window(cfqq);
+                       cfq_mark_cfqq_sync(cfqq);
+               }
 
                cfq_mark_cfqq_prio_changed(cfqq);
                cfq_mark_cfqq_queue_new(cfqq);
+
                cfq_init_prio_data(cfqq);
        }
 
        if (new_cfqq)
                kmem_cache_free(cfq_pool, new_cfqq);
 
-       atomic_inc(&cfqq->ref);
 out:
        WARN_ON((gfp_mask & __GFP_WAIT) && !cfqq);
        return cfqq;
 }
 
+static struct cfq_queue *
+cfq_get_queue(struct cfq_data *cfqd, int is_sync, struct task_struct *tsk,
+             gfp_t gfp_mask)
+{
+       const int ioprio = task_ioprio(tsk);
+       struct cfq_queue *cfqq = NULL;
+
+       if (!is_sync)
+               cfqq = cfqd->async_cfqq[ioprio];
+       if (!cfqq)
+               cfqq = cfq_find_alloc_queue(cfqd, is_sync, tsk, gfp_mask);
+
+       /*
+        * pin the queue now that it's allocated; scheduler exit will prune it
+        */
+       if (!is_sync && !cfqd->async_cfqq[ioprio]) {
+               atomic_inc(&cfqq->ref);
+               cfqd->async_cfqq[ioprio] = cfqq;
+       }
+
+       atomic_inc(&cfqq->ref);
+       return cfqq;
+}
+
 /*
  * We drop cfq io contexts lazily, so we may find a dead one.
  */
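cfq_get_queue() now shares one async queue per best-effort priority level instead of one per process: the first async request at a given ioprio allocates the queue, cfqd->async_cfqq[] takes a pinning reference that is dropped only at scheduler exit, and each caller takes its own reference on top. A self-contained model of the pin-on-first-use table (a plain int stands in for atomic_t; IOPRIO_BE_NR is 8 in this kernel):

    #include <stdio.h>
    #include <stdlib.h>

    #define NPRIO 8                             /* IOPRIO_BE_NR levels */

    struct toy_cfqq { int ref; int prio; };

    static struct toy_cfqq *async_cfqq[NPRIO];  /* models cfqd->async_cfqq[] */

    static struct toy_cfqq *get_async_queue(int prio)
    {
        struct toy_cfqq *q = async_cfqq[prio];

        if (!q) {
            q = calloc(1, sizeof(*q));
            if (!q)
                return NULL;
            q->prio = prio;
            q->ref++;                   /* pin: the table holds one reference */
            async_cfqq[prio] = q;
        }
        q->ref++;                       /* the caller's own reference */
        return q;
    }

    static void put_queue(struct toy_cfqq *q)
    {
        if (--q->ref == 0)
            free(q);
    }

    int main(void)
    {
        struct toy_cfqq *a = get_async_queue(4);
        struct toy_cfqq *b = get_async_queue(4);    /* shared, same queue */

        printf("shared: %s, refs: %d\n", a == b ? "yes" : "no", a->ref);
        put_queue(a);
        put_queue(b);
        put_queue(async_cfqq[4]);       /* the exit-time prune drops the pin */
        return 0;
    }

Sharing per priority is safe because async queues were never truly per-process before either: the old code keyed them all to CFQ_KEY_ASYNC, and only the priority distinguished them.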
@@ -1441,6 +1445,10 @@ static void
 cfq_drop_dead_cic(struct io_context *ioc, struct cfq_io_context *cic)
 {
        WARN_ON(!list_empty(&cic->queue_list));
+
+       if (ioc->ioc_data == cic)
+               ioc->ioc_data = NULL;
+
        rb_erase(&cic->rb_node, &ioc->cic_root);
        kmem_cache_free(cfq_ioc_pool, cic);
        elv_ioc_count_dec(ioc_count);
@@ -1453,6 +1461,16 @@ cfq_cic_rb_lookup(struct cfq_data *cfqd, struct io_context *ioc)
        struct cfq_io_context *cic;
        void *k, *key = cfqd;
 
+       if (unlikely(!ioc))
+               return NULL;
+
+       /*
+        * we maintain a last-hit cache to avoid walking the whole tree
+        */
+       cic = ioc->ioc_data;
+       if (cic && cic->key == cfqd)
+               return cic;
+
 restart:
        n = ioc->cic_root.rb_node;
        while (n) {
@@ -1468,8 +1486,10 @@ restart:
                        n = n->rb_left;
                else if (key > k)
                        n = n->rb_right;
-               else
+               else {
+                       ioc->ioc_data = cic;
                        return cic;
+               }
        }
 
        return NULL;
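cfq_cic_rb_lookup() now fronts the rbtree with a one-entry cache: ioc->ioc_data remembers the last cic that matched, and must be cleared wherever a cic can die (cfq_free_io_context(), cfq_exit_io_context() and cfq_drop_dead_cic() above all do so), or the cached pointer would dangle. A small model of the same idea, with a linear scan standing in for the rbtree walk:

    #include <stddef.h>

    struct cic { void *key; };          /* key = owning scheduler instance */

    struct ioc {
        struct cic *entries[16];        /* stand-in for the cic rbtree */
        int nr;
        struct cic *ioc_data;           /* one-entry last-hit cache */
    };

    static struct cic *cic_lookup(struct ioc *ioc, void *key)
    {
        struct cic *cic = ioc->ioc_data;

        /* fast path: the previous lookup hit the same key */
        if (cic && cic->key == key)
            return cic;

        /* slow path: linear scan models the rbtree walk */
        for (int i = 0; i < ioc->nr; i++) {
            if (ioc->entries[i]->key == key) {
                ioc->ioc_data = ioc->entries[i];    /* refresh the cache */
                return ioc->entries[i];
            }
        }
        return NULL;
    }

    /* must run before a cic is freed, or ioc_data would dangle */
    static void cic_will_free(struct ioc *ioc, struct cic *cic)
    {
        if (ioc->ioc_data == cic)
            ioc->ioc_data = NULL;
    }

That invalidation duty is the entire cost of the cache; paying it in the three teardown paths keeps the hot path down to two comparisons.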
@@ -1839,10 +1859,8 @@ static int cfq_may_queue(request_queue_t *q, int rw)
 {
        struct cfq_data *cfqd = q->elevator->elevator_data;
        struct task_struct *tsk = current;
+       struct cfq_io_context *cic;
        struct cfq_queue *cfqq;
-       unsigned int key;
-
-       key = cfq_queue_pid(tsk, rw, rw & REQ_RW_SYNC);
 
        /*
         * don't force setup of a queue from here, as a call to may_queue
@@ -1850,7 +1868,11 @@ static int cfq_may_queue(request_queue_t *q, int rw)
         * so just lookup a possibly existing queue, or return 'may queue'
         * if that fails
         */
-       cfqq = cfq_find_cfq_hash(cfqd, key, tsk->ioprio);
+       cic = cfq_cic_rb_lookup(cfqd, tsk->io_context);
+       if (!cic)
+               return ELV_MQUEUE_MAY;
+
+       cfqq = cic_to_cfqq(cic, rw & REQ_RW_SYNC);
        if (cfqq) {
                cfq_init_prio_data(cfqq);
                cfq_prio_boost(cfqq);
@@ -1894,7 +1916,6 @@ cfq_set_request(request_queue_t *q, struct request *rq, gfp_t gfp_mask)
        struct cfq_io_context *cic;
        const int rw = rq_data_dir(rq);
        const int is_sync = rq_is_sync(rq);
-       pid_t key = cfq_queue_pid(tsk, rw, is_sync);
        struct cfq_queue *cfqq;
        unsigned long flags;
 
@@ -1907,14 +1928,15 @@ cfq_set_request(request_queue_t *q, struct request *rq, gfp_t gfp_mask)
        if (!cic)
                goto queue_fail;
 
-       if (!cic->cfqq[is_sync]) {
-               cfqq = cfq_get_queue(cfqd, key, tsk, gfp_mask);
+       cfqq = cic_to_cfqq(cic, is_sync);
+       if (!cfqq) {
+               cfqq = cfq_get_queue(cfqd, is_sync, tsk, gfp_mask);
+
                if (!cfqq)
                        goto queue_fail;
 
-               cic->cfqq[is_sync] = cfqq;
-       } else
-               cfqq = cic->cfqq[is_sync];
+               cic_set_cfqq(cic, cfqq, is_sync);
+       }
 
        cfqq->allocated[rw]++;
        cfq_clear_cfqq_must_alloc(cfqq);
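With the hash gone, cfq_set_request() resolves its queue through the cic in a plain lookup-or-create step and publishes the result back via cic_set_cfqq(). A toy version of that flow, where make_queue() is a hypothetical stand-in for cfq_get_queue():

    #include <stddef.h>

    struct q;
    struct io_ctx { struct q *cfqq[2]; };

    /* assumed constructor, standing in for cfq_get_queue() */
    extern struct q *make_queue(int is_sync);

    static struct q *get_request_queue(struct io_ctx *cic, int is_sync)
    {
        struct q *cfqq = cic->cfqq[!!is_sync];      /* cic_to_cfqq() */

        if (!cfqq) {
            cfqq = make_queue(is_sync);
            if (!cfqq)
                return NULL;                        /* models goto queue_fail */
            cic->cfqq[!!is_sync] = cfqq;            /* cic_set_cfqq() */
        }
        return cfqq;
    }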
@@ -2024,6 +2046,7 @@ static void cfq_exit_queue(elevator_t *e)
 {
        struct cfq_data *cfqd = e->elevator_data;
        request_queue_t *q = cfqd->queue;
+       int i;
 
        cfq_shutdown_timer_wq(cfqd);
 
@@ -2040,35 +2063,31 @@ static void cfq_exit_queue(elevator_t *e)
                __cfq_exit_single_io_context(cfqd, cic);
        }
 
+       /*
+        * Put the async queues
+        */
+       for (i = 0; i < IOPRIO_BE_NR; i++)
+               if (cfqd->async_cfqq[i])
+                       cfq_put_queue(cfqd->async_cfqq[i]);
+
        spin_unlock_irq(q->queue_lock);
 
        cfq_shutdown_timer_wq(cfqd);
 
-       kfree(cfqd->cfq_hash);
        kfree(cfqd);
 }
 
 static void *cfq_init_queue(request_queue_t *q)
 {
        struct cfq_data *cfqd;
-       int i;
 
-       cfqd = kmalloc_node(sizeof(*cfqd), GFP_KERNEL, q->node);
+       cfqd = kmalloc_node(sizeof(*cfqd), GFP_KERNEL | __GFP_ZERO, q->node);
        if (!cfqd)
                return NULL;
 
-       memset(cfqd, 0, sizeof(*cfqd));
-
        cfqd->service_tree = CFQ_RB_ROOT;
        INIT_LIST_HEAD(&cfqd->cic_list);
 
-       cfqd->cfq_hash = kmalloc_node(sizeof(struct hlist_head) * CFQ_QHASH_ENTRIES, GFP_KERNEL, q->node);
-       if (!cfqd->cfq_hash)
-               goto out_free;
-
-       for (i = 0; i < CFQ_QHASH_ENTRIES; i++)
-               INIT_HLIST_HEAD(&cfqd->cfq_hash[i]);
-
        cfqd->queue = q;
 
        init_timer(&cfqd->idle_slice_timer);
@@ -2092,9 +2111,6 @@ static void *cfq_init_queue(request_queue_t *q)
        cfqd->cfq_slice_idle = cfq_slice_idle;
 
        return cfqd;
-out_free:
-       kfree(cfqd);
-       return NULL;
 }
 
 static void cfq_slab_kill(void)
@@ -2107,13 +2123,11 @@ static void cfq_slab_kill(void)
 
 static int __init cfq_slab_setup(void)
 {
-       cfq_pool = kmem_cache_create("cfq_pool", sizeof(struct cfq_queue), 0, 0,
-                                       NULL, NULL);
+       cfq_pool = KMEM_CACHE(cfq_queue, 0);
        if (!cfq_pool)
                goto fail;
 
-       cfq_ioc_pool = kmem_cache_create("cfq_ioc_pool",
-                       sizeof(struct cfq_io_context), 0, 0, NULL, NULL);
+       cfq_ioc_pool = KMEM_CACHE(cfq_io_context, 0);
        if (!cfq_ioc_pool)
                goto fail;
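Finally, the open-coded kmem_cache_create() calls become KMEM_CACHE(), which derives both the cache name and the object size from the struct type itself. A rough user-space imitation of the macro trick (name/size derivation only; the real macro also handles alignment and flags):

    #include <stdio.h>

    struct cache { const char *name; size_t size; };

    static struct cache *cache_create(const char *name, size_t size)
    {
        static struct cache c;      /* single static slot keeps the demo tiny */
        c.name = name;
        c.size = size;
        return &c;
    }

    /* models KMEM_CACHE(struct_name, flags): name and size from the type */
    #define TOY_KMEM_CACHE(s) cache_create(#s, sizeof(struct s))

    struct cfq_io_context_demo { void *key; };

    int main(void)
    {
        struct cache *c = TOY_KMEM_CACHE(cfq_io_context_demo);

        printf("%s: %zu bytes\n", c->name, c->size);
        return 0;
    }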