diff --git a/drivers/block/cfq-iosched.c b/drivers/block/cfq-iosched.c
index 2210bacad56a34c4023a85036681c51657d19f8b..de5746e38af935a01c45a866ae7598d24108a6ac 100644
--- a/drivers/block/cfq-iosched.c
+++ b/drivers/block/cfq-iosched.c
 #include <linux/hash.h>
 #include <linux/rbtree.h>
 #include <linux/mempool.h>
-
-static unsigned long max_elapsed_crq;
-static unsigned long max_elapsed_dispatch;
+#include <linux/ioprio.h>
+#include <linux/writeback.h>
 
 /*
  * tunables
  */
 static int cfq_quantum = 4;            /* max queue in one round of service */
 static int cfq_queued = 8;             /* minimum rq allocate limit per-queue*/
-static int cfq_service = HZ;           /* period over which service is avg */
-static int cfq_fifo_expire_r = HZ / 2; /* fifo timeout for sync requests */
-static int cfq_fifo_expire_w = 5 * HZ; /* fifo timeout for async requests */
-static int cfq_fifo_rate = HZ / 8;     /* fifo expiry rate */
+static int cfq_fifo_expire[2] = { HZ / 4, HZ / 8 };
 static int cfq_back_max = 16 * 1024;   /* maximum backwards seek, in KiB */
 static int cfq_back_penalty = 2;       /* penalty of a backwards seek */
 
+static int cfq_slice_sync = HZ / 10;
+static int cfq_slice_async = HZ / 25;
+static int cfq_slice_async_rq = 2;
+static int cfq_slice_idle = HZ / 100;
+
+#define CFQ_IDLE_GRACE         (HZ / 10)
+#define CFQ_SLICE_SCALE                (5)
+
+#define CFQ_KEY_ASYNC          (0)
+#define CFQ_KEY_ANY            (0xffff)
+
+/*
+ * disable queueing at the driver/hardware level
+ */
+static int cfq_max_depth = 1;
+
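
For a sense of scale with the new tunables, assuming HZ=1000 (the jiffy rate is config-dependent): cfq_slice_sync = 100 ms, cfq_slice_async = 40 ms, cfq_slice_idle = 10 ms, CFQ_IDLE_GRACE = 100 ms, and the fifo timeouts come to 250 ms for async requests (cfq_fifo_expire[0]) and 125 ms for sync requests (cfq_fifo_expire[1]).
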
 /*
  * for the hash of cfqq inside the cfqd
  */
@@ -55,6 +67,7 @@ static int cfq_back_penalty = 2;      /* penalty of a backwards seek */
 #define list_entry_hash(ptr)   hlist_entry((ptr), struct cfq_rq, hash)
 
 #define list_entry_cfqq(ptr)   list_entry((ptr), struct cfq_queue, cfq_list)
+#define list_entry_fifo(ptr)   list_entry((ptr), struct request, queuelist)
 
 #define RQ_DATA(rq)            (rq)->elevator_private
 
@@ -75,78 +88,110 @@ static int cfq_back_penalty = 2;   /* penalty of a backwards seek */
 #define rb_entry_crq(node)     rb_entry((node), struct cfq_rq, rb_node)
 #define rq_rb_key(rq)          (rq)->sector
 
-/*
- * threshold for switching off non-tag accounting
- */
-#define CFQ_MAX_TAG            (4)
-
-/*
- * sort key types and names
- */
-enum {
-       CFQ_KEY_PGID,
-       CFQ_KEY_TGID,
-       CFQ_KEY_UID,
-       CFQ_KEY_GID,
-       CFQ_KEY_LAST,
-};
-
-static char *cfq_key_types[] = { "pgid", "tgid", "uid", "gid", NULL };
-
 static kmem_cache_t *crq_pool;
 static kmem_cache_t *cfq_pool;
 static kmem_cache_t *cfq_ioc_pool;
 
+#define CFQ_PRIO_LISTS         IOPRIO_BE_NR
+#define cfq_class_idle(cfqq)   ((cfqq)->ioprio_class == IOPRIO_CLASS_IDLE)
+#define cfq_class_be(cfqq)     ((cfqq)->ioprio_class == IOPRIO_CLASS_BE)
+#define cfq_class_rt(cfqq)     ((cfqq)->ioprio_class == IOPRIO_CLASS_RT)
+
+#define ASYNC                  (0)
+#define SYNC                   (1)
+
+#define cfq_cfqq_dispatched(cfqq)      \
+       ((cfqq)->on_dispatch[ASYNC] + (cfqq)->on_dispatch[SYNC])
+
+#define cfq_cfqq_class_sync(cfqq)      ((cfqq)->key != CFQ_KEY_ASYNC)
+
+#define cfq_cfqq_sync(cfqq)            \
+       (cfq_cfqq_class_sync(cfqq) || (cfqq)->on_dispatch[SYNC])
+
+/*
+ * Per block device queue structure
+ */
 struct cfq_data {
-       struct list_head rr_list;
+       atomic_t ref;
+       request_queue_t *queue;
+
+       /*
+        * rr list of queues with requests and the count of them
+        */
+       struct list_head rr_list[CFQ_PRIO_LISTS];
+       struct list_head busy_rr;
+       struct list_head cur_rr;
+       struct list_head idle_rr;
+       unsigned int busy_queues;
+
+       /*
+        * non-ordered list of empty cfqq's
+        */
        struct list_head empty_list;
 
+       /*
+        * cfqq lookup hash
+        */
        struct hlist_head *cfq_hash;
-       struct hlist_head *crq_hash;
 
-       /* queues on rr_list (ie they have pending requests */
-       unsigned int busy_queues;
+       /*
+        * global crq hash for all queues
+        */
+       struct hlist_head *crq_hash;
 
        unsigned int max_queued;
 
-       atomic_t ref;
+       mempool_t *crq_pool;
 
-       int key_type;
+       int rq_in_driver;
 
-       mempool_t *crq_pool;
+       /*
+        * schedule slice state info
+        */
+       /*
+        * idle window management
+        */
+       struct timer_list idle_slice_timer;
+       struct work_struct unplug_work;
 
-       request_queue_t *queue;
+       struct cfq_queue *active_queue;
+       struct cfq_io_context *active_cic;
+       int cur_prio, cur_end_prio;
+       unsigned int dispatch_slice;
+
+       struct timer_list idle_class_timer;
 
        sector_t last_sector;
+       unsigned long last_end_request;
 
-       int rq_in_driver;
+       unsigned int rq_starved;
 
        /*
         * tunables, see top of file
         */
        unsigned int cfq_quantum;
        unsigned int cfq_queued;
-       unsigned int cfq_fifo_expire_r;
-       unsigned int cfq_fifo_expire_w;
-       unsigned int cfq_fifo_batch_expire;
+       unsigned int cfq_fifo_expire[2];
        unsigned int cfq_back_penalty;
        unsigned int cfq_back_max;
-       unsigned int find_best_crq;
-
-       unsigned int cfq_tagged;
+       unsigned int cfq_slice[2];
+       unsigned int cfq_slice_async_rq;
+       unsigned int cfq_slice_idle;
+       unsigned int cfq_max_depth;
 };
 
+/*
+ * Per process-grouping structure
+ */
 struct cfq_queue {
        /* reference count */
        atomic_t ref;
        /* parent cfq_data */
        struct cfq_data *cfqd;
-       /* hash of mergeable requests */
+       /* cfqq lookup hash */
        struct hlist_node cfq_hash;
        /* hash key */
-       unsigned long key;
-       /* whether queue is on rr (or empty) list */
-       int on_rr;
+       unsigned int key;
        /* on either rr or empty list of cfqd */
        struct list_head cfq_list;
        /* sorted list of pending requests */
@@ -158,21 +203,22 @@ struct cfq_queue {
        /* currently allocated requests */
        int allocated[2];
        /* fifo list of requests in sort_list */
-       struct list_head fifo[2];
-       /* last time fifo expired */
-       unsigned long last_fifo_expire;
+       struct list_head fifo;
 
-       int key_type;
+       unsigned long slice_start;
+       unsigned long slice_end;
+       unsigned long slice_left;
+       unsigned long service_last;
 
-       unsigned long service_start;
-       unsigned long service_used;
+       /* number of requests that are on the dispatch list */
+       int on_dispatch[2];
 
-       unsigned int max_rate;
+       /* io prio of this group */
+       unsigned short ioprio, org_ioprio;
+       unsigned short ioprio_class, org_ioprio_class;
 
-       /* number of requests that have been handed to the driver */
-       int in_flight;
-       /* number of currently allocated requests */
-       int alloc_limit[2];
+       /* various state flags, see below */
+       unsigned int flags;
 };
 
 struct cfq_rq {
@@ -184,42 +230,78 @@ struct cfq_rq {
        struct cfq_queue *cfq_queue;
        struct cfq_io_context *io_context;
 
-       unsigned long service_start;
-       unsigned long queue_start;
+       unsigned int crq_flags;
+};
+
+enum cfqq_state_flags {
+       CFQ_CFQQ_FLAG_on_rr = 0,
+       CFQ_CFQQ_FLAG_wait_request,
+       CFQ_CFQQ_FLAG_must_alloc,
+       CFQ_CFQQ_FLAG_must_alloc_slice,
+       CFQ_CFQQ_FLAG_must_dispatch,
+       CFQ_CFQQ_FLAG_fifo_expire,
+       CFQ_CFQQ_FLAG_idle_window,
+       CFQ_CFQQ_FLAG_prio_changed,
+       CFQ_CFQQ_FLAG_expired,
+};
 
-       unsigned int in_flight : 1;
-       unsigned int accounted : 1;
-       unsigned int is_sync   : 1;
-       unsigned int is_write  : 1;
+#define CFQ_CFQQ_FNS(name)                                             \
+static inline void cfq_mark_cfqq_##name(struct cfq_queue *cfqq)                \
+{                                                                      \
+       cfqq->flags |= (1 << CFQ_CFQQ_FLAG_##name);                     \
+}                                                                      \
+static inline void cfq_clear_cfqq_##name(struct cfq_queue *cfqq)       \
+{                                                                      \
+       cfqq->flags &= ~(1 << CFQ_CFQQ_FLAG_##name);                    \
+}                                                                      \
+static inline int cfq_cfqq_##name(const struct cfq_queue *cfqq)                \
+{                                                                      \
+       return (cfqq->flags & (1 << CFQ_CFQQ_FLAG_##name)) != 0;        \
+}
+
+CFQ_CFQQ_FNS(on_rr);
+CFQ_CFQQ_FNS(wait_request);
+CFQ_CFQQ_FNS(must_alloc);
+CFQ_CFQQ_FNS(must_alloc_slice);
+CFQ_CFQQ_FNS(must_dispatch);
+CFQ_CFQQ_FNS(fifo_expire);
+CFQ_CFQQ_FNS(idle_window);
+CFQ_CFQQ_FNS(prio_changed);
+CFQ_CFQQ_FNS(expired);
+#undef CFQ_CFQQ_FNS
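
For reference, each CFQ_CFQQ_FNS(name) invocation above stamps out three trivial flag helpers; CFQ_CFQQ_FNS(on_rr), for example, expands to exactly:

static inline void cfq_mark_cfqq_on_rr(struct cfq_queue *cfqq)
{
        cfqq->flags |= (1 << CFQ_CFQQ_FLAG_on_rr);
}
static inline void cfq_clear_cfqq_on_rr(struct cfq_queue *cfqq)
{
        cfqq->flags &= ~(1 << CFQ_CFQQ_FLAG_on_rr);
}
static inline int cfq_cfqq_on_rr(const struct cfq_queue *cfqq)
{
        return (cfqq->flags & (1 << CFQ_CFQQ_FLAG_on_rr)) != 0;
}
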
+
+enum cfq_rq_state_flags {
+       CFQ_CRQ_FLAG_in_flight = 0,
+       CFQ_CRQ_FLAG_in_driver,
+       CFQ_CRQ_FLAG_is_sync,
+       CFQ_CRQ_FLAG_requeued,
 };
 
-static struct cfq_queue *cfq_find_cfq_hash(struct cfq_data *, unsigned long);
+#define CFQ_CRQ_FNS(name)                                              \
+static inline void cfq_mark_crq_##name(struct cfq_rq *crq)             \
+{                                                                      \
+       crq->crq_flags |= (1 << CFQ_CRQ_FLAG_##name);                   \
+}                                                                      \
+static inline void cfq_clear_crq_##name(struct cfq_rq *crq)            \
+{                                                                      \
+       crq->crq_flags &= ~(1 << CFQ_CRQ_FLAG_##name);                  \
+}                                                                      \
+static inline int cfq_crq_##name(const struct cfq_rq *crq)             \
+{                                                                      \
+       return (crq->crq_flags & (1 << CFQ_CRQ_FLAG_##name)) != 0;      \
+}
+
+CFQ_CRQ_FNS(in_flight);
+CFQ_CRQ_FNS(in_driver);
+CFQ_CRQ_FNS(is_sync);
+CFQ_CRQ_FNS(requeued);
+#undef CFQ_CRQ_FNS
+
+static struct cfq_queue *cfq_find_cfq_hash(struct cfq_data *, unsigned int, unsigned short);
 static void cfq_dispatch_sort(request_queue_t *, struct cfq_rq *);
-static void cfq_update_next_crq(struct cfq_rq *);
 static void cfq_put_cfqd(struct cfq_data *cfqd);
 
-/*
- * what the fairness is based on (ie how processes are grouped and
- * differentiated)
- */
-static inline unsigned long
-cfq_hash_key(struct cfq_data *cfqd, struct task_struct *tsk)
-{
-       /*
-        * optimize this so that ->key_type is the offset into the struct
-        */
-       switch (cfqd->key_type) {
-               case CFQ_KEY_PGID:
-                       return process_group(tsk);
-               default:
-               case CFQ_KEY_TGID:
-                       return tsk->tgid;
-               case CFQ_KEY_UID:
-                       return tsk->uid;
-               case CFQ_KEY_GID:
-                       return tsk->gid;
-       }
-}
+#define process_sync(tsk)      ((tsk)->flags & PF_SYNCWRITE)
 
 /*
  * lots of deadline iosched dupes, can be abstracted later...
@@ -235,16 +317,12 @@ static void cfq_remove_merge_hints(request_queue_t *q, struct cfq_rq *crq)
 
        if (q->last_merge == crq->request)
                q->last_merge = NULL;
-
-       cfq_update_next_crq(crq);
 }
 
 static inline void cfq_add_crq_hash(struct cfq_data *cfqd, struct cfq_rq *crq)
 {
        const int hash_idx = CFQ_MHASH_FN(rq_hash_key(crq->request));
 
-       BUG_ON(!hlist_unhashed(&crq->hash));
-
        hlist_add_head(&crq->hash, &cfqd->crq_hash[hash_idx]);
 }
 
@@ -257,8 +335,6 @@ static struct request *cfq_find_rq_hash(struct cfq_data *cfqd, sector_t offset)
                struct cfq_rq *crq = list_entry_hash(entry);
                struct request *__rq = crq->request;
 
-               BUG_ON(hlist_unhashed(&crq->hash));
-
                if (!rq_mergeable(__rq)) {
                        cfq_del_crq_hash(crq);
                        continue;
@@ -271,6 +347,28 @@ static struct request *cfq_find_rq_hash(struct cfq_data *cfqd, sector_t offset)
        return NULL;
 }
 
+static inline int cfq_pending_requests(struct cfq_data *cfqd)
+{
+       return !list_empty(&cfqd->queue->queue_head) || cfqd->busy_queues;
+}
+
+/*
+ * scheduler run of queue, if there are requests pending and no one in the
+ * driver that will restart queueing
+ */
+static inline void cfq_schedule_dispatch(struct cfq_data *cfqd)
+{
+       if (!cfqd->rq_in_driver && cfq_pending_requests(cfqd))
+               kblockd_schedule_work(&cfqd->unplug_work);
+}
+
+static int cfq_queue_empty(request_queue_t *q)
+{
+       struct cfq_data *cfqd = q->elevator->elevator_data;
+
+       return !cfq_pending_requests(cfqd);
+}
+
 /*
  * Lifted from AS - choose which of crq1 and crq2 that is best served now.
  * We choose the request that is closest to the head right now. Distance
@@ -287,36 +385,16 @@ cfq_choose_req(struct cfq_data *cfqd, struct cfq_rq *crq1, struct cfq_rq *crq2)
                return crq2;
        if (crq2 == NULL)
                return crq1;
+       if (cfq_crq_requeued(crq1))
+               return crq1;
+       if (cfq_crq_requeued(crq2))
+               return crq2;
 
        s1 = crq1->request->sector;
        s2 = crq2->request->sector;
 
        last = cfqd->last_sector;
 
-#if 0
-       if (!list_empty(&cfqd->queue->queue_head)) {
-               struct list_head *entry = &cfqd->queue->queue_head;
-               unsigned long distance = ~0UL;
-               struct request *rq;
-
-               while ((entry = entry->prev) != &cfqd->queue->queue_head) {
-                       rq = list_entry_rq(entry);
-
-                       if (blk_barrier_rq(rq))
-                               break;
-
-                       if (distance < abs(s1 - rq->sector + rq->nr_sectors)) {
-                               distance = abs(s1 - rq->sector +rq->nr_sectors);
-                               last = rq->sector + rq->nr_sectors;
-                       }
-                       if (distance < abs(s2 - rq->sector + rq->nr_sectors)) {
-                               distance = abs(s2 - rq->sector +rq->nr_sectors);
-                               last = rq->sector + rq->nr_sectors;
-                       }
-               }
-       }
-#endif
-
        /*
         * by definition, 1KiB is 2 sectors
         */
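
The body of cfq_choose_req is elided from this hunk's context. A minimal sketch of the distance metric the tunables imply (an assumption drawn from the cfq_back_max and cfq_back_penalty comments, not the literal code):

/*
 * sketch only: cost of moving the head from `last' to candidate sector `s'.
 * back_max_sectors would be cfq_back_max * 2, per the comment above.
 */
static unsigned long seek_cost(sector_t s, sector_t last,
                               unsigned long back_max_sectors,
                               unsigned int back_penalty)
{
        if (s >= last)                          /* forward seek: plain distance */
                return s - last;
        if (last - s <= back_max_sectors)       /* short backward seek: penalized */
                return (last - s) * back_penalty;
        return ~0UL;                            /* long backward seek: avoid if possible */
}

cfq_choose_req would then prefer whichever of crq1/crq2 has the lower cost.
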
@@ -377,11 +455,14 @@ cfq_find_next_crq(struct cfq_data *cfqd, struct cfq_queue *cfqq,
        struct cfq_rq *crq_next = NULL, *crq_prev = NULL;
        struct rb_node *rbnext, *rbprev;
 
-       if (!ON_RB(&last->rb_node))
-               return NULL;
-
-       if ((rbnext = rb_next(&last->rb_node)) == NULL)
+       rbnext = NULL;
+       if (ON_RB(&last->rb_node))
+               rbnext = rb_next(&last->rb_node);
+       if (!rbnext) {
                rbnext = rb_first(&cfqq->sort_list);
+               if (rbnext == &last->rb_node)
+                       rbnext = NULL;
+       }
 
        rbprev = rb_prev(&last->rb_node);
 
@@ -401,67 +482,53 @@ static void cfq_update_next_crq(struct cfq_rq *crq)
                cfqq->next_crq = cfq_find_next_crq(cfqq->cfqd, cfqq, crq);
 }
 
-static int cfq_check_sort_rr_list(struct cfq_queue *cfqq)
+static void cfq_resort_rr_list(struct cfq_queue *cfqq, int preempted)
 {
-       struct list_head *head = &cfqq->cfqd->rr_list;
-       struct list_head *next, *prev;
-
-       /*
-        * list might still be ordered
-        */
-       next = cfqq->cfq_list.next;
-       if (next != head) {
-               struct cfq_queue *cnext = list_entry_cfqq(next);
+       struct cfq_data *cfqd = cfqq->cfqd;
+       struct list_head *list, *entry;
 
-               if (cfqq->service_used > cnext->service_used)
-                       return 1;
-       }
+       BUG_ON(!cfq_cfqq_on_rr(cfqq));
 
-       prev = cfqq->cfq_list.prev;
-       if (prev != head) {
-               struct cfq_queue *cprev = list_entry_cfqq(prev);
+       list_del(&cfqq->cfq_list);
 
-               if (cfqq->service_used < cprev->service_used)
-                       return 1;
+       if (cfq_class_rt(cfqq))
+               list = &cfqd->cur_rr;
+       else if (cfq_class_idle(cfqq))
+               list = &cfqd->idle_rr;
+       else {
+               /*
+                * if cfqq has requests in flight, don't allow it to be
+                * found in cfq_set_active_queue before it has finished them.
+                * this is done to increase fairness between a process that
+                * has lots of io pending and one that only issues io
+                * sporadically or synchronously
+                */
+               if (cfq_cfqq_dispatched(cfqq))
+                       list = &cfqd->busy_rr;
+               else
+                       list = &cfqd->rr_list[cfqq->ioprio];
        }
 
-       return 0;
-}
-
-static void cfq_sort_rr_list(struct cfq_queue *cfqq, int new_queue)
-{
-       struct list_head *entry = &cfqq->cfqd->rr_list;
-
-       if (!cfqq->on_rr)
-               return;
-       if (!new_queue && !cfq_check_sort_rr_list(cfqq))
+       /*
+        * if queue was preempted, just add to front to be fair. busy_rr
+        * isn't sorted.
+        */
+       if (preempted || list == &cfqd->busy_rr) {
+               list_add(&cfqq->cfq_list, list);
                return;
-
-       list_del(&cfqq->cfq_list);
+       }
 
        /*
-        * sort by our mean service_used, sub-sort by in-flight requests
+        * sort by when queue was last serviced
         */
-       while ((entry = entry->prev) != &cfqq->cfqd->rr_list) {
+       entry = list;
+       while ((entry = entry->prev) != list) {
                struct cfq_queue *__cfqq = list_entry_cfqq(entry);
 
-               if (cfqq->service_used > __cfqq->service_used)
+               if (!__cfqq->service_last)
+                       break;
+               if (time_before(__cfqq->service_last, cfqq->service_last))
                        break;
-               else if (cfqq->service_used == __cfqq->service_used) {
-                       struct list_head *prv;
-
-                       while ((prv = entry->prev) != &cfqq->cfqd->rr_list) {
-                               __cfqq = list_entry_cfqq(prv);
-
-                               WARN_ON(__cfqq->service_used > cfqq->service_used);
-                               if (cfqq->service_used != __cfqq->service_used)
-                                       break;
-                               if (cfqq->in_flight > __cfqq->in_flight)
-                                       break;
-
-                               entry = prv;
-                       }
-               }
        }
 
        list_add(&cfqq->cfq_list, entry);
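
The net effect is an ascending sort by service_last, with the least recently serviced queue at the head. For example, inserting a queue last serviced at jiffy 250 into a list holding queues serviced at 100, 200 and 300 walks backwards from the tail, stops at the 200 entry, and yields 100, 200, 250, 300; the walk also stops at any queue with service_last == 0, treating never-serviced queues as oldest.
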
@@ -469,28 +536,24 @@ static void cfq_sort_rr_list(struct cfq_queue *cfqq, int new_queue)
 
 /*
  * add to busy list of queues for service, trying to be fair in ordering
- * the pending list according to requests serviced
+ * the pending list according to last request service
  */
 static inline void
-cfq_add_cfqq_rr(struct cfq_data *cfqd, struct cfq_queue *cfqq)
+cfq_add_cfqq_rr(struct cfq_data *cfqd, struct cfq_queue *cfqq, int requeue)
 {
-       /*
-        * it's currently on the empty list
-        */
-       cfqq->on_rr = 1;
+       BUG_ON(cfq_cfqq_on_rr(cfqq));
+       cfq_mark_cfqq_on_rr(cfqq);
        cfqd->busy_queues++;
 
-       if (time_after(jiffies, cfqq->service_start + cfq_service))
-               cfqq->service_used >>= 3;
-
-       cfq_sort_rr_list(cfqq, 1);
+       cfq_resort_rr_list(cfqq, requeue);
 }
 
 static inline void
 cfq_del_cfqq_rr(struct cfq_data *cfqd, struct cfq_queue *cfqq)
 {
+       BUG_ON(!cfq_cfqq_on_rr(cfqq));
+       cfq_clear_cfqq_on_rr(cfqq);
        list_move(&cfqq->cfq_list, &cfqd->empty_list);
-       cfqq->on_rr = 0;
 
        BUG_ON(!cfqd->busy_queues);
        cfqd->busy_queues--;
@@ -505,16 +568,17 @@ static inline void cfq_del_crq_rb(struct cfq_rq *crq)
 
        if (ON_RB(&crq->rb_node)) {
                struct cfq_data *cfqd = cfqq->cfqd;
+               const int sync = cfq_crq_is_sync(crq);
 
-               BUG_ON(!cfqq->queued[crq->is_sync]);
+               BUG_ON(!cfqq->queued[sync]);
+               cfqq->queued[sync]--;
 
                cfq_update_next_crq(crq);
 
-               cfqq->queued[crq->is_sync]--;
                rb_erase(&crq->rb_node, &cfqq->sort_list);
                RB_CLEAR_COLOR(&crq->rb_node);
 
-               if (RB_EMPTY(&cfqq->sort_list) && cfqq->on_rr)
+               if (cfq_cfqq_on_rr(cfqq) && RB_EMPTY(&cfqq->sort_list))
                        cfq_del_cfqq_rr(cfqd, cfqq);
        }
 }
@@ -550,7 +614,7 @@ static void cfq_add_crq_rb(struct cfq_rq *crq)
        struct cfq_rq *__alias;
 
        crq->rb_key = rq_rb_key(rq);
-       cfqq->queued[crq->is_sync]++;
+       cfqq->queued[cfq_crq_is_sync(crq)]++;
 
        /*
         * looks a little odd, but the first insert might return an alias.
@@ -561,8 +625,8 @@ static void cfq_add_crq_rb(struct cfq_rq *crq)
 
        rb_insert_color(&crq->rb_node, &cfqq->sort_list);
 
-       if (!cfqq->on_rr)
-               cfq_add_cfqq_rr(cfqd, cfqq);
+       if (!cfq_cfqq_on_rr(cfqq))
+               cfq_add_cfqq_rr(cfqd, cfqq, cfq_crq_requeued(crq));
 
        /*
         * check if this request is a better next-serve candidate
@@ -575,17 +639,16 @@ cfq_reposition_crq_rb(struct cfq_queue *cfqq, struct cfq_rq *crq)
 {
        if (ON_RB(&crq->rb_node)) {
                rb_erase(&crq->rb_node, &cfqq->sort_list);
-               cfqq->queued[crq->is_sync]--;
+               cfqq->queued[cfq_crq_is_sync(crq)]--;
        }
 
        cfq_add_crq_rb(crq);
 }
 
-static struct request *
-cfq_find_rq_rb(struct cfq_data *cfqd, sector_t sector)
+static struct request *cfq_find_rq_rb(struct cfq_data *cfqd, sector_t sector)
 {
-       const unsigned long key = cfq_hash_key(cfqd, current);
-       struct cfq_queue *cfqq = cfq_find_cfq_hash(cfqd, key);
+       struct cfq_queue *cfqq = cfq_find_cfq_hash(cfqd, current->pid, CFQ_KEY_ANY);
        struct rb_node *n;
 
        if (!cfqq)
@@ -609,20 +672,25 @@ out:
 
 static void cfq_deactivate_request(request_queue_t *q, struct request *rq)
 {
+       struct cfq_data *cfqd = q->elevator->elevator_data;
        struct cfq_rq *crq = RQ_DATA(rq);
 
        if (crq) {
                struct cfq_queue *cfqq = crq->cfq_queue;
 
-               if (cfqq->cfqd->cfq_tagged) {
-                       cfqq->service_used--;
-                       cfq_sort_rr_list(cfqq, 0);
+               if (cfq_crq_in_driver(crq)) {
+                       cfq_clear_crq_in_driver(crq);
+                       WARN_ON(!cfqd->rq_in_driver);
+                       cfqd->rq_in_driver--;
                }
+               if (cfq_crq_in_flight(crq)) {
+                       const int sync = cfq_crq_is_sync(crq);
 
-               if (crq->accounted) {
-                       crq->accounted = 0;
-                       cfqq->cfqd->rq_in_driver--;
+                       cfq_clear_crq_in_flight(crq);
+                       WARN_ON(!cfqq->on_dispatch[sync]);
+                       cfqq->on_dispatch[sync]--;
                }
+               cfq_mark_crq_requeued(crq);
        }
 }
 
@@ -640,11 +708,10 @@ static void cfq_remove_request(request_queue_t *q, struct request *rq)
        struct cfq_rq *crq = RQ_DATA(rq);
 
        if (crq) {
-               cfq_remove_merge_hints(q, crq);
                list_del_init(&rq->queuelist);
+               cfq_del_crq_rb(crq);
+               cfq_remove_merge_hints(q, crq);
 
-               if (crq->cfq_queue)
-                       cfq_del_crq_rb(crq);
        }
 }
 
@@ -662,21 +729,15 @@ cfq_merge(request_queue_t *q, struct request **req, struct bio *bio)
        }
 
        __rq = cfq_find_rq_hash(cfqd, bio->bi_sector);
-       if (__rq) {
-               BUG_ON(__rq->sector + __rq->nr_sectors != bio->bi_sector);
-
-               if (elv_rq_merge_ok(__rq, bio)) {
-                       ret = ELEVATOR_BACK_MERGE;
-                       goto out;
-               }
+       if (__rq && elv_rq_merge_ok(__rq, bio)) {
+               ret = ELEVATOR_BACK_MERGE;
+               goto out;
        }
 
        __rq = cfq_find_rq_rb(cfqd, bio->bi_sector + bio_sectors(bio));
-       if (__rq) {
-               if (elv_rq_merge_ok(__rq, bio)) {
-                       ret = ELEVATOR_FRONT_MERGE;
-                       goto out;
-               }
+       if (__rq && elv_rq_merge_ok(__rq, bio)) {
+               ret = ELEVATOR_FRONT_MERGE;
+               goto out;
        }
 
        return ELEVATOR_NO_MERGE;
@@ -709,235 +770,496 @@ static void
 cfq_merged_requests(request_queue_t *q, struct request *rq,
                    struct request *next)
 {
-       struct cfq_rq *crq = RQ_DATA(rq);
-       struct cfq_rq *cnext = RQ_DATA(next);
-
        cfq_merged_request(q, rq);
 
-       if (!list_empty(&rq->queuelist) && !list_empty(&next->queuelist)) {
-               if (time_before(cnext->queue_start, crq->queue_start)) {
-                       list_move(&rq->queuelist, &next->queuelist);
-                       crq->queue_start = cnext->queue_start;
-               }
-       }
+       /*
+        * reposition in fifo if next is older than rq
+        */
+       if (!list_empty(&rq->queuelist) && !list_empty(&next->queuelist) &&
+           time_before(next->start_time, rq->start_time))
+               list_move(&rq->queuelist, &next->queuelist);
 
-       cfq_update_next_crq(cnext);
        cfq_remove_request(q, next);
 }
 
+static inline void
+__cfq_set_active_queue(struct cfq_data *cfqd, struct cfq_queue *cfqq)
+{
+       if (cfqq) {
+               /*
+                * stop potential idle class queues waiting service
+                */
+               del_timer(&cfqd->idle_class_timer);
+
+               cfqq->slice_start = jiffies;
+               cfqq->slice_end = 0;
+               cfqq->slice_left = 0;
+               cfq_clear_cfqq_must_alloc_slice(cfqq);
+               cfq_clear_cfqq_fifo_expire(cfqq);
+               cfq_clear_cfqq_expired(cfqq);
+       }
+
+       cfqd->active_queue = cfqq;
+}
+
 /*
- * we dispatch cfqd->cfq_quantum requests in total from the rr_list queues,
- * this function sector sorts the selected request to minimize seeks. we start
- * at cfqd->last_sector, not 0.
+ * each pass over the rr lists widens the window of prio levels scanned
+ * by one, so level 0 is considered on every pass and level 7 on only
+ * one pass in eight:
+ *
+ * 0
+ * 0,1
+ * 0,1,2
+ * 0,1,2,3
+ * 0,1,2,3,4
+ * 0,1,2,3,4,5
+ * 0,1,2,3,4,5,6
+ * 0,1,2,3,4,5,6,7
  */
-static void cfq_dispatch_sort(request_queue_t *q, struct cfq_rq *crq)
+static int cfq_get_next_prio_level(struct cfq_data *cfqd)
 {
-       struct cfq_data *cfqd = q->elevator->elevator_data;
-       struct cfq_queue *cfqq = crq->cfq_queue;
-       struct list_head *head = &q->queue_head, *entry = head;
-       struct request *__rq;
-       sector_t last;
-
-       cfq_del_crq_rb(crq);
-       cfq_remove_merge_hints(q, crq);
-       list_del(&crq->request->queuelist);
+       int prio, wrap;
 
-       last = cfqd->last_sector;
-       while ((entry = entry->prev) != head) {
-               __rq = list_entry_rq(entry);
+       prio = -1;
+       wrap = 0;
+       do {
+               int p;
 
-               if (blk_barrier_rq(crq->request))
-                       break;
-               if (!blk_fs_request(crq->request))
-                       break;
+               for (p = cfqd->cur_prio; p <= cfqd->cur_end_prio; p++) {
+                       if (!list_empty(&cfqd->rr_list[p])) {
+                               prio = p;
+                               break;
+                       }
+               }
 
-               if (crq->request->sector > __rq->sector)
-                       break;
-               if (__rq->sector > last && crq->request->sector < last) {
-                       last = crq->request->sector;
+               if (prio != -1)
                        break;
+               cfqd->cur_prio = 0;
+               if (++cfqd->cur_end_prio == CFQ_PRIO_LISTS) {
+                       cfqd->cur_end_prio = 0;
+                       if (wrap)
+                               break;
+                       wrap = 1;
                }
-       }
+       } while (1);
 
-       cfqd->last_sector = last;
-       crq->in_flight = 1;
-       cfqq->in_flight++;
-       list_add(&crq->request->queuelist, entry);
-}
+       if (unlikely(prio == -1))
+               return -1;
 
-/*
- * return expired entry, or NULL to just start from scratch in rbtree
- */
-static inline struct cfq_rq *cfq_check_fifo(struct cfq_queue *cfqq)
-{
-       struct cfq_data *cfqd = cfqq->cfqd;
-       const int reads = !list_empty(&cfqq->fifo[0]);
-       const int writes = !list_empty(&cfqq->fifo[1]);
-       unsigned long now = jiffies;
-       struct cfq_rq *crq;
+       BUG_ON(prio >= CFQ_PRIO_LISTS);
 
-       if (time_before(now, cfqq->last_fifo_expire + cfqd->cfq_fifo_batch_expire))
-               return NULL;
+       list_splice_init(&cfqd->rr_list[prio], &cfqd->cur_rr);
 
-       crq = RQ_DATA(list_entry(cfqq->fifo[0].next, struct request, queuelist));
-       if (reads && time_after(now, crq->queue_start + cfqd->cfq_fifo_expire_r)) {
-               cfqq->last_fifo_expire = now;
-               return crq;
+       cfqd->cur_prio = prio + 1;
+       if (cfqd->cur_prio > cfqd->cur_end_prio) {
+               cfqd->cur_end_prio = cfqd->cur_prio;
+               cfqd->cur_prio = 0;
        }
-
-       crq = RQ_DATA(list_entry(cfqq->fifo[1].next, struct request, queuelist));
-       if (writes && time_after(now, crq->queue_start + cfqd->cfq_fifo_expire_w)) {
-               cfqq->last_fifo_expire = now;
-               return crq;
+       if (cfqd->cur_end_prio == CFQ_PRIO_LISTS) {
+               cfqd->cur_prio = 0;
+               cfqd->cur_end_prio = 0;
        }
 
-       return NULL;
+       return prio;
 }
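
Concretely: if only the prio-2 and prio-5 lists are non-empty, successive passes scan the windows [0..0], [0..1] and [0..2], splice prio 2 into cur_rr, and leave cur_prio = 0, cur_end_prio = 3; later passes keep widening until prio 5 is found or the scan wraps. That is the service pattern sketched in the comment above: level 0 is eligible in every window, level 7 in only one of eight.
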
 
-/*
- * dispatch a single request from given queue
- */
-static inline void
-cfq_dispatch_request(request_queue_t *q, struct cfq_data *cfqd,
-                    struct cfq_queue *cfqq)
+static struct cfq_queue *cfq_set_active_queue(struct cfq_data *cfqd)
 {
-       struct cfq_rq *crq;
+       struct cfq_queue *cfqq;
 
        /*
-        * follow expired path, else get first next available
+        * if current queue is expired but not done with its requests yet,
+        * wait for that to happen
         */
-       if ((crq = cfq_check_fifo(cfqq)) == NULL) {
-               if (cfqd->find_best_crq)
-                       crq = cfqq->next_crq;
-               else
-                       crq = rb_entry_crq(rb_first(&cfqq->sort_list));
+       if ((cfqq = cfqd->active_queue) != NULL) {
+               if (cfq_cfqq_expired(cfqq) && cfq_cfqq_dispatched(cfqq))
+                       return NULL;
        }
 
-       cfqd->last_sector = crq->request->sector + crq->request->nr_sectors;
+       /*
+        * if current list is non-empty, grab first entry. if it is empty,
+        * advance to the next prio level and grab the first entry from
+        * whatever was spliced over, if anything
+        */
+       if (!list_empty(&cfqd->cur_rr) || cfq_get_next_prio_level(cfqd) != -1)
+               cfqq = list_entry_cfqq(cfqd->cur_rr.next);
 
        /*
-        * finally, insert request into driver list
+        * if we have idle queues and no rt or be queues had pending
+        * requests, either allow immediate service if the grace period
+        * has passed or arm the idle grace timer
         */
-       cfq_dispatch_sort(q, crq);
+       if (!cfqq && !list_empty(&cfqd->idle_rr)) {
+               unsigned long end = cfqd->last_end_request + CFQ_IDLE_GRACE;
+
+               if (time_after_eq(jiffies, end))
+                       cfqq = list_entry_cfqq(cfqd->idle_rr.next);
+               else
+                       mod_timer(&cfqd->idle_class_timer, end);
+       }
+
+       __cfq_set_active_queue(cfqd, cfqq);
+       return cfqq;
 }
 
-static int cfq_dispatch_requests(request_queue_t *q, int max_dispatch)
+/*
+ * current cfqq expired its slice (or was too idle), select new one
+ */
+static void
+__cfq_slice_expired(struct cfq_data *cfqd, struct cfq_queue *cfqq,
+                   int preempted)
 {
-       struct cfq_data *cfqd = q->elevator->elevator_data;
-       struct cfq_queue *cfqq;
-       struct list_head *entry, *tmp;
-       int queued, busy_queues, first_round;
-
-       if (list_empty(&cfqd->rr_list))
-               return 0;
+       unsigned long now = jiffies;
 
-       queued = 0;
-       first_round = 1;
-restart:
-       busy_queues = 0;
-       list_for_each_safe(entry, tmp, &cfqd->rr_list) {
-               cfqq = list_entry_cfqq(entry);
+       if (cfq_cfqq_wait_request(cfqq))
+               del_timer(&cfqd->idle_slice_timer);
 
-               BUG_ON(RB_EMPTY(&cfqq->sort_list));
+       if (!preempted && !cfq_cfqq_dispatched(cfqq))
+               cfqq->service_last = now;
 
-               /*
-                * first round of queueing, only select from queues that
-                * don't already have io in-flight
-                */
-               if (first_round && cfqq->in_flight)
-                       continue;
+       cfq_clear_cfqq_must_dispatch(cfqq);
+       cfq_clear_cfqq_wait_request(cfqq);
 
-               cfq_dispatch_request(q, cfqd, cfqq);
+       /*
+        * store what was left of this slice, if the queue idled out
+        * or was preempted
+        */
+       if (time_after(cfqq->slice_end, now))
+               cfqq->slice_left = cfqq->slice_end - now;
+       else
+               cfqq->slice_left = 0;
 
-               if (!RB_EMPTY(&cfqq->sort_list))
-                       busy_queues++;
+       if (cfq_cfqq_on_rr(cfqq))
+               cfq_resort_rr_list(cfqq, preempted);
 
-               queued++;
-       }
+       if (cfqq == cfqd->active_queue)
+               cfqd->active_queue = NULL;
 
-       if ((queued < max_dispatch) && (busy_queues || first_round)) {
-               first_round = 0;
-               goto restart;
+       if (cfqd->active_cic) {
+               put_io_context(cfqd->active_cic->ioc);
+               cfqd->active_cic = NULL;
        }
 
-       return queued;
+       cfqd->dispatch_slice = 0;
 }
 
-static inline void cfq_account_dispatch(struct cfq_rq *crq)
+static inline void cfq_slice_expired(struct cfq_data *cfqd, int preempted)
 {
-       struct cfq_queue *cfqq = crq->cfq_queue;
-       struct cfq_data *cfqd = cfqq->cfqd;
-       unsigned long now, elapsed;
+       struct cfq_queue *cfqq = cfqd->active_queue;
 
-       if (!blk_fs_request(crq->request))
-               return;
+       if (cfqq) {
+               /*
+                * use deferred expiry, if there are requests in progress, so
+                * as not to disturb the slice of the next queue
+                */
+               if (cfq_cfqq_dispatched(cfqq))
+                       cfq_mark_cfqq_expired(cfqq);
+               else
+                       __cfq_slice_expired(cfqd, cfqq, preempted);
+       }
+}
 
-       /*
-        * accounted bit is necessary since some drivers will call
-        * elv_next_request() many times for the same request (eg ide)
-        */
-       if (crq->accounted)
-               return;
+static int cfq_arm_slice_timer(struct cfq_data *cfqd, struct cfq_queue *cfqq)
 
-       now = jiffies;
-       if (cfqq->service_start == ~0UL)
-               cfqq->service_start = now;
+{
+       WARN_ON(!RB_EMPTY(&cfqq->sort_list));
+       WARN_ON(cfqq != cfqd->active_queue);
 
        /*
-        * on drives with tagged command queueing, command turn-around time
-        * doesn't necessarily reflect the time spent processing this very
-        * command inside the drive. so do the accounting differently there,
-        * by just sorting on the number of requests
+        * idle is disabled, either manually or by past process history
         */
-       if (cfqd->cfq_tagged) {
-               if (time_after(now, cfqq->service_start + cfq_service)) {
-                       cfqq->service_start = now;
-                       cfqq->service_used /= 10;
-               }
-
-               cfqq->service_used++;
-               cfq_sort_rr_list(cfqq, 0);
-       }
+       if (!cfqd->cfq_slice_idle)
+               return 0;
+       if (!cfq_cfqq_idle_window(cfqq))
+               return 0;
+       /*
+        * task has exited, don't wait
+        */
+       if (cfqd->active_cic && !cfqd->active_cic->ioc->task)
+               return 0;
 
-       elapsed = now - crq->queue_start;
-       if (elapsed > max_elapsed_dispatch)
-               max_elapsed_dispatch = elapsed;
+       cfq_mark_cfqq_must_dispatch(cfqq);
+       cfq_mark_cfqq_wait_request(cfqq);
 
-       crq->accounted = 1;
-       crq->service_start = now;
+       if (!timer_pending(&cfqd->idle_slice_timer)) {
+               unsigned long slice_left = min(cfqq->slice_end - 1, (unsigned long) cfqd->cfq_slice_idle);
 
-       if (++cfqd->rq_in_driver >= CFQ_MAX_TAG && !cfqd->cfq_tagged) {
-               cfqq->cfqd->cfq_tagged = 1;
-               printk("cfq: depth %d reached, tagging now on\n", CFQ_MAX_TAG);
+               cfqd->idle_slice_timer.expires = jiffies + slice_left;
+               add_timer(&cfqd->idle_slice_timer);
        }
+
+       return 1;
 }
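
The point of the idle window: rather than switching queues the moment the active sync queue runs dry, CFQ is willing to let the disk sit idle for up to cfq_slice_idle (10 ms at HZ=1000) waiting for the process's next, likely nearby, request; dependent-read workloads keep their slice and their head locality that way.
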
 
-static inline void
-cfq_account_completion(struct cfq_queue *cfqq, struct cfq_rq *crq)
+/*
+ * we dispatch cfqd->cfq_quantum requests in total from the rr_list queues,
+ * this function sector sorts the selected request to minimize seeks. we start
+ * at cfqd->last_sector, not 0.
+ */
+static void cfq_dispatch_sort(request_queue_t *q, struct cfq_rq *crq)
+{
+       struct cfq_data *cfqd = q->elevator->elevator_data;
+       struct cfq_queue *cfqq = crq->cfq_queue;
+       struct list_head *head = &q->queue_head, *entry = head;
+       struct request *__rq;
+       sector_t last;
+
+       list_del(&crq->request->queuelist);
+
+       last = cfqd->last_sector;
+       list_for_each_entry_reverse(__rq, head, queuelist) {
+               struct cfq_rq *__crq = RQ_DATA(__rq);
+
+               if (blk_barrier_rq(__rq))
+                       break;
+               if (!blk_fs_request(__rq))
+                       break;
+               if (cfq_crq_requeued(__crq))
+                       break;
+
+               if (__rq->sector <= crq->request->sector)
+                       break;
+               if (__rq->sector > last && crq->request->sector < last) {
+                       last = crq->request->sector + crq->request->nr_sectors;
+                       break;
+               }
+               entry = &__rq->queuelist;
+       }
+
+       cfqd->last_sector = last;
+
+       cfqq->next_crq = cfq_find_next_crq(cfqd, cfqq, crq);
+
+       cfq_del_crq_rb(crq);
+       cfq_remove_merge_hints(q, crq);
+
+       cfq_mark_crq_in_flight(crq);
+       cfq_clear_crq_requeued(crq);
+
+       cfqq->on_dispatch[cfq_crq_is_sync(crq)]++;
+       list_add_tail(&crq->request->queuelist, entry);
+}
+
+/*
+ * return expired entry, or NULL to just start from scratch in rbtree
+ */
+static inline struct cfq_rq *cfq_check_fifo(struct cfq_queue *cfqq)
 {
        struct cfq_data *cfqd = cfqq->cfqd;
+       struct request *rq;
+       struct cfq_rq *crq;
 
-       if (!crq->accounted)
+       if (cfq_cfqq_fifo_expire(cfqq))
+               return NULL;
+
+       if (!list_empty(&cfqq->fifo)) {
+               int fifo = cfq_cfqq_class_sync(cfqq);
+
+               crq = RQ_DATA(list_entry_fifo(cfqq->fifo.next));
+               rq = crq->request;
+               if (time_after(jiffies, rq->start_time + cfqd->cfq_fifo_expire[fifo])) {
+                       cfq_mark_cfqq_fifo_expire(cfqq);
+                       return crq;
+               }
+       }
+
+       return NULL;
+}
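
Worked example, assuming HZ=1000: a request on a sync-class queue that has sat in the fifo for cfq_fifo_expire[1] = 125 ms (250 ms for async queues) is returned here ahead of the sector-sorted next_crq, and marking fifo_expire limits this to one fifo pull per slice; the flag is cleared again in __cfq_set_active_queue.
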
+
+/*
+ * Scale schedule slice based on io priority. Use the sync time slice only
+ * if a queue is marked sync and has sync io queued. A sync queue with async
+ * io only, should not get full sync slice length.
+ */
+static inline int
+cfq_prio_to_slice(struct cfq_data *cfqd, struct cfq_queue *cfqq)
+{
+       const int base_slice = cfqd->cfq_slice[cfq_cfqq_sync(cfqq)];
+
+       WARN_ON(cfqq->ioprio >= IOPRIO_BE_NR);
+
+       return base_slice + (base_slice/CFQ_SLICE_SCALE * (4 - cfqq->ioprio));
+}
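
Worked numbers, assuming HZ=1000: base_slice is 100 jiffies for a sync queue (cfq_slice_sync = HZ/10) and 40 for async (HZ/25). With CFQ_SLICE_SCALE = 5, a sync queue therefore gets 100 + 20 * (4 - ioprio) jiffies: 180 ms at ioprio 0, 100 ms at the middle ioprio 4, and 40 ms at ioprio 7.
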
+
+static inline void
+cfq_set_prio_slice(struct cfq_data *cfqd, struct cfq_queue *cfqq)
+{
+       cfqq->slice_end = cfq_prio_to_slice(cfqd, cfqq) + jiffies;
+}
+
+static inline int
+cfq_prio_to_maxrq(struct cfq_data *cfqd, struct cfq_queue *cfqq)
+{
+       const int base_rq = cfqd->cfq_slice_async_rq;
+
+       WARN_ON(cfqq->ioprio >= IOPRIO_BE_NR);
+
+       return 2 * (base_rq + base_rq * (CFQ_PRIO_LISTS - 1 - cfqq->ioprio));
+}
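
Likewise, with the default cfq_slice_async_rq = 2 and CFQ_PRIO_LISTS = 8, an async queue may dispatch 2 * (2 + 2 * (7 - ioprio)) requests per slice: 32 at ioprio 0, 16 at ioprio 4, and 4 at ioprio 7.
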
+
+/*
+ * get next queue for service
+ */
+static struct cfq_queue *cfq_select_queue(struct cfq_data *cfqd, int force)
+{
+       unsigned long now = jiffies;
+       struct cfq_queue *cfqq;
+
+       cfqq = cfqd->active_queue;
+       if (!cfqq)
+               goto new_queue;
+
+       if (cfq_cfqq_expired(cfqq))
+               goto new_queue;
+
+       /*
+        * slice has expired
+        */
+       if (!cfq_cfqq_must_dispatch(cfqq) && time_after(now, cfqq->slice_end))
+               goto expire;
+
+       /*
+        * if queue has requests, dispatch one. if not, check if
+        * enough slice is left to wait for one
+        */
+       if (!RB_EMPTY(&cfqq->sort_list))
+               goto keep_queue;
+       else if (!force && cfq_cfqq_class_sync(cfqq) &&
+                time_before(now, cfqq->slice_end)) {
+               if (cfq_arm_slice_timer(cfqd, cfqq))
+                       return NULL;
+       }
+
+expire:
+       cfq_slice_expired(cfqd, 0);
+new_queue:
+       cfqq = cfq_set_active_queue(cfqd);
+keep_queue:
+       return cfqq;
+}
+
+static int
+__cfq_dispatch_requests(struct cfq_data *cfqd, struct cfq_queue *cfqq,
+                       int max_dispatch)
+{
+       int dispatched = 0;
+
+       BUG_ON(RB_EMPTY(&cfqq->sort_list));
+
+       do {
+               struct cfq_rq *crq;
+
+               /*
+                * follow expired path, else get first next available
+                */
+               if ((crq = cfq_check_fifo(cfqq)) == NULL)
+                       crq = cfqq->next_crq;
+
+               /*
+                * finally, insert request into driver dispatch list
+                */
+               cfq_dispatch_sort(cfqd->queue, crq);
+
+               cfqd->dispatch_slice++;
+               dispatched++;
+
+               if (!cfqd->active_cic) {
+                       atomic_inc(&crq->io_context->ioc->refcount);
+                       cfqd->active_cic = crq->io_context;
+               }
+
+               if (RB_EMPTY(&cfqq->sort_list))
+                       break;
+
+       } while (dispatched < max_dispatch);
+
+       /*
+        * if slice end isn't set yet, set it. if at least one request was
+        * sync, use the sync time slice value
+        */
+       if (!cfqq->slice_end)
+               cfq_set_prio_slice(cfqd, cfqq);
+
+       /*
+        * expire an async queue immediately if it has used up its slice. an
+        * idle queue always expires after 1 dispatch round.
+        */
+       if ((!cfq_cfqq_sync(cfqq) &&
+           cfqd->dispatch_slice >= cfq_prio_to_maxrq(cfqd, cfqq)) ||
+           cfq_class_idle(cfqq))
+               cfq_slice_expired(cfqd, 0);
+
+       return dispatched;
+}
+
+static int
+cfq_dispatch_requests(request_queue_t *q, int max_dispatch, int force)
+{
+       struct cfq_data *cfqd = q->elevator->elevator_data;
+       struct cfq_queue *cfqq;
+
+       if (!cfqd->busy_queues)
+               return 0;
+
+       cfqq = cfq_select_queue(cfqd, force);
+       if (cfqq) {
+               cfq_clear_cfqq_must_dispatch(cfqq);
+               cfq_clear_cfqq_wait_request(cfqq);
+               del_timer(&cfqd->idle_slice_timer);
+
+               if (cfq_class_idle(cfqq))
+                       max_dispatch = 1;
+
+               return __cfq_dispatch_requests(cfqd, cfqq, max_dispatch);
+       }
+
+       return 0;
+}
+
+static inline void cfq_account_dispatch(struct cfq_rq *crq)
+{
+       struct cfq_queue *cfqq = crq->cfq_queue;
+       struct cfq_data *cfqd = cfqq->cfqd;
+
+       if (unlikely(!blk_fs_request(crq->request)))
+               return;
+
+       /*
+        * accounted bit is necessary since some drivers will call
+        * elv_next_request() many times for the same request (eg ide)
+        */
+       if (cfq_crq_in_driver(crq))
+               return;
+
+       cfq_mark_crq_in_driver(crq);
+       cfqd->rq_in_driver++;
+}
+
+static inline void
+cfq_account_completion(struct cfq_queue *cfqq, struct cfq_rq *crq)
+{
+       struct cfq_data *cfqd = cfqq->cfqd;
+       unsigned long now;
+
+       if (!cfq_crq_in_driver(crq))
                return;
 
+       now = jiffies;
+
        WARN_ON(!cfqd->rq_in_driver);
        cfqd->rq_in_driver--;
 
-       if (!cfqd->cfq_tagged) {
-               unsigned long now = jiffies;
-               unsigned long duration = now - crq->service_start;
+       if (!cfq_class_idle(cfqq))
+               cfqd->last_end_request = now;
 
-               if (time_after(now, cfqq->service_start + cfq_service)) {
-                       cfqq->service_start = now;
-                       cfqq->service_used >>= 3;
+       if (!cfq_cfqq_dispatched(cfqq)) {
+               if (cfq_cfqq_on_rr(cfqq)) {
+                       cfqq->service_last = now;
+                       cfq_resort_rr_list(cfqq, 0);
+               }
+               if (cfq_cfqq_expired(cfqq)) {
+                       __cfq_slice_expired(cfqd, cfqq, 0);
+                       cfq_schedule_dispatch(cfqd);
                }
-
-               cfqq->service_used += duration;
-               cfq_sort_rr_list(cfqq, 0);
-
-               if (duration > max_elapsed_crq)
-                       max_elapsed_crq = duration;
        }
+
+       if (cfq_crq_is_sync(crq))
+               crq->io_context->last_end_request = now;
 }
 
 static struct request *cfq_next_request(request_queue_t *q)
@@ -950,7 +1272,18 @@ static struct request *cfq_next_request(request_queue_t *q)
 dispatch:
                rq = list_entry_rq(q->queue_head.next);
 
-               if ((crq = RQ_DATA(rq)) != NULL) {
+               crq = RQ_DATA(rq);
+               if (crq) {
+                       struct cfq_queue *cfqq = crq->cfq_queue;
+
+                       /*
+                        * if idle window is disabled, allow queue buildup
+                        */
+                       if (!cfq_crq_in_driver(crq) &&
+                           !cfq_cfqq_idle_window(cfqq) &&
+                           cfqd->rq_in_driver >= cfqd->cfq_max_depth)
+                               return NULL;
+
                        cfq_remove_merge_hints(q, crq);
                        cfq_account_dispatch(crq);
                }
@@ -958,7 +1291,7 @@ dispatch:
                return rq;
        }
 
-       if (cfq_dispatch_requests(q, cfqd->cfq_quantum))
+       if (cfq_dispatch_requests(q, cfqd->cfq_quantum, 0))
                goto dispatch;
 
        return NULL;
@@ -972,13 +1305,21 @@ dispatch:
  */
 static void cfq_put_queue(struct cfq_queue *cfqq)
 {
-       BUG_ON(!atomic_read(&cfqq->ref));
+       struct cfq_data *cfqd = cfqq->cfqd;
+
+       BUG_ON(atomic_read(&cfqq->ref) <= 0);
 
        if (!atomic_dec_and_test(&cfqq->ref))
                return;
 
        BUG_ON(rb_first(&cfqq->sort_list));
-       BUG_ON(cfqq->on_rr);
+       BUG_ON(cfqq->allocated[READ] + cfqq->allocated[WRITE]);
+       BUG_ON(cfq_cfqq_on_rr(cfqq));
+
+       if (unlikely(cfqd->active_queue == cfqq)) {
+               __cfq_slice_expired(cfqd, cfqq, 0);
+               cfq_schedule_dispatch(cfqd);
+       }
 
        cfq_put_cfqd(cfqq->cfqd);
 
@@ -991,15 +1332,17 @@ static void cfq_put_queue(struct cfq_queue *cfqq)
 }
 
 static inline struct cfq_queue *
-__cfq_find_cfq_hash(struct cfq_data *cfqd, unsigned long key, const int hashval)
+__cfq_find_cfq_hash(struct cfq_data *cfqd, unsigned int key, unsigned int prio,
+                   const int hashval)
 {
        struct hlist_head *hash_list = &cfqd->cfq_hash[hashval];
        struct hlist_node *entry, *next;
 
        hlist_for_each_safe(entry, next, hash_list) {
                struct cfq_queue *__cfqq = list_entry_qhash(entry);
+               const unsigned short __p = IOPRIO_PRIO_VALUE(__cfqq->ioprio_class, __cfqq->ioprio);
 
-               if (__cfqq->key == key)
+               if (__cfqq->key == key && (__p == prio || prio == CFQ_KEY_ANY))
                        return __cfqq;
        }
 
@@ -1007,94 +1350,220 @@ __cfq_find_cfq_hash(struct cfq_data *cfqd, unsigned long key, const int hashval)
 }
 
 static struct cfq_queue *
-cfq_find_cfq_hash(struct cfq_data *cfqd, unsigned long key)
+cfq_find_cfq_hash(struct cfq_data *cfqd, unsigned int key, unsigned short prio)
 {
-       return __cfq_find_cfq_hash(cfqd, key, hash_long(key, CFQ_QHASH_SHIFT));
+       return __cfq_find_cfq_hash(cfqd, key, prio, hash_long(key, CFQ_QHASH_SHIFT));
 }
 
-static inline void
-cfq_rehash_cfqq(struct cfq_data *cfqd, struct cfq_queue **cfqq,
-               struct cfq_io_context *cic)
+static void cfq_free_io_context(struct cfq_io_context *cic)
 {
-       unsigned long hashkey = cfq_hash_key(cfqd, current);
-       unsigned long hashval = hash_long(hashkey, CFQ_QHASH_SHIFT);
-       struct cfq_queue *__cfqq;
-       unsigned long flags;
-
-       spin_lock_irqsave(cfqd->queue->queue_lock, flags);
-
-       hlist_del(&(*cfqq)->cfq_hash);
+       struct cfq_io_context *__cic;
+       struct list_head *entry, *next;
 
-       __cfqq = __cfq_find_cfq_hash(cfqd, hashkey, hashval);
-       if (!__cfqq || __cfqq == *cfqq) {
-               __cfqq = *cfqq;
-               hlist_add_head(&__cfqq->cfq_hash, &cfqd->cfq_hash[hashval]);
-               __cfqq->key_type = cfqd->key_type;
-       } else {
-               atomic_inc(&__cfqq->ref);
-               cic->cfqq = __cfqq;
-               cfq_put_queue(*cfqq);
-               *cfqq = __cfqq;
+       list_for_each_safe(entry, next, &cic->list) {
+               __cic = list_entry(entry, struct cfq_io_context, list);
+               kmem_cache_free(cfq_ioc_pool, __cic);
        }
 
-       cic->cfqq = __cfqq;
-       spin_unlock_irqrestore(cfqd->queue->queue_lock, flags);
+       kmem_cache_free(cfq_ioc_pool, cic);
 }
 
-static void cfq_free_io_context(struct cfq_io_context *cic)
+/*
+ * Called with interrupts disabled
+ */
+static void cfq_exit_single_io_context(struct cfq_io_context *cic)
 {
-       kmem_cache_free(cfq_ioc_pool, cic);
+       struct cfq_data *cfqd = cic->cfqq->cfqd;
+       request_queue_t *q = cfqd->queue;
+
+       WARN_ON(!irqs_disabled());
+
+       spin_lock(q->queue_lock);
+
+       if (unlikely(cic->cfqq == cfqd->active_queue)) {
+               __cfq_slice_expired(cfqd, cic->cfqq, 0);
+               cfq_schedule_dispatch(cfqd);
+       }
+
+       cfq_put_queue(cic->cfqq);
+       cic->cfqq = NULL;
+       spin_unlock(q->queue_lock);
 }
 
 /*
- * locking hierarchy is: io_context lock -> queue locks
+ * Another task may update the task cic list, if it is doing a queue lookup
+ * on its behalf. cfq_cic_lock excludes such concurrent updates
  */
 static void cfq_exit_io_context(struct cfq_io_context *cic)
 {
-       struct cfq_queue *cfqq = cic->cfqq;
-       struct list_head *entry = &cic->list;
-       request_queue_t *q;
+       struct cfq_io_context *__cic;
+       struct list_head *entry;
        unsigned long flags;
 
+       local_irq_save(flags);
+
        /*
         * put the reference this task is holding to the various queues
         */
-       spin_lock_irqsave(&cic->ioc->lock, flags);
-       while ((entry = cic->list.next) != &cic->list) {
-               struct cfq_io_context *__cic;
-
+       list_for_each(entry, &cic->list) {
                __cic = list_entry(entry, struct cfq_io_context, list);
-               list_del(entry);
-
-               q = __cic->cfqq->cfqd->queue;
-               spin_lock(q->queue_lock);
-               cfq_put_queue(__cic->cfqq);
-               spin_unlock(q->queue_lock);
+               cfq_exit_single_io_context(__cic);
        }
 
-       q = cfqq->cfqd->queue;
-       spin_lock(q->queue_lock);
-       cfq_put_queue(cfqq);
-       spin_unlock(q->queue_lock);
-
-       cic->cfqq = NULL;
-       spin_unlock_irqrestore(&cic->ioc->lock, flags);
+       cfq_exit_single_io_context(cic);
+       local_irq_restore(flags);
 }
 
-static struct cfq_io_context *cfq_alloc_io_context(int gfp_flags)
+static struct cfq_io_context *
+cfq_alloc_io_context(struct cfq_data *cfqd, int gfp_mask)
 {
-       struct cfq_io_context *cic = kmem_cache_alloc(cfq_ioc_pool, gfp_flags);
+       struct cfq_io_context *cic = kmem_cache_alloc(cfq_ioc_pool, gfp_mask);
 
        if (cic) {
-               cic->dtor = cfq_free_io_context;
-               cic->exit = cfq_exit_io_context;
                INIT_LIST_HEAD(&cic->list);
                cic->cfqq = NULL;
+               cic->key = NULL;
+               cic->last_end_request = jiffies;
+               cic->ttime_total = 0;
+               cic->ttime_samples = 0;
+               cic->ttime_mean = 0;
+               cic->dtor = cfq_free_io_context;
+               cic->exit = cfq_exit_io_context;
        }
 
        return cic;
 }
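
The ttime_* fields give each cfq io context a running estimate of the process's think time, i.e. the gap between a request completing (last_end_request) and the next one being issued; presumably ttime_mean tracks ttime_total / ttime_samples. The update code is outside this excerpt, but the mean is what the idle_window heuristic keys off.
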
 
+static void cfq_init_prio_data(struct cfq_queue *cfqq)
+{
+       struct task_struct *tsk = current;
+       int ioprio_class;
+
+       if (!cfq_cfqq_prio_changed(cfqq))
+               return;
+
+       ioprio_class = IOPRIO_PRIO_CLASS(tsk->ioprio);
+       switch (ioprio_class) {
+               default:
+                       printk(KERN_ERR "cfq: bad prio %x\n", ioprio_class);
+               case IOPRIO_CLASS_NONE:
+                       /*
+                        * no prio set, place us in the middle of the BE classes
+                        */
+                       cfqq->ioprio = task_nice_ioprio(tsk);
+                       cfqq->ioprio_class = IOPRIO_CLASS_BE;
+                       break;
+               case IOPRIO_CLASS_RT:
+                       cfqq->ioprio = task_ioprio(tsk);
+                       cfqq->ioprio_class = IOPRIO_CLASS_RT;
+                       break;
+               case IOPRIO_CLASS_BE:
+                       cfqq->ioprio = task_ioprio(tsk);
+                       cfqq->ioprio_class = IOPRIO_CLASS_BE;
+                       break;
+               case IOPRIO_CLASS_IDLE:
+                       cfqq->ioprio_class = IOPRIO_CLASS_IDLE;
+                       cfqq->ioprio = 7;
+                       cfq_clear_cfqq_idle_window(cfqq);
+                       break;
+       }
+
+       /*
+        * keep track of original prio settings in case we have to temporarily
+        * elevate the priority of this queue
+        */
+       cfqq->org_ioprio = cfqq->ioprio;
+       cfqq->org_ioprio_class = cfqq->ioprio_class;
+
+       if (cfq_cfqq_on_rr(cfqq))
+               cfq_resort_rr_list(cfqq, 0);
+
+       cfq_clear_cfqq_prio_changed(cfqq);
+}
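
For example, a task that never called sys_ioprio_set arrives here as IOPRIO_CLASS_NONE and is filed under IOPRIO_CLASS_BE with a prio derived from its nice level via task_nice_ioprio() (from the accompanying ioprio patch; nice 0 comes out as the middle prio 4, matching the comment).
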
+
+static inline void changed_ioprio(struct cfq_queue *cfqq)
+{
+       if (cfqq) {
+               struct cfq_data *cfqd = cfqq->cfqd;
+
+               spin_lock(cfqd->queue->queue_lock);
+               cfq_mark_cfqq_prio_changed(cfqq);
+               cfq_init_prio_data(cfqq);
+               spin_unlock(cfqd->queue->queue_lock);
+       }
+}
+
+/*
+ * callback from sys_ioprio_set, irqs are disabled
+ */
+static int cfq_ioc_set_ioprio(struct io_context *ioc, unsigned int ioprio)
+{
+       struct cfq_io_context *cic = ioc->cic;
+
+       changed_ioprio(cic->cfqq);
+
+       list_for_each_entry(cic, &cic->list, list)
+               changed_ioprio(cic->cfqq);
+
+       return 0;
+}
+
+static struct cfq_queue *
+cfq_get_queue(struct cfq_data *cfqd, unsigned int key, unsigned short ioprio,
+             int gfp_mask)
+{
+       const int hashval = hash_long(key, CFQ_QHASH_SHIFT);
+       struct cfq_queue *cfqq, *new_cfqq = NULL;
+
+retry:
+       cfqq = __cfq_find_cfq_hash(cfqd, key, ioprio, hashval);
+
+       if (!cfqq) {
+               if (new_cfqq) {
+                       cfqq = new_cfqq;
+                       new_cfqq = NULL;
+               } else if (gfp_mask & __GFP_WAIT) {
+                       spin_unlock_irq(cfqd->queue->queue_lock);
+                       new_cfqq = kmem_cache_alloc(cfq_pool, gfp_mask);
+                       spin_lock_irq(cfqd->queue->queue_lock);
+                       goto retry;
+               } else {
+                       cfqq = kmem_cache_alloc(cfq_pool, gfp_mask);
+                       if (!cfqq)
+                               goto out;
+               }
+
+               memset(cfqq, 0, sizeof(*cfqq));
+
+               INIT_HLIST_NODE(&cfqq->cfq_hash);
+               INIT_LIST_HEAD(&cfqq->cfq_list);
+               RB_CLEAR_ROOT(&cfqq->sort_list);
+               INIT_LIST_HEAD(&cfqq->fifo);
+
+               cfqq->key = key;
+               hlist_add_head(&cfqq->cfq_hash, &cfqd->cfq_hash[hashval]);
+               atomic_set(&cfqq->ref, 0);
+               cfqq->cfqd = cfqd;
+               atomic_inc(&cfqd->ref);
+               cfqq->service_last = 0;
+               /*
+                * set ->slice_left to allow preemption for a new process
+                */
+               cfqq->slice_left = 2 * cfqd->cfq_slice_idle;
+               cfq_mark_cfqq_idle_window(cfqq);
+               cfq_mark_cfqq_prio_changed(cfqq);
+               cfq_init_prio_data(cfqq);
+       }
+
+       if (new_cfqq)
+               kmem_cache_free(cfq_pool, new_cfqq);
+
+       atomic_inc(&cfqq->ref);
+out:
+       WARN_ON((gfp_mask & __GFP_WAIT) && !cfqq);
+       return cfqq;
+}
+
 /*
  * Setup general io context and cfq io context. There can be several cfq
  * io contexts per general io context, if this process is doing io to more
@@ -1102,39 +1571,39 @@ static struct cfq_io_context *cfq_alloc_io_context(int gfp_flags)
  * cfqq, so we don't need to worry about it disappearing
  */
 static struct cfq_io_context *
-cfq_get_io_context(struct cfq_queue **cfqq, int gfp_flags)
+cfq_get_io_context(struct cfq_data *cfqd, pid_t pid, int gfp_mask)
 {
-       struct cfq_data *cfqd = (*cfqq)->cfqd;
-       struct cfq_queue *__cfqq = *cfqq;
+       struct io_context *ioc = NULL;
        struct cfq_io_context *cic;
-       struct io_context *ioc;
 
-       might_sleep_if(gfp_flags & __GFP_WAIT);
+       might_sleep_if(gfp_mask & __GFP_WAIT);
 
-       ioc = get_io_context(gfp_flags);
+       ioc = get_io_context(gfp_mask);
        if (!ioc)
                return NULL;
 
        if ((cic = ioc->cic) == NULL) {
-               cic = cfq_alloc_io_context(gfp_flags);
+               cic = cfq_alloc_io_context(cfqd, gfp_mask);
 
                if (cic == NULL)
                        goto err;
 
+               /*
+                * manually increment generic io_context usage count, it
+                * cannot go away since we are already holding one ref to it
+                */
                ioc->cic = cic;
+               ioc->set_ioprio = cfq_ioc_set_ioprio;
                cic->ioc = ioc;
-               cic->cfqq = __cfqq;
-               atomic_inc(&__cfqq->ref);
+               cic->key = cfqd;
+               atomic_inc(&cfqd->ref);
        } else {
                struct cfq_io_context *__cic;
-               unsigned long flags;
 
                /*
-                * since the first cic on the list is actually the head
-                * itself, need to check this here or we'll duplicate an
-                * cic per ioc for no reason
+                * the first cic on the list is actually the head itself
                 */
-               if (cic->cfqq == __cfqq)
+               if (cic->key == cfqd)
                        goto out;
 
                /*
@@ -1142,152 +1611,250 @@ cfq_get_io_context(struct cfq_queue **cfqq, int gfp_flags)
                 * should be ok here, the list will usually not be more than
                 * 1 or a few entries long
                 */
-               spin_lock_irqsave(&ioc->lock, flags);
                list_for_each_entry(__cic, &cic->list, list) {
                        /*
                         * this process is already holding a reference to
                         * this queue, so no need to get one more
                         */
-                       if (__cic->cfqq == __cfqq) {
+                       if (__cic->key == cfqd) {
                                cic = __cic;
-                               spin_unlock_irqrestore(&ioc->lock, flags);
                                goto out;
                        }
                }
-               spin_unlock_irqrestore(&ioc->lock, flags);
 
                /*
                 * nope, process doesn't have a cic associated with this
                 * cfqq yet. get a new one and add to list
                 */
-               __cic = cfq_alloc_io_context(gfp_flags);
+               __cic = cfq_alloc_io_context(cfqd, gfp_mask);
                if (__cic == NULL)
                        goto err;
 
                __cic->ioc = ioc;
-               __cic->cfqq = __cfqq;
-               atomic_inc(&__cfqq->ref);
-               spin_lock_irqsave(&ioc->lock, flags);
+               __cic->key = cfqd;
+               atomic_inc(&cfqd->ref);
                list_add(&__cic->list, &cic->list);
-               spin_unlock_irqrestore(&ioc->lock, flags);
-
                cic = __cic;
-               *cfqq = __cfqq;
        }
 
-out:
-       /*
-        * if key_type has been changed on the fly, we lazily rehash
-        * each queue at lookup time
-        */
-       if ((*cfqq)->key_type != cfqd->key_type)
-               cfq_rehash_cfqq(cfqd, cfqq, cic);
+out:
+       return cic;
+err:
+       put_io_context(ioc);
+       return NULL;
+}
+
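+/*
+ * Track the process' "thinktime": the gap between finishing one request
+ * and issuing the next. Both counters below are decaying averages kept in
+ * fixed point, scaled by 256, with each new sample weighted 1/8. Starting
+ * from zero, ttime_samples reaches (7*60 + 256) / 8 == 84 after the third
+ * sample, just past the sample_valid() threshold of 80.
+ */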
+static void
+cfq_update_io_thinktime(struct cfq_data *cfqd, struct cfq_io_context *cic)
+{
+       unsigned long elapsed, ttime;
+
+       /*
+        * if this context already has stuff queued, thinktime should be
+        * measured from the last queue, not the last end. that variant is
+        * currently compiled out below
+        */
+#if 0
+       if (time_after(cic->last_end_request, cic->last_queue))
+               elapsed = jiffies - cic->last_end_request;
+       else
+               elapsed = jiffies - cic->last_queue;
+#else
+       elapsed = jiffies - cic->last_end_request;
+#endif
+
+       ttime = min(elapsed, 2UL * cfqd->cfq_slice_idle);
+
+       cic->ttime_samples = (7*cic->ttime_samples + 256) / 8;
+       cic->ttime_total = (7*cic->ttime_total + 256*ttime) / 8;
+       cic->ttime_mean = (cic->ttime_total + 128) / cic->ttime_samples;
+}
+
+#define sample_valid(samples)  ((samples) > 80)
+
+/*
+ * Disable idle window if the process thinks too long or seeks so much that
+ * it doesn't matter
+ */
+static void
+cfq_update_idle_window(struct cfq_data *cfqd, struct cfq_queue *cfqq,
+                      struct cfq_io_context *cic)
+{
+       int enable_idle = cfq_cfqq_idle_window(cfqq);
+
+       if (!cic->ioc->task || !cfqd->cfq_slice_idle)
+               enable_idle = 0;
+       else if (sample_valid(cic->ttime_samples)) {
+               if (cic->ttime_mean > cfqd->cfq_slice_idle)
+                       enable_idle = 0;
+               else
+                       enable_idle = 1;
+       }
+
+       if (enable_idle)
+               cfq_mark_cfqq_idle_window(cfqq);
+       else
+               cfq_clear_cfqq_idle_window(cfqq);
+}
+
+/*
+ * Check if new_cfqq should preempt the currently active queue. Returns 0
+ * for no (also when we aren't sure), 1 if a preempt should happen.
+ */
+static int
+cfq_should_preempt(struct cfq_data *cfqd, struct cfq_queue *new_cfqq,
+                  struct cfq_rq *crq)
+{
+       struct cfq_queue *cfqq = cfqd->active_queue;
+
+       if (cfq_class_idle(new_cfqq))
+               return 0;
+
+       if (!cfqq)
+               return 1;
+
+       if (cfq_class_idle(cfqq))
+               return 1;
+       if (!cfq_cfqq_wait_request(new_cfqq))
+               return 0;
+       /*
+        * if it doesn't have slice left, forget it
+        */
+       if (new_cfqq->slice_left < cfqd->cfq_slice_idle)
+               return 0;
+       if (cfq_crq_is_sync(crq) && !cfq_cfqq_sync(cfqq))
+               return 1;
+
+       return 0;
+}
+
+/*
+ * cfqq preempts the active queue. if we allowed preempt with no slice left,
+ * let it have half of its nominal slice.
+ */
+static void cfq_preempt_queue(struct cfq_data *cfqd, struct cfq_queue *cfqq)
+{
+       struct cfq_queue *__cfqq, *next;
+
+       list_for_each_entry_safe(__cfqq, next, &cfqd->cur_rr, cfq_list)
+               cfq_resort_rr_list(__cfqq, 1);
 
-       return cic;
-err:
-       put_io_context(ioc);
-       return NULL;
+       if (!cfqq->slice_left)
+               cfqq->slice_left = cfq_prio_to_slice(cfqd, cfqq) / 2;
+
+       cfqq->slice_end = cfqq->slice_left + jiffies;
+       __cfq_slice_expired(cfqd, cfqq, 1);
+       __cfq_set_active_queue(cfqd, cfqq);
 }
 
-static struct cfq_queue *
-__cfq_get_queue(struct cfq_data *cfqd, unsigned long key, int gfp_mask)
+/*
+ * should really be a ll_rw_blk.c helper
+ */
+static void cfq_start_queueing(struct cfq_data *cfqd, struct cfq_queue *cfqq)
 {
-       const int hashval = hash_long(key, CFQ_QHASH_SHIFT);
-       struct cfq_queue *cfqq, *new_cfqq = NULL;
-
-retry:
-       cfqq = __cfq_find_cfq_hash(cfqd, key, hashval);
+       request_queue_t *q = cfqd->queue;
 
-       if (!cfqq) {
-               if (new_cfqq) {
-                       cfqq = new_cfqq;
-                       new_cfqq = NULL;
-               } else {
-                       spin_unlock_irq(cfqd->queue->queue_lock);
-                       new_cfqq = kmem_cache_alloc(cfq_pool, gfp_mask);
-                       spin_lock_irq(cfqd->queue->queue_lock);
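+       /*
+        * if the queue isn't plugged, invoke the request_fn directly;
+        * otherwise force an unplug so the freshly added request is seen
+        */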
+       if (!blk_queue_plugged(q))
+               q->request_fn(q);
+       else
+               __generic_unplug_device(q);
+}
 
-                       if (!new_cfqq && !(gfp_mask & __GFP_WAIT))
-                               goto out;
+/*
+ * Called when a new fs request (crq) is added (to cfqq). Check if there's
+ * something we should do about it: kick the queue if it was idle-waiting
+ * for this request, or preempt the currently active queue
+ */
+static void
+cfq_crq_enqueued(struct cfq_data *cfqd, struct cfq_queue *cfqq,
+                struct cfq_rq *crq)
+{
+       const int sync = cfq_crq_is_sync(crq);
 
-                       goto retry;
-               }
+       cfqq->next_crq = cfq_choose_req(cfqd, cfqq->next_crq, crq);
 
-               memset(cfqq, 0, sizeof(*cfqq));
+       if (sync) {
+               struct cfq_io_context *cic = crq->io_context;
 
-               INIT_HLIST_NODE(&cfqq->cfq_hash);
-               INIT_LIST_HEAD(&cfqq->cfq_list);
-               RB_CLEAR_ROOT(&cfqq->sort_list);
-               INIT_LIST_HEAD(&cfqq->fifo[0]);
-               INIT_LIST_HEAD(&cfqq->fifo[1]);
+               cfq_update_io_thinktime(cfqd, cic);
+               cfq_update_idle_window(cfqd, cfqq, cic);
 
-               cfqq->key = key;
-               hlist_add_head(&cfqq->cfq_hash, &cfqd->cfq_hash[hashval]);
-               atomic_set(&cfqq->ref, 0);
-               cfqq->cfqd = cfqd;
-               atomic_inc(&cfqd->ref);
-               cfqq->key_type = cfqd->key_type;
-               cfqq->service_start = ~0UL;
+               cic->last_queue = jiffies;
        }
 
-       if (new_cfqq)
-               kmem_cache_free(cfq_pool, new_cfqq);
-
-       atomic_inc(&cfqq->ref);
-out:
-       WARN_ON((gfp_mask & __GFP_WAIT) && !cfqq);
-       return cfqq;
+       if (cfqq == cfqd->active_queue) {
+               /*
+                * if we are waiting for a request for this queue, let it rip
+                * immediately and flag that we must not expire this queue
+                * just now
+                */
+               if (cfq_cfqq_wait_request(cfqq)) {
+                       cfq_mark_cfqq_must_dispatch(cfqq);
+                       del_timer(&cfqd->idle_slice_timer);
+                       cfq_start_queueing(cfqd, cfqq);
+               }
+       } else if (cfq_should_preempt(cfqd, cfqq, crq)) {
+               /*
+                * not the active queue - expire the current slice if it is
+                * idle and has exceeded its mean thinktime, or if this new
+                * queue has some old slice time left and is of higher priority
+                */
+               cfq_preempt_queue(cfqd, cfqq);
+               cfq_mark_cfqq_must_dispatch(cfqq);
+               cfq_start_queueing(cfqd, cfqq);
+       }
 }
 
-static void cfq_enqueue(struct cfq_data *cfqd, struct cfq_rq *crq)
+static void cfq_enqueue(struct cfq_data *cfqd, struct request *rq)
 {
-       crq->is_sync = 0;
-       if (rq_data_dir(crq->request) == READ || current->flags & PF_SYNCWRITE)
-               crq->is_sync = 1;
+       struct cfq_rq *crq = RQ_DATA(rq);
+       struct cfq_queue *cfqq = crq->cfq_queue;
+
+       cfq_init_prio_data(cfqq);
 
        cfq_add_crq_rb(crq);
-       crq->queue_start = jiffies;
 
-       list_add_tail(&crq->request->queuelist, &crq->cfq_queue->fifo[crq->is_sync]);
+       list_add_tail(&rq->queuelist, &cfqq->fifo);
+
+       if (rq_mergeable(rq)) {
+               cfq_add_crq_hash(cfqd, crq);
+
+               if (!cfqd->queue->last_merge)
+                       cfqd->queue->last_merge = rq;
+       }
+
+       cfq_crq_enqueued(cfqd, cfqq, crq);
 }
 
 static void
 cfq_insert_request(request_queue_t *q, struct request *rq, int where)
 {
        struct cfq_data *cfqd = q->elevator->elevator_data;
-       struct cfq_rq *crq = RQ_DATA(rq);
 
        switch (where) {
                case ELEVATOR_INSERT_BACK:
-                       while (cfq_dispatch_requests(q, cfqd->cfq_quantum))
+                       while (cfq_dispatch_requests(q, INT_MAX, 1))
                                ;
                        list_add_tail(&rq->queuelist, &q->queue_head);
+                       /*
+                        * If we were idling with pending requests on
+                        * inactive cfqqs, force dispatching will
+                        * remove the idle timer and the queue won't
+                        * be kicked by __make_request() afterward.
+                        * Kick it here.
+                        */
+                       cfq_schedule_dispatch(cfqd);
                        break;
                case ELEVATOR_INSERT_FRONT:
                        list_add(&rq->queuelist, &q->queue_head);
                        break;
                case ELEVATOR_INSERT_SORT:
                        BUG_ON(!blk_fs_request(rq));
-                       cfq_enqueue(cfqd, crq);
+                       cfq_enqueue(cfqd, rq);
                        break;
                default:
                        printk("%s: bad insert point %d\n", __FUNCTION__,where);
                        return;
        }
-
-       if (rq_mergeable(rq)) {
-               cfq_add_crq_hash(cfqd, crq);
-
-               if (!q->last_merge)
-                       q->last_merge = rq;
-       }
-}
-
-static int cfq_queue_empty(request_queue_t *q)
-{
-       struct cfq_data *cfqd = q->elevator->elevator_data;
-
-       return list_empty(&q->queue_head) && list_empty(&cfqd->rr_list);
 }
 
 static void cfq_completed_request(request_queue_t *q, struct request *rq)
@@ -1300,9 +1867,11 @@ static void cfq_completed_request(request_queue_t *q, struct request *rq)
 
        cfqq = crq->cfq_queue;
 
-       if (crq->in_flight) {
-               WARN_ON(!cfqq->in_flight);
-               cfqq->in_flight--;
+       if (cfq_crq_in_flight(crq)) {
+               const int sync = cfq_crq_is_sync(crq);
+
+               WARN_ON(!cfqq->on_dispatch[sync]);
+               cfqq->on_dispatch[sync]--;
        }
 
        cfq_account_completion(cfqq, crq);
@@ -1332,51 +1901,136 @@ cfq_latter_request(request_queue_t *q, struct request *rq)
        return NULL;
 }
 
-static int cfq_may_queue(request_queue_t *q, int rw)
+/*
+ * we temporarily boost lower priority queues if they are holding fs exclusive
+ * resources. they are boosted to normal prio (CLASS_BE/4)
+ */
+static void cfq_prio_boost(struct cfq_queue *cfqq)
 {
-       struct cfq_data *cfqd = q->elevator->elevator_data;
-       struct cfq_queue *cfqq;
-       int ret = ELV_MQUEUE_MAY;
+       const int ioprio_class = cfqq->ioprio_class;
+       const int ioprio = cfqq->ioprio;
 
-       if (current->flags & PF_MEMALLOC)
-               return ELV_MQUEUE_MAY;
+       if (has_fs_excl()) {
+               /*
+                * boost idle prio on transactions that would lock out other
+                * users of the filesystem
+                */
+               if (cfq_class_idle(cfqq))
+                       cfqq->ioprio_class = IOPRIO_CLASS_BE;
+               if (cfqq->ioprio > IOPRIO_NORM)
+                       cfqq->ioprio = IOPRIO_NORM;
+       } else {
+               /*
+                * check if we need to unboost the queue
+                */
+               if (cfqq->ioprio_class != cfqq->org_ioprio_class)
+                       cfqq->ioprio_class = cfqq->org_ioprio_class;
+               if (cfqq->ioprio != cfqq->org_ioprio)
+                       cfqq->ioprio = cfqq->org_ioprio;
+       }
 
-       cfqq = cfq_find_cfq_hash(cfqd, cfq_hash_key(cfqd, current));
-       if (cfqq) {
-               int limit = cfqd->max_queued;
+       /*
+        * refile between round-robin lists if we moved the priority class
+        */
+       if ((ioprio_class != cfqq->ioprio_class || ioprio != cfqq->ioprio) &&
+           cfq_cfqq_on_rr(cfqq))
+               cfq_resort_rr_list(cfqq, 0);
+}
 
-               if (cfqq->allocated[rw] < cfqd->cfq_queued)
-                       return ELV_MQUEUE_MUST;
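+/*
+ * reads and sync writes (process_sync() tasks) each get a per-process
+ * queue keyed by pid; all async buffered writes share a single queue
+ * under CFQ_KEY_ASYNC
+ */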
+static inline pid_t cfq_queue_pid(struct task_struct *task, int rw)
+{
+       if (rw == READ || process_sync(task))
+               return task->pid;
 
-               if (cfqd->busy_queues)
-                       limit = q->nr_requests / cfqd->busy_queues;
+       return CFQ_KEY_ASYNC;
+}
 
-               if (limit < cfqd->cfq_queued)
-                       limit = cfqd->cfq_queued;
-               else if (limit > cfqd->max_queued)
-                       limit = cfqd->max_queued;
+static inline int
+__cfq_may_queue(struct cfq_data *cfqd, struct cfq_queue *cfqq,
+               struct task_struct *task, int rw)
+{
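+       /*
+        * note: the #else branch below is a richer, currently disabled
+        * policy kept for reference; only the simple variant is built
+        */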
+#if 1
+       if ((cfq_cfqq_wait_request(cfqq) || cfq_cfqq_must_alloc(cfqq)) &&
+           !cfq_cfqq_must_alloc_slice(cfqq)) {
+               cfq_mark_cfqq_must_alloc_slice(cfqq);
+               return ELV_MQUEUE_MUST;
+       }
 
-               if (cfqq->allocated[rw] >= limit) {
-                       if (limit > cfqq->alloc_limit[rw])
-                               cfqq->alloc_limit[rw] = limit;
+       return ELV_MQUEUE_MAY;
+#else
+       if (!cfqq || task->flags & PF_MEMALLOC)
+               return ELV_MQUEUE_MAY;
+       if (!cfqq->allocated[rw] || cfq_cfqq_must_alloc(cfqq)) {
+               if (cfq_cfqq_wait_request(cfqq))
+                       return ELV_MQUEUE_MUST;
 
-                       ret = ELV_MQUEUE_NO;
+               /*
+                * only allow 1 ELV_MQUEUE_MUST per slice, otherwise we
+                * can quickly flood the queue with writes from a single task
+                */
+               if (rw == READ || !cfq_cfqq_must_alloc_slice(cfqq)) {
+                       cfq_mark_cfqq_must_alloc_slice(cfqq);
+                       return ELV_MQUEUE_MUST;
                }
+
+               return ELV_MQUEUE_MAY;
        }
+       if (cfq_class_idle(cfqq))
+               return ELV_MQUEUE_NO;
+       if (cfqq->allocated[rw] >= cfqd->max_queued) {
+               struct io_context *ioc = get_io_context(GFP_ATOMIC);
+               int ret = ELV_MQUEUE_NO;
 
-       return ret;
+               if (ioc && ioc->nr_batch_requests)
+                       ret = ELV_MQUEUE_MAY;
+
+               put_io_context(ioc);
+               return ret;
+       }
+
+       return ELV_MQUEUE_MAY;
+#endif
+}
+
+static int cfq_may_queue(request_queue_t *q, int rw, struct bio *bio)
+{
+       struct cfq_data *cfqd = q->elevator->elevator_data;
+       struct task_struct *tsk = current;
+       struct cfq_queue *cfqq;
+
+       /*
+        * don't force setup of a queue from here, as a call to may_queue
+        * does not necessarily imply that a request actually will be queued.
+        * so just lookup a possibly existing queue, or return 'may queue'
+        * if that fails
+        */
+       cfqq = cfq_find_cfq_hash(cfqd, cfq_queue_pid(tsk, rw), tsk->ioprio);
+       if (cfqq) {
+               cfq_init_prio_data(cfqq);
+               cfq_prio_boost(cfqq);
+
+               return __cfq_may_queue(cfqd, cfqq, tsk, rw);
+       }
+
+       return ELV_MQUEUE_MAY;
 }
 
 static void cfq_check_waiters(request_queue_t *q, struct cfq_queue *cfqq)
 {
+       struct cfq_data *cfqd = q->elevator->elevator_data;
        struct request_list *rl = &q->rq;
-       const int write = waitqueue_active(&rl->wait[WRITE]);
-       const int read = waitqueue_active(&rl->wait[READ]);
 
-       if (read && cfqq->allocated[READ] < cfqq->alloc_limit[READ])
-               wake_up(&rl->wait[READ]);
-       if (write && cfqq->allocated[WRITE] < cfqq->alloc_limit[WRITE])
-               wake_up(&rl->wait[WRITE]);
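+       /*
+        * the smp_mb() before waitqueue_active() pairs with the barrier
+        * implied on the sleeper side (prepare_to_wait()), so a waiter
+        * racing with the allocated[] update is not missed
+        */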
+       if (cfqq->allocated[READ] <= cfqd->max_queued || cfqd->rq_starved) {
+               smp_mb();
+               if (waitqueue_active(&rl->wait[READ]))
+                       wake_up(&rl->wait[READ]);
+       }
+
+       if (cfqq->allocated[WRITE] <= cfqd->max_queued || cfqd->rq_starved) {
+               smp_mb();
+               if (waitqueue_active(&rl->wait[WRITE]))
+                       wake_up(&rl->wait[WRITE]);
+       }
 }
 
 /*
@@ -1389,69 +2043,61 @@ static void cfq_put_request(request_queue_t *q, struct request *rq)
 
        if (crq) {
                struct cfq_queue *cfqq = crq->cfq_queue;
+               const int rw = rq_data_dir(rq);
 
-               BUG_ON(q->last_merge == rq);
-               BUG_ON(!hlist_unhashed(&crq->hash));
+               BUG_ON(!cfqq->allocated[rw]);
+               cfqq->allocated[rw]--;
 
-               if (crq->io_context)
-                       put_io_context(crq->io_context->ioc);
-
-               BUG_ON(!cfqq->allocated[crq->is_write]);
-               cfqq->allocated[crq->is_write]--;
+               put_io_context(crq->io_context->ioc);
 
                mempool_free(crq, cfqd->crq_pool);
                rq->elevator_private = NULL;
 
-               smp_mb();
                cfq_check_waiters(q, cfqq);
                cfq_put_queue(cfqq);
        }
 }
 
 /*
- * Allocate cfq data structures associated with this request. A queue and
+ * Allocate cfq data structures associated with this request.
  */
-static int cfq_set_request(request_queue_t *q, struct request *rq, int gfp_mask)
+static int
+cfq_set_request(request_queue_t *q, struct request *rq, struct bio *bio,
+               int gfp_mask)
 {
        struct cfq_data *cfqd = q->elevator->elevator_data;
+       struct task_struct *tsk = current;
        struct cfq_io_context *cic;
        const int rw = rq_data_dir(rq);
-       struct cfq_queue *cfqq, *saved_cfqq;
+       pid_t key = cfq_queue_pid(tsk, rw);
+       struct cfq_queue *cfqq;
        struct cfq_rq *crq;
        unsigned long flags;
 
        might_sleep_if(gfp_mask & __GFP_WAIT);
 
+       cic = cfq_get_io_context(cfqd, key, gfp_mask);
+
        spin_lock_irqsave(q->queue_lock, flags);
 
-       cfqq = __cfq_get_queue(cfqd, cfq_hash_key(cfqd, current), gfp_mask);
-       if (!cfqq)
-               goto out_lock;
+       if (!cic)
+               goto queue_fail;
+
+       if (!cic->cfqq) {
+               cfqq = cfq_get_queue(cfqd, key, tsk->ioprio, gfp_mask);
+               if (!cfqq)
+                       goto queue_fail;
 
-repeat:
-       if (cfqq->allocated[rw] >= cfqd->max_queued)
-               goto out_lock;
+               cic->cfqq = cfqq;
+       } else
+               cfqq = cic->cfqq;
 
        cfqq->allocated[rw]++;
+       cfq_clear_cfqq_must_alloc(cfqq);
+       cfqd->rq_starved = 0;
+       atomic_inc(&cfqq->ref);
        spin_unlock_irqrestore(q->queue_lock, flags);
 
-       /*
-        * if hashing type has changed, the cfq_queue might change here.
-        */
-       saved_cfqq = cfqq;
-       cic = cfq_get_io_context(&cfqq, gfp_mask);
-       if (!cic)
-               goto err;
-
-       /*
-        * repeat allocation checks on queue change
-        */
-       if (unlikely(saved_cfqq != cfqq)) {
-               spin_lock_irqsave(q->queue_lock, flags);
-               saved_cfqq->allocated[rw]--;
-               goto repeat;
-       }
-
        crq = mempool_alloc(cfqd->crq_pool, gfp_mask);
        if (crq) {
                RB_CLEAR(&crq->rb_node);
@@ -1460,24 +2106,141 @@ repeat:
                INIT_HLIST_NODE(&crq->hash);
                crq->cfq_queue = cfqq;
                crq->io_context = cic;
-               crq->service_start = crq->queue_start = 0;
-               crq->in_flight = crq->accounted = crq->is_sync = 0;
-               crq->is_write = rw;
+               cfq_clear_crq_in_flight(crq);
+               cfq_clear_crq_in_driver(crq);
+               cfq_clear_crq_requeued(crq);
+
+               if (rw == READ || process_sync(tsk))
+                       cfq_mark_crq_is_sync(crq);
+               else
+                       cfq_clear_crq_is_sync(crq);
+
                rq->elevator_private = crq;
-               cfqq->alloc_limit[rw] = 0;
                return 0;
        }
 
-       put_io_context(cic->ioc);
-err:
        spin_lock_irqsave(q->queue_lock, flags);
        cfqq->allocated[rw]--;
+       if (!(cfqq->allocated[0] + cfqq->allocated[1]))
+               cfq_mark_cfqq_must_alloc(cfqq);
        cfq_put_queue(cfqq);
-out_lock:
+queue_fail:
+       if (cic)
+               put_io_context(cic->ioc);
+       /*
+        * mark us rq allocation starved. we need to kickstart the process
+        * ourselves if there are no pending requests that can do it for us.
+        * that would be an extremely rare OOM situation
+        */
+       cfqd->rq_starved = 1;
+       cfq_schedule_dispatch(cfqd);
        spin_unlock_irqrestore(q->queue_lock, flags);
        return 1;
 }
 
+static void cfq_kick_queue(void *data)
+{
+       request_queue_t *q = data;
+       struct cfq_data *cfqd = q->elevator->elevator_data;
+       unsigned long flags;
+
+       spin_lock_irqsave(q->queue_lock, flags);
+
+       if (cfqd->rq_starved) {
+               struct request_list *rl = &q->rq;
+
+               /*
+                * we aren't guaranteed to get a request after this, but we
+                * have to be opportunistic
+                */
+               smp_mb();
+               if (waitqueue_active(&rl->wait[READ]))
+                       wake_up(&rl->wait[READ]);
+               if (waitqueue_active(&rl->wait[WRITE]))
+                       wake_up(&rl->wait[WRITE]);
+       }
+
+       blk_remove_plug(q);
+       q->request_fn(q);
+       spin_unlock_irqrestore(q->queue_lock, flags);
+}
+
+/*
+ * Timer running if the active_queue is currently idling inside its time slice
+ */
+static void cfq_idle_slice_timer(unsigned long data)
+{
+       struct cfq_data *cfqd = (struct cfq_data *) data;
+       struct cfq_queue *cfqq;
+       unsigned long flags;
+
+       spin_lock_irqsave(cfqd->queue->queue_lock, flags);
+
+       if ((cfqq = cfqd->active_queue) != NULL) {
+               unsigned long now = jiffies;
+
+               /*
+                * expired
+                */
+               if (time_after(now, cfqq->slice_end))
+                       goto expire;
+
+               /*
+                * only expire and reinvoke request handler, if there are
+                * other queues with pending requests
+                */
+               if (!cfq_pending_requests(cfqd)) {
+                       cfqd->idle_slice_timer.expires =
+                               min(now + cfqd->cfq_slice_idle,
+                                   cfqq->slice_end);
+                       add_timer(&cfqd->idle_slice_timer);
+                       goto out_cont;
+               }
+
+               /*
+                * not expired and it has a request pending, let it dispatch
+                */
+               if (!RB_EMPTY(&cfqq->sort_list)) {
+                       cfq_mark_cfqq_must_dispatch(cfqq);
+                       goto out_kick;
+               }
+       }
+expire:
+       cfq_slice_expired(cfqd, 0);
+out_kick:
+       cfq_schedule_dispatch(cfqd);
+out_cont:
+       spin_unlock_irqrestore(cfqd->queue->queue_lock, flags);
+}
+
+/*
+ * Timer running if an idle class queue is waiting for service
+ */
+static void cfq_idle_class_timer(unsigned long data)
+{
+       struct cfq_data *cfqd = (struct cfq_data *) data;
+       unsigned long flags, end;
+
+       spin_lock_irqsave(cfqd->queue->queue_lock, flags);
+
+       /*
+        * race with a non-idle queue: a request on a non-idle queue
+        * completed within the grace period, so push the timer out to
+        * the end of that period instead of servicing the idle class
+        */
+       end = cfqd->last_end_request + CFQ_IDLE_GRACE;
+       if (!time_after_eq(jiffies, end)) {
+               cfqd->idle_class_timer.expires = end;
+               add_timer(&cfqd->idle_class_timer);
+       } else
+               cfq_schedule_dispatch(cfqd);
+
+       spin_unlock_irqrestore(cfqd->queue->queue_lock, flags);
+}
+
+static void cfq_shutdown_timer_wq(struct cfq_data *cfqd)
+{
+       del_timer_sync(&cfqd->idle_slice_timer);
+       del_timer_sync(&cfqd->idle_class_timer);
+       blk_sync_queue(cfqd->queue);
+}
+
 static void cfq_put_cfqd(struct cfq_data *cfqd)
 {
        request_queue_t *q = cfqd->queue;
@@ -1487,6 +2250,9 @@ static void cfq_put_cfqd(struct cfq_data *cfqd)
 
        blk_put_queue(q);
 
+       cfq_shutdown_timer_wq(cfqd);
+       q->elevator->elevator_data = NULL;
+
        mempool_destroy(cfqd->crq_pool);
        kfree(cfqd->crq_hash);
        kfree(cfqd->cfq_hash);
@@ -1495,7 +2261,10 @@ static void cfq_put_cfqd(struct cfq_data *cfqd)
 
 static void cfq_exit_queue(elevator_t *e)
 {
-       cfq_put_cfqd(e->elevator_data);
+       struct cfq_data *cfqd = e->elevator_data;
+
+       cfq_shutdown_timer_wq(cfqd);
+       cfq_put_cfqd(cfqd);
 }
 
 static int cfq_init_queue(request_queue_t *q, elevator_t *e)
@@ -1508,7 +2277,13 @@ static int cfq_init_queue(request_queue_t *q, elevator_t *e)
                return -ENOMEM;
 
        memset(cfqd, 0, sizeof(*cfqd));
-       INIT_LIST_HEAD(&cfqd->rr_list);
+
+       for (i = 0; i < CFQ_PRIO_LISTS; i++)
+               INIT_LIST_HEAD(&cfqd->rr_list[i]);
+
+       INIT_LIST_HEAD(&cfqd->busy_rr);
+       INIT_LIST_HEAD(&cfqd->cur_rr);
+       INIT_LIST_HEAD(&cfqd->idle_rr);
        INIT_LIST_HEAD(&cfqd->empty_list);
 
        cfqd->crq_hash = kmalloc(sizeof(struct hlist_head) * CFQ_MHASH_ENTRIES, GFP_KERNEL);
@@ -1533,24 +2308,32 @@ static int cfq_init_queue(request_queue_t *q, elevator_t *e)
        cfqd->queue = q;
        atomic_inc(&q->refcnt);
 
-       /*
-        * just set it to some high value, we want anyone to be able to queue
-        * some requests. fairness is handled differently
-        */
-       q->nr_requests = 1024;
-       cfqd->max_queued = q->nr_requests / 16;
+       cfqd->max_queued = q->nr_requests / 4;
        q->nr_batching = cfq_queued;
-       cfqd->key_type = CFQ_KEY_TGID;
-       cfqd->find_best_crq = 1;
+
+       init_timer(&cfqd->idle_slice_timer);
+       cfqd->idle_slice_timer.function = cfq_idle_slice_timer;
+       cfqd->idle_slice_timer.data = (unsigned long) cfqd;
+
+       init_timer(&cfqd->idle_class_timer);
+       cfqd->idle_class_timer.function = cfq_idle_class_timer;
+       cfqd->idle_class_timer.data = (unsigned long) cfqd;
+
+       INIT_WORK(&cfqd->unplug_work, cfq_kick_queue, q);
+
        atomic_set(&cfqd->ref, 1);
 
        cfqd->cfq_queued = cfq_queued;
        cfqd->cfq_quantum = cfq_quantum;
-       cfqd->cfq_fifo_expire_r = cfq_fifo_expire_r;
-       cfqd->cfq_fifo_expire_w = cfq_fifo_expire_w;
-       cfqd->cfq_fifo_batch_expire = cfq_fifo_rate;
+       cfqd->cfq_fifo_expire[0] = cfq_fifo_expire[0];
+       cfqd->cfq_fifo_expire[1] = cfq_fifo_expire[1];
        cfqd->cfq_back_max = cfq_back_max;
        cfqd->cfq_back_penalty = cfq_back_penalty;
+       cfqd->cfq_slice[0] = cfq_slice_async;
+       cfqd->cfq_slice[1] = cfq_slice_sync;
+       cfqd->cfq_slice_async_rq = cfq_slice_async_rq;
+       cfqd->cfq_slice_idle = cfq_slice_idle;
+       cfqd->cfq_max_depth = cfq_max_depth;
 
        return 0;
 out_crqpool:
@@ -1595,7 +2378,6 @@ fail:
        return -ENOMEM;
 }
 
-
 /*
  * sysfs parts below -->
  */
@@ -1620,45 +2402,6 @@ cfq_var_store(unsigned int *var, const char *page, size_t count)
        return count;
 }
 
-static ssize_t
-cfq_clear_elapsed(struct cfq_data *cfqd, const char *page, size_t count)
-{
-       max_elapsed_dispatch = max_elapsed_crq = 0;
-       return count;
-}
-
-static ssize_t
-cfq_set_key_type(struct cfq_data *cfqd, const char *page, size_t count)
-{
-       spin_lock_irq(cfqd->queue->queue_lock);
-       if (!strncmp(page, "pgid", 4))
-               cfqd->key_type = CFQ_KEY_PGID;
-       else if (!strncmp(page, "tgid", 4))
-               cfqd->key_type = CFQ_KEY_TGID;
-       else if (!strncmp(page, "uid", 3))
-               cfqd->key_type = CFQ_KEY_UID;
-       else if (!strncmp(page, "gid", 3))
-               cfqd->key_type = CFQ_KEY_GID;
-       spin_unlock_irq(cfqd->queue->queue_lock);
-       return count;
-}
-
-static ssize_t
-cfq_read_key_type(struct cfq_data *cfqd, char *page)
-{
-       ssize_t len = 0;
-       int i;
-
-       for (i = CFQ_KEY_PGID; i < CFQ_KEY_LAST; i++) {
-               if (cfqd->key_type == i)
-                       len += sprintf(page+len, "[%s] ", cfq_key_types[i]);
-               else
-                       len += sprintf(page+len, "%s ", cfq_key_types[i]);
-       }
-       len += sprintf(page+len, "\n");
-       return len;
-}
-
 #define SHOW_FUNCTION(__FUNC, __VAR, __CONV)                           \
 static ssize_t __FUNC(struct cfq_data *cfqd, char *page)               \
 {                                                                      \
@@ -1669,12 +2412,15 @@ static ssize_t __FUNC(struct cfq_data *cfqd, char *page)                \
 }
 SHOW_FUNCTION(cfq_quantum_show, cfqd->cfq_quantum, 0);
 SHOW_FUNCTION(cfq_queued_show, cfqd->cfq_queued, 0);
-SHOW_FUNCTION(cfq_fifo_expire_r_show, cfqd->cfq_fifo_expire_r, 1);
-SHOW_FUNCTION(cfq_fifo_expire_w_show, cfqd->cfq_fifo_expire_w, 1);
-SHOW_FUNCTION(cfq_fifo_batch_expire_show, cfqd->cfq_fifo_batch_expire, 1);
-SHOW_FUNCTION(cfq_find_best_show, cfqd->find_best_crq, 0);
+SHOW_FUNCTION(cfq_fifo_expire_sync_show, cfqd->cfq_fifo_expire[1], 1);
+SHOW_FUNCTION(cfq_fifo_expire_async_show, cfqd->cfq_fifo_expire[0], 1);
 SHOW_FUNCTION(cfq_back_max_show, cfqd->cfq_back_max, 0);
 SHOW_FUNCTION(cfq_back_penalty_show, cfqd->cfq_back_penalty, 0);
+SHOW_FUNCTION(cfq_slice_idle_show, cfqd->cfq_slice_idle, 1);
+SHOW_FUNCTION(cfq_slice_sync_show, cfqd->cfq_slice[1], 1);
+SHOW_FUNCTION(cfq_slice_async_show, cfqd->cfq_slice[0], 1);
+SHOW_FUNCTION(cfq_slice_async_rq_show, cfqd->cfq_slice_async_rq, 0);
+SHOW_FUNCTION(cfq_max_depth_show, cfqd->cfq_max_depth, 0);
 #undef SHOW_FUNCTION
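 /*
  * e.g. SHOW_FUNCTION(cfq_quantum_show, cfqd->cfq_quantum, 0) above expands
  * to a sysfs show routine roughly like (a sketch -- the macro body is
  * elided by the diff context):
  *
  *	static ssize_t cfq_quantum_show(struct cfq_data *cfqd, char *page)
  *	{
  *		unsigned int __data = cfqd->cfq_quantum;
  *		return cfq_var_show(__data, (page));
  *	}
  *
  * with __CONV == 1 the value is converted from jiffies to msecs first.
  */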
 
 #define STORE_FUNCTION(__FUNC, __PTR, MIN, MAX, __CONV)                        \
@@ -1694,12 +2440,15 @@ static ssize_t __FUNC(struct cfq_data *cfqd, const char *page, size_t count)    \
 }
 STORE_FUNCTION(cfq_quantum_store, &cfqd->cfq_quantum, 1, UINT_MAX, 0);
 STORE_FUNCTION(cfq_queued_store, &cfqd->cfq_queued, 1, UINT_MAX, 0);
-STORE_FUNCTION(cfq_fifo_expire_r_store, &cfqd->cfq_fifo_expire_r, 1, UINT_MAX, 1);
-STORE_FUNCTION(cfq_fifo_expire_w_store, &cfqd->cfq_fifo_expire_w, 1, UINT_MAX, 1);
-STORE_FUNCTION(cfq_fifo_batch_expire_store, &cfqd->cfq_fifo_batch_expire, 0, UINT_MAX, 1);
-STORE_FUNCTION(cfq_find_best_store, &cfqd->find_best_crq, 0, 1, 0);
+STORE_FUNCTION(cfq_fifo_expire_sync_store, &cfqd->cfq_fifo_expire[1], 1, UINT_MAX, 1);
+STORE_FUNCTION(cfq_fifo_expire_async_store, &cfqd->cfq_fifo_expire[0], 1, UINT_MAX, 1);
 STORE_FUNCTION(cfq_back_max_store, &cfqd->cfq_back_max, 0, UINT_MAX, 0);
 STORE_FUNCTION(cfq_back_penalty_store, &cfqd->cfq_back_penalty, 1, UINT_MAX, 0);
+STORE_FUNCTION(cfq_slice_idle_store, &cfqd->cfq_slice_idle, 0, UINT_MAX, 1);
+STORE_FUNCTION(cfq_slice_sync_store, &cfqd->cfq_slice[1], 1, UINT_MAX, 1);
+STORE_FUNCTION(cfq_slice_async_store, &cfqd->cfq_slice[0], 1, UINT_MAX, 1);
+STORE_FUNCTION(cfq_slice_async_rq_store, &cfqd->cfq_slice_async_rq, 1, UINT_MAX, 0);
+STORE_FUNCTION(cfq_max_depth_store, &cfqd->cfq_max_depth, 1, UINT_MAX, 0);
 #undef STORE_FUNCTION
 
 static struct cfq_fs_entry cfq_quantum_entry = {
@@ -1712,25 +2461,15 @@ static struct cfq_fs_entry cfq_queued_entry = {
        .show = cfq_queued_show,
        .store = cfq_queued_store,
 };
-static struct cfq_fs_entry cfq_fifo_expire_r_entry = {
+static struct cfq_fs_entry cfq_fifo_expire_sync_entry = {
        .attr = {.name = "fifo_expire_sync", .mode = S_IRUGO | S_IWUSR },
-       .show = cfq_fifo_expire_r_show,
-       .store = cfq_fifo_expire_r_store,
+       .show = cfq_fifo_expire_sync_show,
+       .store = cfq_fifo_expire_sync_store,
 };
-static struct cfq_fs_entry cfq_fifo_expire_w_entry = {
+static struct cfq_fs_entry cfq_fifo_expire_async_entry = {
        .attr = {.name = "fifo_expire_async", .mode = S_IRUGO | S_IWUSR },
-       .show = cfq_fifo_expire_w_show,
-       .store = cfq_fifo_expire_w_store,
-};
-static struct cfq_fs_entry cfq_fifo_batch_expire_entry = {
-       .attr = {.name = "fifo_batch_expire", .mode = S_IRUGO | S_IWUSR },
-       .show = cfq_fifo_batch_expire_show,
-       .store = cfq_fifo_batch_expire_store,
-};
-static struct cfq_fs_entry cfq_find_best_entry = {
-       .attr = {.name = "find_best_crq", .mode = S_IRUGO | S_IWUSR },
-       .show = cfq_find_best_show,
-       .store = cfq_find_best_store,
+       .show = cfq_fifo_expire_async_show,
+       .store = cfq_fifo_expire_async_store,
 };
 static struct cfq_fs_entry cfq_back_max_entry = {
        .attr = {.name = "back_seek_max", .mode = S_IRUGO | S_IWUSR },
@@ -1742,27 +2481,44 @@ static struct cfq_fs_entry cfq_back_penalty_entry = {
        .show = cfq_back_penalty_show,
        .store = cfq_back_penalty_store,
 };
-static struct cfq_fs_entry cfq_clear_elapsed_entry = {
-       .attr = {.name = "clear_elapsed", .mode = S_IWUSR },
-       .store = cfq_clear_elapsed,
+static struct cfq_fs_entry cfq_slice_sync_entry = {
+       .attr = {.name = "slice_sync", .mode = S_IRUGO | S_IWUSR },
+       .show = cfq_slice_sync_show,
+       .store = cfq_slice_sync_store,
+};
+static struct cfq_fs_entry cfq_slice_async_entry = {
+       .attr = {.name = "slice_async", .mode = S_IRUGO | S_IWUSR },
+       .show = cfq_slice_async_show,
+       .store = cfq_slice_async_store,
+};
+static struct cfq_fs_entry cfq_slice_async_rq_entry = {
+       .attr = {.name = "slice_async_rq", .mode = S_IRUGO | S_IWUSR },
+       .show = cfq_slice_async_rq_show,
+       .store = cfq_slice_async_rq_store,
+};
+static struct cfq_fs_entry cfq_slice_idle_entry = {
+       .attr = {.name = "slice_idle", .mode = S_IRUGO | S_IWUSR },
+       .show = cfq_slice_idle_show,
+       .store = cfq_slice_idle_store,
 };
-static struct cfq_fs_entry cfq_key_type_entry = {
-       .attr = {.name = "key_type", .mode = S_IRUGO | S_IWUSR },
-       .show = cfq_read_key_type,
-       .store = cfq_set_key_type,
+static struct cfq_fs_entry cfq_max_depth_entry = {
+       .attr = {.name = "max_depth", .mode = S_IRUGO | S_IWUSR },
+       .show = cfq_max_depth_show,
+       .store = cfq_max_depth_store,
 };
 
 static struct attribute *default_attrs[] = {
        &cfq_quantum_entry.attr,
        &cfq_queued_entry.attr,
-       &cfq_fifo_expire_r_entry.attr,
-       &cfq_fifo_expire_w_entry.attr,
-       &cfq_fifo_batch_expire_entry.attr,
-       &cfq_key_type_entry.attr,
-       &cfq_find_best_entry.attr,
+       &cfq_fifo_expire_sync_entry.attr,
+       &cfq_fifo_expire_async_entry.attr,
        &cfq_back_max_entry.attr,
        &cfq_back_penalty_entry.attr,
-       &cfq_clear_elapsed_entry.attr,
+       &cfq_slice_sync_entry.attr,
+       &cfq_slice_async_entry.attr,
+       &cfq_slice_async_rq_entry.attr,
+       &cfq_slice_idle_entry.attr,
+       &cfq_max_depth_entry.attr,
        NULL,
 };
 
@@ -1775,7 +2531,7 @@ cfq_attr_show(struct kobject *kobj, struct attribute *attr, char *page)
        struct cfq_fs_entry *entry = to_cfq(attr);
 
        if (!entry->show)
-               return 0;
+               return -EIO;
 
        return entry->show(e->elevator_data, page);
 }
@@ -1788,7 +2544,7 @@ cfq_attr_store(struct kobject *kobj, struct attribute *attr,
        struct cfq_fs_entry *entry = to_cfq(attr);
 
        if (!entry->store)
-               return -EINVAL;
+               return -EIO;
 
        return entry->store(e->elevator_data, page, length);
 }
@@ -1832,21 +2588,46 @@ static int __init cfq_init(void)
 {
        int ret;
 
+       /*
+        * the slice defaults are integer divisions of HZ and can come out
+        * as 0 jiffies on low-HZ setups (e.g. HZ / 25 with HZ < 25, HZ / 100
+        * with HZ < 100), which would disable slicing; clamp to one jiffy
+        */
+       if (!cfq_slice_async)
+               cfq_slice_async = 1;
+       if (!cfq_slice_idle)
+               cfq_slice_idle = 1;
+
        if (cfq_slab_setup())
                return -ENOMEM;
 
        ret = elv_register(&iosched_cfq);
-       if (!ret) {
-               __module_get(THIS_MODULE);
-               return 0;
-       }
+       if (ret)
+               cfq_slab_kill();
 
-       cfq_slab_kill();
        return ret;
 }
 
 static void __exit cfq_exit(void)
 {
+       struct task_struct *g, *p;
+       unsigned long flags;
+
+       read_lock_irqsave(&tasklist_lock, flags);
+
+       /*
+        * iterate each process in the system, removing our io_context
+        */
+       do_each_thread(g, p) {
+               struct io_context *ioc = p->io_context;
+
+               if (ioc && ioc->cic) {
+                       ioc->cic->exit(ioc->cic);
+                       cfq_free_io_context(ioc->cic);
+                       ioc->cic = NULL;
+               }
+       } while_each_thread(g, p);
+
+       read_unlock_irqrestore(&tasklist_lock, flags);
+
        cfq_slab_kill();
        elv_unregister(&iosched_cfq);
 }