cfq: async queue allocation per priority
author    Vasily Tarasov <vtaras@openvz.org>
          Fri, 20 Jul 2007 08:06:38 +0000 (10:06 +0200)
committer Jens Axboe <jens.axboe@oracle.com>
          Fri, 20 Jul 2007 08:06:38 +0000 (10:06 +0200)
If we have two processes with different ioprio_class values but the
same ioprio_data, their async requests will fall into the same queue.
That behavior is wrong: real-time requests and best-effort requests
must not be placed in the same queue.

The patch fixes the problem by introducing additional cfq_queue
pointer fields on cfq_data, one per (class, priority) pair, so that
each combination gets its own async queue.

Signed-off-by: Jens Axboe <jens.axboe@oracle.com>
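
An editor's sketch (user-space C, not part of the patch) of the lookup
scheme the patch introduces: one slot per priority level for each of
the RT and BE classes, plus a single slot for the idle class. CLASS_*,
BE_NR and slot_for() are hypothetical stand-ins for IOPRIO_CLASS_*,
IOPRIO_BE_NR and cfq_async_queue_prio().

#include <assert.h>
#include <stdio.h>
#include <stdlib.h>

enum { CLASS_RT, CLASS_BE, CLASS_IDLE };  /* stand-ins for IOPRIO_CLASS_* */
#define BE_NR 8                           /* stand-in for IOPRIO_BE_NR */

struct queue { int ref; };

static struct queue *async_q[2][BE_NR];   /* one slot per (class, level) */
static struct queue *async_idle_q;        /* the idle class needs only one */

/* Models cfq_async_queue_prio(): map a (class, level) pair to its slot. */
static struct queue **slot_for(int klass, int level)
{
        switch (klass) {
        case CLASS_RT:
                return &async_q[0][level];
        case CLASS_BE:
                return &async_q[1][level];
        case CLASS_IDLE:
                return &async_idle_q;
        default:
                abort();
        }
}

int main(void)
{
        /* RT level 4 and BE level 4 now resolve to distinct slots, where
         * the old one-dimensional async_cfqq[ioprio] merged them. */
        assert(slot_for(CLASS_RT, 4) != slot_for(CLASS_BE, 4));
        printf("RT and BE async queues stay separate\n");
        return 0;
}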
block/cfq-iosched.c
include/linux/ioprio.h

diff --git a/block/cfq-iosched.c b/block/cfq-iosched.c
index 9755a3cfad26e7f3ed50e2dbff15adaec15c4470..bc7190eed10d129012b5d4381f843ba6d7390d21 100644
--- a/block/cfq-iosched.c
+++ b/block/cfq-iosched.c
@@ -92,7 +92,11 @@ struct cfq_data {
        struct cfq_queue *active_queue;
        struct cfq_io_context *active_cic;
 
-       struct cfq_queue *async_cfqq[IOPRIO_BE_NR];
+       /*
+        * async queue for each priority case
+        */
+       struct cfq_queue *async_cfqq[2][IOPRIO_BE_NR];
+       struct cfq_queue *async_idle_cfqq;
 
        struct timer_list idle_class_timer;
 
@@ -1414,24 +1418,44 @@ out:
        return cfqq;
 }
 
+static struct cfq_queue **
+cfq_async_queue_prio(struct cfq_data *cfqd, int ioprio_class, int ioprio)
+{
+       switch (ioprio_class) {
+       case IOPRIO_CLASS_RT:
+               return &cfqd->async_cfqq[0][ioprio];
+       case IOPRIO_CLASS_BE:
+               return &cfqd->async_cfqq[1][ioprio];
+       case IOPRIO_CLASS_IDLE:
+               return &cfqd->async_idle_cfqq;
+       default:
+               BUG();
+       }
+}
+
 static struct cfq_queue *
 cfq_get_queue(struct cfq_data *cfqd, int is_sync, struct task_struct *tsk,
              gfp_t gfp_mask)
 {
        const int ioprio = task_ioprio(tsk);
+       const int ioprio_class = task_ioprio_class(tsk);
+       struct cfq_queue **async_cfqq = NULL;
        struct cfq_queue *cfqq = NULL;
 
-       if (!is_sync)
-               cfqq = cfqd->async_cfqq[ioprio];
+       if (!is_sync) {
+               async_cfqq = cfq_async_queue_prio(cfqd, ioprio_class, ioprio);
+               cfqq = *async_cfqq;
+       }
+
        if (!cfqq)
                cfqq = cfq_find_alloc_queue(cfqd, is_sync, tsk, gfp_mask);
 
        /*
         * pin the queue now that it's allocated, scheduler exit will prune it
         */
-       if (!is_sync && !cfqd->async_cfqq[ioprio]) {
+       if (!is_sync && !(*async_cfqq)) {
                atomic_inc(&cfqq->ref);
-               cfqd->async_cfqq[ioprio] = cfqq;
+               *async_cfqq = cfqq;
        }
 
        atomic_inc(&cfqq->ref);
@@ -2042,11 +2066,24 @@ static void cfq_shutdown_timer_wq(struct cfq_data *cfqd)
        blk_sync_queue(cfqd->queue);
 }
 
+static void cfq_put_async_queues(struct cfq_data *cfqd)
+{
+       int i;
+
+       for (i = 0; i < IOPRIO_BE_NR; i++) {
+               if (cfqd->async_cfqq[0][i])
+                       cfq_put_queue(cfqd->async_cfqq[0][i]);
+               if (cfqd->async_cfqq[1][i])
+                       cfq_put_queue(cfqd->async_cfqq[1][i]);
+       }
+       if (cfqd->async_idle_cfqq)
+               cfq_put_queue(cfqd->async_idle_cfqq);
+}
+
 static void cfq_exit_queue(elevator_t *e)
 {
        struct cfq_data *cfqd = e->elevator_data;
        request_queue_t *q = cfqd->queue;
-       int i;
 
        cfq_shutdown_timer_wq(cfqd);
 
@@ -2063,12 +2100,7 @@ static void cfq_exit_queue(elevator_t *e)
                __cfq_exit_single_io_context(cfqd, cic);
        }
 
-       /*
-        * Put the async queues
-        */
-       for (i = 0; i < IOPRIO_BE_NR; i++)
-               if (cfqd->async_cfqq[i])        
-                       cfq_put_queue(cfqd->async_cfqq[i]);
+       cfq_put_async_queues(cfqd);
 
        spin_unlock_irq(q->queue_lock);
 
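A note on the pinning above: the async queue slot in cfq_data holds one
reference of its own, taken when the slot is first filled and dropped
exactly once by cfq_put_async_queues() at scheduler exit. An editor's
user-space sketch of that pattern (hypothetical names, not kernel code):

#include <assert.h>
#include <stdlib.h>

struct queue { int ref; };

static struct queue *pinned;    /* models a cfqd->async_cfqq[...] slot */

static struct queue *get_queue(void)
{
        struct queue *q = pinned;

        if (!q) {
                q = calloc(1, sizeof(*q));
                pinned = q;
                q->ref++;       /* the pin: models atomic_inc(&cfqq->ref) */
        }
        q->ref++;               /* the caller's own reference */
        return q;
}

static void put_queue(struct queue *q)
{
        assert(q->ref > 0);
        if (--q->ref == 0)
                free(q);
}

int main(void)
{
        struct queue *q = get_queue();

        put_queue(q);           /* request done: only the pin remains */
        put_queue(pinned);      /* exit path drops the pin exactly once */
        return 0;
}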
diff --git a/include/linux/ioprio.h b/include/linux/ioprio.h
index 2eaa142cd06171bae79f35a7e72442648cb86f46..baf29387cab4201a991ff4a8663fbba5e05fa149 100644
--- a/include/linux/ioprio.h
+++ b/include/linux/ioprio.h
@@ -53,6 +53,14 @@ static inline int task_ioprio(struct task_struct *task)
        return IOPRIO_NORM;
 }
 
+static inline int task_ioprio_class(struct task_struct *task)
+{
+       if (ioprio_valid(task->ioprio))
+               return IOPRIO_PRIO_CLASS(task->ioprio);
+
+       return IOPRIO_CLASS_BE;
+}
+
 static inline int task_nice_ioprio(struct task_struct *task)
 {
        return (task_nice(task) + 20) / 5;
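
For reference, an editor's sketch (user-space C, illustrative only) of
the class/data split that task_ioprio_class() and task_ioprio() decode.
The IOPRIO_* macros are adapted from this version of
include/linux/ioprio.h; main() and its variables are hypothetical.

#include <stdio.h>

#define IOPRIO_CLASS_SHIFT      13
#define IOPRIO_PRIO_MASK        ((1 << IOPRIO_CLASS_SHIFT) - 1)
#define IOPRIO_PRIO_CLASS(mask) ((mask) >> IOPRIO_CLASS_SHIFT)
#define IOPRIO_PRIO_DATA(mask)  ((mask) & IOPRIO_PRIO_MASK)
#define IOPRIO_PRIO_VALUE(class, data) \
        (((class) << IOPRIO_CLASS_SHIFT) | (data))

enum { IOPRIO_CLASS_NONE, IOPRIO_CLASS_RT, IOPRIO_CLASS_BE, IOPRIO_CLASS_IDLE };

int main(void)
{
        /* The same priority level 4 in two different classes ... */
        int rt = IOPRIO_PRIO_VALUE(IOPRIO_CLASS_RT, 4);
        int be = IOPRIO_PRIO_VALUE(IOPRIO_CLASS_BE, 4);

        /* ... identical data bits but distinct class bits, so they must
         * not share one async queue, which is what this patch fixes. */
        printf("rt: class=%d data=%d\n",
               IOPRIO_PRIO_CLASS(rt), IOPRIO_PRIO_DATA(rt));
        printf("be: class=%d data=%d\n",
               IOPRIO_PRIO_CLASS(be), IOPRIO_PRIO_DATA(be));
        return 0;
}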