sched: remove leftover debugging
author    Ingo Molnar <mingo@elte.hu>
Fri, 25 Jan 2008 20:08:16 +0000 (21:08 +0100)
committer Ingo Molnar <mingo@elte.hu>
Fri, 25 Jan 2008 20:08:16 +0000 (21:08 +0100)
Remove leftover assert_spin_locked() debugging checks from the RT scheduler push/pull paths in kernel/sched_rt.c.

Signed-off-by: Ingo Molnar <mingo@elte.hu>
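
The removed lines are assert_spin_locked() calls: each of these functions is only ever entered with the relevant runqueue lock already held, so the runtime assertions served purely as debugging aids and just add overhead on hot scheduling paths. For context, below is a minimal stand-alone sketch of the pattern being dropped; the example_lock and example_* names are hypothetical and only the spinlock calls (DEFINE_SPINLOCK, spin_lock, spin_unlock, assert_spin_locked) mirror the real kernel API.

    #include <linux/spinlock.h>

    static DEFINE_SPINLOCK(example_lock);

    /* Caller must hold example_lock. */
    static void example_requires_lock(void)
    {
            /*
             * Runtime debug check of the locking precondition.
             * This is the kind of line the patch removes.
             */
            assert_spin_locked(&example_lock);

            /* ... work that relies on example_lock being held ... */
    }

    static void example_caller(void)
    {
            spin_lock(&example_lock);
            example_requires_lock();
            spin_unlock(&example_lock);
    }

Once the locking convention is established and the callers are known to take the lock, the assert verifies nothing new, which is why these leftover checks are removed here.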
kernel/sched_rt.c

index deff0c77d7059909f1744629e98980e5662e2768..cc38521c5723edb0c6f07d6ae6ff6775daeaaf57 100644 (file)
@@ -253,8 +253,6 @@ static struct task_struct *pick_next_highest_task_rt(struct rq *rq, int cpu)
        struct list_head *queue;
        int idx;
 
-       assert_spin_locked(&rq->lock);
-
        if (likely(rq->rt.rt_nr_running < 2))
                return NULL;
 
@@ -500,8 +498,6 @@ static int push_rt_task(struct rq *rq)
        int ret = 0;
        int paranoid = RT_MAX_TRIES;
 
-       assert_spin_locked(&rq->lock);
-
        if (!rq->rt.overloaded)
                return 0;
 
@@ -546,8 +542,6 @@ static int push_rt_task(struct rq *rq)
                goto out;
        }
 
-       assert_spin_locked(&lowest_rq->lock);
-
        deactivate_task(rq, next_task, 0);
        set_task_cpu(next_task, lowest_rq->cpu);
        activate_task(lowest_rq, next_task, 0);
@@ -589,8 +583,6 @@ static int pull_rt_task(struct rq *this_rq)
        int cpu;
        int ret = 0;
 
-       assert_spin_locked(&this_rq->lock);
-
        /*
         * If cpusets are used, and we have overlapping
         * run queue cpusets, then this algorithm may not catch all.