#define task_hot(p, now, sd) ((long long) ((now) - (p)->last_ran) \
				< (long long) (sd)->cache_hot_time)
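(The (long long) casts make the subtraction signed: timestamps taken on different CPUs can skew, and if now lagged p->last_ran an unsigned delta would wrap to a huge value and make a just-run task look cache-cold.)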
+void __put_task_struct_cb(struct rcu_head *rhp)
+{
+	__put_task_struct(container_of(rhp, struct task_struct, rcu));
+}
+
+EXPORT_SYMBOL_GPL(__put_task_struct_cb);
+
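The callback recovers the enclosing task_struct from the embedded rcu_head with container_of(), so the task is freed only after an RCU grace period. A minimal userspace sketch of that recovery, with stand-in struct definitions (only the container_of pattern matches the kernel; calling the callback directly stands in for call_rcu()):

#include <stdio.h>
#include <stddef.h>

/* Stand-ins for the kernel types; the real ones live in
 * <linux/rcupdate.h> and <linux/sched.h>. */
struct rcu_head { struct rcu_head *next; void (*func)(struct rcu_head *); };
struct task_struct { int pid; struct rcu_head rcu; };

/* Recover the enclosing structure from a pointer to one of its members. */
#define container_of(ptr, type, member) \
	((type *)((char *)(ptr) - offsetof(type, member)))

static void put_task_cb(struct rcu_head *rhp)
{
	struct task_struct *p = container_of(rhp, struct task_struct, rcu);
	printf("freeing task %d after grace period\n", p->pid);
}

int main(void)
{
	struct task_struct t = { .pid = 42 };
	/* The kernel would do call_rcu(&t.rcu, put_task_cb); invoking the
	 * callback directly just demonstrates the pointer arithmetic. */
	put_task_cb(&t.rcu);
	return 0;
}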
/*
* These are the runqueue data structures:
*/
}
#ifdef CONFIG_SMP
-static inline void inc_prio_bias(runqueue_t *rq, int static_prio)
+static inline void inc_prio_bias(runqueue_t *rq, int prio)
{
-	rq->prio_bias += MAX_PRIO - static_prio;
+	rq->prio_bias += MAX_PRIO - prio;
}
-static inline void dec_prio_bias(runqueue_t *rq, int static_prio)
+static inline void dec_prio_bias(runqueue_t *rq, int prio)
{
-	rq->prio_bias -= MAX_PRIO - static_prio;
+	rq->prio_bias -= MAX_PRIO - prio;
+}
+
+static inline void inc_nr_running(task_t *p, runqueue_t *rq)
+{
+	rq->nr_running++;
+	if (rt_task(p)) {
+		if (p != rq->migration_thread)
+			/*
+			 * The migration thread does the actual balancing; do
+			 * not bias by its priority, since its ultra-high
+			 * priority would skew balancing adversely.
+			 */
+			inc_prio_bias(rq, p->prio);
+	} else
+		inc_prio_bias(rq, p->static_prio);
+}
+
+static inline void dec_nr_running(task_t *p, runqueue_t *rq)
+{
+	rq->nr_running--;
+	if (rt_task(p)) {
+		if (p != rq->migration_thread)
+			dec_prio_bias(rq, p->prio);
+	} else
+		dec_prio_bias(rq, p->static_prio);
}
#else
-static inline void inc_prio_bias(runqueue_t *rq, int static_prio)
+static inline void inc_prio_bias(runqueue_t *rq, int prio)
{
}
-static inline void dec_prio_bias(runqueue_t *rq, int static_prio)
+static inline void dec_prio_bias(runqueue_t *rq, int prio)
{
}
-#endif
static inline void inc_nr_running(task_t *p, runqueue_t *rq)
{
	rq->nr_running++;
-	inc_prio_bias(rq, p->static_prio);
}
static inline void dec_nr_running(task_t *p, runqueue_t *rq)
{
	rq->nr_running--;
-	dec_prio_bias(rq, p->static_prio);
}
+#endif
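For a sense of scale, assuming the 2.6 constants (MAX_RT_PRIO = 100, MAX_PRIO = 140, static_prio = 120 + nice), MAX_PRIO - static_prio works out to 20 - nice: nice -20 contributes 40 to prio_bias, nice 0 contributes 20, nice 19 contributes 1, while an RT task biased by p->prio in [0, 99] would contribute 41 to 140, which is why the migration thread is excluded above. A throwaway sketch of the arithmetic:

#include <stdio.h>

/* Constants as in the 2.6 scheduler headers. */
#define MAX_PRIO		140
#define NICE_TO_PRIO(nice)	(120 + (nice))

int main(void)
{
	int nice;

	/* MAX_PRIO - static_prio == 20 - nice: higher-priority (more
	 * negative nice) tasks carry a larger share of prio_bias. */
	for (nice = -20; nice <= 19; nice += 13)
		printf("nice %3d -> bias %d\n",
		       nice, MAX_PRIO - NICE_TO_PRIO(nice));
	return 0;
}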
/*
* __activate_task - move a task to the runqueue.
}
#endif
- p->prio = recalc_task_prio(p, now);
+	if (!rt_task(p))
+		p->prio = recalc_task_prio(p, now);
/*
* This checks to make sure it's not an uninterruptible task
#ifdef CONFIG_SMP
static void resched_task(task_t *p)
{
-	int need_resched, nrpolling;
+	int cpu;
	assert_spin_locked(&task_rq(p)->lock);
-	/* minimise the chance of sending an interrupt to poll_idle() */
-	nrpolling = test_tsk_thread_flag(p,TIF_POLLING_NRFLAG);
-	need_resched = test_and_set_tsk_thread_flag(p,TIF_NEED_RESCHED);
-	nrpolling |= test_tsk_thread_flag(p,TIF_POLLING_NRFLAG);
+	if (unlikely(test_tsk_thread_flag(p, TIF_NEED_RESCHED)))
+		return;
+
+	set_tsk_thread_flag(p, TIF_NEED_RESCHED);
-	if (!need_resched && !nrpolling && (task_cpu(p) != smp_processor_id()))
-		smp_send_reschedule(task_cpu(p));
+	cpu = task_cpu(p);
+	if (cpu == smp_processor_id())
+		return;
+
+	/* NEED_RESCHED must be visible before we test POLLING_NRFLAG */
+	smp_mb();
+	if (!test_tsk_thread_flag(p, TIF_POLLING_NRFLAG))
+		smp_send_reschedule(cpu);
}
#else
static inline void resched_task(task_t *p)
{
+	assert_spin_locked(&task_rq(p)->lock);
	set_tsk_need_resched(p);
}
#endif
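The smp_mb() comment is the heart of the rewrite: the IPI may only be skipped if a polling idle loop is guaranteed to observe NEED_RESCHED on its own. A hedged userspace C11 model of the pairing (the names and pthread harness are illustrative, not kernel code): either the waker's load sees polling set and skips the kick, or the poller's loop is guaranteed to see need_resched.

#include <stdatomic.h>
#include <stdbool.h>
#include <pthread.h>
#include <stdio.h>

static atomic_bool need_resched;	/* TIF_NEED_RESCHED stand-in */
static atomic_bool polling;		/* TIF_POLLING_NRFLAG stand-in */

/* Waker side, mirroring resched_task(): publish the flag, full fence,
 * then decide whether an interrupt would be needed. */
static bool kick_needed(void)
{
	atomic_store_explicit(&need_resched, true, memory_order_relaxed);
	atomic_thread_fence(memory_order_seq_cst);	/* the smp_mb() */
	return !atomic_load_explicit(&polling, memory_order_relaxed);
}

/* Idle side, mirroring a poll_idle()-style loop. */
static void *idle_poll(void *arg)
{
	atomic_store_explicit(&polling, true, memory_order_relaxed);
	atomic_thread_fence(memory_order_seq_cst);
	while (!atomic_load_explicit(&need_resched, memory_order_relaxed))
		;	/* cpu_relax() in the kernel */
	atomic_store_explicit(&polling, false, memory_order_relaxed);
	return arg;
}

int main(void)
{
	pthread_t t;

	pthread_create(&t, NULL, idle_poll, NULL);
	printf(kick_needed() ? "would send IPI\n"
			     : "poller will notice on its own\n");
	pthread_join(&t, NULL);
	return 0;
}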
static inline unsigned long __source_load(int cpu, int type, enum idle_type idle)
{
	runqueue_t *rq = cpu_rq(cpu);
-	unsigned long cpu_load = rq->cpu_load[type-1],
-		load_now = rq->nr_running * SCHED_LOAD_SCALE;
+	unsigned long running = rq->nr_running;
+	unsigned long source_load, cpu_load = rq->cpu_load[type-1],
+		load_now = running * SCHED_LOAD_SCALE;
-	if (idle == NOT_IDLE) {
+	if (type == 0)
+		source_load = load_now;
+	else
+		source_load = min(cpu_load, load_now);
+
+	if (running > 1 || (idle == NOT_IDLE && running))
		/*
-		 * If we are balancing busy runqueues the load is biased by
-		 * priority to create 'nice' support across cpus.
+		 * If we are busy rebalancing, the load is biased by
+		 * priority to create 'nice' support across cpus. When
+		 * idle rebalancing, we should only bias the source_load if
+		 * there is more than one task running on that queue, to
+		 * prevent the idle rebalance from pulling tasks from a
+		 * queue with only one running task.
		 */
-		cpu_load *= rq->prio_bias;
-		load_now *= rq->prio_bias;
-	}
+		source_load = source_load * rq->prio_bias / running;
-	if (type == 0)
-		return load_now;
-
-	return min(cpu_load, load_now);
+	return source_load;
}
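Concretely, with SCHED_LOAD_SCALE = 128: two nice-0 tasks give load_now = 256 and prio_bias = 40, so the reported load is 256 * 40 / 2 = 5120, while two nice-19 tasks (prio_bias = 2) report 256. Dividing by `running` keeps the bias a per-task average instead of growing with queue length. A toy model (biased_load is a stand-in name, not a kernel function):

#include <stdio.h>

#define SCHED_LOAD_SCALE	128UL

/* Model of the biasing above; prio_bias is the queue-wide sum of
 * (MAX_PRIO - static_prio) over runnable tasks. */
static unsigned long biased_load(unsigned long running,
				 unsigned long prio_bias)
{
	unsigned long load = running * SCHED_LOAD_SCALE;

	if (running > 1)	/* the idle-rebalance branch above */
		load = load * prio_bias / running;
	return load;
}

int main(void)
{
	printf("2 x nice  0 -> %lu\n", biased_load(2, 40));	/* 5120 */
	printf("2 x nice 19 -> %lu\n", biased_load(2, 2));	/* 256 */
	return 0;
}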
static inline unsigned long source_load(int cpu, int type)
static inline unsigned long __target_load(int cpu, int type, enum idle_type idle)
{
	runqueue_t *rq = cpu_rq(cpu);
-	unsigned long cpu_load = rq->cpu_load[type-1],
-		load_now = rq->nr_running * SCHED_LOAD_SCALE;
+	unsigned long running = rq->nr_running;
+	unsigned long target_load, cpu_load = rq->cpu_load[type-1],
+		load_now = running * SCHED_LOAD_SCALE;
	if (type == 0)
-		return load_now;
+		target_load = load_now;
+	else
+		target_load = max(cpu_load, load_now);
-	if (idle == NOT_IDLE) {
-		cpu_load *= rq->prio_bias;
-		load_now *= rq->prio_bias;
-	}
-	return max(cpu_load, load_now);
+	if (running > 1 || (idle == NOT_IDLE && running))
+		target_load = target_load * rq->prio_bias / running;
+
+	return target_load;
}
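Note the deliberate asymmetry: __source_load() takes min(cpu_load, load_now) while __target_load() takes max(), under-estimating the queue tasks would leave and over-estimating the one they would join, so the balancer only migrates when an imbalance survives conservative assumptions.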
static inline unsigned long target_load(int cpu, int type)
#endif
#ifdef CONFIG_PREEMPT
/* Want to start with kernel preemption disabled. */
-	p->thread_info->preempt_count = 1;
+	task_thread_info(p)->preempt_count = 1;
#endif
/*
* Share the timeslice between parent and child, thus the
#endif
#ifdef CONFIG_DEBUG_STACK_USAGE
{
-		unsigned long *n = (unsigned long *) (p->thread_info+1);
+		unsigned long *n = end_of_stack(p);
		while (!*n)
			n++;
-		free = (unsigned long) n - (unsigned long)(p->thread_info+1);
+		free = (unsigned long)n - (unsigned long)end_of_stack(p);
}
#endif
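The scan works because the stack region is zero-filled when the task is set up and consumed downward, so the run of zero words just above the thread_info is a low-water mark of stack that was never touched. A userspace model of the same walk (sizes arbitrary, `stack` stands in for end_of_stack()):

#include <stdio.h>
#include <string.h>

#define STACK_WORDS	64

int main(void)
{
	unsigned long stack[STACK_WORDS] = { 0 };	/* zero-filled region */
	unsigned long *n = stack;		/* end_of_stack() stand-in */

	/* Pretend the top 24 words were dirtied by deep call chains. */
	memset(&stack[STACK_WORDS - 24], 0xff, 24 * sizeof(unsigned long));

	/* Walk past the untouched zero words, as the debug code does. */
	while (!*n)
		n++;
	printf("never-touched stack: %lu bytes\n",
	       (unsigned long)n - (unsigned long)stack);
	return 0;
}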
printk("%5lu %5d %6d ", free, p->pid, p->parent->pid);
/* Set the preempt count _outside_ the spinlocks! */
#if defined(CONFIG_PREEMPT) && !defined(CONFIG_PREEMPT_BKL)
-	idle->thread_info->preempt_count = (idle->lock_depth >= 0);
+	task_thread_info(idle)->preempt_count = (idle->lock_depth >= 0);
#else
-	idle->thread_info->preempt_count = 0;
+	task_thread_info(idle)->preempt_count = 0;
#endif
}