* bump this up when changing the output format or the meaning of an existing
* format, so that tools can adapt (or abort)
*/
-#define SCHEDSTAT_VERSION 12
+#define SCHEDSTAT_VERSION 14
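Since tools keyed to the old format are expected to adapt or abort, a consumer should check the leading "version %d" line of /proc/schedstat before parsing anything else. A minimal userspace sketch of that check (standalone illustration, not kernel code; the constant 14 mirrors the define above):

#include <stdio.h>
#include <stdlib.h>

int main(void)
{
	FILE *f = fopen("/proc/schedstat", "r");
	unsigned int version;

	if (!f || fscanf(f, "version %u", &version) != 1) {
		fprintf(stderr, "cannot read schedstat version\n");
		return EXIT_FAILURE;
	}
	if (version != 14) {
		fprintf(stderr, "unsupported schedstat version %u\n", version);
		fclose(f);
		return EXIT_FAILURE;
	}
	/* ... parse the "cpu%d ..." and "domain%d ..." lines here ... */
	fclose(f);
	return EXIT_SUCCESS;
}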
static int show_schedstat(struct seq_file *seq, void *v)
{
seq_printf(seq, "domain%d %s", dcnt++, mask_str);
for (itype = SCHED_IDLE; itype < MAX_IDLE_TYPES;
itype++) {
- seq_printf(seq, " %lu %lu %lu %lu %lu %lu %lu %lu",
+ seq_printf(seq, " %lu %lu %lu %lu %lu %lu %lu "
+ "%lu",
			    sd->lb_cnt[itype],
			    sd->lb_balanced[itype],
			    sd->lb_failed[itype],
			    sd->lb_imbalance[itype],
			    sd->lb_gained[itype],
			    sd->lb_hot_gained[itype],
			    sd->lb_nobusyq[itype],
			    sd->lb_nobusyg[itype]);
}
- seq_printf(seq, " %lu %lu %lu %lu %lu %lu %lu %lu %lu %lu %lu %lu\n",
+ seq_printf(seq, " %lu %lu %lu %lu %lu %lu %lu %lu %lu"
+ " %lu %lu %lu\n",
sd->alb_cnt, sd->alb_failed, sd->alb_pushed,
sd->sbe_cnt, sd->sbe_balanced, sd->sbe_pushed,
sd->sbf_cnt, sd->sbf_balanced, sd->sbf_pushed,
- sd->ttwu_wake_remote, sd->ttwu_move_affine, sd->ttwu_move_balance);
+ sd->ttwu_wake_remote, sd->ttwu_move_affine,
+ sd->ttwu_move_balance);
}
preempt_enable();
#endif
{
unsigned long long now;
+ if (rt_task(p))
+ goto out;
+
now = sched_clock();
#ifdef CONFIG_SMP
if (!local) {
(now - p->timestamp) >> 20);
}
- if (!rt_task(p))
- p->prio = recalc_task_prio(p, now);
+ p->prio = recalc_task_prio(p, now);
/*
* This checks to make sure it's not an uninterruptible task
}
}
p->timestamp = now;
-
+out:
__activate_task(p, rq);
}
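The new rt_task() short-circuit is safe because realtime priorities are static: such a task gains nothing from recalc_task_prio(), so it can jump straight to __activate_task(). For reference, rt_task() is a pure priority-range test along these lines (the real definition is a macro in include/linux/sched.h; this is only a sketch of its shape):

/* Sketch of the rt_task() predicate: true inside the realtime range. */
static inline int rt_task_sketch(struct task_struct *p)
{
	return p->prio < MAX_RT_PRIO;
}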
if (this_sd->flags & SD_WAKE_AFFINE) {
unsigned long tl = this_load;
- unsigned long tl_per_task = cpu_avg_load_per_task(this_cpu);
+ unsigned long tl_per_task;
+
+ tl_per_task = cpu_avg_load_per_task(this_cpu);
/*
* If sync wakeup then subtract the (maximum possible)
return try_to_wake_up(p, state, 0);
}
+static void task_running_tick(struct rq *rq, struct task_struct *p);
/*
* Perform scheduler related setup for a newly forked process p.
* p is forked by current.
* runqueue lock is not a problem.
*/
current->time_slice = 1;
- scheduler_tick();
+ task_running_tick(cpu_rq(cpu), current);
}
local_irq_enable();
put_cpu();
static struct sched_group *
find_busiest_group(struct sched_domain *sd, int this_cpu,
unsigned long *imbalance, enum idle_type idle, int *sd_idle,
- cpumask_t *cpus)
+ cpumask_t *cpus, int *balance)
{
struct sched_group *busiest = NULL, *this = NULL, *group = sd->groups;
unsigned long max_load, avg_load, total_load, this_load, total_pwr;
unsigned long load, group_capacity;
int local_group;
int i;
+ unsigned int balance_cpu = -1, first_idle_cpu = 0;
unsigned long sum_nr_running, sum_weighted_load;
local_group = cpu_isset(this_cpu, group->cpumask);
+ if (local_group)
+ balance_cpu = first_cpu(group->cpumask);
+
/* Tally up the load of all CPUs in the group */
sum_weighted_load = sum_nr_running = avg_load = 0;
*sd_idle = 0;
/* Bias balancing toward cpus of our domain */
- if (local_group)
+ if (local_group) {
+ if (idle_cpu(i) && !first_idle_cpu) {
+ first_idle_cpu = 1;
+ balance_cpu = i;
+ }
+
load = target_load(i, load_idx);
- else
+ } else
load = source_load(i, load_idx);
avg_load += load;
sum_weighted_load += rq->raw_weighted_load;
}
+	/*
+	 * The first idle cpu, or the first cpu (busiest) in this sched
+	 * group, is eligible for doing load balancing at this and
+	 * higher domains.
+	 */
+ if (local_group && balance_cpu != this_cpu && balance) {
+ *balance = 0;
+ goto ret;
+ }
+
total_load += avg_load;
total_pwr += group->cpu_power;
pwr_now /= SCHED_LOAD_SCALE;
/* Amount of load we'd subtract */
- tmp = busiest_load_per_task*SCHED_LOAD_SCALE/busiest->cpu_power;
+ tmp = busiest_load_per_task * SCHED_LOAD_SCALE /
+ busiest->cpu_power;
if (max_load > tmp)
pwr_move += busiest->cpu_power *
min(busiest_load_per_task, max_load - tmp);
/* Amount of load we'd add */
- if (max_load*busiest->cpu_power <
- busiest_load_per_task*SCHED_LOAD_SCALE)
- tmp = max_load*busiest->cpu_power/this->cpu_power;
+ if (max_load * busiest->cpu_power <
+ busiest_load_per_task * SCHED_LOAD_SCALE)
+ tmp = max_load * busiest->cpu_power / this->cpu_power;
else
- tmp = busiest_load_per_task*SCHED_LOAD_SCALE/this->cpu_power;
- pwr_move += this->cpu_power*min(this_load_per_task, this_load + tmp);
+ tmp = busiest_load_per_task * SCHED_LOAD_SCALE /
+ this->cpu_power;
+ pwr_move += this->cpu_power *
+ min(this_load_per_task, this_load + tmp);
pwr_move /= SCHED_LOAD_SCALE;
/* Move if we gain throughput */
*imbalance = min_load_per_task;
return group_min;
}
-ret:
#endif
+ret:
*imbalance = 0;
return NULL;
}
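The balance_cpu election above reads naturally in isolation: the first idle cpu in the local group wins, falling back to the group's first cpu when nobody is idle, and any other cpu clears *balance and backs off via the ret path. A standalone sketch of that election, using a plain array in place of a cpumask (all helper names hypothetical):

/*
 * Hypothetical stand-in for the election in find_busiest_group():
 * returns 1 iff this_cpu is the one that should balance the group.
 */
static int should_balance(int this_cpu, const int *group_cpus, int nr,
			  int (*is_idle)(int))
{
	int balance_cpu = group_cpus[0];	/* first cpu in the group */
	int i;

	for (i = 0; i < nr; i++) {
		if (is_idle(group_cpus[i])) {
			balance_cpu = group_cpus[i];	/* first idle cpu */
			break;
		}
	}
	return balance_cpu == this_cpu;
}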
* tasks if there is an imbalance.
*/
static int load_balance(int this_cpu, struct rq *this_rq,
- struct sched_domain *sd, enum idle_type idle)
+ struct sched_domain *sd, enum idle_type idle,
+ int *balance)
{
int nr_moved, all_pinned = 0, active_balance = 0, sd_idle = 0;
struct sched_group *group;
redo:
group = find_busiest_group(sd, this_cpu, &imbalance, idle, &sd_idle,
- &cpus);
+ &cpus, balance);
+
+ if (*balance == 0)
+ goto out_balanced;
+
if (!group) {
schedstat_inc(sd, lb_nobusyg[idle]);
goto out_balanced;
schedstat_inc(sd, lb_cnt[NEWLY_IDLE]);
redo:
group = find_busiest_group(sd, this_cpu, &imbalance, NEWLY_IDLE,
- &sd_idle, &cpus);
+ &sd_idle, &cpus, NULL);
if (!group) {
schedstat_inc(sd, lb_nobusyg[NEWLY_IDLE]);
goto out_balanced;
static void run_rebalance_domains(struct softirq_action *h)
{
- int this_cpu = smp_processor_id();
+ int this_cpu = smp_processor_id(), balance = 1;
struct rq *this_rq = cpu_rq(this_cpu);
unsigned long interval;
struct sched_domain *sd;
}
if (time_after_eq(jiffies, sd->last_balance + interval)) {
- if (load_balance(this_cpu, this_rq, sd, idle)) {
+ if (load_balance(this_cpu, this_rq, sd, idle, &balance)) {
/*
* We've pulled tasks over so either we're no
* longer idle, or one of our SMT siblings is
out:
if (time_after(next_balance, sd->last_balance + interval))
next_balance = sd->last_balance + interval;
+
+ /*
+ * Stop the load balance at this level. There is another
+ * CPU in our sched group which is doing load balancing more
+ * actively.
+ */
+ if (!balance)
+ break;
}
this_rq->next_balance = next_balance;
}
/*
* Spinlock count overflowing soon?
*/
- DEBUG_LOCKS_WARN_ON((preempt_count() & PREEMPT_MASK) >= PREEMPT_MASK-10);
+ DEBUG_LOCKS_WARN_ON((preempt_count() & PREEMPT_MASK) >=
+ PREEMPT_MASK - 10);
}
EXPORT_SYMBOL(add_preempt_count);
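The rewrapped check warns before the preemption field of preempt_count() can wrap: with the usual PREEMPT_MASK of 0x000000ff it fires at a nesting depth of 245 (0xff - 10). A worked instance of the threshold, with the mask value assumed from the standard hardirq.h layout:

#define EXAMPLE_PREEMPT_MASK	0x000000ffUL	/* assumed 8-bit field */

/* Nonzero once the preemption depth is within 10 of wrapping at 256. */
static inline int preempt_overflow_soon(unsigned long count)
{
	return (count & EXAMPLE_PREEMPT_MASK) >= EXAMPLE_PREEMPT_MASK - 10;
}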
"%s/0x%08x/%d\n",
current->comm, preempt_count(), current->pid);
debug_show_held_locks(current);
+ if (irqs_disabled())
+ print_irqtrace_events(current);
dump_stack();
}
profile_hit(SCHED_PROFILING, __builtin_return_address(0));
return 0;
}
-static inline int __resched_legal(int expected_preempt_count)
-{
- if (unlikely(preempt_count() != expected_preempt_count))
- return 0;
- if (unlikely(system_state != SYSTEM_RUNNING))
- return 0;
- return 1;
-}
-
static void __cond_resched(void)
{
#ifdef CONFIG_DEBUG_SPINLOCK_SLEEP
int __sched cond_resched(void)
{
- if (need_resched() && __resched_legal(0)) {
+ if (need_resched() && !(preempt_count() & PREEMPT_ACTIVE) &&
+ system_state == SYSTEM_RUNNING) {
__cond_resched();
return 1;
}
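The open-coded test refuses to reschedule recursively out of the preemption path (PREEMPT_ACTIVE set) and to reschedule before the system is fully up. The common caller pattern is a long kernel loop that yields voluntarily; a hypothetical example (struct item and process_item() are illustrative, not kernel API):

/* Hypothetical long-running worker yielding the CPU between items. */
static void process_all(struct item *items, int nr_items)
{
	int i;

	for (i = 0; i < nr_items; i++) {
		process_item(&items[i]);	/* hypothetical per-item work */
		cond_resched();		/* reschedules iff need_resched() */
	}
}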
ret = 1;
spin_lock(lock);
}
- if (need_resched() && __resched_legal(1)) {
+ if (need_resched() && system_state == SYSTEM_RUNNING) {
spin_release(&lock->dep_map, 1, _THIS_IP_);
_raw_spin_unlock(lock);
preempt_enable_no_resched();
{
BUG_ON(!in_softirq());
- if (need_resched() && __resched_legal(0)) {
+ if (need_resched() && system_state == SYSTEM_RUNNING) {
raw_local_irq_disable();
_local_bh_enable();
raw_local_irq_enable();
if (!(sd->flags & SD_LOAD_BALANCE)) {
printk("does not load-balance\n");
if (sd->parent)
- printk(KERN_ERR "ERROR: !SD_LOAD_BALANCE domain has parent");
+ printk(KERN_ERR "ERROR: !SD_LOAD_BALANCE domain"
+ " has parent");
break;
}
printk("span %s\n", str);
if (!cpu_isset(cpu, sd->span))
- printk(KERN_ERR "ERROR: domain->span does not contain CPU%d\n", cpu);
+ printk(KERN_ERR "ERROR: domain->span does not contain "
+ "CPU%d\n", cpu);
if (!cpu_isset(cpu, group->cpumask))
- printk(KERN_ERR "ERROR: domain->groups does not contain CPU%d\n", cpu);
+ printk(KERN_ERR "ERROR: domain->groups does not contain"
+ " CPU%d\n", cpu);
printk(KERN_DEBUG);
for (i = 0; i < level + 2; i++)
if (!group->cpu_power) {
printk("\n");
- printk(KERN_ERR "ERROR: domain->cpu_power not set\n");
+ printk(KERN_ERR "ERROR: domain->cpu_power not "
+ "set\n");
}
if (!cpus_weight(group->cpumask)) {
printk("\n");
if (!cpus_equal(sd->span, groupmask))
- printk(KERN_ERR "ERROR: groups don't span domain->span\n");
+ printk(KERN_ERR "ERROR: groups don't span "
+ "domain->span\n");
level++;
sd = sd->parent;
+ if (!sd)
+ continue;
- if (sd) {
- if (!cpus_subset(groupmask, sd->span))
- printk(KERN_ERR "ERROR: parent span is not a superset of domain->span\n");
- }
+ if (!cpus_subset(groupmask, sd->span))
+ printk(KERN_ERR "ERROR: parent span is not a superset "
+ "of domain->span\n");
} while (sd);
}
}
/* cpus with isolated domains */
-static cpumask_t __cpuinitdata cpu_isolated_map = CPU_MASK_NONE;
+static cpumask_t cpu_isolated_map = CPU_MASK_NONE;
/* Setup the mask of cpus configured for isolated domains */
static int __init isolated_cpu_setup(char *str)
*/
static void touch_cache(void *__cache, unsigned long __size)
{
- unsigned long size = __size/sizeof(long), chunk1 = size/3,
- chunk2 = 2*size/3;
+ unsigned long size = __size / sizeof(long);
+ unsigned long chunk1 = size / 3;
+ unsigned long chunk2 = 2 * size / 3;
unsigned long *cache = __cache;
int i;
*/
measure_one(cache, size, cpu1, cpu2);
for (i = 0; i < ITERATIONS; i++)
- cost1 += measure_one(cache, size - i*1024, cpu1, cpu2);
+ cost1 += measure_one(cache, size - i * 1024, cpu1, cpu2);
measure_one(cache, size, cpu2, cpu1);
for (i = 0; i < ITERATIONS; i++)
- cost1 += measure_one(cache, size - i*1024, cpu2, cpu1);
+ cost1 += measure_one(cache, size - i * 1024, cpu2, cpu1);
/*
* (We measure the non-migrating [cached] cost on both
measure_one(cache, size, cpu1, cpu1);
for (i = 0; i < ITERATIONS; i++)
- cost2 += measure_one(cache, size - i*1024, cpu1, cpu1);
+ cost2 += measure_one(cache, size - i * 1024, cpu1, cpu1);
measure_one(cache, size, cpu2, cpu2);
for (i = 0; i < ITERATIONS; i++)
- cost2 += measure_one(cache, size - i*1024, cpu2, cpu2);
+ cost2 += measure_one(cache, size - i * 1024, cpu2, cpu2);
/*
* Get the per-iteration migration cost:
*/
- do_div(cost1, 2*ITERATIONS);
- do_div(cost2, 2*ITERATIONS);
+ do_div(cost1, 2 * ITERATIONS);
+ do_div(cost2, 2 * ITERATIONS);
return cost1 - cost2;
}
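Spelled out: cost1 accumulates 2*ITERATIONS cross-cpu (migrating) runs, cost2 accumulates 2*ITERATIONS same-cpu (cached) runs, so after the two do_div() calls the function returns the per-iteration difference:

	migration_cost = cost1/(2*ITERATIONS) - cost2/(2*ITERATIONS)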
*/
cache = vmalloc(max_size);
if (!cache) {
- printk("could not vmalloc %d bytes for cache!\n", 2*max_size);
+ printk("could not vmalloc %d bytes for cache!\n", 2 * max_size);
return 1000000; /* return 1 msec on very small boxen */
}
avg_fluct = (avg_fluct + fluct)/2;
if (migration_debug)
- printk("-> [%d][%d][%7d] %3ld.%ld [%3ld.%ld] (%ld): (%8Ld %8Ld)\n",
+ printk("-> [%d][%d][%7d] %3ld.%ld [%3ld.%ld] (%ld): "
+ "(%8Ld %8Ld)\n",
cpu1, cpu2, size,
(long)cost / 1000000,
((long)cost / 100000) % 10,
-1
#endif
);
- if (system_state == SYSTEM_BOOTING) {
- if (num_online_cpus() > 1) {
- printk("migration_cost=");
- for (distance = 0; distance <= max_distance; distance++) {
- if (distance)
- printk(",");
- printk("%ld", (long)migration_cost[distance] / 1000);
- }
- printk("\n");
+ if (system_state == SYSTEM_BOOTING && num_online_cpus() > 1) {
+ printk("migration_cost=");
+ for (distance = 0; distance <= max_distance; distance++) {
+ if (distance)
+ printk(",");
+ printk("%ld", (long)migration_cost[distance] / 1000);
}
+ printk("\n");
}
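For reference, the restructured block prints one comma-separated value per domain distance, in microseconds (the nanosecond costs are divided by 1000). An illustrative boot line for a hypothetical two-distance box, with made-up values:

	migration_cost=4000,8000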
j1 = jiffies;
if (migration_debug)
- printk("migration: %ld seconds\n", (j1-j0)/HZ);
+		printk("migration: %ld seconds\n", (j1 - j0) / HZ);
/*
* Move back to the original CPU. NUMA-Q gets confused
lock_cpu_hotplug();
arch_init_sched_domains(&cpu_online_map);
- cpus_andnot(non_isolated_cpus, cpu_online_map, cpu_isolated_map);
+ cpus_andnot(non_isolated_cpus, cpu_possible_map, cpu_isolated_map);
if (cpus_empty(non_isolated_cpus))
cpu_set(smp_processor_id(), non_isolated_cpus);
unlock_cpu_hotplug();
printk("in_atomic():%d, irqs_disabled():%d\n",
in_atomic(), irqs_disabled());
debug_show_held_locks(current);
+ if (irqs_disabled())
+ print_irqtrace_events(current);
dump_stack();
}
#endif