aggregate_group_set_shares(tg, cpu, sd);
}
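+/*
+ * No-op tree-walk callback: update_aggregate() below passes it as the
+ * "up" handler so that only the down pass does any work.
+ */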
+static void
+aggregate_get_nop(struct task_group *tg, int cpu, struct sched_domain *sd)
+{
+}
+
static DEFINE_PER_CPU(spinlock_t, aggregate_lock);
static void __init init_aggregate(void)
return 1;
}
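+/*
+ * Recompute the aggregate stats for this sched_domain by walking the
+ * task_group tree top-down; the up pass is a no-op.
+ */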
+static void update_aggregate(int cpu, struct sched_domain *sd)
+{
+ aggregate_walk_tree(aggregate_get_down, aggregate_get_nop, cpu, sd);
+}
+
static void put_aggregate(int cpu, struct sched_domain *sd)
{
spin_unlock(&per_cpu(aggregate_lock, cpu));
return 0;
}
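+/* Empty stub: aggregate stats are not maintained in this configuration. */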
+static inline void update_aggregate(int cpu, struct sched_domain *sd)
+{
+}
+
static inline void put_aggregate(int cpu, struct sched_domain *sd)
{
}
int load_idx = sd->forkexec_idx;
int imbalance = 100 + (sd->imbalance_pct-100)/2;
+ /*
+ * Now that we have both rqs locked, the rq weight won't change
+ * anymore, so update the stats.
+ */
+ update_aggregate(this_cpu, sd);
+
do {
unsigned long load, avg_load;
int local_group;