X-Git-Url: https://err.no/cgi-bin/gitweb.cgi?a=blobdiff_plain;f=kernel%2Fsched.c;h=b062856b946c4fbe4387763b0fdd61f9ac48625b;hb=db292ca302e83534f5f0f7139e13d7e6976e51f9;hp=f40fe027723f7473a1ad91ec0fce2aa48121855e;hpb=ad1cdc1d7883e88f936f7888a092e4e3e6d8c631;p=linux-2.6 diff --git a/kernel/sched.c b/kernel/sched.c index f40fe02772..b062856b94 100644 --- a/kernel/sched.c +++ b/kernel/sched.c @@ -44,6 +44,7 @@ #include #include #include +#include #include #include #include @@ -64,6 +65,7 @@ #include #include +#include /* * Scheduler clock - returns current time in nanosec units. @@ -72,7 +74,7 @@ */ unsigned long long __attribute__((weak)) sched_clock(void) { - return (unsigned long long)jiffies * (1000000000 / HZ); + return (unsigned long long)jiffies * (NSEC_PER_SEC / HZ); } /* @@ -96,8 +98,8 @@ unsigned long long __attribute__((weak)) sched_clock(void) /* * Some helpers for converting nanosecond timing to jiffy resolution */ -#define NS_TO_JIFFIES(TIME) ((unsigned long)(TIME) / (1000000000 / HZ)) -#define JIFFIES_TO_NS(TIME) ((TIME) * (1000000000 / HZ)) +#define NS_TO_JIFFIES(TIME) ((unsigned long)(TIME) / (NSEC_PER_SEC / HZ)) +#define JIFFIES_TO_NS(TIME) ((TIME) * (NSEC_PER_SEC / HZ)) #define NICE_0_LOAD SCHED_LOAD_SCALE #define NICE_0_SHIFT SCHED_LOAD_SHIFT @@ -153,10 +155,15 @@ struct rt_prio_array { #ifdef CONFIG_FAIR_GROUP_SCHED +#include + struct cfs_rq; /* task group related information */ struct task_group { +#ifdef CONFIG_FAIR_CGROUP_SCHED + struct cgroup_subsys_state css; +#endif /* schedulable entities of this group on each cpu */ struct sched_entity **se; /* runqueue "owned" by this group on each cpu */ @@ -164,6 +171,7 @@ struct task_group { unsigned long shares; /* spinlock to serialize modification to shares */ spinlock_t lock; + struct rcu_head rcu; }; /* Default task group's sched entity on each cpu */ @@ -197,6 +205,9 @@ static inline struct task_group *task_group(struct task_struct *p) #ifdef CONFIG_FAIR_USER_SCHED tg = p->user->tg; +#elif defined(CONFIG_FAIR_CGROUP_SCHED) + tg = container_of(task_subsys_state(p, cpu_cgroup_subsys_id), + struct task_group, css); #else tg = &init_task_group; #endif @@ -205,15 +216,15 @@ static inline struct task_group *task_group(struct task_struct *p) } /* Change a task's cfs_rq and parent entity if it moves across CPUs/groups */ -static inline void set_task_cfs_rq(struct task_struct *p) +static inline void set_task_cfs_rq(struct task_struct *p, unsigned int cpu) { - p->se.cfs_rq = task_group(p)->cfs_rq[task_cpu(p)]; - p->se.parent = task_group(p)->se[task_cpu(p)]; + p->se.cfs_rq = task_group(p)->cfs_rq[cpu]; + p->se.parent = task_group(p)->se[cpu]; } #else -static inline void set_task_cfs_rq(struct task_struct *p) { } +static inline void set_task_cfs_rq(struct task_struct *p, unsigned int cpu) { } #endif /* CONFIG_FAIR_GROUP_SCHED */ @@ -247,7 +258,6 @@ struct cfs_rq { */ struct list_head leaf_cfs_rq_list; /* Better name : task_cfs_rq_list? */ struct task_group *tg; /* group that "owns" this runqueue */ - struct rcu_head rcu; #endif }; @@ -266,7 +276,8 @@ struct rt_rq { * acquire operations must be ordered by ascending &runqueue. 
*/ struct rq { - spinlock_t lock; /* runqueue lock */ + /* runqueue lock: */ + spinlock_t lock; /* * nr_running and cpu_load should be in the same cacheline because @@ -279,13 +290,15 @@ struct rq { #ifdef CONFIG_NO_HZ unsigned char in_nohz_recently; #endif - struct load_weight load; /* capture load from *all* tasks on this cpu */ + /* capture load from *all* tasks on this cpu: */ + struct load_weight load; unsigned long nr_load_updates; u64 nr_switches; struct cfs_rq cfs; #ifdef CONFIG_FAIR_GROUP_SCHED - struct list_head leaf_cfs_rq_list; /* list of leaf cfs_rq on this cpu */ + /* list of leaf cfs_rq on this cpu: */ + struct list_head leaf_cfs_rq_list; #endif struct rt_rq rt; @@ -317,7 +330,8 @@ struct rq { /* For active balancing */ int active_balance; int push_cpu; - int cpu; /* cpu of this runqueue */ + /* cpu of this runqueue: */ + int cpu; struct task_struct *migration_thread; struct list_head migration_queue; @@ -328,22 +342,22 @@ struct rq { struct sched_info rq_sched_info; /* sys_sched_yield() stats */ - unsigned long yld_exp_empty; - unsigned long yld_act_empty; - unsigned long yld_both_empty; - unsigned long yld_count; + unsigned int yld_exp_empty; + unsigned int yld_act_empty; + unsigned int yld_both_empty; + unsigned int yld_count; /* schedule() stats */ - unsigned long sched_switch; - unsigned long sched_count; - unsigned long sched_goidle; + unsigned int sched_switch; + unsigned int sched_count; + unsigned int sched_goidle; /* try_to_wake_up() stats */ - unsigned long ttwu_count; - unsigned long ttwu_local; + unsigned int ttwu_count; + unsigned int ttwu_local; /* BKL stats */ - unsigned long bkl_count; + unsigned int bkl_count; #endif struct lock_class_key rq_lock_key; }; @@ -441,23 +455,27 @@ static void update_rq_clock(struct rq *rq) */ enum { SCHED_FEAT_NEW_FAIR_SLEEPERS = 1, - SCHED_FEAT_START_DEBIT = 2, - SCHED_FEAT_TREE_AVG = 4, - SCHED_FEAT_APPROX_AVG = 8, - SCHED_FEAT_WAKEUP_PREEMPT = 16, - SCHED_FEAT_PREEMPT_RESTRICT = 32, + SCHED_FEAT_WAKEUP_PREEMPT = 2, + SCHED_FEAT_START_DEBIT = 4, + SCHED_FEAT_TREE_AVG = 8, + SCHED_FEAT_APPROX_AVG = 16, }; const_debug unsigned int sysctl_sched_features = - SCHED_FEAT_NEW_FAIR_SLEEPERS *1 | - SCHED_FEAT_START_DEBIT *1 | - SCHED_FEAT_TREE_AVG *0 | - SCHED_FEAT_APPROX_AVG *0 | - SCHED_FEAT_WAKEUP_PREEMPT *1 | - SCHED_FEAT_PREEMPT_RESTRICT *1; + SCHED_FEAT_NEW_FAIR_SLEEPERS * 1 | + SCHED_FEAT_WAKEUP_PREEMPT * 1 | + SCHED_FEAT_START_DEBIT * 1 | + SCHED_FEAT_TREE_AVG * 0 | + SCHED_FEAT_APPROX_AVG * 0; #define sched_feat(x) (sysctl_sched_features & SCHED_FEAT_##x) +/* + * Number of tasks to iterate in a single balance run. + * Limited because this is done with IRQs disabled. 
+ */ +const_debug unsigned int sysctl_sched_nr_migrate = 32; + /* * For kernel-internal use: high-speed (but slightly incorrect) per-cpu * clock constructed from sched_clock(): @@ -823,11 +841,24 @@ struct rq_iterator { struct task_struct *(*next)(void *); }; -static int balance_tasks(struct rq *this_rq, int this_cpu, struct rq *busiest, - unsigned long max_nr_move, unsigned long max_load_move, - struct sched_domain *sd, enum cpu_idle_type idle, - int *all_pinned, unsigned long *load_moved, - int *this_best_prio, struct rq_iterator *iterator); +#ifdef CONFIG_SMP +static unsigned long +balance_tasks(struct rq *this_rq, int this_cpu, struct rq *busiest, + unsigned long max_load_move, struct sched_domain *sd, + enum cpu_idle_type idle, int *all_pinned, + int *this_best_prio, struct rq_iterator *iterator); + +static int +iter_move_one_task(struct rq *this_rq, int this_cpu, struct rq *busiest, + struct sched_domain *sd, enum cpu_idle_type idle, + struct rq_iterator *iterator); +#endif + +#ifdef CONFIG_CGROUP_CPUACCT +static void cpuacct_charge(struct task_struct *tsk, u64 cputime); +#else +static inline void cpuacct_charge(struct task_struct *tsk, u64 cputime) {} +#endif #include "sched_stats.h" #include "sched_idletask.c" @@ -997,10 +1028,16 @@ unsigned long weighted_cpuload(const int cpu) static inline void __set_task_cpu(struct task_struct *p, unsigned int cpu) { + set_task_cfs_rq(p, cpu); #ifdef CONFIG_SMP + /* + * After ->cpu is set up to a new value, task_rq_lock(p, ...) can be + * successfuly executed on another CPU. We must ensure that updates of + * per-task data have been completed by this moment. + */ + smp_wmb(); task_thread_info(p)->cpu = cpu; #endif - set_task_cfs_rq(p); } #ifdef CONFIG_SMP @@ -1521,6 +1558,12 @@ static int try_to_wake_up(struct task_struct *p, unsigned int state, int sync) unsigned long tl = this_load; unsigned long tl_per_task; + /* + * Attract cache-cold tasks on sync wakeups: + */ + if (sync && !task_hot(p, rq->clock, this_sd)) + goto out_set_cpu; + schedstat_inc(p, se.nr_wakeups_affine_attempts); tl_per_task = cpu_avg_load_per_task(this_cpu); @@ -1590,16 +1633,7 @@ out_activate: schedstat_inc(p, se.nr_wakeups_remote); update_rq_clock(rq); activate_task(rq, p, 1); - /* - * Sync wakeups (i.e. those types of wakeups where the waker - * has indicated that it will leave the CPU in short order) - * don't trigger a preemption, if the woken up task will run on - * this cpu. (in this case the 'I will reschedule' promise of - * the waker guarantees that the freshly woken up task is going - * to be considered on this CPU.) 
- */ - if (!sync || cpu != this_cpu) - check_preempt_curr(rq, p); + check_preempt_curr(rq, p); success = 1; out_running: @@ -1715,7 +1749,7 @@ void fastcall wake_up_new_task(struct task_struct *p, unsigned long clone_flags) p->prio = effective_prio(p); - if (!p->sched_class->task_new || !current->se.on_rq || !rq->cfs.curr) { + if (!p->sched_class->task_new || !current->se.on_rq) { activate_task(rq, p, 0); } else { /* @@ -1874,7 +1908,7 @@ asmlinkage void schedule_tail(struct task_struct *prev) preempt_enable(); #endif if (current->set_child_tid) - put_user(current->pid, current->set_child_tid); + put_user(task_pid_vnr(current), current->set_child_tid); } /* @@ -2212,17 +2246,17 @@ int can_migrate_task(struct task_struct *p, struct rq *rq, int this_cpu, return 1; } -static int balance_tasks(struct rq *this_rq, int this_cpu, struct rq *busiest, - unsigned long max_nr_move, unsigned long max_load_move, - struct sched_domain *sd, enum cpu_idle_type idle, - int *all_pinned, unsigned long *load_moved, - int *this_best_prio, struct rq_iterator *iterator) +static unsigned long +balance_tasks(struct rq *this_rq, int this_cpu, struct rq *busiest, + unsigned long max_load_move, struct sched_domain *sd, + enum cpu_idle_type idle, int *all_pinned, + int *this_best_prio, struct rq_iterator *iterator) { - int pulled = 0, pinned = 0, skip_for_load; + int loops = 0, pulled = 0, pinned = 0, skip_for_load; struct task_struct *p; long rem_load_move = max_load_move; - if (max_nr_move == 0 || max_load_move == 0) + if (max_load_move == 0) goto out; pinned = 1; @@ -2232,10 +2266,10 @@ static int balance_tasks(struct rq *this_rq, int this_cpu, struct rq *busiest, */ p = iterator->start(iterator->arg); next: - if (!p) + if (!p || loops++ > sysctl_sched_nr_migrate) goto out; /* - * To help distribute high priority tasks accross CPUs we don't + * To help distribute high priority tasks across CPUs we don't * skip a task if it will be the highest priority task (i.e. smallest * prio value) on its new queue regardless of its load weight */ @@ -2252,10 +2286,9 @@ next: rem_load_move -= p->se.load.weight; /* - * We only want to steal up to the prescribed number of tasks - * and the prescribed amount of weighted load. + * We only want to steal up to the prescribed amount of weighted load. */ - if (pulled < max_nr_move && rem_load_move > 0) { + if (rem_load_move > 0) { if (p->prio < *this_best_prio) *this_best_prio = p->prio; p = iterator->next(iterator->arg); @@ -2263,7 +2296,7 @@ next: } out: /* - * Right now, this is the only place pull_task() is called, + * Right now, this is one of only two places pull_task() is called, * so we can safely collect pull_task() stats here rather than * inside pull_task(). 
*/ @@ -2271,8 +2304,8 @@ out: if (all_pinned) *all_pinned = pinned; - *load_moved = max_load_move - rem_load_move; - return pulled; + + return max_load_move - rem_load_move; } /* @@ -2294,7 +2327,7 @@ static int move_tasks(struct rq *this_rq, int this_cpu, struct rq *busiest, do { total_load_moved += class->load_balance(this_rq, this_cpu, busiest, - ULONG_MAX, max_load_move - total_load_moved, + max_load_move - total_load_moved, sd, idle, all_pinned, &this_best_prio); class = class->next; } while (class && max_load_move > total_load_moved); @@ -2302,6 +2335,32 @@ static int move_tasks(struct rq *this_rq, int this_cpu, struct rq *busiest, return total_load_moved > 0; } +static int +iter_move_one_task(struct rq *this_rq, int this_cpu, struct rq *busiest, + struct sched_domain *sd, enum cpu_idle_type idle, + struct rq_iterator *iterator) +{ + struct task_struct *p = iterator->start(iterator->arg); + int pinned = 0; + + while (p) { + if (can_migrate_task(p, busiest, this_cpu, sd, idle, &pinned)) { + pull_task(busiest, p, this_rq, this_cpu); + /* + * Right now, this is only the second place pull_task() + * is called, so we can safely collect pull_task() + * stats here rather than inside pull_task(). + */ + schedstat_inc(sd, lb_gained[idle]); + + return 1; + } + p = iterator->next(iterator->arg); + } + + return 0; +} + /* * move_one_task tries to move exactly one task from busiest to this_rq, as * part of active balancing operations within "domain". @@ -2313,12 +2372,9 @@ static int move_one_task(struct rq *this_rq, int this_cpu, struct rq *busiest, struct sched_domain *sd, enum cpu_idle_type idle) { const struct sched_class *class; - int this_best_prio = MAX_PRIO; for (class = sched_class_highest; class; class = class->next) - if (class->load_balance(this_rq, this_cpu, busiest, - 1, ULONG_MAX, sd, idle, NULL, - &this_best_prio)) + if (class->move_one_task(this_rq, this_cpu, busiest, sd, idle)) return 1; return 0; @@ -2339,7 +2395,7 @@ find_busiest_group(struct sched_domain *sd, int this_cpu, unsigned long max_pull; unsigned long busiest_load_per_task, busiest_nr_running; unsigned long this_load_per_task, this_nr_running; - int load_idx; + int load_idx, group_imb = 0; #if defined(CONFIG_SCHED_MC) || defined(CONFIG_SCHED_SMT) int power_savings_balance = 1; unsigned long leader_nr_running = 0, min_load_per_task = 0; @@ -2358,9 +2414,10 @@ find_busiest_group(struct sched_domain *sd, int this_cpu, load_idx = sd->idle_idx; do { - unsigned long load, group_capacity; + unsigned long load, group_capacity, max_cpu_load, min_cpu_load; int local_group; int i; + int __group_imb = 0; unsigned int balance_cpu = -1, first_idle_cpu = 0; unsigned long sum_nr_running, sum_weighted_load; @@ -2371,6 +2428,8 @@ find_busiest_group(struct sched_domain *sd, int this_cpu, /* Tally up the load of all CPUs in the group */ sum_weighted_load = sum_nr_running = avg_load = 0; + max_cpu_load = 0; + min_cpu_load = ~0UL; for_each_cpu_mask(i, group->cpumask) { struct rq *rq; @@ -2391,8 +2450,13 @@ find_busiest_group(struct sched_domain *sd, int this_cpu, } load = target_load(i, load_idx); - } else + } else { load = source_load(i, load_idx); + if (load > max_cpu_load) + max_cpu_load = load; + if (min_cpu_load > load) + min_cpu_load = load; + } avg_load += load; sum_nr_running += rq->nr_running; @@ -2418,6 +2482,9 @@ find_busiest_group(struct sched_domain *sd, int this_cpu, avg_load = sg_div_cpu_power(group, avg_load * SCHED_LOAD_SCALE); + if ((max_cpu_load - min_cpu_load) > SCHED_LOAD_SCALE) + __group_imb = 1; + group_capacity = 
group->__cpu_power / SCHED_LOAD_SCALE; if (local_group) { @@ -2426,11 +2493,12 @@ find_busiest_group(struct sched_domain *sd, int this_cpu, this_nr_running = sum_nr_running; this_load_per_task = sum_weighted_load; } else if (avg_load > max_load && - sum_nr_running > group_capacity) { + (sum_nr_running > group_capacity || __group_imb)) { max_load = avg_load; busiest = group; busiest_nr_running = sum_nr_running; busiest_load_per_task = sum_weighted_load; + group_imb = __group_imb; } #if defined(CONFIG_SCHED_MC) || defined(CONFIG_SCHED_SMT) @@ -2502,6 +2570,9 @@ group_next: goto out_balanced; busiest_load_per_task /= busiest_nr_running; + if (group_imb) + busiest_load_per_task = min(busiest_load_per_task, avg_load); + /* * We're trying to get all the cpus to the average_load, so we don't * want to push ourselves above the average load, nor do we wish to @@ -3240,18 +3311,6 @@ static inline void idle_balance(int cpu, struct rq *rq) { } -/* Avoid "used but not defined" warning on UP */ -static int balance_tasks(struct rq *this_rq, int this_cpu, struct rq *busiest, - unsigned long max_nr_move, unsigned long max_load_move, - struct sched_domain *sd, enum cpu_idle_type idle, - int *all_pinned, unsigned long *load_moved, - int *this_best_prio, struct rq_iterator *iterator) -{ - *load_moved = 0; - - return 0; -} - #endif DEFINE_PER_CPU(struct kernel_stat, kstat); @@ -3284,7 +3343,6 @@ unsigned long long task_sched_runtime(struct task_struct *p) /* * Account user cpu time to a process. * @p: the process that the cpu time gets accounted to - * @hardirq_offset: the offset to subtract from hardirq_count() * @cputime: the cpu time spent in user space since the last update */ void account_user_time(struct task_struct *p, cputime_t cputime) @@ -3302,6 +3360,35 @@ void account_user_time(struct task_struct *p, cputime_t cputime) cpustat->user = cputime64_add(cpustat->user, tmp); } +/* + * Account guest cpu time to a process. + * @p: the process that the cpu time gets accounted to + * @cputime: the cpu time spent in virtual machine since the last update + */ +static void account_guest_time(struct task_struct *p, cputime_t cputime) +{ + cputime64_t tmp; + struct cpu_usage_stat *cpustat = &kstat_this_cpu.cpustat; + + tmp = cputime_to_cputime64(cputime); + + p->utime = cputime_add(p->utime, cputime); + p->gtime = cputime_add(p->gtime, cputime); + + cpustat->user = cputime64_add(cpustat->user, tmp); + cpustat->guest = cputime64_add(cpustat->guest, tmp); +} + +/* + * Account scaled user cpu time to a process. + * @p: the process that the cpu time gets accounted to + * @cputime: the cpu time spent in user space since the last update + */ +void account_user_time_scaled(struct task_struct *p, cputime_t cputime) +{ + p->utimescaled = cputime_add(p->utimescaled, cputime); +} + /* * Account system cpu time to a process. * @p: the process that the cpu time gets accounted to @@ -3315,6 +3402,9 @@ void account_system_time(struct task_struct *p, int hardirq_offset, struct rq *rq = this_rq(); cputime64_t tmp; + if ((p->flags & PF_VCPU) && (irq_count() - hardirq_offset == 0)) + return account_guest_time(p, cputime); + p->stime = cputime_add(p->stime, cputime); /* Add system time to cpustat. */ @@ -3333,6 +3423,17 @@ void account_system_time(struct task_struct *p, int hardirq_offset, acct_update_integrals(p); } +/* + * Account scaled system cpu time to a process. 
+ * @p: the process that the cpu time gets accounted to + * @hardirq_offset: the offset to subtract from hardirq_count() + * @cputime: the cpu time spent in kernel space since the last update + */ +void account_system_time_scaled(struct task_struct *p, cputime_t cputime) +{ + p->stimescaled = cputime_add(p->stimescaled, cputime); +} + /* * Account for involuntary wait time. * @p: the process from which the cpu time has been stolen @@ -3430,12 +3531,19 @@ EXPORT_SYMBOL(sub_preempt_count); */ static noinline void __schedule_bug(struct task_struct *prev) { - printk(KERN_ERR "BUG: scheduling while atomic: %s/0x%08x/%d\n", - prev->comm, preempt_count(), prev->pid); + struct pt_regs *regs = get_irq_regs(); + + printk(KERN_ERR "BUG: scheduling while atomic: %s/%d/0x%08x\n", + prev->comm, prev->pid, preempt_count()); + debug_show_held_locks(prev); if (irqs_disabled()) print_irqtrace_events(prev); - dump_stack(); + + if (regs) + show_regs(regs); + else + dump_stack(); } /* @@ -3743,7 +3851,7 @@ __wake_up_sync(wait_queue_head_t *q, unsigned int mode, int nr_exclusive) } EXPORT_SYMBOL_GPL(__wake_up_sync); /* For internal use only */ -void fastcall complete(struct completion *x) +void complete(struct completion *x) { unsigned long flags; @@ -3755,7 +3863,7 @@ void fastcall complete(struct completion *x) } EXPORT_SYMBOL(complete); -void fastcall complete_all(struct completion *x) +void complete_all(struct completion *x) { unsigned long flags; @@ -3807,13 +3915,13 @@ wait_for_common(struct completion *x, long timeout, int state) return timeout; } -void fastcall __sched wait_for_completion(struct completion *x) +void __sched wait_for_completion(struct completion *x) { wait_for_common(x, MAX_SCHEDULE_TIMEOUT, TASK_UNINTERRUPTIBLE); } EXPORT_SYMBOL(wait_for_completion); -unsigned long fastcall __sched +unsigned long __sched wait_for_completion_timeout(struct completion *x, unsigned long timeout) { return wait_for_common(x, timeout, TASK_UNINTERRUPTIBLE); @@ -3822,11 +3930,14 @@ EXPORT_SYMBOL(wait_for_completion_timeout); int __sched wait_for_completion_interruptible(struct completion *x) { - return wait_for_common(x, MAX_SCHEDULE_TIMEOUT, TASK_INTERRUPTIBLE); + long t = wait_for_common(x, MAX_SCHEDULE_TIMEOUT, TASK_INTERRUPTIBLE); + if (t == -ERESTARTSYS) + return t; + return 0; } EXPORT_SYMBOL(wait_for_completion_interruptible); -unsigned long fastcall __sched +unsigned long __sched wait_for_completion_interruptible_timeout(struct completion *x, unsigned long timeout) { @@ -4094,7 +4205,7 @@ struct task_struct *idle_task(int cpu) */ static struct task_struct *find_process_by_pid(pid_t pid) { - return pid ? find_task_by_pid(pid) : current; + return pid ? find_task_by_vpid(pid) : current; } /* Actually do priority change: must hold rq lock. */ @@ -4397,8 +4508,21 @@ long sched_setaffinity(pid_t pid, cpumask_t new_mask) cpus_allowed = cpuset_cpus_allowed(p); cpus_and(new_mask, new_mask, cpus_allowed); + again: retval = set_cpus_allowed(p, new_mask); + if (!retval) { + cpus_allowed = cpuset_cpus_allowed(p); + if (!cpus_subset(new_mask, cpus_allowed)) { + /* + * We must have raced with a concurrent cpuset + * update. 
Just reset the cpus_allowed to the + * cpuset's cpus_allowed + */ + new_mask = cpus_allowed; + goto again; + } + } out_unlock: put_task_struct(p); mutex_unlock(&sched_hotcpu_mutex); @@ -4726,17 +4850,21 @@ long sys_sched_rr_get_interval(pid_t pid, struct timespec __user *interval) if (retval) goto out_unlock; - if (p->policy == SCHED_FIFO) - time_slice = 0; - else if (p->policy == SCHED_RR) + /* + * Time slice is 0 for SCHED_FIFO tasks and for SCHED_OTHER + * tasks that are on an otherwise idle runqueue: + */ + time_slice = 0; + if (p->policy == SCHED_RR) { time_slice = DEF_TIMESLICE; - else { + } else { struct sched_entity *se = &p->se; unsigned long flags; struct rq *rq; rq = task_rq_lock(p, &flags); - time_slice = NS_TO_JIFFIES(sched_slice(cfs_rq_of(se), se)); + if (rq->cfs.load.weight) + time_slice = NS_TO_JIFFIES(sched_slice(&rq->cfs, se)); task_rq_unlock(rq, &flags); } read_unlock(&tasklist_lock); @@ -4757,18 +4885,18 @@ static void show_task(struct task_struct *p) unsigned state; state = p->state ? __ffs(p->state) + 1 : 0; - printk("%-13.13s %c", p->comm, + printk(KERN_INFO "%-13.13s %c", p->comm, state < sizeof(stat_nam) - 1 ? stat_nam[state] : '?'); #if BITS_PER_LONG == 32 if (state == TASK_RUNNING) - printk(" running "); + printk(KERN_CONT " running "); else - printk(" %08lx ", thread_saved_pc(p)); + printk(KERN_CONT " %08lx ", thread_saved_pc(p)); #else if (state == TASK_RUNNING) - printk(" running task "); + printk(KERN_CONT " running task "); else - printk(" %016lx ", thread_saved_pc(p)); + printk(KERN_CONT " %016lx ", thread_saved_pc(p)); #endif #ifdef CONFIG_DEBUG_STACK_USAGE { @@ -4778,7 +4906,8 @@ static void show_task(struct task_struct *p) free = (unsigned long)n - (unsigned long)end_of_stack(p); } #endif - printk("%5lu %5d %6d\n", free, p->pid, p->parent->pid); + printk(KERN_CONT "%5lu %5d %6d\n", free, + task_pid_nr(p), task_pid_nr(p->parent)); if (state != TASK_RUNNING) show_stack(p, NULL); @@ -4872,6 +5001,32 @@ void __cpuinit init_idle(struct task_struct *idle, int cpu) */ cpumask_t nohz_cpu_mask = CPU_MASK_NONE; +/* + * Increase the granularity value when there are more CPUs, + * because with more CPUs the 'effective latency' as visible + * to users decreases. But the relationship is not linear, + * so pick a second-best guess by going with the log2 of the + * number of CPUs. + * + * This idea comes from the SD scheduler of Con Kolivas: + */ +static inline void sched_init_granularity(void) +{ + unsigned int factor = 1 + ilog2(num_online_cpus()); + const unsigned long limit = 200000000; + + sysctl_sched_min_granularity *= factor; + if (sysctl_sched_min_granularity > limit) + sysctl_sched_min_granularity = limit; + + sysctl_sched_latency *= factor; + if (sysctl_sched_latency > limit) + sysctl_sched_latency = limit; + + sysctl_sched_wakeup_granularity *= factor; + sysctl_sched_batch_wakeup_granularity *= factor; +} + #ifdef CONFIG_SMP /* * This is how migration works: @@ -5038,8 +5193,19 @@ wait_to_die: } #ifdef CONFIG_HOTPLUG_CPU + +static int __migrate_task_irq(struct task_struct *p, int src_cpu, int dest_cpu) +{ + int ret; + + local_irq_disable(); + ret = __migrate_task(p, src_cpu, dest_cpu); + local_irq_enable(); + return ret; +} + /* - * Figure out where task on dead CPU should go, use force if neccessary. + * Figure out where task on dead CPU should go, use force if necessary. 
* NOTE: interrupts should be disabled by the caller */ static void move_task_off_dead_cpu(int dead_cpu, struct task_struct *p) @@ -5061,8 +5227,16 @@ static void move_task_off_dead_cpu(int dead_cpu, struct task_struct *p) /* No more Mr. Nice Guy. */ if (dest_cpu == NR_CPUS) { + cpumask_t cpus_allowed = cpuset_cpus_allowed_locked(p); + /* + * Try to stay on the same cpuset, where the + * current cpuset may be a subset of all cpus. + * The cpuset_cpus_allowed_locked() variant of + * cpuset_cpus_allowed() will not block. It must be + * called within calls to cpuset_lock/cpuset_unlock. + */ rq = task_rq_lock(p, &flags); - cpus_setall(p->cpus_allowed); + p->cpus_allowed = cpus_allowed; dest_cpu = any_online_cpu(p->cpus_allowed); task_rq_unlock(rq, &flags); @@ -5074,9 +5248,9 @@ static void move_task_off_dead_cpu(int dead_cpu, struct task_struct *p) if (p->mm && printk_ratelimit()) printk(KERN_INFO "process %d (%s) no " "longer affine to cpu%d\n", - p->pid, p->comm, dead_cpu); + task_pid_nr(p), p->comm, dead_cpu); } - } while (!__migrate_task(p, dead_cpu, dest_cpu)); + } while (!__migrate_task_irq(p, dead_cpu, dest_cpu)); } /* @@ -5104,7 +5278,7 @@ static void migrate_live_tasks(int src_cpu) { struct task_struct *p, *t; - write_lock_irq(&tasklist_lock); + read_lock(&tasklist_lock); do_each_thread(t, p) { if (p == current) @@ -5114,27 +5288,13 @@ static void migrate_live_tasks(int src_cpu) move_task_off_dead_cpu(src_cpu, p); } while_each_thread(t, p); - write_unlock_irq(&tasklist_lock); -} - -/* - * activate_idle_task - move idle task to the _front_ of runqueue. - */ -static void activate_idle_task(struct task_struct *p, struct rq *rq) -{ - update_rq_clock(rq); - - if (p->state == TASK_UNINTERRUPTIBLE) - rq->nr_uninterruptible--; - - enqueue_task(rq, p, 0); - inc_nr_running(p, rq); + read_unlock(&tasklist_lock); } /* * Schedules idle task to be the next runnable task on current CPU. - * It does so by boosting its priority to highest possible and adding it to - * the _front_ of the runqueue. Used by CPU offline code. + * It does so by boosting its priority to highest possible. + * Used by CPU offline code. */ void sched_idle_next(void) { @@ -5154,8 +5314,8 @@ void sched_idle_next(void) __setscheduler(rq, p, SCHED_FIFO, MAX_RT_PRIO-1); - /* Add idle task to the _front_ of its priority queue: */ - activate_idle_task(p, rq); + update_rq_clock(rq); + activate_task(rq, p, 0); spin_unlock_irqrestore(&rq->lock, flags); } @@ -5181,7 +5341,7 @@ static void migrate_dead(unsigned int dead_cpu, struct task_struct *p) struct rq *rq = cpu_rq(dead_cpu); /* Must be exiting, otherwise would be on tasklist. */ - BUG_ON(p->exit_state != EXIT_ZOMBIE && p->exit_state != EXIT_DEAD); + BUG_ON(!p->exit_state); /* Cannot have done final schedule yet: would have vanished. */ BUG_ON(p->state == TASK_DEAD); @@ -5192,11 +5352,10 @@ static void migrate_dead(unsigned int dead_cpu, struct task_struct *p) * Drop lock around migration; if someone else moves it, * that's OK. No task can be added to this CPU, so iteration is * fine. 
- * NOTE: interrupts should be left disabled --dev@ */ - spin_unlock(&rq->lock); + spin_unlock_irq(&rq->lock); move_task_off_dead_cpu(dead_cpu, p); - spin_lock(&rq->lock); + spin_lock_irq(&rq->lock); put_task_struct(p); } @@ -5227,7 +5386,7 @@ static struct ctl_table sd_ctl_dir[] = { .procname = "sched_domain", .mode = 0555, }, - {0,}, + {0, }, }; static struct ctl_table sd_ctl_root[] = { @@ -5237,7 +5396,7 @@ static struct ctl_table sd_ctl_root[] = { .mode = 0555, .child = sd_ctl_dir, }, - {0,}, + {0, }, }; static struct ctl_table *sd_alloc_ctl_entry(int n) @@ -5250,11 +5409,20 @@ static struct ctl_table *sd_alloc_ctl_entry(int n) static void sd_free_ctl_entry(struct ctl_table **tablep) { - struct ctl_table *entry = *tablep; + struct ctl_table *entry; - for (entry = *tablep; entry->procname; entry++) + /* + * In the intermediate directories, both the child directory and + * procname are dynamically allocated and could fail but the mode + * will always be set. In the lowest directory the names are + * static strings and all have proc handlers. + */ + for (entry = *tablep; entry->mode; entry++) { if (entry->child) sd_free_ctl_entry(&entry->child); + if (entry->proc_handler == NULL) + kfree(entry->procname); + } kfree(*tablep); *tablep = NULL; @@ -5303,6 +5471,7 @@ sd_alloc_ctl_domain_table(struct sched_domain *sd) sizeof(int), 0644, proc_dointvec_minmax); set_table_entry(&table[10], "flags", &sd->flags, sizeof(int), 0644, proc_dointvec_minmax); + /* &table[11] is terminator */ return table; } @@ -5339,11 +5508,12 @@ static void register_sched_domain_sysctl(void) struct ctl_table *entry = sd_alloc_ctl_entry(cpu_num + 1); char buf[32]; + WARN_ON(sd_ctl_dir[0].child); + sd_ctl_dir[0].child = entry; + if (entry == NULL) return; - sd_ctl_dir[0].child = entry; - for_each_online_cpu(i) { snprintf(buf, 32, "cpu%d", i); entry->procname = kstrdup(buf, GFP_KERNEL); @@ -5351,14 +5521,19 @@ static void register_sched_domain_sysctl(void) entry->child = sd_alloc_ctl_cpu_table(i); entry++; } + + WARN_ON(sd_sysctl_header); sd_sysctl_header = register_sysctl_table(sd_ctl_root); } +/* may be called multiple times per register */ static void unregister_sched_domain_sysctl(void) { - unregister_sysctl_table(sd_sysctl_header); + if (sd_sysctl_header) + unregister_sysctl_table(sd_sysctl_header); sd_sysctl_header = NULL; - sd_free_ctl_entry(&sd_ctl_dir[0].child); + if (sd_ctl_dir[0].child) + sd_free_ctl_entry(&sd_ctl_dir[0].child); } #else static void register_sched_domain_sysctl(void) @@ -5401,7 +5576,7 @@ migration_call(struct notifier_block *nfb, unsigned long action, void *hcpu) case CPU_ONLINE: case CPU_ONLINE_FROZEN: - /* Strictly unneccessary, as first user will wake it. */ + /* Strictly unnecessary, as first user will wake it. 
*/ wake_up_process(cpu_rq(cpu)->migration_thread); break; @@ -5419,19 +5594,21 @@ migration_call(struct notifier_block *nfb, unsigned long action, void *hcpu) case CPU_DEAD: case CPU_DEAD_FROZEN: + cpuset_lock(); /* around calls to cpuset_cpus_allowed_lock() */ migrate_live_tasks(cpu); rq = cpu_rq(cpu); kthread_stop(rq->migration_thread); rq->migration_thread = NULL; /* Idle task back to normal (off runqueue, low prio) */ - rq = task_rq_lock(rq->idle, &flags); + spin_lock_irq(&rq->lock); update_rq_clock(rq); deactivate_task(rq, rq->idle, 0); rq->idle->static_prio = MAX_PRIO; __setscheduler(rq, rq->idle, SCHED_NORMAL, 0); rq->idle->sched_class = &idle_sched_class; migrate_dead_tasks(cpu); - task_rq_unlock(rq, &flags); + spin_unlock_irq(&rq->lock); + cpuset_unlock(); migrate_nr_uninterruptible(rq); BUG_ON(rq->nr_running != 0); @@ -5465,7 +5642,7 @@ static struct notifier_block __cpuinitdata migration_notifier = { .priority = 10 }; -int __init migration_init(void) +void __init migration_init(void) { void *cpu = (void *)(long)smp_processor_id(); int err; @@ -5475,8 +5652,6 @@ int __init migration_init(void) BUG_ON(err == NOTIFY_BAD); migration_call(&migration_notifier, CPU_ONLINE, cpu); register_cpu_notifier(&migration_notifier); - - return 0; } #endif @@ -5487,101 +5662,101 @@ int nr_cpu_ids __read_mostly = NR_CPUS; EXPORT_SYMBOL(nr_cpu_ids); #ifdef CONFIG_SCHED_DEBUG -static void sched_domain_debug(struct sched_domain *sd, int cpu) + +static int sched_domain_debug_one(struct sched_domain *sd, int cpu, int level) { - int level = 0; + struct sched_group *group = sd->groups; + cpumask_t groupmask; + char str[NR_CPUS]; - if (!sd) { - printk(KERN_DEBUG "CPU%d attaching NULL sched-domain.\n", cpu); - return; + cpumask_scnprintf(str, NR_CPUS, sd->span); + cpus_clear(groupmask); + + printk(KERN_DEBUG "%*s domain %d: ", level, "", level); + + if (!(sd->flags & SD_LOAD_BALANCE)) { + printk("does not load-balance\n"); + if (sd->parent) + printk(KERN_ERR "ERROR: !SD_LOAD_BALANCE domain" + " has parent"); + return -1; } - printk(KERN_DEBUG "CPU%d attaching sched-domain:\n", cpu); + printk(KERN_CONT "span %s\n", str); + + if (!cpu_isset(cpu, sd->span)) { + printk(KERN_ERR "ERROR: domain->span does not contain " + "CPU%d\n", cpu); + } + if (!cpu_isset(cpu, group->cpumask)) { + printk(KERN_ERR "ERROR: domain->groups does not contain" + " CPU%d\n", cpu); + } + printk(KERN_DEBUG "%*s groups:", level + 1, ""); do { - int i; - char str[NR_CPUS]; - struct sched_group *group = sd->groups; - cpumask_t groupmask; - - cpumask_scnprintf(str, NR_CPUS, sd->span); - cpus_clear(groupmask); - - printk(KERN_DEBUG); - for (i = 0; i < level + 1; i++) - printk(" "); - printk("domain %d: ", level); - - if (!(sd->flags & SD_LOAD_BALANCE)) { - printk("does not load-balance\n"); - if (sd->parent) - printk(KERN_ERR "ERROR: !SD_LOAD_BALANCE domain" - " has parent"); + if (!group) { + printk("\n"); + printk(KERN_ERR "ERROR: group is NULL\n"); break; } - printk("span %s\n", str); + if (!group->__cpu_power) { + printk(KERN_CONT "\n"); + printk(KERN_ERR "ERROR: domain->cpu_power not " + "set\n"); + break; + } - if (!cpu_isset(cpu, sd->span)) - printk(KERN_ERR "ERROR: domain->span does not contain " - "CPU%d\n", cpu); - if (!cpu_isset(cpu, group->cpumask)) - printk(KERN_ERR "ERROR: domain->groups does not contain" - " CPU%d\n", cpu); + if (!cpus_weight(group->cpumask)) { + printk(KERN_CONT "\n"); + printk(KERN_ERR "ERROR: empty group\n"); + break; + } - printk(KERN_DEBUG); - for (i = 0; i < level + 2; i++) - printk(" "); - 
printk("groups:"); - do { - if (!group) { - printk("\n"); - printk(KERN_ERR "ERROR: group is NULL\n"); - break; - } + if (cpus_intersects(groupmask, group->cpumask)) { + printk(KERN_CONT "\n"); + printk(KERN_ERR "ERROR: repeated CPUs\n"); + break; + } - if (!group->__cpu_power) { - printk("\n"); - printk(KERN_ERR "ERROR: domain->cpu_power not " - "set\n"); - break; - } + cpus_or(groupmask, groupmask, group->cpumask); - if (!cpus_weight(group->cpumask)) { - printk("\n"); - printk(KERN_ERR "ERROR: empty group\n"); - break; - } + cpumask_scnprintf(str, NR_CPUS, group->cpumask); + printk(KERN_CONT " %s", str); - if (cpus_intersects(groupmask, group->cpumask)) { - printk("\n"); - printk(KERN_ERR "ERROR: repeated CPUs\n"); - break; - } + group = group->next; + } while (group != sd->groups); + printk(KERN_CONT "\n"); - cpus_or(groupmask, groupmask, group->cpumask); + if (!cpus_equal(sd->span, groupmask)) + printk(KERN_ERR "ERROR: groups don't span domain->span\n"); + + if (sd->parent && !cpus_subset(groupmask, sd->parent->span)) + printk(KERN_ERR "ERROR: parent span is not a superset " + "of domain->span\n"); + return 0; +} - cpumask_scnprintf(str, NR_CPUS, group->cpumask); - printk(" %s", str); +static void sched_domain_debug(struct sched_domain *sd, int cpu) +{ + int level = 0; - group = group->next; - } while (group != sd->groups); - printk("\n"); + if (!sd) { + printk(KERN_DEBUG "CPU%d attaching NULL sched-domain.\n", cpu); + return; + } - if (!cpus_equal(sd->span, groupmask)) - printk(KERN_ERR "ERROR: groups don't span " - "domain->span\n"); + printk(KERN_DEBUG "CPU%d attaching sched-domain:\n", cpu); + for (;;) { + if (sched_domain_debug_one(sd, cpu, level)) + break; level++; sd = sd->parent; if (!sd) - continue; - - if (!cpus_subset(groupmask, sd->span)) - printk(KERN_ERR "ERROR: parent span is not a superset " - "of domain->span\n"); - - } while (sd); + break; + } } #else # define sched_domain_debug(sd, cpu) do { } while (0) @@ -5846,7 +6021,7 @@ static int cpu_to_core_group(int cpu, const cpumask_t *cpu_map, struct sched_group **sg) { int group; - cpumask_t mask = cpu_sibling_map[cpu]; + cpumask_t mask = per_cpu(cpu_sibling_map, cpu); cpus_and(mask, mask, *cpu_map); group = first_cpu(mask); if (sg) @@ -5875,7 +6050,7 @@ static int cpu_to_phys_group(int cpu, const cpumask_t *cpu_map, cpus_and(mask, mask, *cpu_map); group = first_cpu(mask); #elif defined(CONFIG_SCHED_SMT) - cpumask_t mask = cpu_sibling_map[cpu]; + cpumask_t mask = per_cpu(cpu_sibling_map, cpu); cpus_and(mask, mask, *cpu_map); group = first_cpu(mask); #else @@ -6109,7 +6284,7 @@ static int build_sched_domains(const cpumask_t *cpu_map) p = sd; sd = &per_cpu(cpu_domains, i); *sd = SD_SIBLING_INIT; - sd->span = cpu_sibling_map[i]; + sd->span = per_cpu(cpu_sibling_map, i); cpus_and(sd->span, sd->span, *cpu_map); sd->parent = p; p->child = sd; @@ -6120,7 +6295,7 @@ static int build_sched_domains(const cpumask_t *cpu_map) #ifdef CONFIG_SCHED_SMT /* Set up CPU (sibling) groups */ for_each_cpu_mask(i, *cpu_map) { - cpumask_t this_sibling_map = cpu_sibling_map[i]; + cpumask_t this_sibling_map = per_cpu(cpu_sibling_map, i); cpus_and(this_sibling_map, this_sibling_map, *cpu_map); if (i != first_cpu(this_sibling_map)) continue; @@ -6282,23 +6457,32 @@ error: return -ENOMEM; #endif } + +static cpumask_t *doms_cur; /* current sched domains */ +static int ndoms_cur; /* number of sched domains in 'doms_cur' */ + +/* + * Special case: If a kmalloc of a doms_cur partition (array of + * cpumask_t) fails, then fallback to a single sched 
domain, + * as determined by the single cpumask_t fallback_doms. + */ +static cpumask_t fallback_doms; + /* * Set up scheduler domains and groups. Callers must hold the hotplug lock. + * For now this just excludes isolated cpus, but could be used to + * exclude other special cases in the future. */ static int arch_init_sched_domains(const cpumask_t *cpu_map) { - cpumask_t cpu_default_map; int err; - /* - * Setup mask for cpus without special case scheduling requirements. - * For now this just excludes isolated cpus, but could be used to - * exclude other special cases in the future. - */ - cpus_andnot(cpu_default_map, *cpu_map, cpu_isolated_map); - - err = build_sched_domains(&cpu_default_map); - + ndoms_cur = 1; + doms_cur = kmalloc(sizeof(cpumask_t), GFP_KERNEL); + if (!doms_cur) + doms_cur = &fallback_doms; + cpus_andnot(*doms_cur, *cpu_map, cpu_isolated_map); + err = build_sched_domains(doms_cur); register_sched_domain_sysctl(); return err; @@ -6326,32 +6510,70 @@ static void detach_destroy_domains(const cpumask_t *cpu_map) } /* - * Partition sched domains as specified by the cpumasks below. - * This attaches all cpus from the cpumasks to the NULL domain, - * waits for a RCU quiescent period, recalculates sched - * domain information and then attaches them back to the - * correct sched domains + * Partition sched domains as specified by the 'ndoms_new' + * cpumasks in the array doms_new[] of cpumasks. This compares + * doms_new[] to the current sched domain partitioning, doms_cur[]. + * It destroys each deleted domain and builds each new domain. + * + * 'doms_new' is an array of cpumask_t's of length 'ndoms_new'. + * The masks don't intersect (don't overlap.) We should setup one + * sched domain for each mask. CPUs not in any of the cpumasks will + * not be load balanced. If the same cpumask appears both in the + * current 'doms_cur' domains and in the new 'doms_new', we can leave + * it as it is. + * + * The passed in 'doms_new' should be kmalloc'd. This routine takes + * ownership of it and will kfree it when done with it. If the caller + * failed the kmalloc call, then it can pass in doms_new == NULL, + * and partition_sched_domains() will fallback to the single partition + * 'fallback_doms'. 
+ * * Call with hotplug lock held */ -int partition_sched_domains(cpumask_t *partition1, cpumask_t *partition2) +void partition_sched_domains(int ndoms_new, cpumask_t *doms_new) { - cpumask_t change_map; - int err = 0; + int i, j; - cpus_and(*partition1, *partition1, cpu_online_map); - cpus_and(*partition2, *partition2, cpu_online_map); - cpus_or(change_map, *partition1, *partition2); + /* always unregister in case we don't destroy any domains */ + unregister_sched_domain_sysctl(); - /* Detach sched domains from all of the affected cpus */ - detach_destroy_domains(&change_map); - if (!cpus_empty(*partition1)) - err = build_sched_domains(partition1); - if (!err && !cpus_empty(*partition2)) - err = build_sched_domains(partition2); + if (doms_new == NULL) { + ndoms_new = 1; + doms_new = &fallback_doms; + cpus_andnot(doms_new[0], cpu_online_map, cpu_isolated_map); + } - register_sched_domain_sysctl(); + /* Destroy deleted domains */ + for (i = 0; i < ndoms_cur; i++) { + for (j = 0; j < ndoms_new; j++) { + if (cpus_equal(doms_cur[i], doms_new[j])) + goto match1; + } + /* no match - a current sched domain not in new doms_new[] */ + detach_destroy_domains(doms_cur + i); +match1: + ; + } - return err; + /* Build new domains */ + for (i = 0; i < ndoms_new; i++) { + for (j = 0; j < ndoms_cur; j++) { + if (cpus_equal(doms_new[i], doms_cur[j])) + goto match2; + } + /* no match - add a new doms_new */ + build_sched_domains(doms_new + i); +match2: + ; + } + + /* Remember the new sched domains */ + if (doms_cur != &fallback_doms) + kfree(doms_cur); + doms_cur = doms_new; + ndoms_cur = ndoms_new; + + register_sched_domain_sysctl(); } #if defined(CONFIG_SCHED_MC) || defined(CONFIG_SCHED_SMT) @@ -6485,18 +6707,17 @@ void __init sched_init_smp(void) /* Move init over to a non-isolated CPU */ if (set_cpus_allowed(current, non_isolated_cpus) < 0) BUG(); + sched_init_granularity(); } #else void __init sched_init_smp(void) { + sched_init_granularity(); } #endif /* CONFIG_SMP */ int in_sched_functions(unsigned long addr) { - /* Linker adds these: start and end of __sched functions */ - extern char __sched_text_start[], __sched_text_end[]; - return in_lock_functions(addr) || (addr >= (unsigned long)__sched_text_start && addr < (unsigned long)__sched_text_end); @@ -6816,8 +7037,8 @@ err: /* rcu callback to free various structures associated with a task group */ static void free_sched_group(struct rcu_head *rhp) { - struct cfs_rq *cfs_rq = container_of(rhp, struct cfs_rq, rcu); - struct task_group *tg = cfs_rq->tg; + struct task_group *tg = container_of(rhp, struct task_group, rcu); + struct cfs_rq *cfs_rq; struct sched_entity *se; int i; @@ -6838,7 +7059,7 @@ static void free_sched_group(struct rcu_head *rhp) /* Destroy runqueue etc associated with a task group */ void sched_destroy_group(struct task_group *tg) { - struct cfs_rq *cfs_rq; + struct cfs_rq *cfs_rq = NULL; int i; for_each_possible_cpu(i) { @@ -6846,10 +7067,10 @@ void sched_destroy_group(struct task_group *tg) list_del_rcu(&cfs_rq->leaf_cfs_rq_list); } - cfs_rq = tg->cfs_rq[0]; + BUG_ON(!cfs_rq); /* wait for possible concurrent references to cfs_rqs complete */ - call_rcu(&cfs_rq->rcu, free_sched_group); + call_rcu(&tg->rcu, free_sched_group); } /* change task's runqueue when it moves between groups. 
@@ -6865,8 +7086,10 @@ void sched_move_task(struct task_struct *tsk) rq = task_rq_lock(tsk, &flags); - if (tsk->sched_class != &fair_sched_class) + if (tsk->sched_class != &fair_sched_class) { + set_task_cfs_rq(tsk, task_cpu(tsk)); goto done; + } update_rq_clock(rq); @@ -6879,7 +7102,7 @@ void sched_move_task(struct task_struct *tsk) tsk->sched_class->put_prev_task(rq, tsk); } - set_task_cfs_rq(tsk); + set_task_cfs_rq(tsk, task_cpu(tsk)); if (on_rq) { if (unlikely(running)) @@ -6935,3 +7158,224 @@ unsigned long sched_group_shares(struct task_group *tg) } #endif /* CONFIG_FAIR_GROUP_SCHED */ + +#ifdef CONFIG_FAIR_CGROUP_SCHED + +/* return corresponding task_group object of a cgroup */ +static inline struct task_group *cgroup_tg(struct cgroup *cgrp) +{ + return container_of(cgroup_subsys_state(cgrp, cpu_cgroup_subsys_id), + struct task_group, css); +} + +static struct cgroup_subsys_state * +cpu_cgroup_create(struct cgroup_subsys *ss, struct cgroup *cgrp) +{ + struct task_group *tg; + + if (!cgrp->parent) { + /* This is early initialization for the top cgroup */ + init_task_group.css.cgroup = cgrp; + return &init_task_group.css; + } + + /* we support only 1-level deep hierarchical scheduler atm */ + if (cgrp->parent->parent) + return ERR_PTR(-EINVAL); + + tg = sched_create_group(); + if (IS_ERR(tg)) + return ERR_PTR(-ENOMEM); + + /* Bind the cgroup to task_group object we just created */ + tg->css.cgroup = cgrp; + + return &tg->css; +} + +static void cpu_cgroup_destroy(struct cgroup_subsys *ss, + struct cgroup *cgrp) +{ + struct task_group *tg = cgroup_tg(cgrp); + + sched_destroy_group(tg); +} + +static int cpu_cgroup_can_attach(struct cgroup_subsys *ss, + struct cgroup *cgrp, struct task_struct *tsk) +{ + /* We don't support RT-tasks being in separate groups */ + if (tsk->sched_class != &fair_sched_class) + return -EINVAL; + + return 0; +} + +static void +cpu_cgroup_attach(struct cgroup_subsys *ss, struct cgroup *cgrp, + struct cgroup *old_cont, struct task_struct *tsk) +{ + sched_move_task(tsk); +} + +static int cpu_shares_write_uint(struct cgroup *cgrp, struct cftype *cftype, + u64 shareval) +{ + return sched_group_set_shares(cgroup_tg(cgrp), shareval); +} + +static u64 cpu_shares_read_uint(struct cgroup *cgrp, struct cftype *cft) +{ + struct task_group *tg = cgroup_tg(cgrp); + + return (u64) tg->shares; +} + +static struct cftype cpu_files[] = { + { + .name = "shares", + .read_uint = cpu_shares_read_uint, + .write_uint = cpu_shares_write_uint, + }, +}; + +static int cpu_cgroup_populate(struct cgroup_subsys *ss, struct cgroup *cont) +{ + return cgroup_add_files(cont, ss, cpu_files, ARRAY_SIZE(cpu_files)); +} + +struct cgroup_subsys cpu_cgroup_subsys = { + .name = "cpu", + .create = cpu_cgroup_create, + .destroy = cpu_cgroup_destroy, + .can_attach = cpu_cgroup_can_attach, + .attach = cpu_cgroup_attach, + .populate = cpu_cgroup_populate, + .subsys_id = cpu_cgroup_subsys_id, + .early_init = 1, +}; + +#endif /* CONFIG_FAIR_CGROUP_SCHED */ + +#ifdef CONFIG_CGROUP_CPUACCT + +/* + * CPU accounting code for task groups. + * + * Based on the work by Paul Menage (menage@google.com) and Balbir Singh + * (balbir@in.ibm.com). 
+ */ + +/* track cpu usage of a group of tasks */ +struct cpuacct { + struct cgroup_subsys_state css; + /* cpuusage holds pointer to a u64-type object on every cpu */ + u64 *cpuusage; +}; + +struct cgroup_subsys cpuacct_subsys; + +/* return cpu accounting group corresponding to this container */ +static inline struct cpuacct *cgroup_ca(struct cgroup *cont) +{ + return container_of(cgroup_subsys_state(cont, cpuacct_subsys_id), + struct cpuacct, css); +} + +/* return cpu accounting group to which this task belongs */ +static inline struct cpuacct *task_ca(struct task_struct *tsk) +{ + return container_of(task_subsys_state(tsk, cpuacct_subsys_id), + struct cpuacct, css); +} + +/* create a new cpu accounting group */ +static struct cgroup_subsys_state *cpuacct_create( + struct cgroup_subsys *ss, struct cgroup *cont) +{ + struct cpuacct *ca = kzalloc(sizeof(*ca), GFP_KERNEL); + + if (!ca) + return ERR_PTR(-ENOMEM); + + ca->cpuusage = alloc_percpu(u64); + if (!ca->cpuusage) { + kfree(ca); + return ERR_PTR(-ENOMEM); + } + + return &ca->css; +} + +/* destroy an existing cpu accounting group */ +static void cpuacct_destroy(struct cgroup_subsys *ss, + struct cgroup *cont) +{ + struct cpuacct *ca = cgroup_ca(cont); + + free_percpu(ca->cpuusage); + kfree(ca); +} + +/* return total cpu usage (in nanoseconds) of a group */ +static u64 cpuusage_read(struct cgroup *cont, struct cftype *cft) +{ + struct cpuacct *ca = cgroup_ca(cont); + u64 totalcpuusage = 0; + int i; + + for_each_possible_cpu(i) { + u64 *cpuusage = percpu_ptr(ca->cpuusage, i); + + /* + * Take rq->lock to make 64-bit addition safe on 32-bit + * platforms. + */ + spin_lock_irq(&cpu_rq(i)->lock); + totalcpuusage += *cpuusage; + spin_unlock_irq(&cpu_rq(i)->lock); + } + + return totalcpuusage; +} + +static struct cftype files[] = { + { + .name = "usage", + .read_uint = cpuusage_read, + }, +}; + +static int cpuacct_populate(struct cgroup_subsys *ss, struct cgroup *cont) +{ + return cgroup_add_files(cont, ss, files, ARRAY_SIZE(files)); +} + +/* + * charge this task's execution time to its accounting group. + * + * called with rq->lock held. + */ +static void cpuacct_charge(struct task_struct *tsk, u64 cputime) +{ + struct cpuacct *ca; + + if (!cpuacct_subsys.active) + return; + + ca = task_ca(tsk); + if (ca) { + u64 *cpuusage = percpu_ptr(ca->cpuusage, task_cpu(tsk)); + + *cpuusage += cputime; + } +} + +struct cgroup_subsys cpuacct_subsys = { + .name = "cpuacct", + .create = cpuacct_create, + .destroy = cpuacct_destroy, + .populate = cpuacct_populate, + .subsys_id = cpuacct_subsys_id, +}; +#endif /* CONFIG_CGROUP_CPUACCT */