/*
 * Real-Time Scheduling Class (mapped to the SCHED_FIFO and SCHED_RR
 * policies)
 */

#ifdef CONFIG_SMP
static inline int rt_overloaded(struct rq *rq)
{
	return atomic_read(&rq->rd->rto_count);
}

static inline void rt_set_overload(struct rq *rq)
{
	cpu_set(rq->cpu, rq->rd->rto_mask);
	/*
	 * Make sure the mask is visible before we set
	 * the overload count. That is checked to determine
	 * if we should look at the mask. It would be a shame
	 * if we looked at the mask, but the mask was not
	 * updated yet.
	 */
	wmb();
	atomic_inc(&rq->rd->rto_count);
}
static inline void rt_clear_overload(struct rq *rq)
{
	/* the order here really doesn't matter */
	atomic_dec(&rq->rd->rto_count);
	cpu_clear(rq->cpu, rq->rd->rto_mask);
}
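/*
 * Note: rt_overloaded() above checks only rto_count; callers that see
 * a non-zero count then consult rto_mask. That ordering is why
 * rt_set_overload() publishes the mask bit (followed by wmb()) before
 * incrementing the count, while rt_clear_overload() may use either order.
 */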
static void update_rt_migration(struct rq *rq)
{
	if (rq->rt.rt_nr_migratory && (rq->rt.rt_nr_running > 1)) {
		if (!rq->rt.overloaded) {
			rt_set_overload(rq);
			rq->rt.overloaded = 1;
		}
	} else if (rq->rt.overloaded) {
		rt_clear_overload(rq);
		rq->rt.overloaded = 0;
	}
}
#endif /* CONFIG_SMP */
static inline struct task_struct *rt_task_of(struct sched_rt_entity *rt_se)
{
	return container_of(rt_se, struct task_struct, rt);
}

static inline int on_rt_rq(struct sched_rt_entity *rt_se)
{
	return !list_empty(&rt_se->run_list);
}
#ifdef CONFIG_RT_GROUP_SCHED

static inline u64 sched_rt_runtime(struct rt_rq *rt_rq)
{
	if (!rt_rq->tg)
		return RUNTIME_INF;

	return rt_rq->tg->rt_bandwidth.rt_runtime;
}

#define for_each_leaf_rt_rq(rt_rq, rq) \
	list_for_each_entry(rt_rq, &rq->leaf_rt_rq_list, leaf_rt_rq_list)

static inline struct rq *rq_of_rt_rq(struct rt_rq *rt_rq)
{
	return rt_rq->rq;
}

static inline struct rt_rq *rt_rq_of_se(struct sched_rt_entity *rt_se)
{
	return rt_se->rt_rq;
}

#define for_each_sched_rt_entity(rt_se) \
	for (; rt_se; rt_se = rt_se->parent)

static inline struct rt_rq *group_rt_rq(struct sched_rt_entity *rt_se)
{
	return rt_se->my_q;
}

static void enqueue_rt_entity(struct sched_rt_entity *rt_se);
static void dequeue_rt_entity(struct sched_rt_entity *rt_se);
static void sched_rt_rq_enqueue(struct rt_rq *rt_rq)
{
	struct sched_rt_entity *rt_se = rt_rq->rt_se;

	if (rt_se && !on_rt_rq(rt_se) && rt_rq->rt_nr_running) {
		struct task_struct *curr = rq_of_rt_rq(rt_rq)->curr;

		enqueue_rt_entity(rt_se);
		if (rt_rq->highest_prio < curr->prio)
			resched_task(curr);
	}
}

static void sched_rt_rq_dequeue(struct rt_rq *rt_rq)
{
	struct sched_rt_entity *rt_se = rt_rq->rt_se;

	if (rt_se && on_rt_rq(rt_se))
		dequeue_rt_entity(rt_se);
}

static inline int rt_rq_throttled(struct rt_rq *rt_rq)
{
	return rt_rq->rt_throttled && !rt_rq->rt_nr_boosted;
}
static int rt_se_boosted(struct sched_rt_entity *rt_se)
{
	struct rt_rq *rt_rq = group_rt_rq(rt_se);
	struct task_struct *p;

	if (rt_rq)
		return !!rt_rq->rt_nr_boosted;

	p = rt_task_of(rt_se);
	return p->prio != p->normal_prio;
}
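/*
 * "Boosted" means priority-inherited: a task entity is boosted while
 * rt-mutex PI has raised its effective prio above normal_prio, and a
 * group rt_rq is boosted while it holds any such entity
 * (rt_nr_boosted != 0). Boosted entities escape bandwidth throttling,
 * see rt_rq_throttled() above.
 */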
#ifdef CONFIG_SMP
static inline cpumask_t sched_rt_period_mask(void)
{
	return cpu_rq(smp_processor_id())->rd->span;
}
#else
static inline cpumask_t sched_rt_period_mask(void)
{
	return cpu_online_map;
}
#endif

static inline
struct rt_rq *sched_rt_period_rt_rq(struct rt_bandwidth *rt_b, int cpu)
{
	return container_of(rt_b, struct task_group, rt_bandwidth)->rt_rq[cpu];
}
#else /* !CONFIG_RT_GROUP_SCHED */

static inline u64 sched_rt_runtime(struct rt_rq *rt_rq)
{
	return def_rt_bandwidth.rt_runtime;
}

#define for_each_leaf_rt_rq(rt_rq, rq) \
	for (rt_rq = &rq->rt; rt_rq; rt_rq = NULL)

static inline struct rq *rq_of_rt_rq(struct rt_rq *rt_rq)
{
	return container_of(rt_rq, struct rq, rt);
}

static inline struct rt_rq *rt_rq_of_se(struct sched_rt_entity *rt_se)
{
	struct task_struct *p = rt_task_of(rt_se);
	struct rq *rq = task_rq(p);

	return &rq->rt;
}

#define for_each_sched_rt_entity(rt_se) \
	for (; rt_se; rt_se = NULL)

static inline struct rt_rq *group_rt_rq(struct sched_rt_entity *rt_se)
{
	return NULL;
}

static inline void sched_rt_rq_enqueue(struct rt_rq *rt_rq)
{
}

static inline void sched_rt_rq_dequeue(struct rt_rq *rt_rq)
{
}

static inline int rt_rq_throttled(struct rt_rq *rt_rq)
{
	return rt_rq->rt_throttled;
}

static inline cpumask_t sched_rt_period_mask(void)
{
	return cpu_online_map;
}

static inline
struct rt_rq *sched_rt_period_rt_rq(struct rt_bandwidth *rt_b, int cpu)
{
	return &cpu_rq(cpu)->rt;
}

#endif /* CONFIG_RT_GROUP_SCHED */
static int do_sched_rt_period_timer(struct rt_bandwidth *rt_b, int overrun)
{
	int i, idle = 1;
	cpumask_t span;

	if (rt_b->rt_runtime == RUNTIME_INF)
		return 1;

	span = sched_rt_period_mask();
	for_each_cpu_mask(i, span) {
		int enqueue = 0;
		struct rt_rq *rt_rq = sched_rt_period_rt_rq(rt_b, i);
		struct rq *rq = rq_of_rt_rq(rt_rq);

		spin_lock(&rq->lock);
		if (rt_rq->rt_time) {
			u64 runtime = rt_b->rt_runtime;

			rt_rq->rt_time -= min(rt_rq->rt_time, overrun*runtime);
			if (rt_rq->rt_throttled && rt_rq->rt_time < runtime) {
				rt_rq->rt_throttled = 0;
				enqueue = 1;
			}
			if (rt_rq->rt_time || rt_rq->rt_nr_running)
				idle = 0;
		}

		if (enqueue)
			sched_rt_rq_enqueue(rt_rq);
		spin_unlock(&rq->lock);
	}

	return idle;
}
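/*
 * Worked example (assuming the common defaults of a 1s rt_period and
 * 0.95s rt_runtime): a throttled rt_rq has its accumulated rt_time
 * reduced by overrun * 0.95s per expiry here; once rt_time falls back
 * below the runtime, the throttle is lifted and the rt_rq's entities
 * are put back on the run list via sched_rt_rq_enqueue().
 */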
static inline int rt_se_prio(struct sched_rt_entity *rt_se)
{
#ifdef CONFIG_RT_GROUP_SCHED
	struct rt_rq *rt_rq = group_rt_rq(rt_se);

	if (rt_rq)
		return rt_rq->highest_prio;
#endif

	return rt_task_of(rt_se)->prio;
}

static int sched_rt_runtime_exceeded(struct rt_rq *rt_rq)
{
	u64 runtime = sched_rt_runtime(rt_rq);

	if (runtime == RUNTIME_INF)
		return 0;

	if (rt_rq->rt_throttled)
		return rt_rq_throttled(rt_rq);

	if (rt_rq->rt_time > runtime) {
		rt_rq->rt_throttled = 1;
		if (rt_rq_throttled(rt_rq)) {
			sched_rt_rq_dequeue(rt_rq);
			return 1;
		}
	}

	return 0;
}
/*
 * Update the current task's runtime statistics. Skip current tasks that
 * are not in our scheduling class.
 */
static void update_curr_rt(struct rq *rq)
{
	struct task_struct *curr = rq->curr;
	struct sched_rt_entity *rt_se = &curr->rt;
	struct rt_rq *rt_rq = rt_rq_of_se(rt_se);
	u64 delta_exec;

	if (!task_has_rt_policy(curr))
		return;

	delta_exec = rq->clock - curr->se.exec_start;
	if (unlikely((s64)delta_exec < 0))
		delta_exec = 0;

	schedstat_set(curr->se.exec_max, max(curr->se.exec_max, delta_exec));

	curr->se.sum_exec_runtime += delta_exec;
	curr->se.exec_start = rq->clock;
	cpuacct_charge(curr, delta_exec);

	rt_rq->rt_time += delta_exec;
	if (sched_rt_runtime_exceeded(rt_rq))
		resched_task(curr);
}
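/*
 * Note that this is also the bandwidth enforcement point: the delta
 * just accounted is added to rt_rq->rt_time, and if that pushes the
 * rt_rq over its allotted runtime, sched_rt_runtime_exceeded()
 * throttles it and the current task is rescheduled away.
 */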
static inline
void inc_rt_tasks(struct sched_rt_entity *rt_se, struct rt_rq *rt_rq)
{
	WARN_ON(!rt_prio(rt_se_prio(rt_se)));
	rt_rq->rt_nr_running++;
#if defined CONFIG_SMP || defined CONFIG_RT_GROUP_SCHED
	if (rt_se_prio(rt_se) < rt_rq->highest_prio)
		rt_rq->highest_prio = rt_se_prio(rt_se);
#endif
#ifdef CONFIG_SMP
	if (rt_se->nr_cpus_allowed > 1) {
		struct rq *rq = rq_of_rt_rq(rt_rq);
		rq->rt.rt_nr_migratory++;
	}

	update_rt_migration(rq_of_rt_rq(rt_rq));
#endif
#ifdef CONFIG_RT_GROUP_SCHED
	if (rt_se_boosted(rt_se))
		rt_rq->rt_nr_boosted++;

	if (rt_rq->tg)
		start_rt_bandwidth(&rt_rq->tg->rt_bandwidth);
#else
	start_rt_bandwidth(&def_rt_bandwidth);
#endif
}
static inline
void dec_rt_tasks(struct sched_rt_entity *rt_se, struct rt_rq *rt_rq)
{
	WARN_ON(!rt_prio(rt_se_prio(rt_se)));
	WARN_ON(!rt_rq->rt_nr_running);
	rt_rq->rt_nr_running--;
#if defined CONFIG_SMP || defined CONFIG_RT_GROUP_SCHED
	if (rt_rq->rt_nr_running) {
		struct rt_prio_array *array;

		WARN_ON(rt_se_prio(rt_se) < rt_rq->highest_prio);
		if (rt_se_prio(rt_se) == rt_rq->highest_prio) {
			/* recalculate */
			array = &rt_rq->active;
			rt_rq->highest_prio =
				sched_find_first_bit(array->bitmap);
		} /* otherwise leave rt_rq->highest_prio alone */
	} else
		rt_rq->highest_prio = MAX_RT_PRIO;
#endif
#ifdef CONFIG_SMP
	if (rt_se->nr_cpus_allowed > 1) {
		struct rq *rq = rq_of_rt_rq(rt_rq);
		rq->rt.rt_nr_migratory--;
	}

	update_rt_migration(rq_of_rt_rq(rt_rq));
#endif /* CONFIG_SMP */
#ifdef CONFIG_RT_GROUP_SCHED
	if (rt_se_boosted(rt_se))
		rt_rq->rt_nr_boosted--;

	WARN_ON(!rt_rq->rt_nr_running && rt_rq->rt_nr_boosted);
#endif
}
static void enqueue_rt_entity(struct sched_rt_entity *rt_se)
{
	struct rt_rq *rt_rq = rt_rq_of_se(rt_se);
	struct rt_prio_array *array = &rt_rq->active;
	struct rt_rq *group_rq = group_rt_rq(rt_se);

	if (group_rq && rt_rq_throttled(group_rq))
		return;

	list_add_tail(&rt_se->run_list, array->queue + rt_se_prio(rt_se));
	__set_bit(rt_se_prio(rt_se), array->bitmap);

	inc_rt_tasks(rt_se, rt_rq);
}

static void dequeue_rt_entity(struct sched_rt_entity *rt_se)
{
	struct rt_rq *rt_rq = rt_rq_of_se(rt_se);
	struct rt_prio_array *array = &rt_rq->active;

	list_del_init(&rt_se->run_list);
	if (list_empty(array->queue + rt_se_prio(rt_se)))
		__clear_bit(rt_se_prio(rt_se), array->bitmap);

	dec_rt_tasks(rt_se, rt_rq);
}
/*
 * Because the prio of an upper entry depends on the lower
 * entries, we must remove entries top - down.
 *
 * XXX: O(1/2 h^2) because we can only walk up, not down the chain.
 *      doesn't matter much for now, as h=2 for GROUP_SCHED.
 */
static void dequeue_rt_stack(struct task_struct *p)
{
	struct sched_rt_entity *rt_se, *top_se;

	/*
	 * dequeue all, top - down.
	 */
	do {
		rt_se = &p->rt;
		top_se = NULL;
		for_each_sched_rt_entity(rt_se) {
			if (on_rt_rq(rt_se))
				top_se = rt_se;
		}
		if (top_se)
			dequeue_rt_entity(top_se);
	} while (top_se);
}
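/*
 * Example with h=2 (a task entity nested in one group entity): the
 * first do-while pass dequeues the queued group entity, the second
 * pass dequeues the task entity, and the third finds nothing on an
 * rt_rq and terminates - the O(1/2 h^2) walk noted above.
 */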
/*
 * Adding/removing a task to/from a priority array:
 */
static void enqueue_task_rt(struct rq *rq, struct task_struct *p, int wakeup)
{
	struct sched_rt_entity *rt_se = &p->rt;

	if (wakeup)
		rt_se->timeout = 0;

	dequeue_rt_stack(p);

	/*
	 * enqueue everybody, bottom - up.
	 */
	for_each_sched_rt_entity(rt_se)
		enqueue_rt_entity(rt_se);
}

static void dequeue_task_rt(struct rq *rq, struct task_struct *p, int sleep)
{
	struct sched_rt_entity *rt_se = &p->rt;
	struct rt_rq *rt_rq;

	update_curr_rt(rq);

	dequeue_rt_stack(p);

	/*
	 * re-enqueue all non-empty rt_rq entities.
	 */
	for_each_sched_rt_entity(rt_se) {
		rt_rq = group_rt_rq(rt_se);
		if (rt_rq && rt_rq->rt_nr_running)
			enqueue_rt_entity(rt_se);
	}
}
/*
 * Put task to the end of the run list without the overhead of dequeue
 * followed by enqueue.
 */
static
void requeue_rt_entity(struct rt_rq *rt_rq, struct sched_rt_entity *rt_se)
{
	struct rt_prio_array *array = &rt_rq->active;

	list_move_tail(&rt_se->run_list, array->queue + rt_se_prio(rt_se));
}

static void requeue_task_rt(struct rq *rq, struct task_struct *p)
{
	struct sched_rt_entity *rt_se = &p->rt;
	struct rt_rq *rt_rq;

	for_each_sched_rt_entity(rt_se) {
		rt_rq = rt_rq_of_se(rt_se);
		requeue_rt_entity(rt_rq, rt_se);
	}
}

static void yield_task_rt(struct rq *rq)
{
	requeue_task_rt(rq, rq->curr);
}
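/*
 * Thus sched_yield() for an RT task is just a requeue: the task moves
 * to the tail of its own priority list, yielding only to runnable
 * tasks of the same priority, never to lower-priority ones.
 */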
#ifdef CONFIG_SMP
static int find_lowest_rq(struct task_struct *task);

static int select_task_rq_rt(struct task_struct *p, int sync)
{
	struct rq *rq = task_rq(p);

	/*
	 * If the current task is an RT task, then
	 * try to see if we can wake this RT task up on another
	 * runqueue. Otherwise simply start this RT task
	 * on its current runqueue.
	 *
	 * We want to avoid overloading runqueues, even if the woken
	 * RT task is of higher priority than the current RT task.
	 * RT tasks behave differently than other tasks. If
	 * one gets preempted, we try to push it off to another queue.
	 * So trying to keep a preempting RT task on the same
	 * cache hot CPU will force the running RT task to
	 * a cold CPU. So we waste all the cache for the lower
	 * RT task in hopes of saving some of an RT task
	 * that is just being woken and probably will have
	 * cold cache anyway.
	 */
	if (unlikely(rt_task(rq->curr)) &&
	    (p->rt.nr_cpus_allowed > 1)) {
		int cpu = find_lowest_rq(p);

		return (cpu == -1) ? task_cpu(p) : cpu;
	}

	/*
	 * Otherwise, just let it ride on the affined RQ and the
	 * post-schedule router will push the preempted task away.
	 */
	return task_cpu(p);
}
#endif /* CONFIG_SMP */
/*
 * Preempt the current task with a newly woken task if needed:
 */
static void check_preempt_curr_rt(struct rq *rq, struct task_struct *p)
{
	if (p->prio < rq->curr->prio)
		resched_task(rq->curr);
}

static struct sched_rt_entity *pick_next_rt_entity(struct rq *rq,
						   struct rt_rq *rt_rq)
{
	struct rt_prio_array *array = &rt_rq->active;
	struct sched_rt_entity *next = NULL;
	struct list_head *queue;
	int idx;

	idx = sched_find_first_bit(array->bitmap);
	BUG_ON(idx >= MAX_RT_PRIO);

	queue = array->queue + idx;
	next = list_entry(queue->next, struct sched_rt_entity, run_list);

	return next;
}
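/*
 * The pick is O(1): sched_find_first_bit() finds the lowest set bit in
 * the MAX_RT_PRIO-bit bitmap, i.e. the highest priority with queued
 * entities. E.g. with entities queued at prio 3 and prio 40, idx = 3
 * and the head of queue[3] is chosen.
 */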
static struct task_struct *pick_next_task_rt(struct rq *rq)
{
	struct sched_rt_entity *rt_se;
	struct task_struct *p;
	struct rt_rq *rt_rq;

	rt_rq = &rq->rt;

	if (unlikely(!rt_rq->rt_nr_running))
		return NULL;

	if (rt_rq_throttled(rt_rq))
		return NULL;

	do {
		rt_se = pick_next_rt_entity(rq, rt_rq);
		BUG_ON(!rt_se);
		rt_rq = group_rt_rq(rt_se);
	} while (rt_rq);

	p = rt_task_of(rt_se);
	p->se.exec_start = rq->clock;

	return p;
}

static void put_prev_task_rt(struct rq *rq, struct task_struct *p)
{
	update_curr_rt(rq);
	p->se.exec_start = 0;
}
#ifdef CONFIG_SMP

/* Only try algorithms three times */
#define RT_MAX_TRIES 3

static int double_lock_balance(struct rq *this_rq, struct rq *busiest);
static void deactivate_task(struct rq *rq, struct task_struct *p, int sleep);

static int pick_rt_task(struct rq *rq, struct task_struct *p, int cpu)
{
	if (!task_running(rq, p) &&
	    (cpu < 0 || cpu_isset(cpu, p->cpus_allowed)) &&
	    (p->rt.nr_cpus_allowed > 1))
		return 1;
	return 0;
}
/* Return the second highest RT task, NULL otherwise */
static struct task_struct *pick_next_highest_task_rt(struct rq *rq, int cpu)
{
	struct task_struct *next = NULL;
	struct sched_rt_entity *rt_se;
	struct rt_prio_array *array;
	struct rt_rq *rt_rq;
	int idx;

	for_each_leaf_rt_rq(rt_rq, rq) {
		array = &rt_rq->active;
		idx = sched_find_first_bit(array->bitmap);
 next_idx:
		if (idx >= MAX_RT_PRIO)
			continue;
		if (next && next->prio < idx)
			continue;
		list_for_each_entry(rt_se, array->queue + idx, run_list) {
			struct task_struct *p = rt_task_of(rt_se);
			if (pick_rt_task(rq, p, cpu)) {
				next = p;
				break;
			}
		}
		if (!next) {
			idx = find_next_bit(array->bitmap, MAX_RT_PRIO, idx+1);
			goto next_idx;
		}
	}

	return next;
}
static DEFINE_PER_CPU(cpumask_t, local_cpu_mask);

static int find_lowest_cpus(struct task_struct *task, cpumask_t *lowest_mask)
{
	int lowest_prio = -1;
	int lowest_cpu = -1;
	int count = 0;
	int cpu;

	cpus_and(*lowest_mask, task_rq(task)->rd->online, task->cpus_allowed);

	/*
	 * Scan each rq for the lowest prio.
	 */
	for_each_cpu_mask(cpu, *lowest_mask) {
		struct rq *rq = cpu_rq(cpu);

		/* We look for lowest RT prio or non-rt CPU */
		if (rq->rt.highest_prio >= MAX_RT_PRIO) {
			/*
			 * if we already found a low RT queue
			 * and now we found this non-rt queue
			 * clear the mask and set our bit.
			 * Otherwise just return the queue as is
			 * and the count==1 will cause the algorithm
			 * to use the first bit found.
			 */
			if (lowest_cpu != -1) {
				cpus_clear(*lowest_mask);
				cpu_set(rq->cpu, *lowest_mask);
			}
			return 1;
		}

		/* no locking for now */
		if ((rq->rt.highest_prio > task->prio)
		    && (rq->rt.highest_prio >= lowest_prio)) {
			if (rq->rt.highest_prio > lowest_prio) {
				/* new low - clear old data */
				lowest_prio = rq->rt.highest_prio;
				lowest_cpu = cpu;
				count = 0;
			}
			count++;
		} else
			cpu_clear(cpu, *lowest_mask);
	}

	/*
	 * Clear out all the set bits that represent
	 * runqueues that were of higher prio than
	 * the lowest_prio.
	 */
	if (lowest_cpu > 0) {
		/*
		 * Perhaps we could add another cpumask op to
		 * zero out bits. Like cpu_zero_bits(cpumask, nrbits);
		 * Then that could be optimized to use memset and such.
		 */
		for_each_cpu_mask(cpu, *lowest_mask) {
			if (cpu >= lowest_cpu)
				break;
			cpu_clear(cpu, *lowest_mask);
		}
	}

	return count;
}
static inline int pick_optimal_cpu(int this_cpu, cpumask_t *mask)
{
	int first;

	/* "this_cpu" is cheaper to preempt than a remote processor */
	if ((this_cpu != -1) && cpu_isset(this_cpu, *mask))
		return this_cpu;

	first = first_cpu(*mask);
	if (first != NR_CPUS)
		return first;

	return -1;
}
static int find_lowest_rq(struct task_struct *task)
{
	struct sched_domain *sd;
	cpumask_t *lowest_mask = &__get_cpu_var(local_cpu_mask);
	int this_cpu = smp_processor_id();
	int cpu = task_cpu(task);
	int count = find_lowest_cpus(task, lowest_mask);

	if (!count)
		return -1; /* No targets found */

	/*
	 * There is no sense in performing an optimal search if only one
	 * target is found.
	 */
	if (count == 1)
		return first_cpu(*lowest_mask);

	/*
	 * At this point we have built a mask of cpus representing the
	 * lowest priority tasks in the system. Now we want to elect
	 * the best one based on our affinity and topology.
	 *
	 * We prioritize the last cpu that the task executed on since
	 * it is most likely cache-hot in that location.
	 */
	if (cpu_isset(cpu, *lowest_mask))
		return cpu;

	/*
	 * Otherwise, we consult the sched_domains span maps to figure
	 * out which cpu is logically closest to our hot cache data.
	 */
	if (this_cpu == cpu)
		this_cpu = -1; /* Skip this_cpu opt if the same */

	for_each_domain(cpu, sd) {
		if (sd->flags & SD_WAKE_AFFINE) {
			cpumask_t domain_mask;
			int best_cpu;

			cpus_and(domain_mask, sd->span, *lowest_mask);

			best_cpu = pick_optimal_cpu(this_cpu,
						    &domain_mask);
			if (best_cpu != -1)
				return best_cpu;
		}
	}

	/*
	 * And finally, if there were no matches within the domains
	 * just give the caller *something* to work with from the compatible
	 * locations.
	 */
	return pick_optimal_cpu(this_cpu, lowest_mask);
}
/* Will lock the rq it finds */
static struct rq *find_lock_lowest_rq(struct task_struct *task, struct rq *rq)
{
	struct rq *lowest_rq = NULL;
	int tries;
	int cpu;

	for (tries = 0; tries < RT_MAX_TRIES; tries++) {
		cpu = find_lowest_rq(task);

		if ((cpu == -1) || (cpu == rq->cpu))
			break;

		lowest_rq = cpu_rq(cpu);

		/* if the prio of this runqueue changed, try again */
		if (double_lock_balance(rq, lowest_rq)) {
			/*
			 * We had to unlock the run queue. In
			 * the mean time, task could have
			 * migrated already or had its affinity changed.
			 * Also make sure that it wasn't scheduled on its rq.
			 */
			if (unlikely(task_rq(task) != rq ||
				     !cpu_isset(lowest_rq->cpu,
						task->cpus_allowed) ||
				     task_running(rq, task) ||
				     !task->se.on_rq)) {

				spin_unlock(&lowest_rq->lock);
				lowest_rq = NULL;
				break;
			}
		}

		/* If this rq is still suitable use it. */
		if (lowest_rq->rt.highest_prio > task->prio)
			break;

		/* try again */
		spin_unlock(&lowest_rq->lock);
		lowest_rq = NULL;
	}

	return lowest_rq;
}
/*
 * If the current CPU has more than one RT task, see if the non
 * running task can migrate over to a CPU that is running a task
 * of lesser priority.
 */
static int push_rt_task(struct rq *rq)
{
	struct task_struct *next_task;
	struct rq *lowest_rq;
	int ret = 0;
	int paranoid = RT_MAX_TRIES;

	if (!rq->rt.overloaded)
		return 0;

	next_task = pick_next_highest_task_rt(rq, -1);
	if (!next_task)
		return 0;

 retry:
	if (unlikely(next_task == rq->curr)) {
		WARN_ON(1);
		return 0;
	}

	/*
	 * It's possible that the next_task slipped in of
	 * higher priority than current. If that's the case
	 * just reschedule current.
	 */
	if (unlikely(next_task->prio < rq->curr->prio)) {
		resched_task(rq->curr);
		return 0;
	}

	/* We might release rq lock */
	get_task_struct(next_task);

	/* find_lock_lowest_rq locks the rq if found */
	lowest_rq = find_lock_lowest_rq(next_task, rq);
	if (!lowest_rq) {
		struct task_struct *task;
		/*
		 * find_lock_lowest_rq releases rq->lock
		 * so it is possible that next_task has changed.
		 * If it has, then try again.
		 */
		task = pick_next_highest_task_rt(rq, -1);
		if (unlikely(task != next_task) && task && paranoid--) {
			put_task_struct(next_task);
			next_task = task;
			goto retry;
		}
		goto out;
	}

	deactivate_task(rq, next_task, 0);
	set_task_cpu(next_task, lowest_rq->cpu);
	activate_task(lowest_rq, next_task, 0);

	resched_task(lowest_rq->curr);

	spin_unlock(&lowest_rq->lock);

	ret = 1;
out:
	put_task_struct(next_task);

	return ret;
}
/*
 * TODO: Currently we just use the second highest prio task on
 *       the queue, and stop when it can't migrate (or there's
 *       no more RT tasks). There may be a case where a lower
 *       priority RT task has a different affinity than the
 *       higher RT task. In this case the lower RT task could
 *       possibly be able to migrate whereas the higher priority
 *       RT task could not. We currently ignore this issue.
 *       Enhancements are welcome!
 */
static void push_rt_tasks(struct rq *rq)
{
	/* push_rt_task will return true if it moved an RT */
	while (push_rt_task(rq))
		;
}
static int pull_rt_task(struct rq *this_rq)
{
	int this_cpu = this_rq->cpu, ret = 0, cpu;
	struct task_struct *p, *next;
	struct rq *src_rq;

	if (likely(!rt_overloaded(this_rq)))
		return 0;

	next = pick_next_task_rt(this_rq);

	for_each_cpu_mask(cpu, this_rq->rd->rto_mask) {
		if (this_cpu == cpu)
			continue;

		src_rq = cpu_rq(cpu);
		/*
		 * We can potentially drop this_rq's lock in
		 * double_lock_balance, and another CPU could
		 * steal our next task - hence we must cause
		 * the caller to recalculate the next task
		 * in that case:
		 */
		if (double_lock_balance(this_rq, src_rq)) {
			struct task_struct *old_next = next;

			next = pick_next_task_rt(this_rq);
			if (next != old_next)
				ret = 1;
		}

		/*
		 * Are there still pullable RT tasks?
		 */
		if (src_rq->rt.rt_nr_running <= 1)
			goto skip;

		p = pick_next_highest_task_rt(src_rq, this_cpu);

		/*
		 * Do we have an RT task that preempts
		 * the to-be-scheduled task?
		 */
		if (p && (!next || (p->prio < next->prio))) {
			WARN_ON(p == src_rq->curr);
			WARN_ON(!p->se.on_rq);

			/*
			 * There's a chance that p is higher in priority
			 * than what's currently running on its cpu.
			 * This is just that p is waking up and hasn't
			 * had a chance to schedule. We only pull
			 * p if it is lower in priority than the
			 * current task on the run queue or
			 * this_rq's next task is lower in prio than
			 * the current task on that rq.
			 */
			if (p->prio < src_rq->curr->prio ||
			    (next && next->prio < src_rq->curr->prio))
				goto skip;

			ret = 1;

			deactivate_task(src_rq, p, 0);
			set_task_cpu(p, this_cpu);
			activate_task(this_rq, p, 0);
			/*
			 * We continue with the search, just in
			 * case there's an even higher prio task
			 * in another runqueue. (low likelihood
			 * but possible)
			 *
			 * Update next so that we won't pick a task
			 * on another cpu with a priority lower (or equal)
			 * than the one we just picked.
			 */
			next = p;
		}
 skip:
		spin_unlock(&src_rq->lock);
	}

	return ret;
}
static void pre_schedule_rt(struct rq *rq, struct task_struct *prev)
{
	/* Try to pull RT tasks here if we lower this rq's prio */
	if (unlikely(rt_task(prev)) && rq->rt.highest_prio > prev->prio)
		pull_rt_task(rq);
}

static void post_schedule_rt(struct rq *rq)
{
	/*
	 * If we have more than one rt_task queued, then
	 * see if we can push the other rt_tasks off to other CPUS.
	 * Note we may release the rq lock, and since
	 * the lock was owned by prev, we need to release it
	 * first via finish_lock_switch and then reacquire it here.
	 */
	if (unlikely(rq->rt.overloaded)) {
		spin_lock_irq(&rq->lock);
		push_rt_tasks(rq);
		spin_unlock_irq(&rq->lock);
	}
}
static void task_wake_up_rt(struct rq *rq, struct task_struct *p)
{
	if (!task_running(rq, p) &&
	    (p->prio >= rq->rt.highest_prio) &&
	    rq->rt.overloaded)
		push_rt_tasks(rq);
}

static unsigned long
load_balance_rt(struct rq *this_rq, int this_cpu, struct rq *busiest,
		unsigned long max_load_move,
		struct sched_domain *sd, enum cpu_idle_type idle,
		int *all_pinned, int *this_best_prio)
{
	/* don't touch RT tasks */
	return 0;
}
static int
move_one_task_rt(struct rq *this_rq, int this_cpu, struct rq *busiest,
		 struct sched_domain *sd, enum cpu_idle_type idle)
{
	/* don't touch RT tasks */
	return 0;
}

static void set_cpus_allowed_rt(struct task_struct *p, cpumask_t *new_mask)
{
	int weight = cpus_weight(*new_mask);

	BUG_ON(!rt_task(p));

	/*
	 * Update the migration status of the RQ if we have an RT task
	 * which is running AND changing its weight value.
	 */
	if (p->se.on_rq && (weight != p->rt.nr_cpus_allowed)) {
		struct rq *rq = task_rq(p);

		if ((p->rt.nr_cpus_allowed <= 1) && (weight > 1)) {
			rq->rt.rt_nr_migratory++;
		} else if ((p->rt.nr_cpus_allowed > 1) && (weight <= 1)) {
			BUG_ON(!rq->rt.rt_nr_migratory);
			rq->rt.rt_nr_migratory--;
		}

		update_rt_migration(rq);
	}

	p->cpus_allowed = *new_mask;
	p->rt.nr_cpus_allowed = weight;
}
/* Assumes rq->lock is held */
static void join_domain_rt(struct rq *rq)
{
	if (rq->rt.overloaded)
		rt_set_overload(rq);
}

/* Assumes rq->lock is held */
static void leave_domain_rt(struct rq *rq)
{
	if (rq->rt.overloaded)
		rt_clear_overload(rq);
}
/*
 * When switching from the rt queue, we bring ourselves to a position
 * that we might want to pull RT tasks from other runqueues.
 */
static void switched_from_rt(struct rq *rq, struct task_struct *p,
			     int running)
{
	/*
	 * If there are other RT tasks then we will reschedule
	 * and the scheduling of the other RT tasks will handle
	 * the balancing. But if we are the last RT task
	 * we may need to handle the pulling of RT tasks
	 * now.
	 */
	if (!rq->rt.rt_nr_running)
		pull_rt_task(rq);
}
#endif /* CONFIG_SMP */
/*
 * When switching a task to RT, we may overload the runqueue
 * with RT tasks. In this case we try to push them off to
 * other runqueues.
 */
static void switched_to_rt(struct rq *rq, struct task_struct *p,
			   int running)
{
	int check_resched = 1;

	/*
	 * If we are already running, then there's nothing
	 * that needs to be done. But if we are not running
	 * we may need to preempt the current running task.
	 * If that current running task is also an RT task
	 * then see if we can move to another run queue.
	 */
	if (!running) {
#ifdef CONFIG_SMP
		if (rq->rt.overloaded && push_rt_task(rq) &&
		    /* Don't resched if we changed runqueues */
		    rq != task_rq(p))
			check_resched = 0;
#endif /* CONFIG_SMP */
		if (check_resched && p->prio < rq->curr->prio)
			resched_task(rq->curr);
	}
}
/*
 * Priority of the task has changed. This may cause
 * us to initiate a push or pull.
 */
static void prio_changed_rt(struct rq *rq, struct task_struct *p,
			    int oldprio, int running)
{
	if (running) {
#ifdef CONFIG_SMP
		/*
		 * If our priority decreases while running, we
		 * may need to pull tasks to this runqueue.
		 */
		if (oldprio < p->prio)
			pull_rt_task(rq);
		/*
		 * If there's a higher priority task waiting to run
		 * then reschedule. Note, the above pull_rt_task
		 * can release the rq lock and p could migrate.
		 * Only reschedule if p is still on the same runqueue.
		 */
		if (p->prio > rq->rt.highest_prio && rq->curr == p)
			resched_task(p);
#else
		/* For UP simply resched on drop of prio */
		if (oldprio < p->prio)
			resched_task(p);
#endif /* CONFIG_SMP */
	} else {
		/*
		 * This task is not running, but if its priority is
		 * higher than that of the current running task
		 * then reschedule.
		 */
		if (p->prio < rq->curr->prio)
			resched_task(rq->curr);
	}
}
static void watchdog(struct rq *rq, struct task_struct *p)
{
	unsigned long soft, hard;

	if (!p->signal)
		return;

	soft = p->signal->rlim[RLIMIT_RTTIME].rlim_cur;
	hard = p->signal->rlim[RLIMIT_RTTIME].rlim_max;

	if (soft != RLIM_INFINITY) {
		unsigned long next;

		p->rt.timeout++;
		next = DIV_ROUND_UP(min(soft, hard), USEC_PER_SEC/HZ);
		if (p->rt.timeout > next)
			p->it_sched_expires = p->se.sum_exec_runtime;
	}
}
static void task_tick_rt(struct rq *rq, struct task_struct *p, int queued)
{
	update_curr_rt(rq);

	watchdog(rq, p);

	/*
	 * RR tasks need a special form of timeslice management.
	 * FIFO tasks have no timeslices.
	 */
	if (p->policy != SCHED_RR)
		return;

	if (--p->rt.time_slice)
		return;

	p->rt.time_slice = DEF_TIMESLICE;

	/*
	 * Requeue to the end of queue if we are not the only element
	 * on the queue:
	 */
	if (p->rt.run_list.prev != p->rt.run_list.next) {
		requeue_task_rt(rq, p);
		set_tsk_need_resched(p);
	}
}
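/*
 * Only SCHED_RR consumes a timeslice here; SCHED_FIFO returns early
 * above. On expiry the slice is refilled with DEF_TIMESLICE (defined
 * elsewhere; nominally 100ms worth of ticks) and the task is rotated
 * to the tail of its priority queue if it shares the queue with others.
 */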
static void set_curr_task_rt(struct rq *rq)
{
	struct task_struct *p = rq->curr;

	p->se.exec_start = rq->clock;
}
const struct sched_class rt_sched_class = {
	.next			= &fair_sched_class,
	.enqueue_task		= enqueue_task_rt,
	.dequeue_task		= dequeue_task_rt,
	.yield_task		= yield_task_rt,
#ifdef CONFIG_SMP
	.select_task_rq		= select_task_rq_rt,
#endif /* CONFIG_SMP */

	.check_preempt_curr	= check_preempt_curr_rt,

	.pick_next_task		= pick_next_task_rt,
	.put_prev_task		= put_prev_task_rt,

#ifdef CONFIG_SMP
	.load_balance		= load_balance_rt,
	.move_one_task		= move_one_task_rt,
	.set_cpus_allowed	= set_cpus_allowed_rt,
	.join_domain		= join_domain_rt,
	.leave_domain		= leave_domain_rt,
	.pre_schedule		= pre_schedule_rt,
	.post_schedule		= post_schedule_rt,
	.task_wake_up		= task_wake_up_rt,
	.switched_from		= switched_from_rt,
#endif

	.set_curr_task		= set_curr_task_rt,
	.task_tick		= task_tick_rt,

	.prio_changed		= prio_changed_rt,
	.switched_to		= switched_to_rt,
};
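/*
 * For illustration (not part of this file): tasks enter this class
 * from userspace via sched_setscheduler(), e.g.
 *
 *	struct sched_param param = { .sched_priority = 50 };
 *	sched_setscheduler(0, SCHED_FIFO, &param);	(or SCHED_RR)
 *
 * after which the enqueue/pick/tick hooks above take over.
 */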