/*
 * sched.c - SPU scheduler.
 *
 * Copyright (C) IBM 2005
 * Author: Mark Nutter <mnutter@us.ibm.com>
 *
 * 2006-03-31	NUMA domains added.
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License as published by
 * the Free Software Foundation; either version 2, or (at your option)
 * any later version.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program; if not, write to the Free Software
 * Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA.
 */

#include <linux/module.h>
#include <linux/errno.h>
#include <linux/sched.h>
#include <linux/kernel.h>
#include <linux/mm.h>
#include <linux/completion.h>
#include <linux/vmalloc.h>
#include <linux/smp.h>
#include <linux/smp_lock.h>
#include <linux/stddef.h>
#include <linux/unistd.h>
#include <linux/numa.h>
#include <linux/mutex.h>
#include <linux/notifier.h>

#include <asm/mmu_context.h>
#include <asm/spu.h>
#include <asm/spu_csa.h>
#include <asm/spu_priv1.h>
#include "spufs.h"

#define SPU_TIMESLICE	(HZ)

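/*
 * spu_prio_array holds one runqueue per priority level plus a bitmap of
 * the non-empty levels, so the highest priority waiting context can be
 * found with sched_find_first_bit().  The per-node active lists track
 * which physical SPUs currently have a context loaded.
 */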
struct spu_prio_array {
	DECLARE_BITMAP(bitmap, MAX_PRIO);
	struct list_head runq[MAX_PRIO];
	spinlock_t runq_lock;
	struct list_head active_list[MAX_NUMNODES];
	struct mutex active_mutex[MAX_NUMNODES];
};

static struct spu_prio_array *spu_prio;
static struct workqueue_struct *spu_sched_wq;

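/**
 * node_allowed - check if the current task may use SPUs on a node
 * @node:	node to check
 *
 * Returns true if the node has CPUs and the calling task's CPU affinity
 * mask intersects with that node.
 */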
static inline int node_allowed(int node)
{
	cpumask_t mask;

	if (!nr_cpus_node(node))
		return 0;
	mask = node_to_cpumask(node);
	if (!cpus_intersects(mask, current->cpus_allowed))
		return 0;
	return 1;
}

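/*
 * SCHED_RR contexts are time-sliced: spu_start_tick() arms a delayed work
 * item that fires after SPU_TIMESLICE and may preempt the context from
 * spu_sched_tick(); spu_stop_tick() cancels it again when the context
 * stops running.
 */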
void spu_start_tick(struct spu_context *ctx)
{
	if (ctx->policy == SCHED_RR) {
		/*
		 * Make sure the exiting bit is cleared.
		 */
		clear_bit(SPU_SCHED_EXITING, &ctx->sched_flags);
		queue_delayed_work(spu_sched_wq, &ctx->sched_work, SPU_TIMESLICE);
	}
}

void spu_stop_tick(struct spu_context *ctx)
{
	if (ctx->policy == SCHED_RR) {
		/*
		 * While the work can rearm itself normally, setting this
		 * flag makes sure it does not rearm itself anymore.
		 */
		set_bit(SPU_SCHED_EXITING, &ctx->sched_flags);
		cancel_delayed_work(&ctx->sched_work);
	}
}

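/**
 * spu_sched_tick - timeslice expiry for a SCHED_RR context
 * @work:	delayed work item embedded in the spu_context
 *
 * If a context of equal or higher priority is waiting on the runqueue,
 * deactivate this context and wake up its spu_run() loop so it gets
 * queued again; otherwise rearm the tick.
 */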
void spu_sched_tick(struct work_struct *work)
{
	struct spu_context *ctx =
		container_of(work, struct spu_context, sched_work.work);
	struct spu *spu;
	int preempted = 0;

	/*
	 * If this context is being stopped avoid rescheduling from the
	 * scheduler tick because we would block on the state_mutex.
	 * The caller will yield the spu later on anyway.
	 */
	if (test_bit(SPU_SCHED_EXITING, &ctx->sched_flags))
		return;

	mutex_lock(&ctx->state_mutex);
	spu = ctx->spu;
	if (spu) {
		int best = sched_find_first_bit(spu_prio->bitmap);
		if (best <= ctx->prio) {
			spu_deactivate(ctx);
			preempted = 1;
		}
	}
	mutex_unlock(&ctx->state_mutex);

	if (preempted)
		wake_up(&ctx->stop_wq);
	else
		spu_start_tick(ctx);
}

/**
 * spu_add_to_active_list - add spu to active list
 * @spu:	spu to add to the active list
 */
static void spu_add_to_active_list(struct spu *spu)
{
	mutex_lock(&spu_prio->active_mutex[spu->node]);
	list_add_tail(&spu->list, &spu_prio->active_list[spu->node]);
	mutex_unlock(&spu_prio->active_mutex[spu->node]);
}

/**
 * spu_remove_from_active_list - remove spu from active list
 * @spu:	spu to remove from the active list
 */
static void spu_remove_from_active_list(struct spu *spu)
{
	int node = spu->node;

	mutex_lock(&spu_prio->active_mutex[node]);
	list_del_init(&spu->list);
	mutex_unlock(&spu_prio->active_mutex[node]);
}

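/*
 * Notifier chain used to inform interested subsystems (e.g. profiling
 * support) whenever a context is switched onto or off a physical spu;
 * the context's object_id is passed as the event value.
 */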
static BLOCKING_NOTIFIER_HEAD(spu_switch_notifier);

static void spu_switch_notify(struct spu *spu, struct spu_context *ctx)
{
	blocking_notifier_call_chain(&spu_switch_notifier,
			ctx ? ctx->object_id : 0, spu);
}

int spu_switch_event_register(struct notifier_block *n)
{
	return blocking_notifier_chain_register(&spu_switch_notifier, n);
}

int spu_switch_event_unregister(struct notifier_block *n)
{
	return blocking_notifier_chain_unregister(&spu_switch_notifier, n);
}

/**
 * spu_bind_context - bind spu context to physical spu
 * @spu:	physical spu to bind to
 * @ctx:	context to bind
 */
static void spu_bind_context(struct spu *spu, struct spu_context *ctx)
{
	pr_debug("%s: pid=%d SPU=%d NODE=%d\n", __FUNCTION__, current->pid,
		 spu->number, spu->node);
	spu->ctx = ctx;
	spu->flags = 0;
	ctx->spu = spu;
	ctx->ops = &spu_hw_ops;
	spu->pid = current->pid;
	spu_associate_mm(spu, ctx->owner);
	spu->ibox_callback = spufs_ibox_callback;
	spu->wbox_callback = spufs_wbox_callback;
	spu->stop_callback = spufs_stop_callback;
	spu->mfc_callback = spufs_mfc_callback;
	spu->dma_callback = spufs_dma_callback;
	mb();
	spu_unmap_mappings(ctx);
	spu_restore(&ctx->csa, spu);
	spu->timestamp = jiffies;
	spu_cpu_affinity_set(spu, raw_smp_processor_id());
	spu_switch_notify(spu, ctx);
	spu_add_to_active_list(spu);
	ctx->state = SPU_STATE_RUNNABLE;
}

/**
 * spu_unbind_context - unbind spu context from physical spu
 * @spu:	physical spu to unbind from
 * @ctx:	context to unbind
 */
static void spu_unbind_context(struct spu *spu, struct spu_context *ctx)
{
	pr_debug("%s: unbind pid=%d SPU=%d NODE=%d\n", __FUNCTION__,
		 spu->pid, spu->number, spu->node);

	spu_remove_from_active_list(spu);
	spu_switch_notify(spu, NULL);
	spu_unmap_mappings(ctx);
	spu_save(&ctx->csa, spu);
	spu->timestamp = jiffies;
	ctx->state = SPU_STATE_SAVED;
	spu->ibox_callback = NULL;
	spu->wbox_callback = NULL;
	spu->stop_callback = NULL;
	spu->mfc_callback = NULL;
	spu->dma_callback = NULL;
	spu_associate_mm(spu, NULL);
	spu->pid = 0;
	ctx->ops = &spu_backing_ops;
	ctx->spu = NULL;
	spu->flags = 0;
	spu->ctx = NULL;
}

/**
 * spu_add_to_rq - add a context to the runqueue
 * @ctx:	context to add
 */
static void spu_add_to_rq(struct spu_context *ctx)
{
	spin_lock(&spu_prio->runq_lock);
	list_add_tail(&ctx->rq, &spu_prio->runq[ctx->prio]);
	set_bit(ctx->prio, spu_prio->bitmap);
	spin_unlock(&spu_prio->runq_lock);
}

/**
 * spu_del_from_rq - remove a context from the runqueue
 * @ctx:	context to remove
 */
static void spu_del_from_rq(struct spu_context *ctx)
{
	spin_lock(&spu_prio->runq_lock);
	list_del_init(&ctx->rq);
	if (list_empty(&spu_prio->runq[ctx->prio]))
		clear_bit(ctx->prio, spu_prio->bitmap);
	spin_unlock(&spu_prio->runq_lock);
}

/**
 * spu_grab_context - find one context on the runqueue
 * @prio:	priority of the context to look for
 *
 * This function looks up one context on the runqueue for priority @prio.
 * If there is more than one context with the given priority the first
 * one on the runqueue is taken.
 *
 * Returns the spu_context found, or NULL if the runqueue for @prio is
 * empty.
 *
 * Must be called with spu_prio->runq_lock held.
 */
static struct spu_context *spu_grab_context(int prio)
{
	struct list_head *rq = &spu_prio->runq[prio];

	if (list_empty(rq))
		return NULL;
	return list_entry(rq->next, struct spu_context, rq);
}

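/**
 * spu_prio_wait - sleep until this context may get an spu
 * @ctx:	context to wait for
 *
 * Puts the current task on @ctx->stop_wq and drops the state_mutex while
 * sleeping, so spu_reschedule() can wake it up once an spu becomes free.
 * The wait is interruptible by signals.
 */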
static void spu_prio_wait(struct spu_context *ctx)
{
	DEFINE_WAIT(wait);

	prepare_to_wait_exclusive(&ctx->stop_wq, &wait, TASK_INTERRUPTIBLE);
	if (!signal_pending(current)) {
		mutex_unlock(&ctx->state_mutex);
		schedule();
		mutex_lock(&ctx->state_mutex);
	}
	__set_current_state(TASK_RUNNING);
	remove_wait_queue(&ctx->stop_wq, &wait);
}

/**
 * spu_reschedule - try to find a runnable context for a spu
 * @spu:	spu available
 *
 * This function is called whenever a spu becomes idle.  It looks for the
 * most suitable runnable spu context and schedules it for execution.
 */
static void spu_reschedule(struct spu *spu)
{
	int best;

	spu_free(spu);

	spin_lock(&spu_prio->runq_lock);
	best = sched_find_first_bit(spu_prio->bitmap);
	if (best < MAX_PRIO) {
		struct spu_context *ctx = spu_grab_context(best);
		if (ctx)
			wake_up(&ctx->stop_wq);
	}
	spin_unlock(&spu_prio->runq_lock);
}

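/**
 * spu_get_idle - try to find a free spu for a context
 * @ctx:	context to find a free spu for
 *
 * Walks the nodes starting with the local one and returns the first idle
 * spu found on a node the current task is allowed to run on, or NULL if
 * no spu is free.
 */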
static struct spu *spu_get_idle(struct spu_context *ctx)
{
	struct spu *spu = NULL;
	int node = cpu_to_node(raw_smp_processor_id());
	int n;

	for (n = 0; n < MAX_NUMNODES; n++, node++) {
		node = (node < MAX_NUMNODES) ? node : 0;
		if (!node_allowed(node))
			continue;
		spu = spu_alloc_node(node);
		if (spu)
			break;
	}
	return spu;
}

/**
 * find_victim - find a lower priority context to preempt
 * @ctx:	candidate context for running
 *
 * Returns the freed physical spu to run the new context on.
 */
static struct spu *find_victim(struct spu_context *ctx)
{
	struct spu_context *victim = NULL;
	struct spu *spu;
	int node, n;

	/*
	 * Look for a possible preemption candidate on the local node first.
	 * If there is no candidate look at the other nodes.  This isn't
	 * exactly fair, but so far the whole spu scheduler tries to keep
	 * a strong node affinity.  We might want to fine-tune this in
	 * the future.
	 */
 restart:
	node = cpu_to_node(raw_smp_processor_id());
	for (n = 0; n < MAX_NUMNODES; n++, node++) {
		node = (node < MAX_NUMNODES) ? node : 0;
		if (!node_allowed(node))
			continue;

		mutex_lock(&spu_prio->active_mutex[node]);
		list_for_each_entry(spu, &spu_prio->active_list[node], list) {
			struct spu_context *tmp = spu->ctx;

			if (tmp->rt_priority < ctx->rt_priority &&
			    (!victim || tmp->rt_priority < victim->rt_priority))
				victim = spu->ctx;
		}
		mutex_unlock(&spu_prio->active_mutex[node]);

		if (victim) {
			/*
			 * This nests ctx->state_mutex, but we always lock
			 * higher priority contexts before lower priority
			 * ones, so this is safe until we introduce
			 * priority inheritance schemes.
			 */
			if (!mutex_trylock(&victim->state_mutex)) {
				victim = NULL;
				goto restart;
			}

			spu = victim->spu;
			if (!spu) {
				/*
				 * This race can happen because we've dropped
				 * the active list mutex.  Not a problem, just
				 * restart the search.
				 */
				mutex_unlock(&victim->state_mutex);
				victim = NULL;
				goto restart;
			}
			spu_unbind_context(spu, victim);
			mutex_unlock(&victim->state_mutex);
			return spu;
		}
	}

	return NULL;
}

/**
 * spu_activate - find a free spu for a context and execute it
 * @ctx:	spu context to schedule
 * @flags:	flags (currently ignored)
 *
 * Tries to find a free spu to run @ctx.  If no free spu is available,
 * add the context to the runqueue so it gets woken up once an spu
 * becomes available.
 */
int spu_activate(struct spu_context *ctx, unsigned long flags)
{
	if (ctx->spu)
		return 0;

	do {
		struct spu *spu;

		spu = spu_get_idle(ctx);
		/*
		 * If this is a realtime thread we try to get it running by
		 * preempting a lower priority thread.
		 */
		if (!spu && ctx->rt_priority)
			spu = find_victim(ctx);
		if (spu) {
			spu_bind_context(spu, ctx);
			return 0;
		}

		spu_add_to_rq(ctx);
		spu_prio_wait(ctx);
		spu_del_from_rq(ctx);
	} while (!signal_pending(current));

	return -ERESTARTSYS;
}

/**
 * spu_deactivate - unbind a context from its physical spu
 * @ctx:	spu context to unbind
 *
 * Unbind @ctx from the physical spu it is running on and schedule
 * the highest priority context to run on the freed physical spu.
 */
void spu_deactivate(struct spu_context *ctx)
{
	struct spu *spu = ctx->spu;

	if (spu) {
		spu_unbind_context(spu, ctx);
		spu_reschedule(spu);
	}
}

/**
 * spu_yield - yield a physical spu if others are waiting
 * @ctx:	spu context to yield
 *
 * Check if there is a higher priority context waiting and if so,
 * unbind @ctx from the physical spu and schedule the highest
 * priority context to run on the freed physical spu instead.
 */
void spu_yield(struct spu_context *ctx)
{
	struct spu *spu;
	int need_yield = 0;

	if (mutex_trylock(&ctx->state_mutex)) {
		if ((spu = ctx->spu) != NULL) {
			int best = sched_find_first_bit(spu_prio->bitmap);
			if (best < MAX_PRIO) {
				pr_debug("%s: yielding SPU %d NODE %d\n",
					 __FUNCTION__, spu->number, spu->node);
				spu_deactivate(ctx);
				need_yield = 1;
			}
		}
		mutex_unlock(&ctx->state_mutex);
	}
	if (unlikely(need_yield))
		yield();
}

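/*
 * Module init/exit: spu_sched_init() allocates the priority array and the
 * "spusched" workqueue; spu_sched_exit() releases any spus still on the
 * active lists and frees both again.
 */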
int __init spu_sched_init(void)
{
	int i;

	spu_sched_wq = create_singlethread_workqueue("spusched");
	if (!spu_sched_wq)
		return -ENOMEM;

	spu_prio = kzalloc(sizeof(struct spu_prio_array), GFP_KERNEL);
	if (!spu_prio) {
		printk(KERN_WARNING "%s: Unable to allocate priority queue.\n",
		       __FUNCTION__);
		destroy_workqueue(spu_sched_wq);
		return -ENOMEM;
	}
	for (i = 0; i < MAX_PRIO; i++) {
		INIT_LIST_HEAD(&spu_prio->runq[i]);
		__clear_bit(i, spu_prio->bitmap);
	}
	__set_bit(MAX_PRIO, spu_prio->bitmap);
	for (i = 0; i < MAX_NUMNODES; i++) {
		mutex_init(&spu_prio->active_mutex[i]);
		INIT_LIST_HEAD(&spu_prio->active_list[i]);
	}
	spin_lock_init(&spu_prio->runq_lock);
	return 0;
}

void __exit spu_sched_exit(void)
{
	struct spu *spu, *tmp;
	int node;

	for (node = 0; node < MAX_NUMNODES; node++) {
		mutex_lock(&spu_prio->active_mutex[node]);
		list_for_each_entry_safe(spu, tmp, &spu_prio->active_list[node],
					 list) {
			list_del_init(&spu->list);
			spu_free(spu);
		}
		mutex_unlock(&spu_prio->active_mutex[node]);
	}
	kfree(spu_prio);
	destroy_workqueue(spu_sched_wq);
}