X-Git-Url: https://err.no/cgi-bin/gitweb.cgi?a=blobdiff_plain;f=kernel%2Fstop_machine.c;h=319821ef78afc9e0a7488cbf9b2ea1c5853b65c1;hb=d842de871c8c5e2110c7e4f3f29bbe7b1a519ab8;hp=2c0aacc37c5513b0a3898f49e937683501f66ae2;hpb=1d77062b1402aef5b26e1d3776991126e8026bde;p=linux-2.6

diff --git a/kernel/stop_machine.c b/kernel/stop_machine.c
index 2c0aacc37c..319821ef78 100644
--- a/kernel/stop_machine.c
+++ b/kernel/stop_machine.c
@@ -1,10 +1,15 @@
-#include
-#include
-#include
+/* Copyright 2005 Rusty Russell rusty@rustcorp.com.au IBM Corporation.
+ * GPL v2 and any later version.
+ */
 #include
 #include
-#include
 #include
+#include
+#include
+#include
+#include
+#include
+
 #include
 #include
 #include
@@ -26,11 +31,13 @@ static unsigned int stopmachine_num_threads;
 static atomic_t stopmachine_thread_ack;
 static DECLARE_MUTEX(stopmachine_mutex);
 
-static int stopmachine(void *unused)
+static int stopmachine(void *cpu)
 {
 	int irqs_disabled = 0;
 	int prepared = 0;
 
+	set_cpus_allowed(current, cpumask_of_cpu((int)(long)cpu));
+
 	/* Ack: we are alive */
 	smp_mb();	/* Theoretically the ack = 0 might not be on this CPU yet. */
 	atomic_inc(&stopmachine_thread_ack);
@@ -40,6 +47,7 @@ static int stopmachine(void *unused)
 		if (stopmachine_state == STOPMACHINE_DISABLE_IRQ
 		    && !irqs_disabled) {
 			local_irq_disable();
+			hard_irq_disable();
 			irqs_disabled = 1;
 			/* Ack: irqs disabled. */
 			smp_mb(); /* Must read state first. */
@@ -84,28 +92,18 @@ static void stopmachine_set_state(enum stopmachine_state state)
 
 static int stop_machine(void)
 {
-	int ret = 0;
-	unsigned int i;
-	struct sched_param param = { .sched_priority = MAX_RT_PRIO-1 };
-
-	/* One high-prio thread per cpu.  We'll do this one. */
-	sched_setscheduler(current, SCHED_FIFO, &param);
+	int i, ret = 0;
 
 	atomic_set(&stopmachine_thread_ack, 0);
 	stopmachine_num_threads = 0;
 	stopmachine_state = STOPMACHINE_WAIT;
 
 	for_each_online_cpu(i) {
-		struct task_struct *tsk;
 		if (i == raw_smp_processor_id())
 			continue;
-		tsk = kthread_create(stopmachine, NULL, "stopmachine");
-		if (IS_ERR(tsk)) {
-			ret = PTR_ERR(tsk);
+		ret = kernel_thread(stopmachine, (void *)(long)i,CLONE_KERNEL);
+		if (ret < 0)
 			break;
-		}
-		kthread_bind(tsk, i);
-		wake_up_process(tsk);
 		stopmachine_num_threads++;
 	}
 
@@ -116,7 +114,6 @@ static int stop_machine(void)
 	/* If some failed, kill them all. */
 	if (ret < 0) {
 		stopmachine_set_state(STOPMACHINE_EXIT);
-		up(&stopmachine_mutex);
 		return ret;
 	}
 
@@ -126,6 +123,7 @@ static int stop_machine(void)
 
 	/* Make them disable irqs. */
 	local_irq_disable();
+	hard_irq_disable();
 	stopmachine_set_state(STOPMACHINE_DISABLE_IRQ);
 
 	return 0;
@@ -187,6 +185,10 @@ struct task_struct *__stop_machine_run(int (*fn)(void *), void *data,
 
 	p = kthread_create(do_stop, &smdata, "kstopmachine");
 	if (!IS_ERR(p)) {
+		struct sched_param param = { .sched_priority = MAX_RT_PRIO-1 };
+
+		/* One high-prio thread per cpu.  We'll do this one. */
+		sched_setscheduler(p, SCHED_FIFO, &param);
 		kthread_bind(p, cpu);
 		wake_up_process(p);
 		wait_for_completion(&smdata.done);
@@ -211,3 +213,4 @@ int stop_machine_run(int (*fn)(void *), void *data, unsigned int cpu)
 
 	return ret;
 }
+EXPORT_SYMBOL_GPL(stop_machine_run);