workqueues: shift kthread_bind() from CPU_UP_PREPARE to CPU_ONLINE
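In short: the per-cpu worker thread is still created at CPU_UP_PREPARE, but the
kthread_bind() and the initial wake_up_process() now live in a new helper,
start_workqueue_thread(), which runs once the CPU is actually online (or with
cpu == -1 for the unbound single-threaded case). A rough sketch of the resulting
notifier flow, condensed from the hunks below rather than quoted verbatim:

	/* CPU_UP_PREPARE: create the kthread only; do not bind or wake it yet */
	case CPU_UP_PREPARE:
		if (create_workqueue_thread(cwq, cpu))
			return NOTIFY_BAD;
		break;

	/* CPU_ONLINE: the cpu is up, so binding is safe; bind and wake */
	case CPU_ONLINE:
		start_workqueue_thread(cwq, cpu);
		break;

	/* CPU_UP_CANCELED: wake the still-unbound thread (cpu == -1 skips the
	 * bind) so that cleanup_workqueue_thread() can stop it, then fall
	 * through to the CPU_DEAD cleanup.
	 */
	case CPU_UP_CANCELED:
		start_workqueue_thread(cwq, -1);
	case CPU_DEAD:
		cleanup_workqueue_thread(cwq, cpu);
		break;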
diff --git a/kernel/workqueue.c b/kernel/workqueue.c
index ea422254f8bf3a9eb463250570605e27fe362220..7d1ebfc1a995cc1f2be53a283259c4a169c5f686 100644
--- a/kernel/workqueue.c
+++ b/kernel/workqueue.c
@@ -58,8 +58,9 @@ struct cpu_workqueue_struct {
  */
 struct workqueue_struct {
        struct cpu_workqueue_struct *cpu_wq;
+       struct list_head list;
        const char *name;
-       struct list_head list;  /* Empty if single thread */
+       int singlethread;
        int freezeable;         /* Freeze threads during suspend */
 };
 
@@ -69,13 +70,20 @@ static DEFINE_MUTEX(workqueue_mutex);
 static LIST_HEAD(workqueues);
 
 static int singlethread_cpu __read_mostly;
+static cpumask_t cpu_singlethread_map __read_mostly;
 /* optimization, we could use cpu_possible_map */
 static cpumask_t cpu_populated_map __read_mostly;
 
 /* If it's single threaded, it isn't in the list of workqueues. */
 static inline int is_single_threaded(struct workqueue_struct *wq)
 {
-       return list_empty(&wq->list);
+       return wq->singlethread;
+}
+
+static const cpumask_t *wq_cpu_map(struct workqueue_struct *wq)
+{
+       return is_single_threaded(wq)
+               ? &cpu_singlethread_map : &cpu_populated_map;
 }
 
 /*
@@ -227,13 +235,7 @@ EXPORT_SYMBOL_GPL(queue_delayed_work_on);
 
 static void run_workqueue(struct cpu_workqueue_struct *cwq)
 {
-       unsigned long flags;
-
-       /*
-        * Keep taking off work from the queue until
-        * done.
-        */
-       spin_lock_irqsave(&cwq->lock, flags);
+       spin_lock_irq(&cwq->lock);
        cwq->run_depth++;
        if (cwq->run_depth > 3) {
                /* morton gets to eat his hat */
@@ -248,7 +250,7 @@ static void run_workqueue(struct cpu_workqueue_struct *cwq)
 
                cwq->current_work = work;
                list_del_init(cwq->worklist.next);
-               spin_unlock_irqrestore(&cwq->lock, flags);
+               spin_unlock_irq(&cwq->lock);
 
                BUG_ON(get_wq_data(work) != cwq);
                if (!test_bit(WORK_STRUCT_NOAUTOREL, work_data_bits(work)))
@@ -266,11 +268,11 @@ static void run_workqueue(struct cpu_workqueue_struct *cwq)
                        dump_stack();
                }
 
-               spin_lock_irqsave(&cwq->lock, flags);
+               spin_lock_irq(&cwq->lock);
                cwq->current_work = NULL;
        }
        cwq->run_depth--;
-       spin_unlock_irqrestore(&cwq->lock, flags);
+       spin_unlock_irq(&cwq->lock);
 }
 
 /*
@@ -399,14 +401,12 @@ static void flush_cpu_workqueue(struct cpu_workqueue_struct *cwq)
  */
 void fastcall flush_workqueue(struct workqueue_struct *wq)
 {
-       if (is_single_threaded(wq))
-               flush_cpu_workqueue(per_cpu_ptr(wq->cpu_wq, singlethread_cpu));
-       else {
-               int cpu;
+       const cpumask_t *cpu_map = wq_cpu_map(wq);
+       int cpu;
 
-               for_each_cpu_mask(cpu, cpu_populated_map)
-                       flush_cpu_workqueue(per_cpu_ptr(wq->cpu_wq, cpu));
-       }
+       might_sleep();
+       for_each_cpu_mask(cpu, *cpu_map)
+               flush_cpu_workqueue(per_cpu_ptr(wq->cpu_wq, cpu));
 }
 EXPORT_SYMBOL_GPL(flush_workqueue);
 
@@ -443,7 +443,11 @@ static void wait_on_work(struct cpu_workqueue_struct *cwq,
  */
 void flush_work(struct workqueue_struct *wq, struct work_struct *work)
 {
+       const cpumask_t *cpu_map = wq_cpu_map(wq);
        struct cpu_workqueue_struct *cwq;
+       int cpu;
+
+       might_sleep();
 
        cwq = get_wq_data(work);
        /* Was it ever queued ? */
@@ -459,14 +463,8 @@ void flush_work(struct workqueue_struct *wq, struct work_struct *work)
        work_release(work);
        spin_unlock_irq(&cwq->lock);
 
-       if (is_single_threaded(wq))
-               wait_on_work(per_cpu_ptr(wq->cpu_wq, singlethread_cpu), work);
-       else {
-               int cpu;
-
-               for_each_cpu_mask(cpu, cpu_populated_map)
-                       wait_on_work(per_cpu_ptr(wq->cpu_wq, cpu), work);
-       }
+       for_each_cpu_mask(cpu, *cpu_map)
+               wait_on_work(per_cpu_ptr(wq->cpu_wq, cpu), work);
 }
 EXPORT_SYMBOL_GPL(flush_work);
 
@@ -571,6 +569,10 @@ EXPORT_SYMBOL(flush_work_keventd);
 void cancel_rearming_delayed_workqueue(struct workqueue_struct *wq,
                                       struct delayed_work *dwork)
 {
+       /* Was it ever queued ? */
+       if (!get_wq_data(&dwork->work))
+               return;
+
        while (!cancel_delayed_work(dwork))
                flush_workqueue(wq);
 }
@@ -666,15 +668,21 @@ static int create_workqueue_thread(struct cpu_workqueue_struct *cwq, int cpu)
 
        cwq->thread = p;
        cwq->should_stop = 0;
-       if (!is_single_threaded(wq))
-               kthread_bind(p, cpu);
-
-       if (is_single_threaded(wq) || cpu_online(cpu))
-               wake_up_process(p);
 
        return 0;
 }
 
+static void start_workqueue_thread(struct cpu_workqueue_struct *cwq, int cpu)
+{
+       struct task_struct *p = cwq->thread;
+
+       if (p != NULL) {
+               if (cpu >= 0)
+                       kthread_bind(p, cpu);
+               wake_up_process(p);
+       }
+}
+
 struct workqueue_struct *__create_workqueue(const char *name,
                                            int singlethread, int freezeable)
 {
@@ -693,12 +701,14 @@ struct workqueue_struct *__create_workqueue(const char *name,
        }
 
        wq->name = name;
+       wq->singlethread = singlethread;
        wq->freezeable = freezeable;
+       INIT_LIST_HEAD(&wq->list);
 
        if (singlethread) {
-               INIT_LIST_HEAD(&wq->list);
                cwq = init_cpu_workqueue(wq, singlethread_cpu);
                err = create_workqueue_thread(cwq, singlethread_cpu);
+               start_workqueue_thread(cwq, -1);
        } else {
                mutex_lock(&workqueue_mutex);
                list_add(&wq->list, &workqueues);
@@ -708,6 +718,7 @@ struct workqueue_struct *__create_workqueue(const char *name,
                        if (err || !cpu_online(cpu))
                                continue;
                        err = create_workqueue_thread(cwq, cpu);
+                       start_workqueue_thread(cwq, cpu);
                }
                mutex_unlock(&workqueue_mutex);
        }
@@ -755,22 +766,17 @@ static void cleanup_workqueue_thread(struct cpu_workqueue_struct *cwq, int cpu)
  */
 void destroy_workqueue(struct workqueue_struct *wq)
 {
+       const cpumask_t *cpu_map = wq_cpu_map(wq);
        struct cpu_workqueue_struct *cwq;
+       int cpu;
 
-       if (is_single_threaded(wq)) {
-               cwq = per_cpu_ptr(wq->cpu_wq, singlethread_cpu);
-               cleanup_workqueue_thread(cwq, singlethread_cpu);
-       } else {
-               int cpu;
+       mutex_lock(&workqueue_mutex);
+       list_del(&wq->list);
+       mutex_unlock(&workqueue_mutex);
 
-               mutex_lock(&workqueue_mutex);
-               list_del(&wq->list);
-               mutex_unlock(&workqueue_mutex);
-
-               for_each_cpu_mask(cpu, cpu_populated_map) {
-                       cwq = per_cpu_ptr(wq->cpu_wq, cpu);
-                       cleanup_workqueue_thread(cwq, cpu);
-               }
+       for_each_cpu_mask(cpu, *cpu_map) {
+               cwq = per_cpu_ptr(wq->cpu_wq, cpu);
+               cleanup_workqueue_thread(cwq, cpu);
        }
 
        free_percpu(wq->cpu_wq);
@@ -810,12 +816,11 @@ static int __devinit workqueue_cpu_callback(struct notifier_block *nfb,
                        return NOTIFY_BAD;
 
                case CPU_ONLINE:
-                       wake_up_process(cwq->thread);
+                       start_workqueue_thread(cwq, cpu);
                        break;
 
                case CPU_UP_CANCELED:
-                       if (cwq->thread)
-                               wake_up_process(cwq->thread);
+                       start_workqueue_thread(cwq, -1);
                case CPU_DEAD:
                        cleanup_workqueue_thread(cwq, cpu);
                        break;
@@ -825,10 +830,11 @@ static int __devinit workqueue_cpu_callback(struct notifier_block *nfb,
        return NOTIFY_OK;
 }
 
-void init_workqueues(void)
+void __init init_workqueues(void)
 {
        cpu_populated_map = cpu_online_map;
        singlethread_cpu = first_cpu(cpu_possible_map);
+       cpu_singlethread_map = cpumask_of_cpu(singlethread_cpu);
        hotcpu_notifier(workqueue_cpu_callback, 0);
        keventd_wq = create_workqueue("events");
        BUG_ON(!keventd_wq);