[PATCH] make 1-bit bitfields unsigned
kernel/workqueue.c
index 967479756511a9c15e10e8fa3fb27c346982da7f..5484d6e045c286d28b1823f15fe03e80c4c83cb4 100644
@@ -29,6 +29,9 @@
 #include <linux/kthread.h>
 #include <linux/hardirq.h>
 #include <linux/mempolicy.h>
+#include <linux/freezer.h>
+#include <linux/kallsyms.h>
+#include <linux/debug_locks.h>
 
 /*
  * The per-CPU workqueue (if single thread, we always use the first
@@ -55,6 +58,8 @@ struct cpu_workqueue_struct {
        struct task_struct *thread;
 
        int run_depth;          /* Detect run_workqueue() recursion depth */
+
+       int freezeable;         /* Freeze the thread during suspend */
 } ____cacheline_aligned;
 
 /*
@@ -241,14 +246,25 @@ static void run_workqueue(struct cpu_workqueue_struct *cwq)
                struct work_struct *work = list_entry(cwq->worklist.next,
                                                struct work_struct, entry);
                work_func_t f = work->func;
-               void *data = work->data;
 
                list_del_init(cwq->worklist.next);
                spin_unlock_irqrestore(&cwq->lock, flags);
 
                BUG_ON(get_wq_data(work) != cwq);
-               clear_bit(WORK_STRUCT_PENDING, &work->management);
-               f(data);
+               if (!test_bit(WORK_STRUCT_NOAUTOREL, &work->management))
+                       work_release(work);
+               f(work);
+
+               if (unlikely(in_atomic() || lockdep_depth(current) > 0)) {
+                       printk(KERN_ERR "BUG: workqueue leaked lock or atomic: "
+                                       "%s/0x%08x/%d\n",
+                                       current->comm, preempt_count(),
+                                       current->pid);
+                       printk(KERN_ERR "    last function: ");
+                       print_symbol("%s\n", (unsigned long)f);
+                       debug_show_held_locks(current);
+                       dump_stack();
+               }
 
                spin_lock_irqsave(&cwq->lock, flags);
                cwq->remove_sequence++;
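
With this change a work function receives the struct work_struct pointer itself rather than an opaque data pointer, and run_workqueue() releases the work item (clears its pending bit) before calling the handler unless WORK_STRUCT_NOAUTOREL is set. A minimal sketch of a handler written against the new prototype, using container_of() to recover the enclosing object; the my_device structure and all names below are hypothetical:

#include <linux/kernel.h>
#include <linux/workqueue.h>

/* Hypothetical driver context embedding its work item. */
struct my_device {
	struct work_struct work;
	int pending_events;
};

/* New-style handler: the work_struct pointer is the only argument. */
static void my_device_work(struct work_struct *work)
{
	struct my_device *dev = container_of(work, struct my_device, work);

	/* The work item was already released before this handler ran, so it
	 * may be requeued from here or from another context. */
	dev->pending_events = 0;
}

/* INIT_WORK() correspondingly takes just the item and the handler. */
static void my_device_setup(struct my_device *dev)
{
	INIT_WORK(&dev->work, my_device_work);
}
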
@@ -265,7 +281,8 @@ static int worker_thread(void *__cwq)
        struct k_sigaction sa;
        sigset_t blocked;
 
-       current->flags |= PF_NOFREEZE;
+       if (!cwq->freezeable)
+               current->flags |= PF_NOFREEZE;
 
        set_user_nice(current, -5);
 
@@ -288,6 +305,9 @@ static int worker_thread(void *__cwq)
 
        set_current_state(TASK_INTERRUPTIBLE);
        while (!kthread_should_stop()) {
+               if (cwq->freezeable)
+                       try_to_freeze();
+
                add_wait_queue(&cwq->more_work, &wait);
                if (list_empty(&cwq->worklist))
                        schedule();
@@ -364,7 +384,7 @@ void fastcall flush_workqueue(struct workqueue_struct *wq)
 EXPORT_SYMBOL_GPL(flush_workqueue);
 
 static struct task_struct *create_workqueue_thread(struct workqueue_struct *wq,
-                                                  int cpu)
+                                                  int cpu, int freezeable)
 {
        struct cpu_workqueue_struct *cwq = per_cpu_ptr(wq->cpu_wq, cpu);
        struct task_struct *p;
@@ -374,6 +394,7 @@ static struct task_struct *create_workqueue_thread(struct workqueue_struct *wq,
        cwq->thread = NULL;
        cwq->insert_sequence = 0;
        cwq->remove_sequence = 0;
+       cwq->freezeable = freezeable;
        INIT_LIST_HEAD(&cwq->worklist);
        init_waitqueue_head(&cwq->more_work);
        init_waitqueue_head(&cwq->work_done);
@@ -389,7 +410,7 @@ static struct task_struct *create_workqueue_thread(struct workqueue_struct *wq,
 }
 
 struct workqueue_struct *__create_workqueue(const char *name,
-                                           int singlethread)
+                                           int singlethread, int freezeable)
 {
        int cpu, destroy = 0;
        struct workqueue_struct *wq;
@@ -409,7 +430,7 @@ struct workqueue_struct *__create_workqueue(const char *name,
        mutex_lock(&workqueue_mutex);
        if (singlethread) {
                INIT_LIST_HEAD(&wq->list);
-               p = create_workqueue_thread(wq, singlethread_cpu);
+               p = create_workqueue_thread(wq, singlethread_cpu, freezeable);
                if (!p)
                        destroy = 1;
                else
@@ -417,7 +438,7 @@ struct workqueue_struct *__create_workqueue(const char *name,
        } else {
                list_add(&wq->list, &workqueues);
                for_each_online_cpu(cpu) {
-                       p = create_workqueue_thread(wq, cpu);
+                       p = create_workqueue_thread(wq, cpu, freezeable);
                        if (p) {
                                kthread_bind(p, cpu);
                                wake_up_process(p);
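
Callers do not normally pass the new freezeable argument themselves; the create_workqueue() family of macros in <linux/workqueue.h> supplies it when expanding to __create_workqueue(). The definitions below are a sketch of how those wrappers plausibly look after this change (they live in the header, not in this diff):

/* Sketch of the <linux/workqueue.h> wrappers assumed by this change. */
#define create_workqueue(name)               __create_workqueue((name), 0, 0)
#define create_singlethread_workqueue(name)  __create_workqueue((name), 1, 0)
#define create_freezeable_workqueue(name)    __create_workqueue((name), 1, 1)

A driver whose worker must not run while tasks are frozen for suspend would then call create_freezeable_workqueue() instead of create_workqueue().
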
@@ -527,7 +548,6 @@ EXPORT_SYMBOL(schedule_delayed_work_on);
 /**
  * schedule_on_each_cpu - call a function on each online CPU from keventd
  * @func: the function to call
- * @info: a pointer to pass to func()
  *
  * Returns zero on success.
  * Returns -ve errno on failure.
@@ -536,7 +556,7 @@ EXPORT_SYMBOL(schedule_delayed_work_on);
  *
  * schedule_on_each_cpu() is very slow.
  */
-int schedule_on_each_cpu(work_func_t func, void *info)
+int schedule_on_each_cpu(work_func_t func)
 {
        int cpu;
        struct work_struct *works;
@@ -547,7 +567,7 @@ int schedule_on_each_cpu(work_func_t func, void *info)
 
        mutex_lock(&workqueue_mutex);
        for_each_online_cpu(cpu) {
-               INIT_WORK(per_cpu_ptr(works, cpu), func, info);
+               INIT_WORK(per_cpu_ptr(works, cpu), func);
                __queue_work(per_cpu_ptr(keventd_wq->cpu_wq, cpu),
                                per_cpu_ptr(works, cpu));
        }
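
Since the info argument is gone, a callback run via schedule_on_each_cpu() now has to get any state from the work item itself or from per-CPU data. A sketch against the new single-argument prototype; the function names are hypothetical:

#include <linux/kernel.h>
#include <linux/smp.h>
#include <linux/workqueue.h>

/* Hypothetical per-CPU callback; no void *info is passed any more. */
static void drain_local_caches(struct work_struct *unused)
{
	printk(KERN_DEBUG "draining caches on CPU %d\n", smp_processor_id());
}

static int drain_all_caches(void)
{
	/* Runs drain_local_caches() once on every online CPU from keventd. */
	return schedule_on_each_cpu(drain_local_caches);
}
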
@@ -591,7 +611,6 @@ EXPORT_SYMBOL(cancel_rearming_delayed_work);
 /**
  * execute_in_process_context - reliably execute the routine with user context
  * @fn:                the function to execute
- * @data:      data to pass to the function
  * @ew:                guaranteed storage for the execute work structure (must
  *             be available when the work executes)
  *
@@ -601,15 +620,14 @@ EXPORT_SYMBOL(cancel_rearming_delayed_work);
  * Returns:    0 - function was executed
  *             1 - function was scheduled for execution
  */
-int execute_in_process_context(work_func_t fn, void *data,
-                              struct execute_work *ew)
+int execute_in_process_context(work_func_t fn, struct execute_work *ew)
 {
        if (!in_interrupt()) {
-               fn(data);
+               fn(&ew->work);
                return 0;
        }
 
-       INIT_WORK(&ew->work, fn, data);
+       INIT_WORK(&ew->work, fn);
        schedule_work(&ew->work);
 
        return 1;
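
With the data pointer removed here as well, a caller embeds the execute_work storage in its own object and recovers that object with container_of(), since the routine is always invoked with &ew->work whether it ran directly or was deferred via schedule_work(). A sketch with hypothetical names (my_release_ctx, my_release_fn):

#include <linux/kernel.h>
#include <linux/slab.h>
#include <linux/workqueue.h>

/* Hypothetical object embedding the caller-provided execute_work storage. */
struct my_release_ctx {
	struct execute_work ew;
	void *resource;
};

static void my_release_fn(struct work_struct *work)
{
	/* The routine always receives &ew->work, so container_of() walks back
	 * to the enclosing context whether it ran directly or via keventd. */
	struct my_release_ctx *ctx =
		container_of(work, struct my_release_ctx, ew.work);

	kfree(ctx->resource);
}

static void my_release(struct my_release_ctx *ctx)
{
	/* Runs immediately when in process context, otherwise defers. */
	execute_in_process_context(my_release_fn, &ctx->ew);
}
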
@@ -670,7 +688,7 @@ static int __devinit workqueue_cpu_callback(struct notifier_block *nfb,
                mutex_lock(&workqueue_mutex);
                /* Create a new workqueue thread for it. */
                list_for_each_entry(wq, &workqueues, list) {
-                       if (!create_workqueue_thread(wq, hotcpu)) {
+                       if (!create_workqueue_thread(wq, hotcpu, 0)) {
                                printk("workqueue for %i failed\n", hotcpu);
                                return NOTIFY_BAD;
                        }