* Kai Petzke <wpp@marie.physik.tu-berlin.de>
* Theodore Ts'o <tytso@mit.edu>
*
- * Made to use alloc_percpu by Christoph Lameter <clameter@sgi.com>.
+ * Made to use alloc_percpu by Christoph Lameter.
*/
#include <linux/module.h>
#include <linux/freezer.h>
#include <linux/kallsyms.h>
#include <linux/debug_locks.h>
+#include <linux/lockdep.h>
/*
* The per-CPU workqueue (if single thread, we always use the first
struct workqueue_struct *wq;
struct task_struct *thread;
- int should_stop;
int run_depth; /* Detect run_workqueue() recursion depth */
} ____cacheline_aligned;
*/
struct workqueue_struct {
struct cpu_workqueue_struct *cpu_wq;
+ struct list_head list;
const char *name;
- struct list_head list; /* Empty if single thread */
+ int singlethread;
int freezeable; /* Freeze threads during suspend */
+#ifdef CONFIG_LOCKDEP
+ struct lockdep_map lockdep_map;
+#endif
};
-/* All the per-cpu workqueues on the system, for hotplug cpu to add/remove
- threads to each one as cpus come/go. */
-static DEFINE_MUTEX(workqueue_mutex);
+/* Serializes the accesses to the list of workqueues. */
+static DEFINE_SPINLOCK(workqueue_lock);
static LIST_HEAD(workqueues);
static int singlethread_cpu __read_mostly;
-/* optimization, we could use cpu_possible_map */
+static cpumask_t cpu_singlethread_map __read_mostly;
+/*
+ * _cpu_down() first removes CPU from cpu_online_map, then CPU_DEAD
+ * flushes cwq->worklist. This means that flush_workqueue/wait_on_work
+ * which come in between can't use for_each_online_cpu(). We could
+ * use cpu_possible_map; the cpumask below is more documentation
+ * than an optimization.
+ */
static cpumask_t cpu_populated_map __read_mostly;
/* If it's single threaded, it isn't in the list of workqueues. */
static inline int is_single_threaded(struct workqueue_struct *wq)
{
- return list_empty(&wq->list);
+ return wq->singlethread;
+}
+
+static const cpumask_t *wq_cpu_map(struct workqueue_struct *wq)
+{
+ return is_single_threaded(wq)
+ ? &cpu_singlethread_map : &cpu_populated_map;
+}
+
+static
+struct cpu_workqueue_struct *wq_per_cpu(struct workqueue_struct *wq, int cpu)
+{
+ if (unlikely(is_single_threaded(wq)))
+ cpu = singlethread_cpu;
+ return per_cpu_ptr(wq->cpu_wq, cpu);
}
/*
* Set the workqueue on which a work item is to be run
* - Must *only* be called if the pending flag is set
*/
-static inline void set_wq_data(struct work_struct *work, void *wq)
+static inline void set_wq_data(struct work_struct *work,
+ struct cpu_workqueue_struct *cwq)
{
unsigned long new;
BUG_ON(!work_pending(work));
- new = (unsigned long) wq | (1UL << WORK_STRUCT_PENDING);
+ new = (unsigned long) cwq | (1UL << WORK_STRUCT_PENDING);
new |= WORK_STRUCT_FLAG_MASK & *work_data_bits(work);
atomic_long_set(&work->data, new);
}
-static inline void *get_wq_data(struct work_struct *work)
+static inline
+struct cpu_workqueue_struct *get_wq_data(struct work_struct *work)
{
return (void *) (atomic_long_read(&work->data) & WORK_STRUCT_WQ_DATA_MASK);
}
struct work_struct *work, int tail)
{
set_wq_data(work, cwq);
+ /*
+ * Ensure that we get the right work->data if we see the
+ * result of list_add() below, see try_to_grab_pending().
+ */
+ smp_wmb();
if (tail)
list_add_tail(&work->entry, &cwq->worklist);
else
*
* Returns 0 if @work was already on a queue, non-zero otherwise.
*
- * We queue the work to the CPU it was submitted, but there is no
- * guarantee that it will be processed by that CPU.
+ * We queue the work to the CPU on which it was submitted, but if the CPU dies
+ * it can be processed by another CPU.
*/
-int fastcall queue_work(struct workqueue_struct *wq, struct work_struct *work)
+int queue_work(struct workqueue_struct *wq, struct work_struct *work)
{
- int ret = 0, cpu = get_cpu();
+ int ret = 0;
if (!test_and_set_bit(WORK_STRUCT_PENDING, work_data_bits(work))) {
- if (unlikely(is_single_threaded(wq)))
- cpu = singlethread_cpu;
BUG_ON(!list_empty(&work->entry));
- __queue_work(per_cpu_ptr(wq->cpu_wq, cpu), work);
+ __queue_work(wq_per_cpu(wq, get_cpu()), work);
+ put_cpu();
ret = 1;
}
- put_cpu();
return ret;
}
EXPORT_SYMBOL_GPL(queue_work);
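+
+/*
+ * Minimal usage sketch for the API above; my_wq, my_work_fn and
+ * my_work are illustrative names, not part of this file:
+ *
+ *	static void my_work_fn(struct work_struct *work)
+ *	{
+ *		printk(KERN_INFO "runs in process context, may sleep\n");
+ *	}
+ *	static DECLARE_WORK(my_work, my_work_fn);
+ *
+ *	struct workqueue_struct *my_wq = create_workqueue("my_wq");
+ *	if (my_wq)
+ *		queue_work(my_wq, &my_work);
+ */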
-void delayed_work_timer_fn(unsigned long __data)
+static void delayed_work_timer_fn(unsigned long __data)
{
struct delayed_work *dwork = (struct delayed_work *)__data;
- struct workqueue_struct *wq = get_wq_data(&dwork->work);
- int cpu = smp_processor_id();
-
- if (unlikely(is_single_threaded(wq)))
- cpu = singlethread_cpu;
+ struct cpu_workqueue_struct *cwq = get_wq_data(&dwork->work);
+ struct workqueue_struct *wq = cwq->wq;
- __queue_work(per_cpu_ptr(wq->cpu_wq, cpu), &dwork->work);
+ __queue_work(wq_per_cpu(wq, smp_processor_id()), &dwork->work);
}
/**
*
* Returns 0 if @work was already on a queue, non-zero otherwise.
*/
-int fastcall queue_delayed_work(struct workqueue_struct *wq,
+int queue_delayed_work(struct workqueue_struct *wq,
struct delayed_work *dwork, unsigned long delay)
{
- int ret = 0;
- struct timer_list *timer = &dwork->timer;
- struct work_struct *work = &dwork->work;
-
- timer_stats_timer_set_start_info(timer);
if (delay == 0)
- return queue_work(wq, work);
+ return queue_work(wq, &dwork->work);
- if (!test_and_set_bit(WORK_STRUCT_PENDING, work_data_bits(work))) {
- BUG_ON(timer_pending(timer));
- BUG_ON(!list_empty(&work->entry));
-
- /* This stores wq for the moment, for the timer_fn */
- set_wq_data(work, wq);
- timer->expires = jiffies + delay;
- timer->data = (unsigned long)dwork;
- timer->function = delayed_work_timer_fn;
- add_timer(timer);
- ret = 1;
- }
- return ret;
+ return queue_delayed_work_on(-1, wq, dwork, delay);
}
EXPORT_SYMBOL_GPL(queue_delayed_work);
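+
+/*
+ * Delayed submission sketch, reusing the illustrative my_wq/my_work_fn
+ * names from the example above:
+ *
+ *	static DECLARE_DELAYED_WORK(my_dwork, my_work_fn);
+ *
+ *	queue_delayed_work(my_wq, &my_dwork, HZ);
+ *
+ * or, to pin the timer and the work to one CPU:
+ *
+ *	queue_delayed_work_on(2, my_wq, &my_dwork, HZ);
+ */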
BUG_ON(timer_pending(timer));
BUG_ON(!list_empty(&work->entry));
- /* This stores wq for the moment, for the timer_fn */
- set_wq_data(work, wq);
+ timer_stats_timer_set_start_info(&dwork->timer);
+
+ /* This stores cwq for the moment, for the timer_fn */
+ set_wq_data(work, wq_per_cpu(wq, raw_smp_processor_id()));
timer->expires = jiffies + delay;
timer->data = (unsigned long)dwork;
timer->function = delayed_work_timer_fn;
- add_timer_on(timer, cpu);
+
+ if (unlikely(cpu >= 0))
+ add_timer_on(timer, cpu);
+ else
+ add_timer(timer);
ret = 1;
}
return ret;
static void run_workqueue(struct cpu_workqueue_struct *cwq)
{
- unsigned long flags;
-
- /*
- * Keep taking off work from the queue until
- * done.
- */
- spin_lock_irqsave(&cwq->lock, flags);
+ spin_lock_irq(&cwq->lock);
cwq->run_depth++;
if (cwq->run_depth > 3) {
/* morton gets to eat his hat */
printk("%s: recursion depth exceeded: %d\n",
- __FUNCTION__, cwq->run_depth);
+ __func__, cwq->run_depth);
dump_stack();
}
while (!list_empty(&cwq->worklist)) {
struct work_struct *work = list_entry(cwq->worklist.next,
struct work_struct, entry);
work_func_t f = work->func;
+#ifdef CONFIG_LOCKDEP
+ /*
+ * It is permissible to free the struct work_struct
+	 * from inside the function that is called from it;
+	 * we need to take this into account for lockdep too.
+ * To avoid bogus "held lock freed" warnings as well
+ * as problems when looking into work->lockdep_map,
+ * make a copy and use that here.
+ */
+ struct lockdep_map lockdep_map = work->lockdep_map;
+#endif
cwq->current_work = work;
list_del_init(cwq->worklist.next);
- spin_unlock_irqrestore(&cwq->lock, flags);
+ spin_unlock_irq(&cwq->lock);
BUG_ON(get_wq_data(work) != cwq);
- if (!test_bit(WORK_STRUCT_NOAUTOREL, work_data_bits(work)))
- work_release(work);
+ work_clear_pending(work);
+ lock_acquire(&cwq->wq->lockdep_map, 0, 0, 0, 2, _THIS_IP_);
+ lock_acquire(&lockdep_map, 0, 0, 0, 2, _THIS_IP_);
f(work);
+ lock_release(&lockdep_map, 1, _THIS_IP_);
+ lock_release(&cwq->wq->lockdep_map, 1, _THIS_IP_);
if (unlikely(in_atomic() || lockdep_depth(current) > 0)) {
printk(KERN_ERR "BUG: workqueue leaked lock or atomic: "
"%s/0x%08x/%d\n",
current->comm, preempt_count(),
- current->pid);
+ task_pid_nr(current));
printk(KERN_ERR " last function: ");
print_symbol("%s\n", (unsigned long)f);
debug_show_held_locks(current);
dump_stack();
}
- spin_lock_irqsave(&cwq->lock, flags);
+ spin_lock_irq(&cwq->lock);
cwq->current_work = NULL;
}
cwq->run_depth--;
- spin_unlock_irqrestore(&cwq->lock, flags);
-}
-
-/*
- * NOTE: the caller must not touch *cwq if this func returns true
- */
-static int cwq_should_stop(struct cpu_workqueue_struct *cwq)
-{
- int should_stop = cwq->should_stop;
-
- if (unlikely(should_stop)) {
- spin_lock_irq(&cwq->lock);
- should_stop = cwq->should_stop && list_empty(&cwq->worklist);
- if (should_stop)
- cwq->thread = NULL;
- spin_unlock_irq(&cwq->lock);
- }
-
- return should_stop;
+ spin_unlock_irq(&cwq->lock);
}
static int worker_thread(void *__cwq)
{
struct cpu_workqueue_struct *cwq = __cwq;
DEFINE_WAIT(wait);
- struct k_sigaction sa;
- sigset_t blocked;
- if (!cwq->wq->freezeable)
- current->flags |= PF_NOFREEZE;
+ if (cwq->wq->freezeable)
+ set_freezable();
set_user_nice(current, -5);
- /* Block and flush all signals */
- sigfillset(&blocked);
- sigprocmask(SIG_BLOCK, &blocked, NULL);
- flush_signals(current);
-
- /*
- * We inherited MPOL_INTERLEAVE from the booting kernel.
- * Set MPOL_DEFAULT to insure node local allocations.
- */
- numa_default_policy();
-
- /* SIG_IGN makes children autoreap: see do_notify_parent(). */
- sa.sa.sa_handler = SIG_IGN;
- sa.sa.sa_flags = 0;
- siginitset(&sa.sa.sa_mask, sigmask(SIGCHLD));
- do_sigaction(SIGCHLD, &sa, (struct k_sigaction *)0);
-
for (;;) {
- if (cwq->wq->freezeable)
- try_to_freeze();
-
prepare_to_wait(&cwq->more_work, &wait, TASK_INTERRUPTIBLE);
- if (!cwq->should_stop && list_empty(&cwq->worklist))
+ if (!freezing(current) &&
+ !kthread_should_stop() &&
+ list_empty(&cwq->worklist))
schedule();
finish_wait(&cwq->more_work, &wait);
- if (cwq_should_stop(cwq))
+ try_to_freeze();
+
+ if (kthread_should_stop())
break;
run_workqueue(cwq);
insert_work(cwq, &barr->work, tail);
}
-static void flush_cpu_workqueue(struct cpu_workqueue_struct *cwq)
+static int flush_cpu_workqueue(struct cpu_workqueue_struct *cwq)
{
+ int active;
+
if (cwq->thread == current) {
/*
* Probably keventd trying to flush its own queue. So simply run
* it by hand rather than deadlocking.
*/
run_workqueue(cwq);
+ active = 1;
} else {
struct wq_barrier barr;
- int active = 0;
+ active = 0;
spin_lock_irq(&cwq->lock);
if (!list_empty(&cwq->worklist) || cwq->current_work != NULL) {
insert_wq_barrier(cwq, &barr, 1);
if (active)
wait_for_completion(&barr.done);
}
+
+ return active;
}
/**
* This function used to run the workqueues itself. Now we just wait for the
* helper threads to do it.
*/
-void fastcall flush_workqueue(struct workqueue_struct *wq)
+void flush_workqueue(struct workqueue_struct *wq)
{
- if (is_single_threaded(wq))
- flush_cpu_workqueue(per_cpu_ptr(wq->cpu_wq, singlethread_cpu));
- else {
- int cpu;
+ const cpumask_t *cpu_map = wq_cpu_map(wq);
+ int cpu;
- for_each_cpu_mask(cpu, cpu_populated_map)
- flush_cpu_workqueue(per_cpu_ptr(wq->cpu_wq, cpu));
- }
+ might_sleep();
+ lock_acquire(&wq->lockdep_map, 0, 0, 0, 2, _THIS_IP_);
+ lock_release(&wq->lockdep_map, 1, _THIS_IP_);
+ for_each_cpu_mask(cpu, *cpu_map)
+ flush_cpu_workqueue(per_cpu_ptr(wq->cpu_wq, cpu));
}
EXPORT_SYMBOL_GPL(flush_workqueue);
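+
+/*
+ * Note on the lock_acquire/lock_release pair above: it teaches lockdep
+ * to treat the whole workqueue as a pseudo-lock, so calling
+ * flush_workqueue() while holding a lock that one of the queued work
+ * functions also takes is reported as a potential deadlock.
+ */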
-static void wait_on_work(struct cpu_workqueue_struct *cwq,
+/*
+ * Upon a successful return (>= 0), the caller "owns" the WORK_STRUCT_PENDING
+ * bit, so this work can't be re-armed in any way.
+ */
+static int try_to_grab_pending(struct work_struct *work)
+{
+ struct cpu_workqueue_struct *cwq;
+ int ret = -1;
+
+ if (!test_and_set_bit(WORK_STRUCT_PENDING, work_data_bits(work)))
+ return 0;
+
+ /*
+ * The queueing is in progress, or it is already queued. Try to
+ * steal it from ->worklist without clearing WORK_STRUCT_PENDING.
+ */
+
+ cwq = get_wq_data(work);
+ if (!cwq)
+ return ret;
+
+ spin_lock_irq(&cwq->lock);
+ if (!list_empty(&work->entry)) {
+ /*
+ * This work is queued, but perhaps we locked the wrong cwq.
+ * In that case we must see the new value after rmb(), see
+ * insert_work()->wmb().
+ */
+ smp_rmb();
+ if (cwq == get_wq_data(work)) {
+ list_del_init(&work->entry);
+ ret = 1;
+ }
+ }
+ spin_unlock_irq(&cwq->lock);
+
+ return ret;
+}
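+
+/*
+ * The barrier pairing the rmb() above relies on, as a sketch:
+ *
+ *	insert_work()			try_to_grab_pending()
+ *	  set_wq_data(work, cwq)	  spin_lock_irq(&cwq->lock)
+ *	  smp_wmb()			  !list_empty(&work->entry)?
+ *	  list_add(&work->entry, ...)	  smp_rmb()
+ *					  cwq == get_wq_data(work)?
+ *
+ * If we see the work on our ->worklist, the rmb() guarantees we also
+ * see the cwq it was queued on, so we know we locked the right one.
+ */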
+
+static void wait_on_cpu_work(struct cpu_workqueue_struct *cwq,
struct work_struct *work)
{
struct wq_barrier barr;
wait_for_completion(&barr.done);
}
-/**
- * flush_work - block until a work_struct's callback has terminated
- * @wq: the workqueue on which the work is queued
- * @work: the work which is to be flushed
- *
- * flush_work() will attempt to cancel the work if it is queued. If the work's
- * callback appears to be running, flush_work() will block until it has
- * completed.
- *
- * flush_work() is designed to be used when the caller is tearing down data
- * structures which the callback function operates upon. It is expected that,
- * prior to calling flush_work(), the caller has arranged for the work to not
- * be requeued.
- */
-void flush_work(struct workqueue_struct *wq, struct work_struct *work)
+static void wait_on_work(struct work_struct *work)
{
struct cpu_workqueue_struct *cwq;
+ struct workqueue_struct *wq;
+ const cpumask_t *cpu_map;
+ int cpu;
+
+ might_sleep();
+
+ lock_acquire(&work->lockdep_map, 0, 0, 0, 2, _THIS_IP_);
+ lock_release(&work->lockdep_map, 1, _THIS_IP_);
cwq = get_wq_data(work);
- /* Was it ever queued ? */
if (!cwq)
return;
- /*
- * This work can't be re-queued, no need to re-check that
- * get_wq_data() is still the same when we take cwq->lock.
- */
- spin_lock_irq(&cwq->lock);
- list_del_init(&work->entry);
- work_release(work);
- spin_unlock_irq(&cwq->lock);
+ wq = cwq->wq;
+ cpu_map = wq_cpu_map(wq);
- if (is_single_threaded(wq))
- wait_on_work(per_cpu_ptr(wq->cpu_wq, singlethread_cpu), work);
- else {
- int cpu;
+ for_each_cpu_mask(cpu, *cpu_map)
+ wait_on_cpu_work(per_cpu_ptr(wq->cpu_wq, cpu), work);
+}
- for_each_cpu_mask(cpu, cpu_populated_map)
- wait_on_work(per_cpu_ptr(wq->cpu_wq, cpu), work);
- }
+static int __cancel_work_timer(struct work_struct *work,
+				struct timer_list *timer)
+{
+ int ret;
+
+ do {
+ ret = (timer && likely(del_timer(timer)));
+ if (!ret)
+ ret = try_to_grab_pending(work);
+ wait_on_work(work);
+ } while (unlikely(ret < 0));
+
+ work_clear_pending(work);
+ return ret;
}
-EXPORT_SYMBOL_GPL(flush_work);
+/**
+ * cancel_work_sync - block until a work_struct's callback has terminated
+ * @work: the work to cancel
+ *
+ * Returns true if @work was pending.
+ *
+ * cancel_work_sync() will cancel the work if it is queued. If the work's
+ * callback appears to be running, cancel_work_sync() will block until it
+ * has completed.
+ *
+ * It is possible to use this function if the work re-queues itself. It can
+ * cancel the work even if it migrates to another workqueue, however in that
+ * case it only guarantees that work->func() has completed on the last queued
+ * workqueue.
+ *
+ * cancel_work_sync(&delayed_work->work) should be used only if ->timer is not
+ * pending, otherwise it goes into a busy-wait loop until the timer expires.
+ *
+ * The caller must ensure that the workqueue_struct on which this work was
+ * last queued can't be destroyed before this function returns.
+ */
+int cancel_work_sync(struct work_struct *work)
+{
+ return __cancel_work_timer(work, NULL);
+}
+EXPORT_SYMBOL_GPL(cancel_work_sync);
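+
+/*
+ * Teardown sketch; my_dev, its work and its wq are illustrative only.
+ * Stop new submissions first, then synchronize, then destroy:
+ *
+ *	my_dev->shutting_down = 1;
+ *	cancel_work_sync(&my_dev->work);
+ *	destroy_workqueue(my_dev->wq);
+ */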
+
+/**
+ * cancel_delayed_work_sync - reliably kill off a delayed work.
+ * @dwork: the delayed work struct
+ *
+ * Returns true if @dwork was pending.
+ *
+ * It is possible to use this function if @dwork rearms itself via queue_work()
+ * or queue_delayed_work(). See also the comment for cancel_work_sync().
+ */
+int cancel_delayed_work_sync(struct delayed_work *dwork)
+{
+ return __cancel_work_timer(&dwork->work, &dwork->timer);
+}
+EXPORT_SYMBOL(cancel_delayed_work_sync);
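+
+/*
+ * Sketch of the self-rearming case mentioned above; poll_fn and
+ * my_poll are illustrative names:
+ *
+ *	static void poll_fn(struct work_struct *work)
+ *	{
+ *		...
+ *		schedule_delayed_work(&my_poll, HZ);
+ *	}
+ *	static DECLARE_DELAYED_WORK(my_poll, poll_fn);
+ *
+ * cancel_delayed_work_sync(&my_poll) kills the timer, grabs or waits
+ * out a running callback, and retries until the rearming race is
+ * closed, so no new invocation can follow its return.
+ */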
-static struct workqueue_struct *keventd_wq;
+static struct workqueue_struct *keventd_wq __read_mostly;
/**
* schedule_work - put work task in global workqueue
*
* This puts a job in the kernel-global workqueue.
*/
-int fastcall schedule_work(struct work_struct *work)
+int schedule_work(struct work_struct *work)
{
return queue_work(keventd_wq, work);
}
* After waiting for a given time this puts a job in the kernel-global
* workqueue.
*/
-int fastcall schedule_delayed_work(struct delayed_work *dwork,
+int schedule_delayed_work(struct delayed_work *dwork,
unsigned long delay)
{
- timer_stats_timer_set_start_info(&dwork->timer);
return queue_delayed_work(keventd_wq, dwork, delay);
}
EXPORT_SYMBOL(schedule_delayed_work);
* Returns zero on success.
* Returns -ve errno on failure.
*
- * Appears to be racy against CPU hotplug.
- *
* schedule_on_each_cpu() is very slow.
*/
int schedule_on_each_cpu(work_func_t func)
if (!works)
return -ENOMEM;
- preempt_disable(); /* CPU hotplug */
+ get_online_cpus();
for_each_online_cpu(cpu) {
struct work_struct *work = per_cpu_ptr(works, cpu);
set_bit(WORK_STRUCT_PENDING, work_data_bits(work));
__queue_work(per_cpu_ptr(keventd_wq->cpu_wq, cpu), work);
}
- preempt_enable();
flush_workqueue(keventd_wq);
+ put_online_cpus();
free_percpu(works);
return 0;
}
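+
+/*
+ * Usage sketch (drain_local_caches is an illustrative callback): the
+ * call does not return until the callback has run, in keventd, on
+ * every CPU that was online when it started:
+ *
+ *	static void drain_local_caches(struct work_struct *unused)
+ *	{
+ *		...
+ *	}
+ *
+ *	err = schedule_on_each_cpu(drain_local_caches);
+ */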
}
EXPORT_SYMBOL(flush_scheduled_work);
-void flush_work_keventd(struct work_struct *work)
-{
- flush_work(keventd_wq, work);
-}
-EXPORT_SYMBOL(flush_work_keventd);
-
-/**
- * cancel_rearming_delayed_workqueue - reliably kill off a delayed work whose handler rearms the delayed work.
- * @wq: the controlling workqueue structure
- * @dwork: the delayed work struct
- */
-void cancel_rearming_delayed_workqueue(struct workqueue_struct *wq,
- struct delayed_work *dwork)
-{
- while (!cancel_delayed_work(dwork))
- flush_workqueue(wq);
-}
-EXPORT_SYMBOL(cancel_rearming_delayed_workqueue);
-
-/**
- * cancel_rearming_delayed_work - reliably kill off a delayed keventd work whose handler rearms the delayed work.
- * @dwork: the delayed work struct
- */
-void cancel_rearming_delayed_work(struct delayed_work *dwork)
-{
- cancel_rearming_delayed_workqueue(keventd_wq, dwork);
-}
-EXPORT_SYMBOL(cancel_rearming_delayed_work);
-
/**
* execute_in_process_context - reliably execute the routine with user context
* @fn: the function to execute
int current_is_keventd(void)
{
struct cpu_workqueue_struct *cwq;
- int cpu = smp_processor_id(); /* preempt-safe: keventd is per-cpu */
+ int cpu = raw_smp_processor_id(); /* preempt-safe: keventd is per-cpu */
int ret = 0;
BUG_ON(!keventd_wq);
return PTR_ERR(p);
cwq->thread = p;
- cwq->should_stop = 0;
- if (!is_single_threaded(wq))
- kthread_bind(p, cpu);
-
- if (is_single_threaded(wq) || cpu_online(cpu))
- wake_up_process(p);
return 0;
}
-struct workqueue_struct *__create_workqueue(const char *name,
- int singlethread, int freezeable)
+static void start_workqueue_thread(struct cpu_workqueue_struct *cwq, int cpu)
+{
+ struct task_struct *p = cwq->thread;
+
+ if (p != NULL) {
+ if (cpu >= 0)
+ kthread_bind(p, cpu);
+ wake_up_process(p);
+ }
+}
+
+struct workqueue_struct *__create_workqueue_key(const char *name,
+ int singlethread,
+ int freezeable,
+ struct lock_class_key *key,
+ const char *lock_name)
{
struct workqueue_struct *wq;
struct cpu_workqueue_struct *cwq;
}
wq->name = name;
+ lockdep_init_map(&wq->lockdep_map, lock_name, key, 0);
+ wq->singlethread = singlethread;
wq->freezeable = freezeable;
+ INIT_LIST_HEAD(&wq->list);
if (singlethread) {
- INIT_LIST_HEAD(&wq->list);
cwq = init_cpu_workqueue(wq, singlethread_cpu);
err = create_workqueue_thread(cwq, singlethread_cpu);
+ start_workqueue_thread(cwq, -1);
} else {
- mutex_lock(&workqueue_mutex);
+ get_online_cpus();
+ spin_lock(&workqueue_lock);
list_add(&wq->list, &workqueues);
+ spin_unlock(&workqueue_lock);
for_each_possible_cpu(cpu) {
cwq = init_cpu_workqueue(wq, cpu);
if (err || !cpu_online(cpu))
continue;
err = create_workqueue_thread(cwq, cpu);
+ start_workqueue_thread(cwq, cpu);
}
- mutex_unlock(&workqueue_mutex);
+ put_online_cpus();
}
if (err) {
}
return wq;
}
-EXPORT_SYMBOL_GPL(__create_workqueue);
+EXPORT_SYMBOL_GPL(__create_workqueue_key);
-static void cleanup_workqueue_thread(struct cpu_workqueue_struct *cwq, int cpu)
+static void cleanup_workqueue_thread(struct cpu_workqueue_struct *cwq)
{
- struct wq_barrier barr;
- int alive = 0;
-
- spin_lock_irq(&cwq->lock);
- if (cwq->thread != NULL) {
- insert_wq_barrier(cwq, &barr, 1);
- cwq->should_stop = 1;
- alive = 1;
- }
- spin_unlock_irq(&cwq->lock);
+ /*
+	 * Our caller is either destroy_workqueue() or CPU_DEAD;
+	 * get_online_cpus() protects cwq->thread.
+ */
+ if (cwq->thread == NULL)
+ return;
- if (alive) {
- wait_for_completion(&barr.done);
+ lock_acquire(&cwq->wq->lockdep_map, 0, 0, 0, 2, _THIS_IP_);
+ lock_release(&cwq->wq->lockdep_map, 1, _THIS_IP_);
- while (unlikely(cwq->thread != NULL))
- cpu_relax();
- /*
- * Wait until cwq->thread unlocks cwq->lock,
- * it won't touch *cwq after that.
- */
- smp_rmb();
- spin_unlock_wait(&cwq->lock);
- }
+ flush_cpu_workqueue(cwq);
+ /*
+ * If the caller is CPU_DEAD and cwq->worklist was not empty,
+ * a concurrent flush_workqueue() can insert a barrier after us.
+ * However, in that case run_workqueue() won't return and check
+	 * kthread_should_stop() until it flushes all work_structs.
+ * When ->worklist becomes empty it is safe to exit because no
+ * more work_structs can be queued on this cwq: flush_workqueue
+ * checks list_empty(), and a "normal" queue_work() can't use
+ * a dead CPU.
+ */
+ kthread_stop(cwq->thread);
+ cwq->thread = NULL;
}
/**
*/
void destroy_workqueue(struct workqueue_struct *wq)
{
- struct cpu_workqueue_struct *cwq;
-
- if (is_single_threaded(wq)) {
- cwq = per_cpu_ptr(wq->cpu_wq, singlethread_cpu);
- cleanup_workqueue_thread(cwq, singlethread_cpu);
- } else {
- int cpu;
+ const cpumask_t *cpu_map = wq_cpu_map(wq);
+ int cpu;
- mutex_lock(&workqueue_mutex);
- list_del(&wq->list);
- mutex_unlock(&workqueue_mutex);
+ get_online_cpus();
+ spin_lock(&workqueue_lock);
+ list_del(&wq->list);
+ spin_unlock(&workqueue_lock);
- for_each_cpu_mask(cpu, cpu_populated_map) {
- cwq = per_cpu_ptr(wq->cpu_wq, cpu);
- cleanup_workqueue_thread(cwq, cpu);
- }
- }
+ for_each_cpu_mask(cpu, *cpu_map)
+ cleanup_workqueue_thread(per_cpu_ptr(wq->cpu_wq, cpu));
+ put_online_cpus();
free_percpu(wq->cpu_wq);
kfree(wq);
struct cpu_workqueue_struct *cwq;
struct workqueue_struct *wq;
- switch (action) {
- case CPU_LOCK_ACQUIRE:
- mutex_lock(&workqueue_mutex);
- return NOTIFY_OK;
-
- case CPU_LOCK_RELEASE:
- mutex_unlock(&workqueue_mutex);
- return NOTIFY_OK;
+ action &= ~CPU_TASKS_FROZEN;
+ switch (action) {
case CPU_UP_PREPARE:
cpu_set(cpu, cpu_populated_map);
}
case CPU_UP_PREPARE:
if (!create_workqueue_thread(cwq, cpu))
break;
- printk(KERN_ERR "workqueue for %i failed\n", cpu);
+ printk(KERN_ERR "workqueue [%s] for %i failed\n",
+ wq->name, cpu);
return NOTIFY_BAD;
case CPU_ONLINE:
- wake_up_process(cwq->thread);
+ start_workqueue_thread(cwq, cpu);
break;
case CPU_UP_CANCELED:
- if (cwq->thread)
- wake_up_process(cwq->thread);
+ start_workqueue_thread(cwq, -1);
case CPU_DEAD:
- cleanup_workqueue_thread(cwq, cpu);
+ cleanup_workqueue_thread(cwq);
break;
}
}
+ switch (action) {
+ case CPU_UP_CANCELED:
+ case CPU_DEAD:
+ cpu_clear(cpu, cpu_populated_map);
+ }
+
return NOTIFY_OK;
}
-void init_workqueues(void)
+void __init init_workqueues(void)
{
cpu_populated_map = cpu_online_map;
singlethread_cpu = first_cpu(cpu_possible_map);
+ cpu_singlethread_map = cpumask_of_cpu(singlethread_cpu);
hotcpu_notifier(workqueue_cpu_callback, 0);
keventd_wq = create_workqueue("events");
BUG_ON(!keventd_wq);