err.no Git - linux-2.6/commitdiff
unify flush_work/flush_work_keventd and rename it to cancel_work_sync
author Oleg Nesterov <oleg@tv-sign.ru>
Wed, 9 May 2007 09:34:22 +0000 (02:34 -0700)
committer Linus Torvalds <torvalds@woody.linux-foundation.org>
Wed, 9 May 2007 19:30:53 +0000 (12:30 -0700)
flush_work(wq, work) doesn't need the first parameter, we can use cwq->wq
(this was possible from the very beginning, I missed this).  So we can unify
flush_work_keventd and flush_work.
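
In sketch form (stub types here, not the real kernel structures): every
cpu_workqueue_struct carries a back-pointer to its owning workqueue_struct,
and a queued work_struct records the cwq it went onto, so an API that used
to take (wq, work) can recover wq from the work alone.

struct workqueue;                          /* stands in for workqueue_struct */

struct cpu_workqueue {
        struct workqueue *wq;              /* back-pointer, i.e. cwq->wq */
};

struct work {
        struct cpu_workqueue *cwq;         /* recorded when the work is queued */
};

static struct workqueue *work_to_wq(struct work *w)
{
        return w->cwq->wq;                 /* no wq parameter needed by callers */
}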

Also, rename flush_work() to cancel_work_sync() and fix all callers.
Perhaps this is not the best name, but "flush_work" is really bad.

(akpm: this is why the earlier patches bypassed maintainers)

Signed-off-by: Oleg Nesterov <oleg@tv-sign.ru>
Cc: Jeff Garzik <jeff@garzik.org>
Cc: "David S. Miller" <davem@davemloft.net>
Cc: Jens Axboe <jens.axboe@oracle.com>
Cc: Tejun Heo <htejun@gmail.com>
Cc: Auke Kok <auke-jan.h.kok@intel.com>
Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
Signed-off-by: Linus Torvalds <torvalds@linux-foundation.org>
block/ll_rw_blk.c
drivers/ata/libata-core.c
drivers/net/e1000/e1000_main.c
drivers/net/phy/phy.c
drivers/net/tg3.c
fs/aio.c
include/linux/workqueue.h
kernel/workqueue.c
net/ipv4/ipvs/ip_vs_ctl.c

index c059767c552c88f7f0436b032503d536b027e372..df506571ed6037382d326f644f79ec47fdf4ced9 100644
--- a/block/ll_rw_blk.c
+++ b/block/ll_rw_blk.c
@@ -3633,7 +3633,7 @@ EXPORT_SYMBOL(kblockd_schedule_work);
 
 void kblockd_flush_work(struct work_struct *work)
 {
-       flush_work(kblockd_workqueue, work);
+       cancel_work_sync(work);
 }
 EXPORT_SYMBOL(kblockd_flush_work);
 
index b74e56caba6f4846e6373a7280c6c0c7e5e2502a..fef87dd70d1774e8b70ef397b21098b7b98b0233 100644
--- a/drivers/ata/libata-core.c
+++ b/drivers/ata/libata-core.c
@@ -1316,7 +1316,7 @@ void ata_port_flush_task(struct ata_port *ap)
        spin_unlock_irqrestore(ap->lock, flags);
 
        DPRINTK("flush #1\n");
-       flush_work(ata_wq, &ap->port_task.work); /* akpm: seems unneeded */
+       cancel_work_sync(&ap->port_task.work); /* akpm: seems unneeded */
 
        /*
         * At this point, if a task is running, it's guaranteed to see
@@ -1327,7 +1327,7 @@ void ata_port_flush_task(struct ata_port *ap)
                if (ata_msg_ctl(ap))
                        ata_port_printk(ap, KERN_DEBUG, "%s: flush #2\n",
                                        __FUNCTION__);
-               flush_work(ata_wq, &ap->port_task.work);
+               cancel_work_sync(&ap->port_task.work);
        }
 
        spin_lock_irqsave(ap->lock, flags);
@@ -6475,9 +6475,9 @@ void ata_port_detach(struct ata_port *ap)
        /* Flush hotplug task.  The sequence is similar to
         * ata_port_flush_task().
         */
-       flush_work(ata_aux_wq, &ap->hotplug_task.work); /* akpm: why? */
+       cancel_work_sync(&ap->hotplug_task.work); /* akpm: why? */
        cancel_delayed_work(&ap->hotplug_task);
-       flush_work(ata_aux_wq, &ap->hotplug_task.work);
+       cancel_work_sync(&ap->hotplug_task.work);
 
  skip_eh:
        /* remove the associated SCSI host */
index 397e25bdbfecfd68d4808847850e1f5e8cbd5287..637ae8f6879199444716e0c8025ec56a3eaacdc0 100644
--- a/drivers/net/e1000/e1000_main.c
+++ b/drivers/net/e1000/e1000_main.c
@@ -1214,7 +1214,7 @@ e1000_remove(struct pci_dev *pdev)
        int i;
 #endif
 
-       flush_work_keventd(&adapter->reset_task);
+       cancel_work_sync(&adapter->reset_task);
 
        e1000_release_manageability(adapter);
 
index f445c465b14e8b55be3d8945c42c9dd9ba68de13..f71dab347667cd3fc33b408aaa55c0fdf7e4e570 100644
--- a/drivers/net/phy/phy.c
+++ b/drivers/net/phy/phy.c
@@ -663,9 +663,9 @@ int phy_stop_interrupts(struct phy_device *phydev)
 
        /*
         * Finish any pending work; we might have been scheduled to be called
-        * from keventd ourselves, but flush_work_keventd() handles that.
+        * from keventd ourselves, but cancel_work_sync() handles that.
         */
-       flush_work_keventd(&phydev->phy_queue);
+       cancel_work_sync(&phydev->phy_queue);
 
        free_irq(phydev->irq, phydev);
 
index 0c0f9c81732136e1ee23b5ea98dbf332ba82b14f..923b9c725cc3f4e5cd75434914dbb4ce466c151f 100644
--- a/drivers/net/tg3.c
+++ b/drivers/net/tg3.c
@@ -7386,7 +7386,7 @@ static int tg3_close(struct net_device *dev)
 {
        struct tg3 *tp = netdev_priv(dev);
 
-       flush_work_keventd(&tp->reset_task);
+       cancel_work_sync(&tp->reset_task);
 
        netif_stop_queue(dev);
 
index d18690bb03e9becaff3d098f3484fddb456a23d3..ac1c1587aa02dad4583b7a394cf1261e79a0c680 100644
--- a/fs/aio.c
+++ b/fs/aio.c
@@ -348,7 +348,7 @@ void fastcall exit_aio(struct mm_struct *mm)
                /*
                 * Ensure we don't leave the ctx on the aio_wq
                 */
-               flush_work(aio_wq, &ctx->wq.work);
+               cancel_work_sync(&ctx->wq.work);
 
                if (1 != atomic_read(&ctx->users))
                        printk(KERN_DEBUG
@@ -371,7 +371,7 @@ void fastcall __put_ioctx(struct kioctx *ctx)
        BUG_ON(ctx->reqs_active);
 
        cancel_delayed_work(&ctx->wq);
-       flush_work(aio_wq, &ctx->wq.work);
+       cancel_work_sync(&ctx->wq.work);
        aio_free_ring(ctx);
        mmdrop(ctx->mm);
        ctx->mm = NULL;
index e1581dce5890eecef2e19377bc4ffd4eb227264c..d555f31c0746a31a50f376ca41ed2cbcf48742fe 100644
--- a/include/linux/workqueue.h
+++ b/include/linux/workqueue.h
@@ -128,30 +128,33 @@ extern struct workqueue_struct *__create_workqueue(const char *name,
 extern void destroy_workqueue(struct workqueue_struct *wq);
 
 extern int FASTCALL(queue_work(struct workqueue_struct *wq, struct work_struct *work));
-extern int FASTCALL(queue_delayed_work(struct workqueue_struct *wq, struct delayed_work *work, unsigned long delay));
+extern int FASTCALL(queue_delayed_work(struct workqueue_struct *wq,
+                       struct delayed_work *work, unsigned long delay));
 extern int queue_delayed_work_on(int cpu, struct workqueue_struct *wq,
-       struct delayed_work *work, unsigned long delay);
+                       struct delayed_work *work, unsigned long delay);
+
 extern void FASTCALL(flush_workqueue(struct workqueue_struct *wq));
-extern void flush_work(struct workqueue_struct *wq, struct work_struct *work);
-extern void flush_work_keventd(struct work_struct *work);
+extern void flush_scheduled_work(void);
 
 extern int FASTCALL(schedule_work(struct work_struct *work));
-extern int FASTCALL(schedule_delayed_work(struct delayed_work *work, unsigned long delay));
-
-extern int schedule_delayed_work_on(int cpu, struct delayed_work *work, unsigned long delay);
+extern int FASTCALL(schedule_delayed_work(struct delayed_work *work,
+                                       unsigned long delay));
+extern int schedule_delayed_work_on(int cpu, struct delayed_work *work,
+                                       unsigned long delay);
 extern int schedule_on_each_cpu(work_func_t func);
-extern void flush_scheduled_work(void);
 extern int current_is_keventd(void);
 extern int keventd_up(void);
 
 extern void init_workqueues(void);
 int execute_in_process_context(work_func_t fn, struct execute_work *);
 
+extern void cancel_work_sync(struct work_struct *work);
+
 /*
  * Kill off a pending schedule_delayed_work().  Note that the work callback
  * function may still be running on return from cancel_delayed_work(), unless
  * it returns 1 and the work doesn't re-arm itself. Run flush_workqueue() or
- * flush_work() or cancel_work_sync() to wait on it.
+ * cancel_work_sync() to wait on it.
  */
 static inline int cancel_delayed_work(struct delayed_work *work)
 {
index 63885abf1ba093e09af3b0dc981a789bd1bf1d46..c9ab4293904ff2b047f45e21ff95f315eb7d41da 100644
--- a/kernel/workqueue.c
+++ b/kernel/workqueue.c
@@ -413,23 +413,23 @@ static void wait_on_work(struct cpu_workqueue_struct *cwq,
 }
 
 /**
- * flush_work - block until a work_struct's callback has terminated
- * @wq: the workqueue on which the work is queued
+ * cancel_work_sync - block until a work_struct's callback has terminated
  * @work: the work which is to be flushed
  *
- * flush_work() will attempt to cancel the work if it is queued.  If the work's
- * callback appears to be running, flush_work() will block until it has
- * completed.
+ * cancel_work_sync() will attempt to cancel the work if it is queued. If the
+ * work's callback appears to be running, cancel_work_sync() will block until
+ * it has completed.
  *
- * flush_work() is designed to be used when the caller is tearing down data
- * structures which the callback function operates upon.  It is expected that,
- * prior to calling flush_work(), the caller has arranged for the work to not
- * be requeued.
+ * cancel_work_sync() is designed to be used when the caller is tearing down
+ * data structures which the callback function operates upon. It is expected
+ * that, prior to calling cancel_work_sync(), the caller has arranged for the
+ * work to not be requeued.
  */
-void flush_work(struct workqueue_struct *wq, struct work_struct *work)
+void cancel_work_sync(struct work_struct *work)
 {
-       const cpumask_t *cpu_map = wq_cpu_map(wq);
        struct cpu_workqueue_struct *cwq;
+       struct workqueue_struct *wq;
+       const cpumask_t *cpu_map;
        int cpu;
 
        might_sleep();
@@ -448,10 +448,13 @@ void flush_work(struct workqueue_struct *wq, struct work_struct *work)
        work_clear_pending(work);
        spin_unlock_irq(&cwq->lock);
 
+       wq = cwq->wq;
+       cpu_map = wq_cpu_map(wq);
+
        for_each_cpu_mask(cpu, *cpu_map)
                wait_on_work(per_cpu_ptr(wq->cpu_wq, cpu), work);
 }
-EXPORT_SYMBOL_GPL(flush_work);
+EXPORT_SYMBOL_GPL(cancel_work_sync);
 
 
 static struct workqueue_struct *keventd_wq;
@@ -540,18 +543,13 @@ void flush_scheduled_work(void)
 }
 EXPORT_SYMBOL(flush_scheduled_work);
 
-void flush_work_keventd(struct work_struct *work)
-{
-       flush_work(keventd_wq, work);
-}
-EXPORT_SYMBOL(flush_work_keventd);
-
 /**
  * cancel_rearming_delayed_work - kill off a delayed work whose handler rearms the delayed work.
  * @dwork: the delayed work struct
  *
  * Note that the work callback function may still be running on return from
- * cancel_delayed_work(). Run flush_workqueue() or flush_work() to wait on it.
+ * cancel_delayed_work(). Run flush_workqueue() or cancel_work_sync() to wait
+ * on it.
  */
 void cancel_rearming_delayed_work(struct delayed_work *dwork)
 {
index 342e836677a1e7b6377e3ac0971233a5a2df57b5..68fe1d4d0210384d358947acbdd55ed17bdc4f8d 100644
--- a/net/ipv4/ipvs/ip_vs_ctl.c
+++ b/net/ipv4/ipvs/ip_vs_ctl.c
@@ -2387,7 +2387,7 @@ void ip_vs_control_cleanup(void)
        EnterFunction(2);
        ip_vs_trash_cleanup();
        cancel_rearming_delayed_work(&defense_work);
-       flush_work_keventd(&defense_work.work);
+       cancel_work_sync(&defense_work.work);
        ip_vs_kill_estimator(&ip_vs_stats);
        unregister_sysctl_table(sysctl_header);
        proc_net_remove("ip_vs_stats");