static int check_interval = 5 * 60; /* 5 minutes */
static void mcheck_timer(void *data);
-static DECLARE_WORK(mcheck_work, mcheck_timer, NULL);
+static DECLARE_DELAYED_WORK(mcheck_work, mcheck_timer, NULL);
static void mcheck_check_cpu(void *info)
{
if (ap->pflags & ATA_PFLAG_FLUSH_PORT_TASK)
return;
- PREPARE_WORK(&ap->port_task, fn, data);
+ PREPARE_DELAYED_WORK(&ap->port_task, fn, data);
- if (!delay)
- rc = queue_work(ata_wq, &ap->port_task);
- else
- rc = queue_delayed_work(ata_wq, &ap->port_task, delay);
+ rc = queue_delayed_work(ata_wq, &ap->port_task, delay);
/* rc == 0 means that another user is using port task */
WARN_ON(rc == 0);
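A minimal sketch of this conversion pattern, with hypothetical names (my_wq, my_task, my_fn): queue_delayed_work() now degenerates to queue_work() when the delay is zero (see the workqueue.c hunk further down), so the old two-branch call site collapses into a single call.

#include <linux/workqueue.h>

static struct workqueue_struct *my_wq;	/* hypothetical private queue */

static void my_fn(void *data)
{
	/* ... handle the queued task ... */
}

static DECLARE_DELAYED_WORK(my_task, my_fn, NULL);

static int my_queue_task(unsigned long delay)
{
	/*
	 * Before this series:
	 *	if (!delay)
	 *		rc = queue_work(my_wq, &my_task);
	 *	else
	 *		rc = queue_delayed_work(my_wq, &my_task, delay);
	 */
	return queue_delayed_work(my_wq, &my_task, delay);
}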
ap->msg_enable = ATA_MSG_DRV | ATA_MSG_ERR | ATA_MSG_WARN;
#endif
- INIT_WORK(&ap->port_task, NULL, NULL);
- INIT_WORK(&ap->hotplug_task, ata_scsi_hotplug, ap);
+ INIT_DELAYED_WORK(&ap->port_task, NULL, NULL);
+ INIT_DELAYED_WORK(&ap->hotplug_task, ata_scsi_hotplug, ap);
INIT_WORK(&ap->scsi_rescan_task, ata_scsi_dev_rescan, ap);
INIT_LIST_HEAD(&ap->eh_done_q);
init_waitqueue_head(&ap->eh_wait_q);
if (ap->pflags & ATA_PFLAG_LOADING)
ap->pflags &= ~ATA_PFLAG_LOADING;
else if (ap->pflags & ATA_PFLAG_SCSI_HOTPLUG)
- queue_work(ata_aux_wq, &ap->hotplug_task);
+ queue_delayed_work(ata_aux_wq, &ap->hotplug_task, 0);
if (ap->pflags & ATA_PFLAG_RECOVERED)
ata_port_printk(ap, KERN_INFO, "EH complete\n");
static void rekey_seq_generator(void *private_);
-static DECLARE_WORK(rekey_work, rekey_seq_generator, NULL);
+static DECLARE_DELAYED_WORK(rekey_work, rekey_seq_generator, NULL);
/*
* Lock avoidance:
tty->overrun_time = jiffies;
tty->buf.head = tty->buf.tail = NULL;
tty_buffer_init(tty);
- INIT_WORK(&tty->buf.work, flush_to_ldisc, tty);
+ INIT_DELAYED_WORK(&tty->buf.work, flush_to_ldisc, tty);
init_MUTEX(&tty->buf.pty_sem);
mutex_init(&tty->termios_mutex);
init_waitqueue_head(&tty->write_wait);
INIT_LIST_HEAD(&ctx->active_reqs);
INIT_LIST_HEAD(&ctx->run_list);
- INIT_WORK(&ctx->wq, aio_kick_handler, ctx);
+ INIT_DELAYED_WORK(&ctx->wq, aio_kick_handler, ctx);
if (aio_setup_ring(ctx) < 0)
goto out_freectx;
- * we're in a worker thread already, don't use queue_delayed_work,
+ * we're in a worker thread already; requeue with a zero delay so
+ * the kick handler runs again without a timer round trip
 */
if (requeue)
- queue_work(aio_wq, &ctx->wq);
+ queue_delayed_work(aio_wq, &ctx->wq, 0);
}
INIT_LIST_HEAD(&clp->cl_state_owners);
INIT_LIST_HEAD(&clp->cl_unused);
spin_lock_init(&clp->cl_lock);
- INIT_WORK(&clp->cl_renewd, nfs4_renew_state, clp);
+ INIT_DELAYED_WORK(&clp->cl_renewd, nfs4_renew_state, clp);
rpc_init_wait_queue(&clp->cl_rpcwaitq, "NFS client");
clp->cl_boot_time = CURRENT_TIME;
clp->cl_state = 1 << NFS4CLNT_LEASE_EXPIRED;
static void nfs_expire_automounts(void *list);
LIST_HEAD(nfs_automount_list);
-static DECLARE_WORK(nfs_automount_task, nfs_expire_automounts, &nfs_automount_list);
+static DECLARE_DELAYED_WORK(nfs_automount_task, nfs_expire_automounts,
+ &nfs_automount_list);
int nfs_mountpoint_expiry_timeout = 500 * HZ;
static struct vfsmount *nfs_do_submount(const struct vfsmount *mnt_parent,
struct aio_ring_info ring_info;
- struct work_struct wq;
+ struct delayed_work wq;
};
/* prototypes */
if (t->buf.tail != NULL)
t->buf.tail->commit = t->buf.tail->used;
spin_unlock_irqrestore(&t->buf.lock, flags);
- schedule_work(&t->buf.work);
+ schedule_delayed_work(&t->buf.work, 0);
}
#endif
struct ata_host *host;
struct device *dev;
- struct work_struct port_task;
- struct work_struct hotplug_task;
+ struct delayed_work port_task;
+ struct delayed_work hotplug_task;
struct work_struct scsi_rescan_task;
unsigned int hsm_task_state;
unsigned long cl_lease_time;
unsigned long cl_last_renewal;
- struct work_struct cl_renewd;
+ struct delayed_work cl_renewd;
struct rpc_wait_queue cl_rpcwaitq;
#define RPC_PIPE_WAIT_FOR_OPEN 1
int flags;
struct rpc_pipe_ops *ops;
- struct work_struct queue_timeout;
+ struct delayed_work queue_timeout;
};
static inline struct rpc_inode *
unsigned long connect_timeout,
bind_timeout,
reestablish_timeout;
- struct work_struct connect_worker;
+ struct delayed_work connect_worker;
unsigned short port;
/*
};
struct tty_bufhead {
- struct work_struct work;
+ struct delayed_work work;
struct semaphore pty_sem;
spinlock_t lock;
struct tty_buffer *head; /* Queue head */
void (*func)(void *);
void *data;
void *wq_data;
+};
+
+struct delayed_work {
+ struct work_struct work;
struct timer_list timer;
};
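A minimal sketch of embedding the new type, assuming a hypothetical driver (my_dev, my_poll): the delayed_work wraps the old work_struct, so only the declaration and the init/queue calls change; handlers keep their void * argument in this series.

#include <linux/workqueue.h>
#include <linux/jiffies.h>

struct my_dev {
	struct delayed_work poll_work;	/* was: struct work_struct */
	int state;
};

static void my_poll(void *data)
{
	struct my_dev *dev = data;
	/* ... sample the hardware, update dev->state ... */
}

static void my_dev_start(struct my_dev *dev)
{
	INIT_DELAYED_WORK(&dev->poll_work, my_poll, dev);
	schedule_delayed_work(&dev->poll_work, HZ);	/* run in one second */
}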
.entry = { &(n).entry, &(n).entry }, \
.func = (f), \
.data = (d), \
+ }
+
+#define __DELAYED_WORK_INITIALIZER(n, f, d) { \
+ .work = __WORK_INITIALIZER((n).work, (f), (d)), \
.timer = TIMER_INITIALIZER(NULL, 0, 0), \
}
#define DECLARE_WORK(n, f, d) \
struct work_struct n = __WORK_INITIALIZER(n, f, d)
+#define DECLARE_DELAYED_WORK(n, f, d) \
+ struct delayed_work n = __DELAYED_WORK_INITIALIZER(n, f, d)
+
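A short sketch of a compile-time-initialized item, mirroring the static declarations converted above (rekey_work, nfs_automount_task); flush_stats and stats_task are hypothetical names.

#include <linux/init.h>
#include <linux/jiffies.h>
#include <linux/workqueue.h>

static void flush_stats(void *unused)
{
	/* ... write out accumulated counters ... */
}

static DECLARE_DELAYED_WORK(stats_task, flush_stats, NULL);

static int __init stats_init(void)
{
	schedule_delayed_work(&stats_task, 10 * HZ);	/* first run in 10s */
	return 0;
}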
/*
- * initialize a work-struct's func and data pointers:
+ * initialize a work item's function and data pointers
*/
#define PREPARE_WORK(_work, _func, _data) \
do { \
- (_work)->func = _func; \
- (_work)->data = _data; \
+ (_work)->func = (_func); \
+ (_work)->data = (_data); \
} while (0)
+#define PREPARE_DELAYED_WORK(_work, _func, _data) \
+ PREPARE_WORK(&(_work)->work, (_func), (_data))
+
/*
- * initialize all of a work-struct:
+ * initialize all of a work item in one go
*/
#define INIT_WORK(_work, _func, _data) \
do { \
INIT_LIST_HEAD(&(_work)->entry); \
(_work)->pending = 0; \
PREPARE_WORK((_work), (_func), (_data)); \
+ } while (0)
+
+#define INIT_DELAYED_WORK(_work, _func, _data) \
+ do { \
+ INIT_WORK(&(_work)->work, (_func), (_data)); \
init_timer(&(_work)->timer); \
} while (0)
+
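A sketch of re-targeting an already-initialized item before queueing, as ata_port_queue_task() does above; my_port, my_port_queue_task and my_wq are hypothetical. PREPARE_DELAYED_WORK() only rewrites the embedded work_struct's func/data; the timer set up by INIT_DELAYED_WORK() is left untouched.

#include <linux/workqueue.h>

struct my_port {
	struct delayed_work task;
};

static struct workqueue_struct *my_wq;

static int my_port_queue_task(struct my_port *port, void (*fn)(void *),
			      void *data, unsigned long delay)
{
	PREPARE_DELAYED_WORK(&port->task, fn, data);
	return queue_delayed_work(my_wq, &port->task, delay);
}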
extern struct workqueue_struct *__create_workqueue(const char *name,
int singlethread);
#define create_workqueue(name) __create_workqueue((name), 0)
extern void destroy_workqueue(struct workqueue_struct *wq);
extern int FASTCALL(queue_work(struct workqueue_struct *wq, struct work_struct *work));
-extern int FASTCALL(queue_delayed_work(struct workqueue_struct *wq, struct work_struct *work, unsigned long delay));
+extern int FASTCALL(queue_delayed_work(struct workqueue_struct *wq, struct delayed_work *work, unsigned long delay));
extern int queue_delayed_work_on(int cpu, struct workqueue_struct *wq,
- struct work_struct *work, unsigned long delay);
+ struct delayed_work *work, unsigned long delay);
extern void FASTCALL(flush_workqueue(struct workqueue_struct *wq));
extern int FASTCALL(schedule_work(struct work_struct *work));
-extern int FASTCALL(schedule_delayed_work(struct work_struct *work, unsigned long delay));
+extern int FASTCALL(schedule_delayed_work(struct delayed_work *work, unsigned long delay));
-extern int schedule_delayed_work_on(int cpu, struct work_struct *work, unsigned long delay);
+extern int schedule_delayed_work_on(int cpu, struct delayed_work *work, unsigned long delay);
extern int schedule_on_each_cpu(void (*func)(void *info), void *info);
extern void flush_scheduled_work(void);
extern int current_is_keventd(void);
extern int keventd_up(void);
extern void init_workqueues(void);
-void cancel_rearming_delayed_work(struct work_struct *work);
+void cancel_rearming_delayed_work(struct delayed_work *work);
void cancel_rearming_delayed_workqueue(struct workqueue_struct *,
- struct work_struct *);
+ struct delayed_work *);
int execute_in_process_context(void (*fn)(void *), void *,
struct execute_work *);
* function may still be running on return from cancel_delayed_work(). Run
* flush_scheduled_work() to wait on it.
*/
-static inline int cancel_delayed_work(struct work_struct *work)
+static inline int cancel_delayed_work(struct delayed_work *work)
{
int ret;
ret = del_timer_sync(&work->timer);
if (ret)
- clear_bit(0, &work->pending);
+ clear_bit(0, &work->work.pending);
return ret;
}
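A usage sketch with hypothetical names (probe_fn, probe_task, stop_probe): the return value says whether a still-pending timer was deactivated, so a failed cancel should be followed by a flush if the caller must not race with a running handler.

#include <linux/workqueue.h>

static void probe_fn(void *unused)
{
	/* ... */
}

static DECLARE_DELAYED_WORK(probe_task, probe_fn, NULL);

static void stop_probe(void)
{
	/* timer already fired?  the handler may be running on keventd */
	if (!cancel_delayed_work(&probe_task))
		flush_scheduled_work();
}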
static void delayed_work_timer_fn(unsigned long __data)
{
- struct work_struct *work = (struct work_struct *)__data;
- struct workqueue_struct *wq = work->wq_data;
+ struct delayed_work *dwork = (struct delayed_work *)__data;
+ struct workqueue_struct *wq = dwork->work.wq_data;
int cpu = smp_processor_id();
if (unlikely(is_single_threaded(wq)))
cpu = singlethread_cpu;
- __queue_work(per_cpu_ptr(wq->cpu_wq, cpu), work);
+ __queue_work(per_cpu_ptr(wq->cpu_wq, cpu), &dwork->work);
}
/**
* queue_delayed_work - queue work on a workqueue after delay
* @wq: workqueue to use
- * @work: work to queue
+ * @dwork: delayable work to queue
* @delay: number of jiffies to wait before queueing
*
- * Returns 0 if @work was already on a queue, non-zero otherwise.
+ * Returns 0 if @dwork was already on a queue, non-zero otherwise.
*/
int fastcall queue_delayed_work(struct workqueue_struct *wq,
- struct work_struct *work, unsigned long delay)
+ struct delayed_work *dwork, unsigned long delay)
{
int ret = 0;
- struct timer_list *timer = &work->timer;
+ struct timer_list *timer = &dwork->timer;
+ struct work_struct *work = &dwork->work;
+
+ if (delay == 0)
+ return queue_work(wq, work);
if (!test_and_set_bit(0, &work->pending)) {
BUG_ON(timer_pending(timer));
/* This stores wq for the moment, for the timer_fn */
work->wq_data = wq;
timer->expires = jiffies + delay;
- timer->data = (unsigned long)work;
+ timer->data = (unsigned long)dwork;
timer->function = delayed_work_timer_fn;
add_timer(timer);
ret = 1;
- * Returns 0 if @work was already on a queue, non-zero otherwise.
+ * Returns 0 if @dwork was already on a queue, non-zero otherwise.
*/
int queue_delayed_work_on(int cpu, struct workqueue_struct *wq,
- struct work_struct *work, unsigned long delay)
+ struct delayed_work *dwork, unsigned long delay)
{
int ret = 0;
- struct timer_list *timer = &work->timer;
+ struct timer_list *timer = &dwork->timer;
+ struct work_struct *work = &dwork->work;
if (!test_and_set_bit(0, &work->pending)) {
BUG_ON(timer_pending(timer));
/* This stores wq for the moment, for the timer_fn */
work->wq_data = wq;
timer->expires = jiffies + delay;
- timer->data = (unsigned long)work;
+ timer->data = (unsigned long)dwork;
timer->function = delayed_work_timer_fn;
add_timer_on(timer, cpu);
ret = 1;
/**
* schedule_delayed_work - put work task in global workqueue after delay
- * @work: job to be done
- * @delay: number of jiffies to wait
+ * @dwork: job to be done
+ * @delay: number of jiffies to wait or 0 for immediate execution
*
* After waiting for a given time this puts a job in the kernel-global
* workqueue.
*/
-int fastcall schedule_delayed_work(struct work_struct *work, unsigned long delay)
+int fastcall schedule_delayed_work(struct delayed_work *dwork, unsigned long delay)
{
- return queue_delayed_work(keventd_wq, work, delay);
+ return queue_delayed_work(keventd_wq, dwork, delay);
}
EXPORT_SYMBOL(schedule_delayed_work);
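A sketch of the simplest call-site conversion seen throughout this patch (tty, sunrpc): a former schedule_work() on an item that is now a delayed_work becomes schedule_delayed_work() with a zero delay. event_fn, event_task and kick_event are hypothetical.

#include <linux/workqueue.h>

static void event_fn(void *unused)
{
	/* ... */
}

static DECLARE_DELAYED_WORK(event_task, event_fn, NULL);

static void kick_event(void)
{
	schedule_delayed_work(&event_task, 0);
	/*
	 * Equivalent when no timer is pending, since the zero-delay fast
	 * path above just calls queue_work() on the embedded item:
	 *	schedule_work(&event_task.work);
	 */
}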
/**
* schedule_delayed_work_on - queue work in global workqueue on CPU after delay
* @cpu: cpu to use
- * @work: job to be done
+ * @dwork: job to be done
* @delay: number of jiffies to wait
*
* After waiting for a given time this puts a job in the kernel-global
* workqueue on the specified CPU.
*/
int schedule_delayed_work_on(int cpu,
- struct work_struct *work, unsigned long delay)
+ struct delayed_work *dwork, unsigned long delay)
{
- return queue_delayed_work_on(cpu, keventd_wq, work, delay);
+ return queue_delayed_work_on(cpu, keventd_wq, dwork, delay);
}
EXPORT_SYMBOL(schedule_delayed_work_on);
* cancel_rearming_delayed_workqueue - reliably kill off a delayed
* work whose handler rearms the delayed work.
* @wq: the controlling workqueue structure
- * @work: the delayed work struct
+ * @dwork: the delayed work struct
*/
void cancel_rearming_delayed_workqueue(struct workqueue_struct *wq,
- struct work_struct *work)
+ struct delayed_work *dwork)
{
- while (!cancel_delayed_work(work))
+ while (!cancel_delayed_work(dwork))
flush_workqueue(wq);
}
EXPORT_SYMBOL(cancel_rearming_delayed_workqueue);
/**
* cancel_rearming_delayed_work - reliably kill off a delayed keventd
* work whose handler rearms the delayed work.
- * @work: the delayed work struct
+ * @dwork: the delayed work struct
*/
-void cancel_rearming_delayed_work(struct work_struct *work)
+void cancel_rearming_delayed_work(struct delayed_work *dwork)
{
- cancel_rearming_delayed_workqueue(keventd_wq, work);
+ cancel_rearming_delayed_workqueue(keventd_wq, dwork);
}
EXPORT_SYMBOL(cancel_rearming_delayed_work);
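A teardown sketch for a self-rearming handler, in the style of cache_reap() and do_cache_clean() below; reaper, reap_task and reaper_shutdown are hypothetical. A single cancel_delayed_work() can lose the race with the handler requeueing itself, so the helper loops cancel + flush until the work is really gone.

#include <linux/jiffies.h>
#include <linux/workqueue.h>

static void reaper(void *unused);
static DECLARE_DELAYED_WORK(reap_task, reaper, NULL);

static void reaper(void *unused)
{
	/* ... reclaim something ... */
	schedule_delayed_work(&reap_task, 30 * HZ);	/* rearm */
}

static void reaper_shutdown(void)
{
	cancel_rearming_delayed_work(&reap_task);	/* cancel + flush loop */
}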
return g_cpucache_up == FULL;
}
-static DEFINE_PER_CPU(struct work_struct, reap_work);
+static DEFINE_PER_CPU(struct delayed_work, reap_work);
static inline struct array_cache *cpu_cache_get(struct kmem_cache *cachep)
{
*/
static void __devinit start_cpu_timer(int cpu)
{
- struct work_struct *reap_work = &per_cpu(reap_work, cpu);
+ struct delayed_work *reap_work = &per_cpu(reap_work, cpu);
/*
* When this gets called from do_initcalls via cpucache_init(),
* init_workqueues() has already run, so keventd will be setup
* at that time.
*/
- if (keventd_up() && reap_work->func == NULL) {
+ if (keventd_up() && reap_work->work.func == NULL) {
init_reap_node(cpu);
- INIT_WORK(reap_work, cache_reap, NULL);
+ INIT_DELAYED_WORK(reap_work, cache_reap, NULL);
schedule_delayed_work_on(cpu, reap_work, HZ + 3 * cpu);
}
}
static unsigned long linkwatch_nextevent;
static void linkwatch_event(void *dummy);
-static DECLARE_WORK(linkwatch_work, linkwatch_event, NULL);
+static DECLARE_DELAYED_WORK(linkwatch_work, linkwatch_event, NULL);
static LIST_HEAD(lweventlist);
static DEFINE_SPINLOCK(lweventlist_lock);
unsigned long delay = linkwatch_nextevent - jiffies;
/* If we wrap around we'll delay it by at most HZ. */
- if (!delay || delay > HZ)
- schedule_work(&linkwatch_work);
- else
- schedule_delayed_work(&linkwatch_work, delay);
+ if (delay > HZ)
+ delay = 0;
+ schedule_delayed_work(&linkwatch_work, delay);
}
}
}
static struct file_operations cache_flush_operations;
static void do_cache_clean(void *data);
-static DECLARE_WORK(cache_cleaner, do_cache_clean, NULL);
+static DECLARE_DELAYED_WORK(cache_cleaner, do_cache_clean, NULL);
void cache_register(struct cache_detail *cd)
{
spin_unlock(&cache_list_lock);
/* start the cleaning process */
- schedule_work(&cache_cleaner);
+ schedule_delayed_work(&cache_cleaner, 0);
}
int cache_unregister(struct cache_detail *cd)
INIT_LIST_HEAD(&rpci->pipe);
rpci->pipelen = 0;
init_waitqueue_head(&rpci->waitq);
- INIT_WORK(&rpci->queue_timeout, rpc_timeout_upcall_queue, rpci);
+ INIT_DELAYED_WORK(&rpci->queue_timeout,
+ rpc_timeout_upcall_queue, rpci);
rpci->ops = NULL;
}
}
xprt->reestablish_timeout = XS_TCP_MAX_REEST_TO;
} else {
dprintk("RPC: xs_connect scheduled xprt %p\n", xprt);
- schedule_work(&xprt->connect_worker);
+ schedule_delayed_work(&xprt->connect_worker, 0);
/* flush_scheduled_work can sleep... */
if (!RPC_IS_ASYNC(task))
/* XXX: header size can vary due to auth type, IPv6, etc. */
xprt->max_payload = (1U << 16) - (MAX_HEADER << 3);
- INIT_WORK(&xprt->connect_worker, xs_udp_connect_worker, xprt);
+ INIT_DELAYED_WORK(&xprt->connect_worker, xs_udp_connect_worker, xprt);
xprt->bind_timeout = XS_BIND_TO;
xprt->connect_timeout = XS_UDP_CONN_TO;
xprt->reestablish_timeout = XS_UDP_REEST_TO;
xprt->tsh_size = sizeof(rpc_fraghdr) / sizeof(u32);
xprt->max_payload = RPC_MAX_FRAGMENT_SIZE;
- INIT_WORK(&xprt->connect_worker, xs_tcp_connect_worker, xprt);
+ INIT_DELAYED_WORK(&xprt->connect_worker, xs_tcp_connect_worker, xprt);
xprt->bind_timeout = XS_BIND_TO;
xprt->connect_timeout = XS_TCP_CONN_TO;
xprt->reestablish_timeout = XS_TCP_INIT_REEST_TO;