In drivers/connector/cn_queue.c, cn_queue_wrapper() now receives the work_struct pointer directly, so container_of() no longer reaches through the work member that delayed_work embeds:

 void cn_queue_wrapper(struct work_struct *work)
 {
 	struct cn_callback_entry *cbq =
-		container_of(work, struct cn_callback_entry, work.work);
+		container_of(work, struct cn_callback_entry, work);
 	struct cn_callback_data *d = &cbq->data;
 
 	d->callback(d->callback_priv);
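For reference: a delayed_work is a work_struct plus a timer, which is why the old handler had to name "work.work". The offset arithmetic itself is ordinary container_of(); here is a minimal user-space sketch of that recovery step (the struct names below are illustrative, not the kernel's):

#include <stddef.h>
#include <stdio.h>

/* Simplified stand-ins for the kernel types. */
struct work_struct { int pending; };
struct delayed_work { struct work_struct work; /* + a timer in the kernel */ };

struct entry {
	int id;
	struct work_struct work;
};

/* container_of(): subtract the member's offset from the member's address. */
#define container_of(ptr, type, member) \
	((type *)((char *)(ptr) - offsetof(type, member)))

int main(void)
{
	struct entry e = { .id = 42 };
	struct work_struct *w = &e.work;	/* what a work handler is handed */

	/* With a plain work_struct member, one container_of() step suffices. */
	struct entry *back = container_of(w, struct entry, work);
	printf("id = %d\n", back->id);		/* prints: id = 42 */
	return 0;
}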
In cn_queue_alloc_callback_entry(), the item is set up as an ordinary work item; there is no timer to initialize:

 	memcpy(&cbq->id.id, id, sizeof(struct cb_id));
 	cbq->data.callback = callback;
 
-	INIT_DELAYED_WORK(&cbq->work, &cn_queue_wrapper);
+	INIT_WORK(&cbq->work, &cn_queue_wrapper);
 	return cbq;
 }
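INIT_WORK() just binds a handler of type void (*)(struct work_struct *) to the item. A kernel-style sketch of the same allocation pattern, with made-up demo_* names:

#include <linux/kernel.h>
#include <linux/slab.h>
#include <linux/workqueue.h>

struct demo_entry {
	int payload;
	struct work_struct work;	/* plain work item, no delay */
};

static void demo_handler(struct work_struct *work)
{
	struct demo_entry *e = container_of(work, struct demo_entry, work);

	printk(KERN_INFO "payload = %d\n", e->payload);
}

static struct demo_entry *demo_alloc(int payload)
{
	struct demo_entry *e = kzalloc(sizeof(*e), GFP_KERNEL);

	if (!e)
		return NULL;
	e->payload = payload;
	/* Was INIT_DELAYED_WORK(&e->work, ...) when e->work had a timer. */
	INIT_WORK(&e->work, demo_handler);
	return e;
}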
cn_queue_free_callback() simply drops the cancel_delayed_work() call: with no timer in play, flushing the workqueue is enough to wait out any queued callback before the entry is freed:

 static void cn_queue_free_callback(struct cn_callback_entry *cbq)
 {
-	cancel_delayed_work(&cbq->work);
 	flush_workqueue(cbq->pdev->cn_queue);
 	kfree(cbq);
 }
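The distinction matters for teardown: a delayed item still sitting on its timer is not yet on the queue, so flush_workqueue() alone would not wait for it, which is what the cancel call was guarding against. A sketch of both patterns (the demo_* names are hypothetical):

#include <linux/workqueue.h>

/* Delayed case: stop the timer first so the item cannot be queued
 * behind our back, then wait for any already-queued run to finish. */
static void demo_teardown_delayed(struct workqueue_struct *wq,
				  struct delayed_work *dw)
{
	cancel_delayed_work(dw);
	flush_workqueue(wq);
}

/* Plain case: the item is either idle or already on the queue, so a
 * flush alone guarantees the handler is not running afterwards. */
static void demo_teardown_plain(struct workqueue_struct *wq)
{
	flush_workqueue(wq);
}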
The dispatch path in drivers/connector/connector.c, cn_call_callback(), switches both queueing sites from queue_delayed_work(..., 0) to queue_work():

 	spin_lock_bh(&dev->cbdev->queue_lock);
 	list_for_each_entry(__cbq, &dev->cbdev->queue_list, callback_entry) {
 		if (cn_cb_equal(&__cbq->id.id, &msg->id)) {
-			if (likely(!work_pending(&__cbq->work.work) &&
+			if (likely(!work_pending(&__cbq->work) &&
 					__cbq->data.ddata == NULL)) {
 				__cbq->data.callback_priv = msg;
 
 				__cbq->data.ddata = data;
 				__cbq->data.destruct_data = destruct_data;
 
-				if (queue_delayed_work(
-					    dev->cbdev->cn_queue,
-					    &__cbq->work, 0))
+				if (queue_work(dev->cbdev->cn_queue,
+					       &__cbq->work))
 					err = 0;
 				else
 					err = -EINVAL;
 			} else {
 				struct cn_callback_data *d;
 
 				err = -ENOMEM;
 				__cbq = kzalloc(sizeof(*__cbq), GFP_ATOMIC);
 				if (__cbq) {
 					d = &__cbq->data;
 					d->callback_priv = msg;
 					d->callback = __cbq->data.callback;
 					d->ddata = data;
 					d->destruct_data = destruct_data;
 					d->free = __cbq;
 
-					INIT_DELAYED_WORK(&__cbq->work,
-							  &cn_queue_wrapper);
+					INIT_WORK(&__cbq->work,
+						  &cn_queue_wrapper);
-					if (queue_delayed_work(
-						    dev->cbdev->cn_queue,
-						    &__cbq->work, 0))
+					if (queue_work(dev->cbdev->cn_queue,
+						       &__cbq->work))
 						err = 0;
 					else {
 						kfree(__cbq);
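In both branches err is cleared only when queue_work() reports success: it returns nonzero when the item was actually queued and 0 when it was already pending. A small sketch of that contract (demo_submit is a made-up helper name):

#include <linux/errno.h>
#include <linux/workqueue.h>

/* Hypothetical helper: translate queue_work()'s return value into an
 * error code, mirroring the err handling above. */
static int demo_submit(struct workqueue_struct *wq, struct work_struct *w)
{
	if (queue_work(wq, w))
		return 0;	/* newly queued; the handler will run once */

	return -EINVAL;		/* already pending; nothing was re-queued */
}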
Finally, the callback entry in include/linux/connector.h embeds a plain work_struct:

 struct cn_callback_entry {
 	struct list_head callback_entry;
 	struct cn_callback *cb;
-	struct delayed_work work;
+	struct work_struct work;
 	struct cn_queue_dev *pdev;
 
 	struct cn_callback_id id;
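With the member type changed, every accessor lines up again: work_pending(&e->work), queue_work(wq, &e->work), and the container_of() in cn_queue_wrapper() all name the same struct work_struct. A round-trip sketch under the same assumptions, with illustrative demo_* names:

#include <linux/errno.h>
#include <linux/kernel.h>
#include <linux/list.h>
#include <linux/workqueue.h>

struct demo_entry {
	struct list_head list;
	struct work_struct work;	/* formerly struct delayed_work */
};

/* Handler: one-step recovery of the entry, as in the fixed wrapper. */
static void demo_wrapper(struct work_struct *work)
{
	struct demo_entry *e = container_of(work, struct demo_entry, work);

	(void)e;	/* dispatch e's callback here */
}

/* Dispatch: assumes INIT_WORK(&e->work, demo_wrapper) ran at setup. */
static int demo_dispatch(struct workqueue_struct *wq, struct demo_entry *e)
{
	if (work_pending(&e->work))	/* no more &e->work.work */
		return -EBUSY;

	return queue_work(wq, &e->work) ? 0 : -EINVAL;
}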