static int sharpsl_fatal_check(void);
static int sharpsl_average_value(int ad);
static void sharpsl_average_clear(void);
-static void sharpsl_charge_toggle(void *private_);
-static void sharpsl_battery_thread(void *private_);
+static void sharpsl_charge_toggle(struct work_struct *private_);
+static void sharpsl_battery_thread(struct work_struct *private_);
/*
* Variables
*/
struct sharpsl_pm_status sharpsl_pm;
-DECLARE_WORK(toggle_charger, sharpsl_charge_toggle, NULL);
-DECLARE_WORK(sharpsl_bat, sharpsl_battery_thread, NULL);
+DECLARE_DELAYED_WORK(toggle_charger, sharpsl_charge_toggle);
+DECLARE_DELAYED_WORK(sharpsl_bat, sharpsl_battery_thread);
DEFINE_LED_TRIGGER(sharpsl_charge_led_trigger);
EXPORT_SYMBOL(sharpsl_battery_kick);
-static void sharpsl_battery_thread(void *private_)
+static void sharpsl_battery_thread(struct work_struct *private_)
{
int voltage, percent, apm_status, i = 0;
/* Corgi cannot confirm when battery fully charged so periodically kick! */
if (!sharpsl_pm.machinfo->batfull_irq && (sharpsl_pm.charge_mode == CHRG_ON)
&& time_after(jiffies, sharpsl_pm.charge_start_time + SHARPSL_CHARGE_ON_TIME_INTERVAL))
- schedule_work(&toggle_charger);
+ schedule_delayed_work(&toggle_charger, 0);
while(1) {
voltage = sharpsl_pm.machinfo->read_devdata(SHARPSL_BATT_VOLT);
sharpsl_pm_led(SHARPSL_LED_OFF);
sharpsl_pm.charge_mode = CHRG_OFF;
- schedule_work(&sharpsl_bat);
+ schedule_delayed_work(&sharpsl_bat, 0);
}
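
The shape above recurs throughout this series: a three-argument DECLARE_WORK(name, fn, data) becomes a two-argument DECLARE_DELAYED_WORK(name, fn), and each schedule_work() on that item becomes schedule_delayed_work(..., 0). The items become delayed work because other call sites (not shown in this excerpt) schedule them with a real delay, and one item must have a single type; a zero delay preserves the old immediate-queueing behaviour. A minimal sketch of the converted form, with illustrative names:

/* Sketch only; battery_poll and battery_poll_work are invented names. */
static void battery_poll(struct work_struct *unused);
static DECLARE_DELAYED_WORK(battery_poll_work, battery_poll);

static void battery_poll(struct work_struct *unused)
{
	/* delay 0: runs as soon as keventd gets to it, i.e. the old
	 * schedule_work() behaviour */
	schedule_delayed_work(&battery_poll_work, 0);
}
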
static void sharpsl_charge_error(void)
sharpsl_pm.charge_mode = CHRG_ERROR;
}
-static void sharpsl_charge_toggle(void *private_)
+static void sharpsl_charge_toggle(struct work_struct *private_)
{
dev_dbg(sharpsl_pm.dev, "Toggling Charger at time: %lx\n", jiffies);
else if (sharpsl_pm.charge_mode == CHRG_ON)
sharpsl_charge_off();
- schedule_work(&sharpsl_bat);
+ schedule_delayed_work(&sharpsl_bat, 0);
}
sharpsl_charge_off();
} else if (sharpsl_pm.full_count < 2) {
dev_dbg(sharpsl_pm.dev, "Charge Full: Count too low\n");
- schedule_work(&toggle_charger);
+ schedule_delayed_work(&toggle_charger, 0);
} else if (time_after(jiffies, sharpsl_pm.charge_start_time + SHARPSL_CHARGE_FINISH_TIME)) {
dev_dbg(sharpsl_pm.dev, "Charge Full: Interrupt generated too slowly - retry.\n");
- schedule_work(&toggle_charger);
+ schedule_delayed_work(&toggle_charger, 0);
} else {
sharpsl_charge_off();
sharpsl_pm.charge_mode = CHRG_DONE;
cancel_delayed_work(&irda_config->gpio_expa);
PREPARE_WORK(&irda_config->gpio_expa, set_trans_mode, &mode);
- schedule_work(&irda_config->gpio_expa);
+#error this is not permitted - mode is an argument variable
+ schedule_delayed_work(&irda_config->gpio_expa, 0);
return 0;
}
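
The #error is deliberate: &mode points at an argument of the enclosing function, so the pointer dangles as soon as that function returns, and the work may well run after it has. The new API removes the data pointer entirely, so the context has to live in (or be reachable from) the structure containing the work item. One safe shape, sketched here with invented names rather than the driver's actual fix:

/* Sketch only; the trans_mode field is hypothetical. */
struct irda_cfg_sketch {
	struct delayed_work gpio_expa;
	int trans_mode;		/* value copied in, no stack pointer kept */
};

static void set_trans_mode_sketch(struct work_struct *work)
{
	struct irda_cfg_sketch *cfg = container_of(work,
			struct irda_cfg_sketch, gpio_expa.work);

	printk(KERN_DEBUG "applying mode %d\n", cfg->trans_mode);
}
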
.rows = 8,
.cols = 8,
.keymap = nokia770_keymap,
- .keymapsize = ARRAY_SIZE(nokia770_keymap)
+ .keymapsize = ARRAY_SIZE(nokia770_keymap),
.delay = 4,
};
printk("HP connected\n");
}
-static void codec_delayed_power_down(void *arg)
+static void codec_delayed_power_down(struct work_struct *work)
{
down(&audio_pwr_sem);
if (audio_pwr_state == -1)
up(&audio_pwr_sem);
}
-static DECLARE_WORK(codec_power_down_work, codec_delayed_power_down, NULL);
+static DECLARE_DELAYED_WORK(codec_power_down_work, codec_delayed_power_down);
static void nokia770_audio_pwr_down(void)
{
static u8 tps_leds_change;
-static void tps_work(void *unused)
+static void tps_work(struct work_struct *unused)
{
for (;;) {
u8 leds;
}
}
-static DECLARE_WORK(work, tps_work, NULL);
+static DECLARE_WORK(work, tps_work);
#ifdef CONFIG_OMAP_OSK_MISTRAL
cancel_delayed_work(&irda_config->gpio_expa);
PREPARE_WORK(&irda_config->gpio_expa, set_trans_mode, &mode);
- schedule_work(&irda_config->gpio_expa);
+#error this is not permitted - mode is an argument variable
+ schedule_delayed_work(&irda_config->gpio_expa, 0);
return 0;
}
static int max7310_write(struct i2c_client *client, int address, int data);
static struct i2c_client max7310_template;
-static void akita_ioexp_work(void *private_);
+static void akita_ioexp_work(struct work_struct *private_);
static struct device *akita_ioexp_device;
static unsigned char ioexp_output_value = AKITA_IOEXP_IO_OUT;
-DECLARE_WORK(akita_ioexp, akita_ioexp_work, NULL);
+DECLARE_WORK(akita_ioexp, akita_ioexp_work);
/*
EXPORT_SYMBOL(akita_set_ioexp);
EXPORT_SYMBOL(akita_reset_ioexp);
-static void akita_ioexp_work(void *private_)
+static void akita_ioexp_work(struct work_struct *private_)
{
if (akita_ioexp_device)
max7310_set_ouputs(akita_ioexp_device, ioexp_output_value);
}
#endif
-static void do_softint(void *private_)
+static void do_softint(struct work_struct *private_)
{
printk(KERN_ERR "simserial: do_softint called\n");
}
info->flags = sstate->flags;
info->xmit_fifo_size = sstate->xmit_fifo_size;
info->line = line;
- INIT_WORK(&info->work, do_softint, info);
+ INIT_WORK(&info->work, do_softint);
info->state = sstate;
if (sstate->info) {
kfree(info);
* disable the cmc interrupt vector.
*/
static void
-ia64_mca_cmc_vector_disable_keventd(void *unused)
+ia64_mca_cmc_vector_disable_keventd(struct work_struct *unused)
{
on_each_cpu(ia64_mca_cmc_vector_disable, NULL, 1, 0);
}
* enable the cmc interrupt vector.
*/
static void
-ia64_mca_cmc_vector_enable_keventd(void *unused)
+ia64_mca_cmc_vector_enable_keventd(struct work_struct *unused)
{
on_each_cpu(ia64_mca_cmc_vector_enable, NULL, 1, 0);
}
monarch_cpu = -1;
}
-static DECLARE_WORK(cmc_disable_work, ia64_mca_cmc_vector_disable_keventd, NULL);
-static DECLARE_WORK(cmc_enable_work, ia64_mca_cmc_vector_enable_keventd, NULL);
+static DECLARE_WORK(cmc_disable_work, ia64_mca_cmc_vector_disable_keventd);
+static DECLARE_WORK(cmc_enable_work, ia64_mca_cmc_vector_enable_keventd);
/*
* ia64_mca_cmc_int_handler
}
struct create_idle {
+ struct work_struct work;
struct task_struct *idle;
struct completion done;
int cpu;
};
void
-do_fork_idle(void *_c_idle)
+do_fork_idle(struct work_struct *work)
{
- struct create_idle *c_idle = _c_idle;
+ struct create_idle *c_idle =
+ container_of(work, struct create_idle, work);
c_idle->idle = fork_idle(c_idle->cpu);
complete(&c_idle->done);
{
int timeout;
struct create_idle c_idle = {
+ .work = __WORK_INITIALIZER(c_idle.work, do_fork_idle),
.cpu = cpu,
.done = COMPLETION_INITIALIZER(c_idle.done),
};
- DECLARE_WORK(work, do_fork_idle, &c_idle);
c_idle.idle = get_idle_for_cpu(cpu);
if (c_idle.idle) {
* We can't use kernel_thread since we must avoid to reschedule the child.
*/
if (!keventd_up() || current_is_keventd())
- work.func(work.data);
+ c_idle.work.func(&c_idle.work);
else {
- schedule_work(&work);
+ schedule_work(&c_idle.work);
wait_for_completion(&c_idle.done);
}
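
This hunk is the idiom for work that lives on the stack: the work_struct moves into the caller's own structure, __WORK_INITIALIZER() replaces the old three-argument DECLARE_WORK, and the handler recovers its context with container_of(). The completion is what makes the on-stack lifetime safe, since the caller cannot return before the handler is done with the frame. Reduced to its bones, with illustrative names:

struct stack_ctx {
	struct work_struct work;
	struct completion done;
	int arg;
};

static void ctx_handler(struct work_struct *work)
{
	struct stack_ctx *ctx = container_of(work, struct stack_ctx, work);

	/* ... consume ctx->arg ... */
	complete(&ctx->done);		/* releases the waiter, and the frame */
}

static void run_on_keventd(int arg)
{
	struct stack_ctx ctx = {
		.work = __WORK_INITIALIZER(ctx.work, ctx_handler),
		.done = COMPLETION_INITIALIZER(ctx.done),
		.arg  = arg,
	};

	schedule_work(&ctx.work);
	wait_for_completion(&ctx.done);	/* must not return before this */
}
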
static int channel_open = 0;
/* the work handler */
-static void sp_work(void *data)
+static void sp_work(struct work_struct *unused)
{
if (!channel_open) {
if( rtlx_open(RTLX_CHANNEL_SYSIO, 1) != 0) {
return;
}
- INIT_WORK(&work, sp_work, NULL);
+ INIT_WORK(&work, sp_work);
queue_work(workqueue, &work);
} else
queue_work(workqueue, &work);
static struct work_struct wd_work;
-static void wd_stop(void *unused)
+static void wd_stop(struct work_struct *unused)
{
const char string[] = "AAAAFFFFJJJJ>>>>VVVV>>>>ZZZZVVVVKKKK";
int i = 0, rescue = 8;
ls_uart_init();
- INIT_WORK(&wd_work, wd_stop, NULL);
+ INIT_WORK(&wd_work, wd_stop);
schedule_work(&wd_work);
return 0;
#define OLD_BACKLIGHT_MAX 15
-static void pmac_backlight_key_worker(void *data);
-static void pmac_backlight_set_legacy_worker(void *data);
+static void pmac_backlight_key_worker(struct work_struct *work);
+static void pmac_backlight_set_legacy_worker(struct work_struct *work);
-static DECLARE_WORK(pmac_backlight_key_work, pmac_backlight_key_worker, NULL);
-static DECLARE_WORK(pmac_backlight_set_legacy_work, pmac_backlight_set_legacy_worker, NULL);
+static DECLARE_WORK(pmac_backlight_key_work, pmac_backlight_key_worker);
+static DECLARE_WORK(pmac_backlight_set_legacy_work, pmac_backlight_set_legacy_worker);
/* Although these variables are used in interrupt context, it makes no sense to
* protect them. No user is able to produce enough key events per second and
return level;
}
-static void pmac_backlight_key_worker(void *data)
+static void pmac_backlight_key_worker(struct work_struct *work)
{
if (atomic_read(&kernel_backlight_disabled))
return;
return error;
}
-static void pmac_backlight_set_legacy_worker(void *data)
+static void pmac_backlight_set_legacy_worker(struct work_struct *work)
{
if (atomic_read(&kernel_backlight_disabled))
return;
phy_info_t *phy;
struct work_struct phy_relink;
struct work_struct phy_display_config;
+ struct net_device *dev;
uint sequence_done;
NULL
};
-static void mii_display_status(void *data)
+static void mii_display_status(struct work_struct *work)
{
- struct net_device *dev = data;
- volatile struct fcc_enet_private *fep = dev->priv;
+ volatile struct fcc_enet_private *fep =
+ container_of(work, struct fcc_enet_private, phy_relink);
+ struct net_device *dev = fep->dev;
uint s = fep->phy_status;
if (!fep->link && !fep->old_link) {
printk(".\n");
}
-static void mii_display_config(void *data)
+static void mii_display_config(struct work_struct *work)
{
- struct net_device *dev = data;
- volatile struct fcc_enet_private *fep = dev->priv;
+ volatile struct fcc_enet_private *fep =
+ container_of(work, struct fcc_enet_private,
+ phy_display_config);
+ struct net_device *dev = fep->dev;
uint s = fep->phy_status;
printk("%s: config: auto-negotiation ", dev->name);
cep->phy_id_done = 0;
cep->phy_addr = fip->fc_phyaddr;
mii_queue(dev, mk_mii_read(MII_PHYSID1), mii_discover_phy);
- INIT_WORK(&cep->phy_relink, mii_display_status, dev);
- INIT_WORK(&cep->phy_display_config, mii_display_config, dev);
+ INIT_WORK(&cep->phy_relink, mii_display_status);
+ INIT_WORK(&cep->phy_display_config, mii_display_config);
+ cep->dev = dev;
#endif /* CONFIG_USE_MDIO */
fip++;
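
Where the old data pointer carried something the private structure did not already hold (here the net_device), the conversion grows a backpointer field, filled in next to INIT_WORK(); container_of() then yields the private struct and the backpointer yields the rest. The general shape, with invented names:

struct priv_sketch {
	struct work_struct phy_task;
	struct net_device *dev;		/* replaces the old INIT_WORK data arg */
};

static void phy_task_sketch(struct work_struct *work)
{
	struct priv_sketch *priv =
		container_of(work, struct priv_sketch, phy_task);
	struct net_device *dev = priv->dev;

	printk(KERN_INFO "%s: phy task ran\n", dev->name);
}

static void priv_sketch_init(struct priv_sketch *priv, struct net_device *dev)
{
	priv->dev = dev;		/* set before the work can first run */
	INIT_WORK(&priv->phy_task, phy_task_sketch);
}
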
uint phy_speed;
phy_info_t *phy;
struct work_struct phy_task;
+ struct net_device *dev;
uint sequence_done;
printk(".\n");
}
-static void mii_display_config(void *priv)
+static void mii_display_config(struct work_struct *work)
{
- struct net_device *dev = (struct net_device *)priv;
- struct fec_enet_private *fep = dev->priv;
+ struct fec_enet_private *fep =
+ container_of(work, struct fec_enet_private, phy_task);
+ struct net_device *dev = fep->dev;
volatile uint *s = &(fep->phy_status);
printk("%s: config: auto-negotiation ", dev->name);
fep->sequence_done = 1;
}
-static void mii_relink(void *priv)
+static void mii_relink(struct work_struct *work)
{
- struct net_device *dev = (struct net_device *)priv;
- struct fec_enet_private *fep = dev->priv;
+ struct fec_enet_private *fep =
+ container_of(work, struct fec_enet_private, phy_task);
+ struct net_device *dev = fep->dev;
int duplex;
fep->link = (fep->phy_status & PHY_STAT_LINK) ? 1 : 0;
{
struct fec_enet_private *fep = dev->priv;
- INIT_WORK(&fep->phy_task, mii_relink, (void *)dev);
+ fep->dev = dev;
+ INIT_WORK(&fep->phy_task, mii_relink);
schedule_work(&fep->phy_task);
}
{
struct fec_enet_private *fep = dev->priv;
- INIT_WORK(&fep->phy_task, mii_display_config, (void *)dev);
+ fep->dev = dev;
+ INIT_WORK(&fep->phy_task, mii_display_config);
schedule_work(&fep->phy_task);
}
* Work queue
*/
static struct workqueue_struct *appldata_wq;
-static void appldata_work_fn(void *data);
-static DECLARE_WORK(appldata_work, appldata_work_fn, NULL);
+static void appldata_work_fn(struct work_struct *work);
+static DECLARE_WORK(appldata_work, appldata_work_fn);
/*
*
* call data gathering function for each (active) module
*/
-static void appldata_work_fn(void *data)
+static void appldata_work_fn(struct work_struct *work)
{
struct list_head *lh;
struct appldata_ops *ops;
return -1;
}
-void chan_interrupt(struct list_head *chans, struct work_struct *task,
+void chan_interrupt(struct list_head *chans, struct delayed_work *task,
struct tty_struct *tty, int irq)
{
struct list_head *ele, *next;
static LIST_HEAD(mc_requests);
-static void mc_work_proc(void *unused)
+static void mc_work_proc(struct work_struct *unused)
{
struct mconsole_entry *req;
unsigned long flags;
}
}
-static DECLARE_WORK(mconsole_work, mc_work_proc, NULL);
+static DECLARE_WORK(mconsole_work, mc_work_proc);
static irqreturn_t mconsole_interrupt(int irq, void *dev_id)
{
* same device, since it tests for (dev->flags & IFF_UP). So
* there's no harm in delaying the device shutdown. */
schedule_work(&close_work);
+#error this is not permitted - close_work will go out of scope
goto out;
}
reactivate_fd(lp->fd, UM_ETH_IRQ);
DECLARE_MUTEX(ports_sem);
struct list_head ports = LIST_HEAD_INIT(ports);
-void port_work_proc(void *unused)
+void port_work_proc(struct work_struct *unused)
{
struct port_list *port;
struct list_head *ele;
local_irq_restore(flags);
}
-DECLARE_WORK(port_work, port_work_proc, NULL);
+DECLARE_WORK(port_work, port_work_proc);
static irqreturn_t port_interrupt(int irq, void *data)
{
} ____cacheline_aligned;
struct rackmeter_cpu {
- struct work_struct sniffer;
+ struct delayed_work sniffer;
+ struct rackmeter *rm;
cputime64_t prev_wall;
cputime64_t prev_idle;
int zero;
rackmeter_do_pause(rm, 0);
}
-static void rackmeter_do_timer(void *data)
+static void rackmeter_do_timer(struct work_struct *work)
{
- struct rackmeter *rm = data;
+ struct rackmeter_cpu *rcpu =
+ container_of(work, struct rackmeter_cpu, sniffer.work);
+ struct rackmeter *rm = rcpu->rm;
unsigned int cpu = smp_processor_id();
- struct rackmeter_cpu *rcpu = &rm->cpu[cpu];
cputime64_t cur_jiffies, total_idle_ticks;
unsigned int total_ticks, idle_ticks;
int i, offset, load, cumm, pause;
* on those machines yet
*/
- INIT_WORK(&rm->cpu[0].sniffer, rackmeter_do_timer, rm);
- INIT_WORK(&rm->cpu[1].sniffer, rackmeter_do_timer, rm);
+ rm->cpu[0].rm = rm;
+ INIT_DELAYED_WORK(&rm->cpu[0].sniffer, rackmeter_do_timer);
+ rm->cpu[1].rm = rm;
+ INIT_DELAYED_WORK(&rm->cpu[1].sniffer, rackmeter_do_timer);
for_each_online_cpu(cpu) {
struct rackmeter_cpu *rcpu;
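
Note the extra .work hop in the container_of() above: as of this series a delayed_work wraps the plain item together with its timer, and the handler is only ever handed the inner work_struct. Schematically, with an illustrative container:

/*
 *	struct delayed_work {
 *		struct work_struct work;
 *		struct timer_list timer;
 *	};
 */
struct sniffer_sketch {
	struct delayed_work sniffer;
};

static void sniffer_tick(struct work_struct *work)
{
	struct sniffer_sketch *s =
		container_of(work, struct sniffer_sketch, sniffer.work);

	/* ... sample, then re-arm ... */
	schedule_delayed_work(&s->sniffer, HZ);
}
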
int state; /* Link status state machine */
adapter_t *adapter; /* associated adapter */
- struct work_struct phy_update;
+ struct delayed_work phy_update;
u16 bmsr;
int count;
return cphy_cause_link_change;
}
-static void my3216_poll(void *arg)
+static void my3216_poll(struct work_struct *work)
{
- my3126_interrupt_handler(arg);
+ struct cphy *cphy = container_of(work, struct cphy, phy_update.work);
+
+ my3126_interrupt_handler(cphy);
}
static int my3126_set_loopback(struct cphy *cphy, int on)
if (cphy)
cphy_init(cphy, adapter, phy_addr, &my3126_ops, mdio_ops);
- INIT_WORK(&cphy->phy_update, my3216_poll, cphy);
+ INIT_DELAYED_WORK(&cphy->phy_update, my3216_poll);
cphy->bmsr = 0;
return (cphy);
spinlock_t lock;
struct work_struct watchdog_task;
struct work_struct tx_timeout_task;
+ struct net_device *netdev;
struct timer_list watchdog_timer;
u32 curr_window;
struct netxen_port *port);
int netxen_nic_rx_has_work(struct netxen_adapter *adapter);
int netxen_nic_tx_has_work(struct netxen_adapter *adapter);
-void netxen_watchdog_task(unsigned long v);
+void netxen_watchdog_task(struct work_struct *work);
void netxen_post_rx_buffers(struct netxen_adapter *adapter, u32 ctx,
u32 ringid);
void netxen_process_cmd_ring(unsigned long data);
return rv;
}
-void netxen_watchdog_task(unsigned long v)
+void netxen_watchdog_task(struct work_struct *work)
{
int port_num;
struct netxen_port *port;
struct net_device *netdev;
- struct netxen_adapter *adapter = (struct netxen_adapter *)v;
+ struct netxen_adapter *adapter =
+ container_of(work, struct netxen_adapter, watchdog_task);
if (netxen_nic_check_temp(adapter))
return;
static int netxen_nic_close(struct net_device *netdev);
static int netxen_nic_xmit_frame(struct sk_buff *, struct net_device *);
static void netxen_tx_timeout(struct net_device *netdev);
-static void netxen_tx_timeout_task(struct net_device *netdev);
+static void netxen_tx_timeout_task(struct work_struct *work);
static void netxen_watchdog(unsigned long);
static int netxen_handle_int(struct netxen_adapter *, struct net_device *);
static int netxen_nic_ioctl(struct net_device *netdev,
adapter->ahw.xg_linkup = 0;
adapter->watchdog_timer.function = &netxen_watchdog;
adapter->watchdog_timer.data = (unsigned long)adapter;
- INIT_WORK(&adapter->watchdog_task,
- (void (*)(void *))netxen_watchdog_task, adapter);
+ INIT_WORK(&adapter->watchdog_task, netxen_watchdog_task);
adapter->ahw.pdev = pdev;
adapter->proc_cmd_buf_counter = 0;
pci_read_config_byte(pdev, PCI_REVISION_ID, &adapter->ahw.revision_id);
dev_addr);
}
}
- INIT_WORK(&adapter->tx_timeout_task,
- (void (*)(void *))netxen_tx_timeout_task, netdev);
+ adapter->netdev = netdev;
+ INIT_WORK(&adapter->tx_timeout_task, netxen_tx_timeout_task);
netif_carrier_off(netdev);
netif_stop_queue(netdev);
schedule_work(&adapter->tx_timeout_task);
}
-static void netxen_tx_timeout_task(struct net_device *netdev)
+static void netxen_tx_timeout_task(struct work_struct *work)
{
- struct netxen_port *port = (struct netxen_port *)netdev_priv(netdev);
+ struct netxen_adapter *adapter =
+ container_of(work, struct netxen_adapter, tx_timeout_task);
+ struct net_device *netdev = adapter->netdev;
unsigned long flags;
printk(KERN_ERR "%s %s: transmit timeout, resetting.\n",
netxen_nic_driver_name, netdev->name);
- spin_lock_irqsave(&port->adapter->lock, flags);
+ spin_lock_irqsave(&adapter->lock, flags);
netxen_nic_close(netdev);
netxen_nic_open(netdev);
- spin_unlock_irqrestore(&port->adapter->lock, flags);
+ spin_unlock_irqrestore(&adapter->lock, flags);
netdev->trans_start = jiffies;
netif_wake_queue(netdev);
}
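
The netxen hunks also delete the old (void (*)(void *)) casts at the INIT_WORK sites. Those casts existed to silence the compiler, which meant a handler with the wrong signature could be queued unnoticed; with the two-argument INIT_WORK the handler prototype is actually type-checked, which is much of the point of this conversion:

/* before: the cast defeats type checking
 *	INIT_WORK(&adapter->watchdog_task,
 *		  (void (*)(void *))netxen_watchdog_task, adapter);
 * after: a mismatched handler is now a compile error
 */
INIT_WORK(&adapter->watchdog_task, netxen_watchdog_task);
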
/* work queue */
struct work_struct phy_configure;
+ struct net_device *dev;
int work_pending;
spinlock_t lock;
* of autonegotiation.) If the RPC ANEG bit is cleared, the selection
* is controlled by the RPC SPEED and RPC DPLX bits.
*/
-static void smc_phy_configure(void *data)
+static void smc_phy_configure(struct work_struct *work)
{
- struct net_device *dev = data;
- struct smc_local *lp = netdev_priv(dev);
+ struct smc_local *lp =
+ container_of(work, struct smc_local, phy_configure);
+ struct net_device *dev = lp->dev;
void __iomem *ioaddr = lp->base;
int phyaddr = lp->mii.phy_id;
int my_phy_caps; /* My PHY capabilities */
/* Configure the PHY, initialize the link state */
if (lp->phy_type != 0)
- smc_phy_configure(dev);
+ smc_phy_configure(&lp->phy_configure);
else {
spin_lock_irq(&lp->lock);
smc_10bt_check_media(dev, 1);
#endif
tasklet_init(&lp->tx_task, smc_hardware_send_pkt, (unsigned long)dev);
- INIT_WORK(&lp->phy_configure, smc_phy_configure, dev);
+ INIT_WORK(&lp->phy_configure, smc_phy_configure);
+ lp->dev = dev;
lp->mii.phy_id_mask = 0x1f;
lp->mii.reg_num_mask = 0x1f;
lp->mii.force_media = 0;
smc_reset(ndev);
smc_enable(ndev);
if (lp->phy_type != 0)
- smc_phy_configure(ndev);
+ smc_phy_configure(&lp->phy_configure);
netif_device_attach(ndev);
}
}
static void ieee_init(struct ieee80211_device *ieee);
static void softmac_init(struct ieee80211softmac_device *sm);
-static void set_rts_cts_work(void *d);
-static void set_basic_rates_work(void *d);
+static void set_rts_cts_work(struct work_struct *work);
+static void set_basic_rates_work(struct work_struct *work);
static void housekeeping_init(struct zd_mac *mac);
static void housekeeping_enable(struct zd_mac *mac);
memset(mac, 0, sizeof(*mac));
spin_lock_init(&mac->lock);
mac->netdev = netdev;
- INIT_WORK(&mac->set_rts_cts_work, set_rts_cts_work, mac);
- INIT_WORK(&mac->set_basic_rates_work, set_basic_rates_work, mac);
+ INIT_DELAYED_WORK(&mac->set_rts_cts_work, set_rts_cts_work);
+ INIT_DELAYED_WORK(&mac->set_basic_rates_work, set_basic_rates_work);
ieee_init(ieee);
softmac_init(ieee80211_priv(netdev));
spin_unlock_irqrestore(&mac->lock, flags);
}
-static void set_rts_cts_work(void *d)
+static void set_rts_cts_work(struct work_struct *work)
{
- struct zd_mac *mac = d;
+ struct zd_mac *mac =
+ container_of(work, struct zd_mac, set_rts_cts_work.work);
unsigned long flags;
u8 rts_rate;
unsigned int short_preamble;
try_enable_tx(mac);
}
-static void set_basic_rates_work(void *d)
+static void set_basic_rates_work(struct work_struct *work)
{
- struct zd_mac *mac = d;
+ struct zd_mac *mac =
+ container_of(work, struct zd_mac, set_basic_rates_work.work);
unsigned long flags;
u16 basic_rates;
if (need_set_rts_cts && !mac->updating_rts_rate) {
mac->updating_rts_rate = 1;
netif_stop_queue(mac->netdev);
- queue_work(zd_workqueue, &mac->set_rts_cts_work);
+ queue_delayed_work(zd_workqueue, &mac->set_rts_cts_work, 0);
}
if (need_set_rates && !mac->updating_basic_rates) {
mac->updating_basic_rates = 1;
netif_stop_queue(mac->netdev);
- queue_work(zd_workqueue, &mac->set_basic_rates_work);
+ queue_delayed_work(zd_workqueue, &mac->set_basic_rates_work,
+ 0);
}
spin_unlock_irqrestore(&mac->lock, flags);
}
struct iw_statistics iw_stats;
struct housekeeping housekeeping;
- struct work_struct set_rts_cts_work;
- struct work_struct set_basic_rates_work;
+ struct delayed_work set_rts_cts_work;
+ struct delayed_work set_basic_rates_work;
unsigned int stats_count;
u8 qual_buffer[ZD_MAC_STATS_BUFFER_SIZE];
void (*cs_control)(u32 command);
};
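
The zd1211 change is the queue_work() analogue of the schedule_work() conversions earlier: once the member is a delayed_work, queue_delayed_work(wq, &dw, 0) is the drop-in replacement on a private workqueue. A sketch under invented names (zd_workqueue itself is real in this driver, mac_sketch is not); note that cancel_delayed_work() only stops a still-pending timer, so an item that may already be queued needs a flush as well:

struct mac_sketch {
	struct delayed_work set_rates_work;
};

static void rates_queue(struct workqueue_struct *wq, struct mac_sketch *mac)
{
	/* delay 0: runs as soon as the queue's thread wakes */
	queue_delayed_work(wq, &mac->set_rates_work, 0);
}

static void rates_stop(struct workqueue_struct *wq, struct mac_sketch *mac)
{
	if (!cancel_delayed_work(&mac->set_rates_work))
		flush_workqueue(wq);	/* timer already fired; wait it out */
}
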
-static void pump_messages(void *data);
+static void pump_messages(struct work_struct *work);
static int flush(struct driver_data *drv_data)
{
}
}
-static void pump_messages(void *data)
+static void pump_messages(struct work_struct *work)
{
- struct driver_data *drv_data = data;
+ struct driver_data *drv_data =
+ container_of(work, struct driver_data, pump_messages);
unsigned long flags;
/* Lock queue and check for queue work */
tasklet_init(&drv_data->pump_transfers,
pump_transfers, (unsigned long)drv_data);
- INIT_WORK(&drv_data->pump_messages, pump_messages, drv_data);
+ INIT_WORK(&drv_data->pump_messages, pump_messages);
drv_data->workqueue = create_singlethread_workqueue(
drv_data->master->cdev.dev->bus_id);
if (drv_data->workqueue == NULL)
unsigned has_indicators:1;
u8 indicator[USB_MAXCHILDREN];
- struct work_struct leds;
+ struct delayed_work leds;
};
char *urbdata; /* interrupt URB data buffer */
char *msgdata; /* control message data buffer */
- struct work_struct work;
+ struct delayed_work work;
int button_pressed;
spinlock_t lock;
};
case ACD_BTN_BRIGHT_UP:
case ACD_BTN_BRIGHT_DOWN:
pdata->button_pressed = 1;
- queue_work(wq, &pdata->work);
+ queue_delayed_work(wq, &pdata->work, 0);
break;
case ACD_BTN_NONE:
default:
.max_brightness = 0xFF
};
-static void appledisplay_work(void *private)
+static void appledisplay_work(struct work_struct *work)
{
- struct appledisplay *pdata = private;
+ struct appledisplay *pdata =
+ container_of(work, struct appledisplay, work.work);
int retval;
up(&pdata->bd->sem);
pdata->udev = udev;
spin_lock_init(&pdata->lock);
- INIT_WORK(&pdata->work, appledisplay_work, pdata);
+ INIT_DELAYED_WORK(&pdata->work, appledisplay_work);
/* Allocate buffer for control messages */
pdata->msgdata = kmalloc(ACD_MSG_BUFFER_LEN, GFP_KERNEL);
* Our LCD controller task (which is called when we blank or unblank)
* via keventd.
*/
-static void pxafb_task(void *dummy)
+static void pxafb_task(struct work_struct *work)
{
- struct pxafb_info *fbi = dummy;
+ struct pxafb_info *fbi =
+ container_of(work, struct pxafb_info, task);
u_int state = xchg(&fbi->task_state, -1);
set_ctrlr_state(fbi, state);
}
init_waitqueue_head(&fbi->ctrlr_wait);
- INIT_WORK(&fbi->task, pxafb_task, fbi);
+ INIT_WORK(&fbi->task, pxafb_task);
init_MUTEX(&fbi->ctrlr_sem);
return fbi;
/* Very specific to the needs of some platforms (h3,h4)
* having calls which can sleep in irda_set_speed.
*/
- struct work_struct gpio_expa;
+ struct delayed_work gpio_expa;
int rx_channel;
int tx_channel;
unsigned long dest_start;
struct netpoll *rx_np; /* netpoll that registered an rx_hook */
struct sk_buff_head arp_tx; /* list of arp requests to reply to */
struct sk_buff_head txq;
- struct work_struct tx_work;
+ struct delayed_work tx_work;
};
void netpoll_poll(struct netpoll *np);