* thread asleep on the destroy_comp list vs. an object destroyed
* here synchronously when the last reference is removed.
*/
-static void cm_work_handler(void *arg)
+static void cm_work_handler(struct work_struct *_work)
{
- struct iwcm_work *work = arg;
+ struct iwcm_work *work = container_of(_work, struct iwcm_work, work);
+ struct iw_cm_event levent;
struct iwcm_id_private *cm_id_priv = work->cm_id;
unsigned long flags;
int empty;
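
The hunk above shows the shape of the whole conversion: with the two-argument workqueue API, a handler no longer receives the void pointer it was registered with, so it recovers the enclosing object from the work_struct pointer instead. A minimal sketch of the pattern, with hypothetical names:

	/* Sketch of the conversion pattern; all names here are hypothetical. */
	#include <linux/workqueue.h>

	struct my_ctx {
		struct work_struct work;	/* must be embedded in the object */
		int payload;
	};

	/* New-style handler: the context comes from container_of(), not
	 * from a void * argument supplied at INIT_WORK() time. */
	static void my_handler(struct work_struct *work)
	{
		struct my_ctx *ctx = container_of(work, struct my_ctx, work);

		/* use ctx->payload ... */
	}

	static void my_ctx_start(struct my_ctx *ctx)
	{
		INIT_WORK(&ctx->work, my_handler);	/* two arguments now */
		schedule_work(&ctx->work);
	}
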
bp->pdev = pdev;
spin_lock_init(&bp->phy_lock);
- INIT_WORK(&bp->reset_task, bnx2_reset_task, bp);
+ INIT_WORK(&bp->reset_task, bnx2_reset_task);
dev->base_addr = dev->mem_start = pci_resource_start(pdev, 0);
- mem_len = MB_GET_CID_ADDR(17);
+ mem_len = MB_GET_CID_ADDR(TX_TSS_CID + 1);
dev->mem_end = dev->mem_start + mem_len;
dev->irq = pdev->irq;
/* Terminator modules. */
struct sge *sge;
struct peespi *espi;
+ struct petp *tp;
struct port_info port[MAX_NPORTS];
- struct work_struct stats_update_task;
+ struct delayed_work stats_update_task;
struct timer_list stats_update_timer;
- struct semaphore mib_mutex;
spinlock_t tpi_lock;
spinlock_t work_lock;
+ spinlock_t mac_lock;
+
/* guards async operations */
spinlock_t async_lock ____cacheline_aligned;
u32 slow_intr_mask;
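
Two related changes meet here: stats_update_task becomes a delayed_work, and the hand-rolled stats_update_timer machinery further down becomes redundant, because a delayed_work already embeds a timer. A hedged sketch of how such a field is armed and re-armed, assuming a hypothetical adapter layout modeled on the one above:

	/* Periodic work via delayed_work; names are hypothetical. */
	#include <linux/jiffies.h>
	#include <linux/workqueue.h>

	struct my_adapter {
		struct delayed_work stats_update_task;	/* replaces work + timer */
	};

	static void my_mac_stats_task(struct work_struct *work)
	{
		/* delayed_work embeds a work_struct member named "work",
		 * so container_of() goes through that member. */
		struct my_adapter *adapter =
			container_of(work, struct my_adapter,
				     stats_update_task.work);

		/* ... read the MAC counters, then re-arm ... */
		schedule_delayed_work(&adapter->stats_update_task, HZ);
	}

	static void my_adapter_init(struct my_adapter *adapter)
	{
		INIT_DELAYED_WORK(&adapter->stats_update_task, my_mac_stats_task);
		schedule_delayed_work(&adapter->stats_update_task, HZ);
	}
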
/*
* Processes elmer0 external interrupts in process context.
*/
-static void ext_intr_task(void *data)
+static void ext_intr_task(struct work_struct *work)
{
- struct adapter *adapter = data;
+ struct adapter *adapter =
+ container_of(work, struct adapter, ext_intr_handler_task);
- elmer0_ext_intr_handler(adapter);
+ t1_elmer0_ext_intr_handler(adapter);
/* Now reenable external interrupts */
spin_lock_irq(&adapter->async_lock);
spin_lock_init(&adapter->tpi_lock);
spin_lock_init(&adapter->work_lock);
spin_lock_init(&adapter->async_lock);
+ spin_lock_init(&adapter->mac_lock);
INIT_WORK(&adapter->ext_intr_handler_task,
- ext_intr_task, adapter);
- INIT_WORK(&adapter->stats_update_task, mac_stats_task,
- adapter);
+ ext_intr_task);
+ INIT_DELAYED_WORK(&adapter->stats_update_task,
+ mac_stats_task);
- #ifdef work_struct
- init_timer(&adapter->stats_update_timer);
- adapter->stats_update_timer.function = mac_stats_timer;
- adapter->stats_update_timer.data =
- (unsigned long)adapter;
- #endif
pci_set_drvdata(pdev, netdev);
}
schedule_delayed_work(&bcm->periodic_work, HZ * 15);
}
- /* Estimate a "Badness" value based on the periodic work
- * state-machine state. "Badness" is worse (bigger), if the
- * periodic work will take longer.
- */
- static int estimate_periodic_work_badness(unsigned int state)
- {
- int badness = 0;
-
- if (state % 8 == 0) /* every 120 sec */
- badness += 10;
- if (state % 4 == 0) /* every 60 sec */
- badness += 5;
- if (state % 2 == 0) /* every 30 sec */
- badness += 1;
- if (state % 1 == 0) /* every 15 sec */
- badness += 1;
-
- #define BADNESS_LIMIT 4
- return badness;
- }
-
-static void bcm43xx_periodic_work_handler(void *d)
+static void bcm43xx_periodic_work_handler(struct work_struct *work)
{
- struct bcm43xx_private *bcm = d;
+ struct bcm43xx_private *bcm =
+ container_of(work, struct bcm43xx_private, periodic_work.work);
struct net_device *net_dev = bcm->net_dev;
unsigned long flags;
u32 savedirqs = 0;
}
void
-islpci_do_reset_and_wake(void *data)
+islpci_do_reset_and_wake(struct work_struct *work)
{
- islpci_private *priv = data;
+ islpci_private *priv = container_of(work, islpci_private, reset_task);
+
islpci_reset(priv, 1);
- netif_wake_queue(priv->ndev);
priv->reset_task_pending = 0;
+ smp_wmb();
+ netif_wake_queue(priv->ndev);
}
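
The reordering above is deliberate: the pending flag must be visibly clear before the queue wakes, or the transmit path could see a woken queue while still reading a stale "reset pending" value. A sketch of the barrier pairing this relies on; the matching smp_rmb() belongs on the reader side, which is outside this hunk, so the reader below is an assumption:

	/* smp_wmb()/smp_rmb() pairing in miniature (hypothetical variables). */
	#include <asm/system.h>		/* smp_wmb()/smp_rmb() in 2.6.x */

	static int reset_pending = 1;
	static int queue_awake;

	static void writer(void)	/* the reset task above */
	{
		reset_pending = 0;
		smp_wmb();		/* flag store ordered before the wake */
		queue_awake = 1;	/* stands in for netif_wake_queue() */
	}

	static void reader(void)	/* e.g. the TX path */
	{
		if (queue_awake) {
			smp_rmb();	/* pairs with smp_wmb() in writer() */
			/* reset_pending is guaranteed to read as 0 here */
		}
	}
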
void
return iw_stats;
}
- #ifdef DEBUG
- static const char* decryption_types[] = {
- [ZD_RX_NO_WEP] = "none",
- [ZD_RX_WEP64] = "WEP64",
- [ZD_RX_TKIP] = "TKIP",
- [ZD_RX_AES] = "AES",
- [ZD_RX_WEP128] = "WEP128",
- [ZD_RX_WEP256] = "WEP256",
- };
-
- static const char *decryption_type_string(u8 type)
- {
- const char *s;
-
- if (type < ARRAY_SIZE(decryption_types)) {
- s = decryption_types[type];
- } else {
- s = NULL;
- }
- return s ? s : "unknown";
- }
-
- static int is_ofdm(u8 frame_status)
- {
- return (frame_status & ZD_RX_OFDM);
- }
-
- void zd_dump_rx_status(const struct rx_status *status)
- {
- const char* modulation;
- u8 quality;
-
- if (is_ofdm(status->frame_status)) {
- modulation = "ofdm";
- quality = status->signal_quality_ofdm;
- } else {
- modulation = "cck";
- quality = status->signal_quality_cck;
- }
- pr_debug("rx status %s strength %#04x qual %#04x decryption %s\n",
- modulation, status->signal_strength, quality,
- decryption_type_string(status->decryption_type));
- if (status->frame_status & ZD_RX_ERROR) {
- pr_debug("rx error %s%s%s%s%s%s\n",
- (status->frame_status & ZD_RX_TIMEOUT_ERROR) ?
- "timeout " : "",
- (status->frame_status & ZD_RX_FIFO_OVERRUN_ERROR) ?
- "fifo " : "",
- (status->frame_status & ZD_RX_DECRYPTION_ERROR) ?
- "decryption " : "",
- (status->frame_status & ZD_RX_CRC32_ERROR) ?
- "crc32 " : "",
- (status->frame_status & ZD_RX_NO_ADDR1_MATCH_ERROR) ?
- "addr1 " : "",
- (status->frame_status & ZD_RX_CRC16_ERROR) ?
- "crc16" : "");
- }
- }
- #endif /* DEBUG */
-
#define LINK_LED_WORK_DELAY HZ
-static void link_led_handler(void *p)
+static void link_led_handler(struct work_struct *work)
{
- struct zd_mac *mac = p;
+ struct zd_mac *mac =
+ container_of(work, struct zd_mac, housekeeping.link_led_work.work);
struct zd_chip *chip = &mac->chip;
struct ieee80211softmac_device *sm = ieee80211_priv(mac->netdev);
int is_associated;
#define ZD_RX_CRC16_ERROR 0x40
#define ZD_RX_ERROR 0x80
- enum mac_flags {
- MAC_FIXED_CHANNEL = 0x01,
- };
-
struct housekeeping {
- struct work_struct link_led_work;
+ struct delayed_work link_led_work;
};
#define ZD_MAC_STATS_BUFFER_SIZE 16
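
Because link_led_work is now a delayed_work nested inside housekeeping, the container_of() in link_led_handler() above walks a two-level member path; offsetof() accepts nested members, so one expression suffices. A sketch with hypothetical struct names:

	/* container_of() through a nested member. */
	#include <linux/workqueue.h>

	struct my_housekeeping {
		struct delayed_work link_led_work;
	};

	struct my_mac {
		struct my_housekeeping housekeeping;
	};

	static void my_link_led_handler(struct work_struct *work)
	{
		/* Steps back through delayed_work's .work member and then
		 * through the housekeeping member of my_mac at once. */
		struct my_mac *mac =
			container_of(work, struct my_mac,
				     housekeeping.link_led_work.work);
		(void)mac;
	}
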
INIT_LIST_HEAD(&hub->event_list);
hub->intfdev = &intf->dev;
hub->hdev = hdev;
- INIT_WORK(&hub->leds, led_work, hub);
+ INIT_DELAYED_WORK(&hub->leds, led_work);
usb_set_intfdata (intf, hub);
+ intf->needs_remote_wakeup = 1;
if (hdev->speed == USB_SPEED_HIGH)
highspeed_hubs++;
#else
-static void usb_autosuspend_work(void *_udev)
+static void usb_autosuspend_work(struct work_struct *work)
{}
- #endif
+ #endif /* CONFIG_USB_SUSPEND */
+
+ #else
+
+ #define ksuspend_usb_init() 0
+ #define ksuspend_usb_cleanup() do {} while (0)
+
+ #endif /* CONFIG_PM */
/**
* usb_alloc_dev - usb device constructor (usbcore-internal)
hid_io_error(hid);
}
- /* Workqueue routine to reset the device */
+ /* Workqueue routine to reset the device or clear a halt */
-static void hid_reset(void *_hid)
+static void hid_reset(struct work_struct *work)
{
- struct hid_device *hid = (struct hid_device *) _hid;
+ struct hid_device *hid =
+ container_of(work, struct hid_device, reset_work);
- int rc_lock, rc;
-
- dev_dbg(&hid->intf->dev, "resetting device\n");
- rc = rc_lock = usb_lock_device_for_reset(hid->dev, hid->intf);
- if (rc_lock >= 0) {
- rc = usb_reset_composite_device(hid->dev, hid->intf);
- if (rc_lock)
- usb_unlock_device(hid->dev);
+ int rc_lock, rc = 0;
+
+ if (test_bit(HID_CLEAR_HALT, &hid->iofl)) {
+ dev_dbg(&hid->intf->dev, "clear halt\n");
+ rc = usb_clear_halt(hid->dev, hid->urbin->pipe);
+ clear_bit(HID_CLEAR_HALT, &hid->iofl);
+ hid_start_in(hid);
+ }
+
+ else if (test_bit(HID_RESET_PENDING, &hid->iofl)) {
+ dev_dbg(&hid->intf->dev, "resetting device\n");
+ rc = rc_lock = usb_lock_device_for_reset(hid->dev, hid->intf);
+ if (rc_lock >= 0) {
+ rc = usb_reset_composite_device(hid->dev, hid->intf);
+ if (rc_lock)
+ usb_unlock_device(hid->dev);
+ }
+ clear_bit(HID_RESET_PENDING, &hid->iofl);
}
- clear_bit(HID_RESET_PENDING, &hid->iofl);
switch (rc) {
case 0:
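
For context, both branches of hid_reset() are driven by bits set elsewhere in the driver. A hedged sketch of how the clear-halt branch is typically requested from the input URB's completion handler when the endpoint stalls; the call site is outside this hunk, so treat the surrounding code as an assumption:

	/* Hypothetical trigger site for the clear-halt branch above;
	 * assumes the driver-private hid.h for struct hid_device. */
	#include <linux/usb.h>

	static void my_irq_in(struct urb *urb)
	{
		struct hid_device *hid = urb->context;

		switch (urb->status) {
		case -EPIPE:
			/* Stalled endpoint: clearing the halt needs process
			 * context, so flag it and defer to the work item. */
			set_bit(HID_CLEAR_HALT, &hid->iofl);
			schedule_work(&hid->reset_work);
			return;
		default:
			/* normal completion handling ... */
			break;
		}
	}
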
static void zap_completion_queue(void);
static void arp_reply(struct sk_buff *skb);
-static void queue_process(void *p)
+static void queue_process(struct work_struct *work)
{
- unsigned long flags;
- struct netpoll_info *npinfo = p;
+ struct netpoll_info *npinfo =
+ container_of(work, struct netpoll_info, tx_work.work);
struct sk_buff *skb;
- while (queue_head) {
- spin_lock_irqsave(&queue_lock, flags);
-
- skb = queue_head;
- queue_head = skb->next;
- if (skb == queue_tail)
- queue_head = NULL;
-
- queue_depth--;
-
- spin_unlock_irqrestore(&queue_lock, flags);
-
- dev_queue_xmit(skb);
- }
- }
+ while ((skb = skb_dequeue(&npinfo->txq))) {
+ struct net_device *dev = skb->dev;
- static DECLARE_WORK(send_queue, queue_process);
+ if (!netif_device_present(dev) || !netif_running(dev)) {
+ __kfree_skb(skb);
+ continue;
+ }
- void netpoll_queue(struct sk_buff *skb)
- {
- unsigned long flags;
+ netif_tx_lock_bh(dev);
+ if (netif_queue_stopped(dev) ||
+ dev->hard_start_xmit(skb, dev) != NETDEV_TX_OK) {
+ skb_queue_head(&npinfo->txq, skb);
+ netif_tx_unlock_bh(dev);
- if (queue_depth == MAX_QUEUE_DEPTH) {
- __kfree_skb(skb);
- return;
+ schedule_delayed_work(&npinfo->tx_work, HZ/10);
+ return;
+ }
+
+ netif_tx_unlock_bh(dev);
}
-
- spin_lock_irqsave(&queue_lock, flags);
- if (!queue_head)
- queue_head = skb;
- else
- queue_tail->next = skb;
- queue_tail = skb;
- queue_depth++;
- spin_unlock_irqrestore(&queue_lock, flags);
-
- schedule_work(&send_queue);
}
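
The rewritten queue_process() drains a standard sk_buff_head instead of the hand-rolled queue_head/queue_tail list, and on a transmit failure it puts the skb back at the head rather than the tail, so retried packets are never reordered behind newer ones. The discipline in miniature; my_try_xmit() is a hypothetical stand-in for the device transmit hook:

	#include <linux/netdevice.h>
	#include <linux/skbuff.h>

	static int my_try_xmit(struct sk_buff *skb)
	{
		return NETDEV_TX_OK;	/* stands in for dev->hard_start_xmit() */
	}

	static void my_tx_drain(struct sk_buff_head *q)
	{
		struct sk_buff *skb;

		while ((skb = skb_dequeue(q))) {
			if (my_try_xmit(skb) != NETDEV_TX_OK) {
				/* Requeue at the head: this skb still goes
				 * out first on the next drain. */
				skb_queue_head(q, skb);
				break;
			}
		}
	}
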
- static int checksum_udp(struct sk_buff *skb, struct udphdr *uh,
- unsigned short ulen, u32 saddr, u32 daddr)
+ static __sum16 checksum_udp(struct sk_buff *skb, struct udphdr *uh,
+ unsigned short ulen, __be32 saddr, __be32 daddr)
{
- unsigned int psum;
+ __wsum psum;
if (uh->check == 0 || skb->ip_summed == CHECKSUM_UNNECESSARY)
return 0;
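
The checksum_udp() retyping belongs to the sparse endianness annotations: __be32 and __wsum are "bitwise" types that sparse refuses to mix with plain integers, which turns byte-order bugs into static warnings. A minimal sketch of how annotated values flow through the checksum helpers:

	/* Hedged sketch of the annotated checksum flow. */
	#include <linux/in.h>
	#include <linux/types.h>
	#include <net/checksum.h>

	static __sum16 my_udp_sum(__wsum partial, __be32 saddr, __be32 daddr,
				  unsigned short ulen)
	{
		/* csum_tcpudp_magic() folds the pseudo-header into the
		 * running __wsum and returns the final on-wire __sum16. */
		return csum_tcpudp_magic(saddr, daddr, ulen, IPPROTO_UDP,
					 partial);
	}
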
static void netpoll_send_skb(struct netpoll *np, struct sk_buff *skb)
{
- int status;
- struct netpoll_info *npinfo;
-
- if (!np || !np->dev || !netif_running(np->dev)) {
- __kfree_skb(skb);
- return;
- }
-
- npinfo = np->dev->npinfo;
-
- /* avoid recursion */
- if (npinfo->poll_owner == smp_processor_id() ||
- np->dev->xmit_lock_owner == smp_processor_id()) {
- if (np->drop)
- np->drop(skb);
- else
- __kfree_skb(skb);
- return;
- }
-
- do {
- npinfo->tries--;
- netif_tx_lock(np->dev);
+ int status = NETDEV_TX_BUSY;
+ unsigned long tries;
+ struct net_device *dev = np->dev;
+ struct netpoll_info *npinfo = np->dev->npinfo;
+
+ if (!npinfo || !netif_running(dev) || !netif_device_present(dev)) {
+ __kfree_skb(skb);
+ return;
+ }
+
+ /* don't get messages out of order, and no recursion */
+ if (skb_queue_len(&npinfo->txq) == 0 &&
+ npinfo->poll_owner != smp_processor_id() &&
+ netif_tx_trylock(dev)) {
+ /* try until next clock tick */
+ for (tries = jiffies_to_usecs(1)/USEC_PER_POLL; tries > 0; --tries) {
+ if (!netif_queue_stopped(dev))
+ status = dev->hard_start_xmit(skb, dev);
- /*
- * network drivers do not expect to be called if the queue is
- * stopped.
- */
- status = NETDEV_TX_BUSY;
- if (!netif_queue_stopped(np->dev))
- status = np->dev->hard_start_xmit(skb, np->dev);
+ if (status == NETDEV_TX_OK)
+ break;
- netif_tx_unlock(np->dev);
+ /* tickle device maybe there is some cleanup */
+ netpoll_poll(np);
- /* success */
- if(!status) {
- npinfo->tries = MAX_RETRIES; /* reset */
- return;
+ udelay(USEC_PER_POLL);
}
+ netif_tx_unlock(dev);
+ }
- /* transmit busy */
- netpoll_poll(np);
- udelay(50);
- } while (npinfo->tries > 0);
+ if (status != NETDEV_TX_OK) {
+ skb_queue_tail(&npinfo->txq, skb);
+ schedule_delayed_work(&npinfo->tx_work, 0);
+ }
}
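
Note the bounded busy-wait that replaces the old unbounded tries counter: the loop polls at most jiffies_to_usecs(1)/USEC_PER_POLL times, i.e. for one clock tick. USEC_PER_POLL is 50 microseconds in netpoll.c, matching the udelay(50) it replaces, so at HZ=1000 that is about 20 attempts before the skb is handed off to the txq/tx_work machinery for process-context retry.
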
void netpoll_send_udp(struct netpoll *np, const char *msg, int len)
npinfo->rx_np = NULL;
spin_lock_init(&npinfo->poll_lock);
npinfo->poll_owner = -1;
- npinfo->tries = MAX_RETRIES;
+
spin_lock_init(&npinfo->rx_lock);
skb_queue_head_init(&npinfo->arp_tx);
- } else
+ skb_queue_head_init(&npinfo->txq);
+ INIT_DELAYED_WORK(&npinfo->tx_work, queue_process);
+
+ atomic_set(&npinfo->refcnt, 1);
+ } else {
npinfo = ndev->npinfo;
+ atomic_inc(&npinfo->refcnt);
+ }
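
npinfo is now shared by every netpoll client attached to the device and reference-counted: the first attach allocates it with refcnt = 1, later attaches only atomic_inc(). A hedged sketch of the release side this implies; the real logic lives in netpoll_cleanup() and the helper name here is hypothetical:

	#include <linux/netdevice.h>
	#include <linux/netpoll.h>

	static void my_netpoll_release(struct net_device *ndev)
	{
		struct netpoll_info *npinfo = ndev->npinfo;

		if (atomic_dec_and_test(&npinfo->refcnt)) {
			skb_queue_purge(&npinfo->arp_tx);
			skb_queue_purge(&npinfo->txq);
			cancel_delayed_work(&npinfo->tx_work);
			/* a flush may also be needed before freeing */
			kfree(npinfo);
			ndev->npinfo = NULL;
		}
	}
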
if (!ndev->poll_controller) {
printk(KERN_ERR "%s: %s doesn't support polling, aborting.\n",