int ipoib_ib_dev_open(struct net_device *dev);
int ipoib_ib_dev_up(struct net_device *dev);
-int ipoib_ib_dev_down(struct net_device *dev);
+int ipoib_ib_dev_down(struct net_device *dev, int flush);
int ipoib_ib_dev_stop(struct net_device *dev);
int ipoib_dev_init(struct net_device *dev, struct ib_device *ca, int port);
	return ipoib_mcast_start_thread(dev);
}
-int ipoib_ib_dev_down(struct net_device *dev)
+int ipoib_ib_dev_down(struct net_device *dev, int flush)
{
	struct ipoib_dev_priv *priv = netdev_priv(dev);
		set_bit(IPOIB_PKEY_STOP, &priv->flags);
		cancel_delayed_work(&priv->pkey_task);
		mutex_unlock(&pkey_mutex);
-		flush_workqueue(ipoib_workqueue);
+		if (flush)
+			flush_workqueue(ipoib_workqueue);
	}
-	ipoib_mcast_stop_thread(dev, 1);
+	ipoib_mcast_stop_thread(dev, flush);
	ipoib_mcast_dev_flush(dev);
	ipoib_flush_paths(dev);
	ipoib_dbg(priv, "flushing\n");
-	ipoib_ib_dev_down(dev);
+	ipoib_ib_dev_down(dev, 0);
	/*
	 * The device could have been brought down between the start and when
	netif_stop_queue(dev);
-	ipoib_ib_dev_down(dev);
+	/*
+	 * Now flush workqueue to make sure a scheduled task doesn't
+	 * bring our internal state back up.
+	 */
+	flush_workqueue(ipoib_workqueue);
+
+	ipoib_ib_dev_down(dev, 1);
	ipoib_ib_dev_stop(dev);
	if (!test_bit(IPOIB_FLAG_SUBINTERFACE, &priv->flags)) {
	    record->event == IB_EVENT_LID_CHANGE ||
	    record->event == IB_EVENT_SM_CHANGE) {
		ipoib_dbg(priv, "Port active event\n");
-		schedule_work(&priv->flush_task);
+		queue_work(ipoib_workqueue, &priv->flush_task);
	}
}