ASICCtrl = 0x30,
EEData = 0x34,
EECtrl = 0x36,
- TxStartThresh = 0x3c,
- RxEarlyThresh = 0x3e,
FlashAddr = 0x40,
FlashData = 0x44,
TxStatus = 0x46,
static void init_ring(struct net_device *dev);
static int start_tx(struct sk_buff *skb, struct net_device *dev);
static int reset_tx (struct net_device *dev);
-static irqreturn_t intr_handler(int irq, void *dev_instance, struct pt_regs *regs);
+static irqreturn_t intr_handler(int irq, void *dev_instance);
static void rx_poll(unsigned long data);
static void tx_poll(unsigned long data);
static void refill_rx (struct net_device *dev);
static struct net_device_stats *get_stats(struct net_device *dev);
static int netdev_ioctl(struct net_device *dev, struct ifreq *rq, int cmd);
static int netdev_close(struct net_device *dev);
-static struct ethtool_ops ethtool_ops;
+static const struct ethtool_ops ethtool_ops;
static void sundance_reset(struct net_device *dev, unsigned long reset_cmd)
{
{
struct netdev_private *np = netdev_priv(dev);
void __iomem *ioaddr = np->base;
+ unsigned long flags;
int i;
/* Do we need to reset the chip??? */
iowrite8(0x01, ioaddr + DebugCtrl1);
netif_start_queue(dev);
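+ /* Take np->lock with IRQs off so reset_tx() cannot race with the
+ interrupt handler or the tx tasklet (both also take np->lock) */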
+ spin_lock_irqsave(&np->lock, flags);
+ reset_tx(dev);
+ spin_unlock_irqrestore(&np->lock, flags);
+
iowrite16 (StatsEnable | RxEnable | TxEnable, ioaddr + MACCtrl1);
if (netif_msg_ifup(np))
struct netdev_private *np = netdev_priv(dev);
void __iomem *ioaddr = np->base;
unsigned long flag;
-
+
netif_stop_queue(dev);
tasklet_disable(&np->tx_tasklet);
iowrite16(0, ioaddr + IntrEnable);
le32_to_cpu(np->tx_ring[i].next_desc),
le32_to_cpu(np->tx_ring[i].status),
(le32_to_cpu(np->tx_ring[i].status) >> 2) & 0xff,
- le32_to_cpu(np->tx_ring[i].frag[0].addr),
+ le32_to_cpu(np->tx_ring[i].frag[0].addr),
le32_to_cpu(np->tx_ring[i].frag[0].length));
}
- printk(KERN_DEBUG "TxListPtr=%08x netif_queue_stopped=%d\n",
- ioread32(np->base + TxListPtr),
+ printk(KERN_DEBUG "TxListPtr=%08x netif_queue_stopped=%d\n",
+ ioread32(np->base + TxListPtr),
netif_queue_stopped(dev));
- printk(KERN_DEBUG "cur_tx=%d(%02x) dirty_tx=%d(%02x)\n",
+ printk(KERN_DEBUG "cur_tx=%d(%02x) dirty_tx=%d(%02x)\n",
np->cur_tx, np->cur_tx % TX_RING_SIZE,
np->dirty_tx, np->dirty_tx % TX_RING_SIZE);
printk(KERN_DEBUG "cur_rx=%d dirty_rx=%d\n", np->cur_rx, np->dirty_rx);
struct net_device *dev = (struct net_device *)data;
struct netdev_private *np = netdev_priv(dev);
unsigned head = np->cur_task % TX_RING_SIZE;
- struct netdev_desc *txdesc =
+ struct netdev_desc *txdesc =
&np->tx_ring[(np->cur_tx - 1) % TX_RING_SIZE];
-
+
/* Chain the next pointer */
for (; np->cur_tx - np->cur_task > 0; np->cur_task++) {
int entry = np->cur_task % TX_RING_SIZE;
struct sk_buff *skb;
int i;
int irq = in_interrupt();
-
+
/* Reset tx logic, TxListPtr will be cleared */
iowrite16 (TxDisable, ioaddr + MACCtrl1);
sundance_reset(dev, (NetworkReset|FIFOReset|DMAReset|TxReset) << 16);
/* free all tx skbuff */
for (i = 0; i < TX_RING_SIZE; i++) {
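+ /* Break the descriptor chain so the freshly reset DMA engine
+ cannot follow a stale next pointer */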
+ np->tx_ring[i].next_desc = 0;
+
skb = np->tx_skbuff[i];
if (skb) {
- pci_unmap_single(np->pci_dev,
+ pci_unmap_single(np->pci_dev,
np->tx_ring[i].frag[0].addr, skb->len,
PCI_DMA_TODEVICE);
if (irq)
}
np->cur_tx = np->dirty_tx = 0;
np->cur_task = 0;
+
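+ /* Forget the stale chain tail and restore the Tx DMA poll
+ period programmed at open time (127) */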
+ np->last_tx = NULL;
+ iowrite8(127, ioaddr + TxDMAPollPeriod);
+
iowrite16 (StatsEnable | RxEnable | TxEnable, ioaddr + MACCtrl1);
return 0;
}
-/* The interrupt handler cleans up after the Tx thread,
+/* The interrupt handler cleans up after the Tx thread,
and schedules Rx thread work */
-static irqreturn_t intr_handler(int irq, void *dev_instance, struct pt_regs *rgs)
+static irqreturn_t intr_handler(int irq, void *dev_instance)
{
struct net_device *dev = (struct net_device *)dev_instance;
struct netdev_private *np = netdev_priv(dev);
int tx_cnt;
int tx_status;
int handled = 0;
+ int i;
do {
np->stats.tx_fifo_errors++;
if (tx_status & 0x02)
np->stats.tx_window_errors++;
+
/*
** This reset has been verified on
** DFE-580TX boards ! phdm@macqel.be.
*/
if (tx_status & 0x10) { /* TxUnderrun */
- unsigned short txthreshold;
-
- txthreshold = ioread16 (ioaddr + TxStartThresh);
/* Restart Tx FIFO and transmitter */
sundance_reset(dev, (NetworkReset|FIFOReset|TxReset) << 16);
- iowrite16 (txthreshold, ioaddr + TxStartThresh);
/* No need to reset the Tx pointer here */
}
- /* Restart the Tx. */
- iowrite16 (TxEnable, ioaddr + MACCtrl1);
+ /* Restart the Tx and make sure the transmitter is actually enabled */
+ i = 10;
+ do {
+ iowrite16(ioread16(ioaddr + MACCtrl1) | TxEnable, ioaddr + MACCtrl1);
+ if (ioread16(ioaddr + MACCtrl1) & TxEnabled)
+ break;
+ mdelay(1);
+ } while (--i);
}
/* Yup, this is a documentation bug. It cost me *hours*. */
iowrite16 (0, ioaddr + TxStatus);
} else {
hw_frame_id = ioread8(ioaddr + TxFrameId);
}
-
- if (np->pci_rev_id >= 0x14) {
+
+ if (np->pci_rev_id >= 0x14) {
spin_lock(&np->lock);
for (; np->cur_tx - np->dirty_tx > 0; np->dirty_tx++) {
int entry = np->dirty_tx % TX_RING_SIZE;
!(le32_to_cpu(np->tx_ring[entry].status)
& 0x00010000))
break;
- if (sw_frame_id == (hw_frame_id + 1) %
+ if (sw_frame_id == (hw_frame_id + 1) %
TX_RING_SIZE)
break;
skb = np->tx_skbuff[entry];
for (; np->cur_tx - np->dirty_tx > 0; np->dirty_tx++) {
int entry = np->dirty_tx % TX_RING_SIZE;
struct sk_buff *skb;
- if (!(le32_to_cpu(np->tx_ring[entry].status)
+ if (!(le32_to_cpu(np->tx_ring[entry].status)
& 0x00010000))
break;
skb = np->tx_skbuff[entry];
}
spin_unlock(&np->lock);
}
-
+
if (netif_queue_stopped(dev) &&
np->cur_tx - np->dirty_tx < TX_QUEUE_LEN - 4) {
/* The ring is no longer full, clear busy flag. */
np->msg_enable = val;
}
-static struct ethtool_ops ethtool_ops = {
+static const struct ethtool_ops ethtool_ops = {
.begin = check_if_running,
.get_drvinfo = get_drvinfo,
.get_settings = get_settings,
case SIOCDEVPRIVATE:
for (i=0; i<TX_RING_SIZE; i++) {
printk(KERN_DEBUG "%02x %08llx %08x %08x(%02x) %08x %08x\n", i,
- (unsigned long long)(np->tx_ring_dma + i*sizeof(*np->tx_ring)),
+ (unsigned long long)(np->tx_ring_dma + i*sizeof(*np->tx_ring)),
le32_to_cpu(np->tx_ring[i].next_desc),
le32_to_cpu(np->tx_ring[i].status),
- (le32_to_cpu(np->tx_ring[i].status) >> 2)
+ (le32_to_cpu(np->tx_ring[i].status) >> 2)
& 0xff,
- le32_to_cpu(np->tx_ring[i].frag[0].addr),
+ le32_to_cpu(np->tx_ring[i].frag[0].addr),
le32_to_cpu(np->tx_ring[i].frag[0].length));
}
- printk(KERN_DEBUG "TxListPtr=%08x netif_queue_stopped=%d\n",
- ioread32(np->base + TxListPtr),
+ printk(KERN_DEBUG "TxListPtr=%08x netif_queue_stopped=%d\n",
+ ioread32(np->base + TxListPtr),
netif_queue_stopped(dev));
- printk(KERN_DEBUG "cur_tx=%d(%02x) dirty_tx=%d(%02x)\n",
+ printk(KERN_DEBUG "cur_tx=%d(%02x) dirty_tx=%d(%02x)\n",
np->cur_tx, np->cur_tx % TX_RING_SIZE,
np->dirty_tx, np->dirty_tx % TX_RING_SIZE);
printk(KERN_DEBUG "cur_rx=%d dirty_rx=%d\n", np->cur_rx, np->dirty_rx);
printk(KERN_DEBUG "TxStatus=%04x\n", ioread16(ioaddr + TxStatus));
return 0;
}
-
+
return rc;
}
struct sk_buff *skb;
int i;
+ /* Wait and kill tasklet */
+ tasklet_kill(&np->rx_tasklet);
+ tasklet_kill(&np->tx_tasklet);
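+ /* Reset the ring bookkeeping so a subsequent open starts clean */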
+ np->cur_tx = 0;
+ np->dirty_tx = 0;
+ np->cur_task = 0;
+ np->last_tx = NULL;
+
netif_stop_queue(dev);
if (netif_msg_ifdown(np)) {
/* Disable interrupts by clearing the interrupt mask. */
iowrite16(0x0000, ioaddr + IntrEnable);
+ /* Disable Rx and Tx DMA so resources can be released safely */
+ iowrite32(0x500, ioaddr + DMACtrl);
+
/* Stop the chip's Tx and Rx processes. */
iowrite16(TxDisable | RxDisable | StatsDisable, ioaddr + MACCtrl1);
- /* Wait and kill tasklet */
- tasklet_kill(&np->rx_tasklet);
- tasklet_kill(&np->tx_tasklet);
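+ /* Poll up to ~2 s for the DMA-in-progress bits (0xc000 in DMACtrl)
+ to clear before resetting the chip */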
+ for (i = 2000; i > 0; i--) {
+ if ((ioread32(ioaddr + DMACtrl) & 0xc000) == 0)
+ break;
+ mdelay(1);
+ }
+
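+ /* Issue a global ASIC reset, then wait for ResetBusy to clear
+ (up to ~2 s) before freeing resources */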
+ iowrite16(GlobalReset | DMAReset | FIFOReset | NetworkReset,
+ ioaddr + ASICCtrl + 2);
+
+ for (i = 2000; i > 0; i--) {
+ if ((ioread16(ioaddr + ASICCtrl + 2) & ResetBusy) == 0)
+ break;
+ mdelay(1);
+ }
#ifdef __i386__
if (netif_msg_hw(np)) {
}
}
for (i = 0; i < TX_RING_SIZE; i++) {
+ np->tx_ring[i].next_desc = 0;
skb = np->tx_skbuff[i];
if (skb) {
pci_unmap_single(np->pci_dev,