#include <asm/delay.h>
#include "mv643xx_eth.h"
-/*
- * The first part is the high level driver of the gigE ethernet ports.
- */
-
-/* Constants */
-#define VLAN_HLEN 4
-#define FCS_LEN 4
-#define DMA_ALIGN 8 /* hw requires 8-byte alignment */
-#define HW_IP_ALIGN 2 /* hw aligns IP header */
-#define WRAP HW_IP_ALIGN + ETH_HLEN + VLAN_HLEN + FCS_LEN
-#define RX_SKB_SIZE ((dev->mtu + WRAP + 7) & ~0x7)
-
-#define INT_UNMASK_ALL 0x0007ffff
-#define INT_UNMASK_ALL_EXT 0x0011ffff
-#define INT_MASK_ALL 0x00000000
-#define INT_MASK_ALL_EXT 0x00000000
-#define INT_CAUSE_CHECK_BITS INT_CAUSE_UNMASK_ALL
-#define INT_CAUSE_CHECK_BITS_EXT INT_CAUSE_UNMASK_ALL_EXT
-
-#ifdef MV643XX_CHECKSUM_OFFLOAD_TX
-#define MAX_DESCS_PER_SKB (MAX_SKB_FRAGS + 1)
-#else
-#define MAX_DESCS_PER_SKB 1
-#endif
-
-#define PHY_WAIT_ITERATIONS 1000 /* 1000 iterations * 10uS = 10mS max */
-#define PHY_WAIT_MICRO_SECONDS 10
-
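/*
 * Editorial sketch (not part of the patch): these constants are dropped
 * here and reappear below with ETH_ prefixes, presumably moved into
 * mv643xx_eth.h. The size round-up they encode is easy to misread, so
 * rx_skb_size() below is a hypothetical helper restating the
 * RX_SKB_SIZE arithmetic: wrap the MTU with the 2-byte IP-align pad,
 * 14-byte MAC header, VLAN tag and FCS, then round up to the 8-byte
 * DMA alignment.
 */
#define EX_WRAP		(2 + 14 + 4 + 4)	/* HW_IP_ALIGN + ETH_HLEN + VLAN_HLEN + FCS_LEN */

static unsigned int rx_skb_size(unsigned int mtu)
{
	return (mtu + EX_WRAP + 7) & ~0x7;	/* next multiple of 8 */
}
/* rx_skb_size(1500) == (1500 + 24 + 7) & ~7 == 1528 */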
/* Static function declarations */
static void eth_port_uc_addr_get(struct net_device *dev,
unsigned char *MacAddr);
/*
- * mv643xx_eth_rx_task
+ * mv643xx_eth_rx_refill_descs
*
* Fills / refills RX queue on a certain gigabit ethernet port
*
* Input : pointer to ethernet interface network device structure
* Output : N/A
*/
-static void mv643xx_eth_rx_task(void *data)
+static void mv643xx_eth_rx_refill_descs(struct net_device *dev)
{
- struct net_device *dev = (struct net_device *)data;
struct mv643xx_private *mp = netdev_priv(dev);
struct pkt_info pkt_info;
struct sk_buff *skb;
int unaligned;
- if (test_and_set_bit(0, &mp->rx_task_busy))
- panic("%s: Error in test_set_bit / clear_bit", dev->name);
-
- while (mp->rx_desc_count < (mp->rx_ring_size - 5)) {
- skb = dev_alloc_skb(RX_SKB_SIZE + DMA_ALIGN);
+ while (mp->rx_desc_count < mp->rx_ring_size) {
+ skb = dev_alloc_skb(ETH_RX_SKB_SIZE + ETH_DMA_ALIGN);
if (!skb)
break;
mp->rx_desc_count++;
- unaligned = (u32)skb->data & (DMA_ALIGN - 1);
+ unaligned = (u32)skb->data & (ETH_DMA_ALIGN - 1);
if (unaligned)
- skb_reserve(skb, DMA_ALIGN - unaligned);
+ skb_reserve(skb, ETH_DMA_ALIGN - unaligned);
pkt_info.cmd_sts = ETH_RX_ENABLE_INTERRUPT;
- pkt_info.byte_cnt = RX_SKB_SIZE;
- pkt_info.buf_ptr = dma_map_single(NULL, skb->data, RX_SKB_SIZE,
- DMA_FROM_DEVICE);
+ pkt_info.byte_cnt = ETH_RX_SKB_SIZE;
+ pkt_info.buf_ptr = dma_map_single(NULL, skb->data,
+ ETH_RX_SKB_SIZE, DMA_FROM_DEVICE);
pkt_info.return_info = skb;
if (eth_rx_return_buff(mp, &pkt_info) != ETH_OK) {
printk(KERN_ERR
"%s: Error allocating RX Ring\n", dev->name);
break;
}
- skb_reserve(skb, HW_IP_ALIGN);
+ skb_reserve(skb, ETH_HW_IP_ALIGN);
}
- clear_bit(0, &mp->rx_task_busy);
/*
 * If the RX ring is empty of SKBs, set a timer to try allocating
- * again in a later time .
+ * again at a later time.
*/
- if ((mp->rx_desc_count == 0) && (mp->rx_timer_flag == 0)) {
+ if (mp->rx_desc_count == 0) {
printk(KERN_INFO "%s: Rx ring is empty\n", dev->name);
- /* After 100mSec */
- mp->timeout.expires = jiffies + (HZ / 10);
+ mp->timeout.expires = jiffies + (HZ / 10); /* 100 mSec */
add_timer(&mp->timeout);
- mp->rx_timer_flag = 1;
}
-#ifdef MV643XX_RX_QUEUE_FILL_ON_TASK
- else {
- /* Return interrupts */
- mv_write(MV643XX_ETH_INTERRUPT_MASK_REG(mp->port_num),
- INT_UNMASK_ALL);
- }
-#endif
}
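/*
 * Editorial sketch: dev_alloc_skb() makes no 8-byte guarantee, so the
 * refill loop above over-allocates by ETH_DMA_ALIGN and advances
 * skb->data to the next boundary; the trailing 2-byte reserve then
 * makes the IP header land on a 4-byte boundary behind the 14-byte MAC
 * header. dma_align_up() is a hypothetical helper restating the
 * round-up:
 */
static unsigned long dma_align_up(unsigned long data, unsigned long align)
{
	unsigned long unaligned = data & (align - 1);

	return unaligned ? data + (align - unaligned) : data;
}
/* dma_align_up(0x1003, 8) == 0x1008; dma_align_up(0x1008, 8) == 0x1008 */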
/*
- * mv643xx_eth_rx_task_timer_wrapper
+ * mv643xx_eth_rx_refill_descs_timer_wrapper
*
* Timer routine to wake up RX queue filling task. This function is
 * used only in case the RX queue is empty, and all alloc_skb calls
 * have failed, resulting in a blocked RX operation.
 *
* Input : pointer to ethernet interface network device structure
* Output : N/A
*/
-static void mv643xx_eth_rx_task_timer_wrapper(unsigned long data)
+static inline void mv643xx_eth_rx_refill_descs_timer_wrapper(unsigned long data)
{
- struct net_device *dev = (struct net_device *)data;
- struct mv643xx_private *mp = netdev_priv(dev);
-
- mp->rx_timer_flag = 0;
- mv643xx_eth_rx_task((void *)data);
+ mv643xx_eth_rx_refill_descs((struct net_device *)data);
}
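/*
 * Editorial sketch, assembled from the init and refill paths visible in
 * this patch (no new API): the timer plumbing behind the wrapper is
 * roughly
 *
 *	memset(&mp->timeout, 0, sizeof(struct timer_list));
 *	mp->timeout.function = mv643xx_eth_rx_refill_descs_timer_wrapper;
 *	mp->timeout.data = (unsigned long)dev;
 *
 * and, in the refill path, it is armed only when the ring goes empty:
 *
 *	mp->timeout.expires = jiffies + (HZ / 10);	// 100 msec
 *	add_timer(&mp->timeout);
 */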
/*
 * mv643xx_eth_tx_timeout_task
 *
 * Actual routine to reset the adapter when a timeout on Tx has occurred
 */
static void mv643xx_eth_tx_timeout_task(struct net_device *dev)
{
struct mv643xx_private *mp = netdev_priv(dev);
- netif_device_detach(dev);
+ if (!netif_running(dev))
+ return;
+
+ netif_stop_queue(dev);
+
eth_port_reset(mp->port_num);
eth_port_start(dev);
- netif_device_attach(dev);
+
+ if (mp->tx_ring_size - mp->tx_desc_count >= MAX_DESCS_PER_SKB)
+ netif_wake_queue(dev);
}
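/*
 * Editorial sketch: the wake test above recurs throughout this patch.
 * A queue is only safe to wake when a worst-case skb (one descriptor
 * per page fragment plus one for the linear header) still fits in the
 * ring. tx_ring_has_room() is a hypothetical helper, not driver code:
 */
static int tx_ring_has_room(struct mv643xx_private *mp)
{
	return mp->tx_ring_size - mp->tx_desc_count >= MAX_DESCS_PER_SKB;
}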
-/*
- * mv643xx_eth_free_tx_queue
- *
- * Input : dev - a pointer to the required interface
+/**
+ * mv643xx_eth_free_tx_descs - Free the tx desc data for completed descriptors
*
- * Output : 0 if was able to release skb , nonzero otherwise
+ * If force is non-zero, frees uncompleted descriptors as well
*/
-static int mv643xx_eth_free_tx_queue(struct net_device *dev,
- unsigned int eth_int_cause_ext)
+int mv643xx_eth_free_tx_descs(struct net_device *dev, int force)
{
struct mv643xx_private *mp = netdev_priv(dev);
- struct net_device_stats *stats = &mp->stats;
- struct pkt_info pkt_info;
- int released = 1;
+ struct eth_tx_desc *desc;
+ u32 cmd_sts;
+ struct sk_buff *skb;
+ unsigned long flags;
+ int tx_index;
+ dma_addr_t addr;
+ int count;
+ int released = 0;
- if (!(eth_int_cause_ext & (BIT0 | BIT8)))
- return released;
+ while (mp->tx_desc_count > 0) {
+ spin_lock_irqsave(&mp->lock, flags);
+ tx_index = mp->tx_used_desc_q;
+ desc = &mp->p_tx_desc_area[tx_index];
+ cmd_sts = desc->cmd_sts;
+
+ if (!force && (cmd_sts & ETH_BUFFER_OWNED_BY_DMA)) {
+ spin_unlock_irqrestore(&mp->lock, flags);
+ return released;
+ }
+
+ mp->tx_used_desc_q = (tx_index + 1) % mp->tx_ring_size;
+ mp->tx_desc_count--;
+
+ addr = desc->buf_ptr;
+ count = desc->byte_cnt;
+ skb = mp->tx_skb[tx_index];
+ if (skb)
+ mp->tx_skb[tx_index] = NULL;
+
+ spin_unlock_irqrestore(&mp->lock, flags);
- /* Check only queue 0 */
- while (eth_tx_return_desc(mp, &pkt_info) == ETH_OK) {
- if (pkt_info.cmd_sts & BIT0) {
+ if (cmd_sts & ETH_ERROR_SUMMARY) {
printk("%s: Error in TX\n", dev->name);
- stats->tx_errors++;
+ mp->stats.tx_errors++;
}
- if (pkt_info.cmd_sts & ETH_TX_FIRST_DESC)
- dma_unmap_single(NULL, pkt_info.buf_ptr,
- pkt_info.byte_cnt,
- DMA_TO_DEVICE);
+ if (cmd_sts & ETH_TX_FIRST_DESC)
+ dma_unmap_single(NULL, addr, count, DMA_TO_DEVICE);
else
- dma_unmap_page(NULL, pkt_info.buf_ptr,
- pkt_info.byte_cnt,
- DMA_TO_DEVICE);
+ dma_unmap_page(NULL, addr, count, DMA_TO_DEVICE);
- if (pkt_info.return_info) {
- dev_kfree_skb_irq(pkt_info.return_info);
- released = 0;
- }
+ if (skb)
+ dev_kfree_skb_irq(skb);
+
+ released = 1;
}
return released;
}
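/*
 * Editorial sketch: the reclaim loop above walks tx_used_desc_q toward
 * tx_curr_desc_q with a wrapping increment, dropping mp->lock before
 * the potentially slow unmap and free. A stand-alone, user-space demo
 * of the wrapping walk:
 */
#include <stdio.h>

int main(void)
{
	unsigned int ring_size = 4, used = 2, count = 3;

	while (count > 0) {		/* reclaim in submission order */
		printf("reclaim desc %u\n", used);	/* prints 2, 3, 0 */
		used = (used + 1) % ring_size;
		count--;
	}
	return 0;
}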
+static void mv643xx_eth_free_completed_tx_descs(struct net_device *dev)
+{
+ struct mv643xx_private *mp = netdev_priv(dev);
+
+ if (mv643xx_eth_free_tx_descs(dev, 0) &&
+ mp->tx_ring_size - mp->tx_desc_count >= MAX_DESCS_PER_SKB)
+ netif_wake_queue(dev);
+}
+
+static void mv643xx_eth_free_all_tx_descs(struct net_device *dev)
+{
+ mv643xx_eth_free_tx_descs(dev, 1);
+}
+
/*
* mv643xx_eth_receive
 *
 * Forwards packets received on the port's RX queues to the kernel core.
 *
 * Input : dev - a pointer to the required interface
 *         budget - maximum number of packets this call may receive
 *
* Output : number of served packets
*/
-#ifdef MV643XX_NAPI
static int mv643xx_eth_receive_queue(struct net_device *dev, int budget)
-#else
-static int mv643xx_eth_receive_queue(struct net_device *dev)
-#endif
{
struct mv643xx_private *mp = netdev_priv(dev);
struct net_device_stats *stats = &mp->stats;
struct sk_buff *skb;
struct pkt_info pkt_info;
-#ifdef MV643XX_NAPI
while (budget-- > 0 && eth_port_receive(mp, &pkt_info) == ETH_OK) {
-#else
- while (eth_port_receive(mp, &pkt_info) == ETH_OK) {
-#endif
mp->rx_desc_count--;
received_packets++;
- /* Update statistics. Note byte count includes 4 byte CRC count */
+ /*
+ * Update statistics.
+ * Note byte count includes 4 byte CRC count
+ */
stats->rx_packets++;
stats->rx_bytes += pkt_info.byte_cnt;
skb = pkt_info.return_info;
}
dev->last_rx = jiffies;
}
+ mv643xx_eth_rx_refill_descs(dev); /* Fill RX ring with skb's */
return received_packets;
}
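/*
 * Editorial note: the budget parameter lets one routine serve both
 * callers seen below; the NAPI poll path passes its quota, while the
 * non-NAPI interrupt path passes INT_MAX. The contract assumed by
 * mv643xx_poll further down is roughly:
 *
 *	orig_budget = *budget;
 *	if (orig_budget > dev->quota)
 *		orig_budget = dev->quota;
 *	work_done = mv643xx_eth_receive_queue(dev, orig_budget);
 *	*budget -= work_done;
 *	dev->quota -= work_done;
 */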
/* Read interrupt cause registers */
eth_int_cause = mv_read(MV643XX_ETH_INTERRUPT_CAUSE_REG(port_num)) &
- INT_UNMASK_ALL;
-
- if (eth_int_cause & BIT1)
+ ETH_INT_UNMASK_ALL;
+ if (eth_int_cause & ETH_INT_CAUSE_EXT) {
eth_int_cause_ext = mv_read(
MV643XX_ETH_INTERRUPT_CAUSE_EXTEND_REG(port_num)) &
- INT_UNMASK_ALL_EXT;
-
-#ifdef MV643XX_NAPI
- if (!(eth_int_cause & 0x0007fffd)) {
- /* Dont ack the Rx interrupt */
-#endif
- /*
- * Clear specific ethernet port intrerrupt registers by
- * acknowleding relevant bits.
- */
- mv_write(MV643XX_ETH_INTERRUPT_CAUSE_REG(port_num),
- ~eth_int_cause);
- if (eth_int_cause_ext != 0x0)
- mv_write(MV643XX_ETH_INTERRUPT_CAUSE_EXTEND_REG
- (port_num), ~eth_int_cause_ext);
-
- /* UDP change : We may need this */
- if ((eth_int_cause_ext & 0x0000ffff) &&
- (mv643xx_eth_free_tx_queue(dev, eth_int_cause_ext) == 0) &&
- (mp->tx_ring_size - mp->tx_desc_count > MAX_DESCS_PER_SKB))
- netif_wake_queue(dev);
-#ifdef MV643XX_NAPI
- } else {
- if (netif_rx_schedule_prep(dev)) {
- /* Mask all the interrupts */
- mv_write(MV643XX_ETH_INTERRUPT_MASK_REG(port_num),
- INT_MASK_ALL);
- /* wait for previous write to complete */
- mv_read(MV643XX_ETH_INTERRUPT_MASK_REG(port_num));
- __netif_rx_schedule(dev);
- }
-#else
- if (eth_int_cause & (BIT2 | BIT11))
- mv643xx_eth_receive_queue(dev, 0);
-
- /*
- * After forwarded received packets to upper layer, add a task
- * in an interrupts enabled context that refills the RX ring
- * with skb's.
- */
-#ifdef MV643XX_RX_QUEUE_FILL_ON_TASK
- /* Mask all interrupts on ethernet port */
- mv_write(MV643XX_ETH_INTERRUPT_MASK_REG(port_num),
- INT_MASK_ALL);
- /* wait for previous write to take effect */
- mv_read(MV643XX_ETH_INTERRUPT_MASK_REG(port_num));
-
- queue_task(&mp->rx_task, &tq_immediate);
- mark_bh(IMMEDIATE_BH);
-#else
- mp->rx_task.func(dev);
-#endif
-#endif
+ ETH_INT_UNMASK_ALL_EXT;
+ mv_write(MV643XX_ETH_INTERRUPT_CAUSE_EXTEND_REG(port_num),
+ ~eth_int_cause_ext);
}
+
/* PHY status changed */
- if (eth_int_cause_ext & (BIT16 | BIT20)) {
+ if (eth_int_cause_ext & ETH_INT_CAUSE_PHY) {
struct ethtool_cmd cmd;
if (mii_link_ok(&mp->mii)) {
mii_ethtool_gset(&mp->mii, &cmd);
mv643xx_eth_update_pscr(dev, &cmd);
+ mv643xx_eth_port_enable_tx(port_num,
+ ETH_TX_QUEUES_ENABLED);
if (!netif_carrier_ok(dev)) {
netif_carrier_on(dev);
- if (mp->tx_ring_size - mp->tx_desc_count >
- MAX_DESCS_PER_SKB) {
+ if (mp->tx_ring_size - mp->tx_desc_count >=
+ MAX_DESCS_PER_SKB)
netif_wake_queue(dev);
- /* Start TX queue */
- mv643xx_eth_port_enable_tx(port_num, mp->port_tx_queue_command);
- }
}
} else if (netif_carrier_ok(dev)) {
netif_stop_queue(dev);
}
}
+#ifdef MV643XX_NAPI
+ if (eth_int_cause & ETH_INT_CAUSE_RX) {
+ /* schedule the NAPI poll routine to maintain port */
+ mv_write(MV643XX_ETH_INTERRUPT_MASK_REG(port_num),
+ ETH_INT_MASK_ALL);
+ /* wait for previous write to complete */
+ mv_read(MV643XX_ETH_INTERRUPT_MASK_REG(port_num));
+
+ netif_rx_schedule(dev);
+ }
+#else
+ if (eth_int_cause & ETH_INT_CAUSE_RX)
+ mv643xx_eth_receive_queue(dev, INT_MAX);
+#endif
+ if (eth_int_cause_ext & ETH_INT_CAUSE_TX)
+ mv643xx_eth_free_completed_tx_descs(dev);
+
/*
 * If no real interrupt occurred, exit.
 * This can happen when using the gigE interrupt coalescing mechanism.
mp->rx_used_desc_q = 0;
mp->rx_desc_area_size = rx_desc_num * sizeof(struct eth_rx_desc);
-
- /* Enable queue 0 for this port */
- mp->port_rx_queue_command = 1;
}
/*
mp->tx_used_desc_q = 0;
mp->tx_desc_area_size = tx_desc_num * sizeof(struct eth_tx_desc);
-
- /* Enable queue 0 for this port */
- mp->port_tx_queue_command = 1;
}
static int mv643xx_set_settings(struct net_device *dev, struct ethtool_cmd *cmd)
eth_port_init(mp);
- INIT_WORK(&mp->rx_task, (void (*)(void *))mv643xx_eth_rx_task, dev);
-
memset(&mp->timeout, 0, sizeof(struct timer_list));
- mp->timeout.function = mv643xx_eth_rx_task_timer_wrapper;
+ mp->timeout.function = mv643xx_eth_rx_refill_descs_timer_wrapper;
mp->timeout.data = (unsigned long)dev;
- mp->rx_task_busy = 0;
- mp->rx_timer_flag = 0;
-
/* Allocate RX and TX skb rings */
mp->rx_skb = kmalloc(sizeof(*mp->rx_skb) * mp->rx_ring_size,
GFP_KERNEL);
ether_init_rx_desc_ring(mp);
- mv643xx_eth_rx_task(dev); /* Fill RX ring with skb's */
+ mv643xx_eth_rx_refill_descs(dev); /* Fill RX ring with skb's */
/* Clear any pending ethernet port interrupts */
mv_write(MV643XX_ETH_INTERRUPT_CAUSE_REG(port_num), 0);
/* Unmask phy and link status changes interrupts */
mv_write(MV643XX_ETH_INTERRUPT_EXTEND_MASK_REG(port_num),
- INT_UNMASK_ALL_EXT);
+ ETH_INT_UNMASK_ALL_EXT);
/* Unmask RX buffer and TX end interrupt */
- mv_write(MV643XX_ETH_INTERRUPT_MASK_REG(port_num), INT_UNMASK_ALL);
+ mv_write(MV643XX_ETH_INTERRUPT_MASK_REG(port_num), ETH_INT_UNMASK_ALL);
return 0;
static void mv643xx_eth_free_tx_rings(struct net_device *dev)
{
struct mv643xx_private *mp = netdev_priv(dev);
- unsigned int port_num = mp->port_num;
- unsigned int curr;
- struct sk_buff *skb;
/* Stop Tx Queues */
- mv643xx_eth_port_disable_tx(port_num);
+ mv643xx_eth_port_disable_tx(mp->port_num);
- /* Free outstanding skb's on TX rings */
- for (curr = 0; mp->tx_desc_count && curr < mp->tx_ring_size; curr++) {
- skb = mp->tx_skb[curr];
- if (skb) {
- mp->tx_desc_count -= skb_shinfo(skb)->nr_frags;
- dev_kfree_skb(skb);
- mp->tx_desc_count--;
- }
- }
- if (mp->tx_desc_count)
- printk("%s: Error on Tx descriptor free - could not free %d"
- " descriptors\n", dev->name, mp->tx_desc_count);
+ /* Free outstanding skb's on TX ring */
+ mv643xx_eth_free_all_tx_descs(dev);
+
+ BUG_ON(mp->tx_used_desc_q != mp->tx_curr_desc_q);
/* Free TX ring */
if (mp->tx_sram_size)
unsigned int port_num = mp->port_num;
/* Mask all interrupts on ethernet port */
- mv_write(MV643XX_ETH_INTERRUPT_MASK_REG(port_num), INT_MASK_ALL);
+ mv_write(MV643XX_ETH_INTERRUPT_MASK_REG(port_num), ETH_INT_MASK_ALL);
/* wait for previous write to complete */
mv_read(MV643XX_ETH_INTERRUPT_MASK_REG(port_num));
}
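/*
 * Editorial sketch: the mv_read() back of the register just written is
 * the usual posted-write flush; without it, the masking write may still
 * be in flight when the caller proceeds. As a hypothetical helper (not
 * driver code):
 */
static void eth_port_mask_all_ints(unsigned int port_num)
{
	mv_write(MV643XX_ETH_INTERRUPT_MASK_REG(port_num), ETH_INT_MASK_ALL);
	/* read back to force the posted write out to the device */
	mv_read(MV643XX_ETH_INTERRUPT_MASK_REG(port_num));
}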
#ifdef MV643XX_NAPI
-static void mv643xx_tx(struct net_device *dev)
-{
- struct mv643xx_private *mp = netdev_priv(dev);
- struct pkt_info pkt_info;
-
- while (eth_tx_return_desc(mp, &pkt_info) == ETH_OK) {
- if (pkt_info.cmd_sts & ETH_TX_FIRST_DESC)
- dma_unmap_single(NULL, pkt_info.buf_ptr,
- pkt_info.byte_cnt,
- DMA_TO_DEVICE);
- else
- dma_unmap_page(NULL, pkt_info.buf_ptr,
- pkt_info.byte_cnt,
- DMA_TO_DEVICE);
-
- if (pkt_info.return_info)
- dev_kfree_skb_irq(pkt_info.return_info);
- }
-
- if (netif_queue_stopped(dev) &&
- mp->tx_ring_size - mp->tx_desc_count > MAX_DESCS_PER_SKB)
- netif_wake_queue(dev);
-}
-
/*
* mv643xx_poll
*
#ifdef MV643XX_TX_FAST_REFILL
if (++mp->tx_clean_threshold > 5) {
- mv643xx_tx(dev);
+ mv643xx_eth_free_completed_tx_descs(dev);
mp->tx_clean_threshold = 0;
}
#endif
if (orig_budget > dev->quota)
orig_budget = dev->quota;
work_done = mv643xx_eth_receive_queue(dev, orig_budget);
- mp->rx_task.func(dev);
*budget -= work_done;
dev->quota -= work_done;
if (work_done >= orig_budget)
mv_write(MV643XX_ETH_INTERRUPT_CAUSE_REG(port_num), 0);
mv_write(MV643XX_ETH_INTERRUPT_CAUSE_EXTEND_REG(port_num), 0);
mv_write(MV643XX_ETH_INTERRUPT_MASK_REG(port_num),
- INT_UNMASK_ALL);
+ ETH_INT_UNMASK_ALL);
}
return done ? 0 : 1;
{
int tx_desc_curr;
- tx_desc_curr = mp->tx_curr_desc_q;
-
BUG_ON(mp->tx_desc_count >= mp->tx_ring_size);
- mp->tx_desc_count++;
+ tx_desc_curr = mp->tx_curr_desc_q;
mp->tx_curr_desc_q = (tx_desc_curr + 1) % mp->tx_ring_size;
BUG_ON(mp->tx_curr_desc_q == mp->tx_used_desc_q);
int frag;
int tx_index;
struct eth_tx_desc *desc;
- struct net_device_stats *stats = &mp->stats;
for (frag = 0; frag < skb_shinfo(skb)->nr_frags; frag++) {
skb_frag_t *this_frag = &skb_shinfo(skb)->frags[frag];
this_frag->page_offset,
this_frag->size,
DMA_TO_DEVICE);
- stats->tx_bytes += this_frag->size;
}
}
* Ensure the data for an skb to be transmitted is mapped properly,
* then fill in descriptors in the tx hw queue and start the hardware.
*/
-static int eth_tx_submit_descs_for_skb(struct mv643xx_private *mp,
- struct sk_buff *skb)
+static void eth_tx_submit_descs_for_skb(struct mv643xx_private *mp,
+ struct sk_buff *skb)
{
int tx_index;
struct eth_tx_desc *desc;
u32 cmd_sts;
int length;
- int tx_bytes = 0;
+ int nr_frags = skb_shinfo(skb)->nr_frags;
cmd_sts = ETH_TX_FIRST_DESC | ETH_GEN_CRC | ETH_BUFFER_OWNED_BY_DMA;
tx_index = eth_alloc_tx_desc_index(mp);
desc = &mp->p_tx_desc_area[tx_index];
- if (skb_shinfo(skb)->nr_frags) {
+ if (nr_frags) {
eth_tx_fill_frag_descs(mp, skb);
length = skb_headlen(skb);
desc->byte_cnt = length;
desc->buf_ptr = dma_map_single(NULL, skb->data, length, DMA_TO_DEVICE);
- tx_bytes += length;
if (skb->ip_summed == CHECKSUM_HW) {
BUG_ON(skb->protocol != ETH_P_IP);
/* ensure all descriptors are written before poking hardware */
wmb();
- mv643xx_eth_port_enable_tx(mp->port_num, mp->port_tx_queue_command);
+ mv643xx_eth_port_enable_tx(mp->port_num, ETH_TX_QUEUES_ENABLED);
- return tx_bytes;
+ mp->tx_desc_count += nr_frags + 1;
}
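/*
 * Editorial sketch: the final accounting above ties the submit path to
 * the reclaim path. An skb costs one descriptor for its linear area
 * plus one per page fragment, which is exactly why MAX_DESCS_PER_SKB
 * is defined as MAX_SKB_FRAGS + 1 when TX checksum offload (and hence
 * fragmented skbs) is enabled. descs_needed() is a hypothetical helper:
 */
static int descs_needed(struct sk_buff *skb)
{
	return skb_shinfo(skb)->nr_frags + 1;
}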
/**
BUG_ON(netif_queue_stopped(dev));
BUG_ON(skb == NULL);
- BUG_ON(mp->tx_ring_size - mp->tx_desc_count < MAX_DESCS_PER_SKB);
+
+ if (mp->tx_ring_size - mp->tx_desc_count < MAX_DESCS_PER_SKB) {
+ printk(KERN_ERR "%s: transmit with queue full\n", dev->name);
+ netif_stop_queue(dev);
+ return 1;
+ }
if (has_tiny_unaligned_frags(skb)) {
if ((skb_linearize(skb, GFP_ATOMIC) != 0)) {
spin_lock_irqsave(&mp->lock, flags);
- stats->tx_bytes = eth_tx_submit_descs_for_skb(mp, skb);
+ eth_tx_submit_descs_for_skb(mp, skb);
+ stats->tx_bytes = skb->len;
stats->tx_packets++;
dev->trans_start = jiffies;
struct mv643xx_private *mp = netdev_priv(netdev);
int port_num = mp->port_num;
- mv_write(MV643XX_ETH_INTERRUPT_MASK_REG(port_num), INT_MASK_ALL);
+ mv_write(MV643XX_ETH_INTERRUPT_MASK_REG(port_num), ETH_INT_MASK_ALL);
/* wait for previous write to complete */
mv_read(MV643XX_ETH_INTERRUPT_MASK_REG(port_num));
mv643xx_eth_int_handler(netdev->irq, netdev, NULL);
- mv_write(MV643XX_ETH_INTERRUPT_MASK_REG(port_num), INT_UNMASK_ALL);
+ mv_write(MV643XX_ETH_INTERRUPT_MASK_REG(port_num), ETH_INT_UNMASK_ALL);
}
#endif
MV643XX_ETH_PORT_SDMA_CONFIG_DEFAULT_VALUE);
/* Enable port Rx. */
- mv643xx_eth_port_enable_rx(port_num, mp->port_rx_queue_command);
+ mv643xx_eth_port_enable_rx(port_num, ETH_RX_QUEUES_ENABLED);
/* Disable port bandwidth limits by clearing MTU register */
mv_write(MV643XX_ETH_MAXIMUM_TRANSMIT_UNIT(port_num), 0);
eth_port_write_smi_reg(mp->port_num, location, val);
}
-/*
- * eth_tx_return_desc - Free all used Tx descriptors
- *
- * DESCRIPTION:
- * This routine returns the transmitted packet information to the caller.
- * It uses the 'first' index to support Tx desc return in case a transmit
- * of a packet spanned over multiple buffer still in process.
- * In case the Tx queue was in "resource error" condition, where there are
- * no available Tx resources, the function resets the resource error flag.
- *
- * INPUT:
- * struct mv643xx_private *mp Ethernet Port Control srtuct.
- * struct pkt_info *p_pkt_info User packet buffer.
- *
- * OUTPUT:
- * Tx ring 'first' and 'used' indexes are updated.
- *
- * RETURN:
- * ETH_OK on success
- * ETH_ERROR otherwise.
- *
- */
-static ETH_FUNC_RET_STATUS eth_tx_return_desc(struct mv643xx_private *mp,
- struct pkt_info *p_pkt_info)
-{
- int tx_desc_used;
- struct eth_tx_desc *p_tx_desc_used;
- unsigned int command_status;
- unsigned long flags;
- int err = ETH_OK;
-
- spin_lock_irqsave(&mp->lock, flags);
-
- BUG_ON(mp->tx_desc_count < 0);
- if (mp->tx_desc_count == 0) {
- /* no more tx descs in use */
- err = ETH_ERROR;
- goto out;
- }
-
- /* Get the Tx Desc ring indexes */
- tx_desc_used = mp->tx_used_desc_q;
-
- p_tx_desc_used = &mp->p_tx_desc_area[tx_desc_used];
-
- BUG_ON(p_tx_desc_used == NULL);
-
- command_status = p_tx_desc_used->cmd_sts;
- if (command_status & (ETH_BUFFER_OWNED_BY_DMA)) {
- /* Still transmitting... */
- err = ETH_ERROR;
- goto out;
- }
-
- /* Pass the packet information to the caller */
- p_pkt_info->cmd_sts = command_status;
- p_pkt_info->return_info = mp->tx_skb[tx_desc_used];
- p_pkt_info->buf_ptr = p_tx_desc_used->buf_ptr;
- p_pkt_info->byte_cnt = p_tx_desc_used->byte_cnt;
- mp->tx_skb[tx_desc_used] = NULL;
-
- /* Update the next descriptor to release. */
- mp->tx_used_desc_q = (tx_desc_used + 1) % mp->tx_ring_size;
-
- BUG_ON(mp->tx_desc_count == 0);
- mp->tx_desc_count--;
-
-out:
- spin_unlock_irqrestore(&mp->lock, flags);
-
- return err;
-}
-
/*
* eth_port_receive - Get received information from Rx ring.
*