* for 64bit hardware platforms.
*
* TODO
- * Big-endian support
* rx_copybreak/alignment
* Scatter gather
* More testing
#include <linux/mii.h>
#include <linux/in.h>
#include <linux/if_arp.h>
+#include <linux/if_vlan.h>
#include <linux/ip.h>
#include <linux/tcp.h>
#include <linux/udp.h>
static int velocity_nics = 0;
static int msglevel = MSG_LEVEL_INFO;
+/**
+ * mac_get_cam_mask - Read a CAM mask
+ * @regs: register block for this velocity
+ * @mask: buffer to store mask
+ *
+ * Fetch the mask bits of the selected CAM and store them into the
+ * provided mask buffer.
+ */
+
+static void mac_get_cam_mask(struct mac_regs __iomem * regs, u8 * mask)
+{
+ int i;
+
+ /* Select CAM mask */
+ BYTE_REG_BITS_SET(CAMCR_PS_CAM_MASK, CAMCR_PS1 | CAMCR_PS0, &regs->CAMCR);
+
+ writeb(0, &regs->CAMADDR);
+
+ /* read mask */
+ for (i = 0; i < 8; i++)
+ *mask++ = readb(&(regs->MARCAM[i]));
+
+ /* disable CAMEN */
+ writeb(0, &regs->CAMADDR);
+
+ /* Select mar */
+ BYTE_REG_BITS_SET(CAMCR_PS_MAR, CAMCR_PS1 | CAMCR_PS0, &regs->CAMCR);
+
+}
+
+
+/**
+ * mac_set_cam_mask - Set a CAM mask
+ * @regs: register block for this velocity
+ * @mask: CAM mask to load
+ *
+ * Store a new mask into a CAM
+ */
+
+static void mac_set_cam_mask(struct mac_regs __iomem * regs, u8 * mask)
+{
+ int i;
+ /* Select CAM mask */
+ BYTE_REG_BITS_SET(CAMCR_PS_CAM_MASK, CAMCR_PS1 | CAMCR_PS0, &regs->CAMCR);
+
+ writeb(CAMADDR_CAMEN, &regs->CAMADDR);
+
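+ /* With CAMEN set, the eight mask bytes are written through MARCAM */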
+ for (i = 0; i < 8; i++) {
+ writeb(*mask++, &(regs->MARCAM[i]));
+ }
+ /* disable CAMEN */
+ writeb(0, &regs->CAMADDR);
+
+ /* Select mar */
+ BYTE_REG_BITS_SET(CAMCR_PS_MAR, CAMCR_PS1 | CAMCR_PS0, &regs->CAMCR);
+}
+
+static void mac_set_vlan_cam_mask(struct mac_regs __iomem * regs, u8 * mask)
+{
+ int i;
+ /* Select CAM mask */
+ BYTE_REG_BITS_SET(CAMCR_PS_CAM_MASK, CAMCR_PS1 | CAMCR_PS0, &regs->CAMCR);
+
+ writeb(CAMADDR_CAMEN | CAMADDR_VCAMSL, &regs->CAMADDR);
+
+ for (i = 0; i < 8; i++) {
+ writeb(*mask++, &(regs->MARCAM[i]));
+ }
+ /* disable CAMEN */
+ writeb(0, &regs->CAMADDR);
+
+ /* Select mar */
+ BYTE_REG_BITS_SET(CAMCR_PS_MAR, CAMCR_PS1 | CAMCR_PS0, &regs->CAMCR);
+}
+
+/**
+ * mac_set_cam - set CAM data
+ * @regs: register block of this velocity
+ * @idx: Cam index
+ * @addr: 2 or 6 bytes of CAM data
+ *
+ * Load an address or vlan tag into a CAM
+ */
+
+static void mac_set_cam(struct mac_regs __iomem * regs, int idx, const u8 *addr)
+{
+ int i;
+
+ /* Select CAM data */
+ BYTE_REG_BITS_SET(CAMCR_PS_CAM_DATA, CAMCR_PS1 | CAMCR_PS0, &regs->CAMCR);
+
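+ /* The CAM holds 64 entries, so keep the index in range */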
+ idx &= (64 - 1);
+
+ writeb(CAMADDR_CAMEN | idx, &regs->CAMADDR);
+
+ for (i = 0; i < 6; i++) {
+ writeb(*addr++, &(regs->MARCAM[i]));
+ }
+ BYTE_REG_BITS_ON(CAMCR_CAMWR, &regs->CAMCR);
+
+ udelay(10);
+
+ writeb(0, &regs->CAMADDR);
+
+ /* Select mar */
+ BYTE_REG_BITS_SET(CAMCR_PS_MAR, CAMCR_PS1 | CAMCR_PS0, &regs->CAMCR);
+}
+
+static void mac_set_vlan_cam(struct mac_regs __iomem * regs, int idx,
+ const u8 *addr)
+{
+
+ /* Select CAM data */
+ BYTE_REG_BITS_SET(CAMCR_PS_CAM_DATA, CAMCR_PS1 | CAMCR_PS0, &regs->CAMCR);
+
+ idx &= (64 - 1);
+
+ writeb(CAMADDR_CAMEN | CAMADDR_VCAMSL | idx, &regs->CAMADDR);
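+ /* A VLAN CAM entry is just the 16-bit tag */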
+ writew(*((u16 *) addr), &regs->MARCAM[0]);
+
+ BYTE_REG_BITS_ON(CAMCR_CAMWR, &regs->CAMCR);
+
+ udelay(10);
+
+ writeb(0, &regs->CAMADDR);
+
+ /* Select mar */
+ BYTE_REG_BITS_SET(CAMCR_PS_MAR, CAMCR_PS1 | CAMCR_PS0, &regs->CAMCR);
+}
+
+
+/**
+ * mac_wol_reset - reset WOL after exiting low power
+ * @regs: register block of this velocity
+ *
+ * Called after we drop out of Wake-on-LAN mode in order to reset
+ * the Wake-on-LAN features. This function does not restore the
+ * rest of the chip state affected by sleep/wakeup.
+ */
+
+static void mac_wol_reset(struct mac_regs __iomem * regs)
+{
+
+ /* Turn off SWPTAG right after leaving power mode */
+ BYTE_REG_BITS_OFF(STICKHW_SWPTAG, &regs->STICKHW);
+ /* clear sticky bits */
+ BYTE_REG_BITS_OFF((STICKHW_DS1 | STICKHW_DS0), &regs->STICKHW);
+
+ BYTE_REG_BITS_OFF(CHIPGCR_FCGMII, &regs->CHIPGCR);
+ BYTE_REG_BITS_OFF(CHIPGCR_FCMODE, &regs->CHIPGCR);
+ /* disable force PME-enable */
+ writeb(WOLCFG_PMEOVR, &regs->WOLCFGClr);
+ /* disable power-event config bit */
+ writew(0xFFFF, &regs->WOLCRClr);
+ /* clear power status */
+ writew(0xFFFF, &regs->WOLSRClr);
+}
static int velocity_mii_ioctl(struct net_device *dev, struct ifreq *ifr, int cmd);
static const struct ethtool_ops velocity_ethtool_ops;
#define TX_DESC_DEF 64
VELOCITY_PARAM(TxDescriptors, "Number of transmit descriptors");
-#define VLAN_ID_MIN 0
-#define VLAN_ID_MAX 4095
-#define VLAN_ID_DEF 0
-/* VID_setting[] is used for setting the VID of NIC.
- 0: default VID.
- 1-4094: other VIDs.
-*/
-VELOCITY_PARAM(VID_setting, "802.1Q VLAN ID");
-
#define RX_THRESH_MIN 0
#define RX_THRESH_MAX 3
#define RX_THRESH_DEF 0
*/
VELOCITY_PARAM(DMA_length, "DMA length");
-#define TAGGING_DEF 0
-/* enable_tagging[] is used for enabling 802.1Q VID tagging.
- 0: disable VID seeting(default).
- 1: enable VID setting.
-*/
-VELOCITY_PARAM(enable_tagging, "Enable 802.1Q tagging");
-
#define IP_ALIG_DEF 0
/* IP_byte_align[] is used for IP header DWORD byte aligned
0: indicate the IP header won't be DWORD byte aligned.(Default) .
* Internal board variants. At the moment we have only one
*/
-static const struct velocity_info_tbl chip_info_table[] __devinitdata = {
+static struct velocity_info_tbl chip_info_table[] = {
{CHIP_TYPE_VT6110, "VIA Networking Velocity Family Gigabit Ethernet Adapter", 1, 0x00FFFFFFUL},
{ }
};
* a pointer to a static string valid while the driver is loaded.
*/
-static char __devinit *get_chip_name(enum chip_type chip_id)
+static const char __devinit *get_chip_name(enum chip_type chip_id)
{
int i;
for (i = 0; chip_info_table[i].name != NULL; i++)
velocity_set_int_opt(&opts->DMA_length, DMA_length[index], DMA_LENGTH_MIN, DMA_LENGTH_MAX, DMA_LENGTH_DEF, "DMA_length", devname);
velocity_set_int_opt(&opts->numrx, RxDescriptors[index], RX_DESC_MIN, RX_DESC_MAX, RX_DESC_DEF, "RxDescriptors", devname);
velocity_set_int_opt(&opts->numtx, TxDescriptors[index], TX_DESC_MIN, TX_DESC_MAX, TX_DESC_DEF, "TxDescriptors", devname);
- velocity_set_int_opt(&opts->vid, VID_setting[index], VLAN_ID_MIN, VLAN_ID_MAX, VLAN_ID_DEF, "VID_setting", devname);
- velocity_set_bool_opt(&opts->flags, enable_tagging[index], TAGGING_DEF, VELOCITY_FLAGS_TAGGING, "enable_tagging", devname);
+
velocity_set_bool_opt(&opts->flags, txcsum_offload[index], TX_CSUM_DEF, VELOCITY_FLAGS_TX_CSUM, "txcsum_offload", devname);
velocity_set_int_opt(&opts->flow_cntl, flow_control[index], FLOW_CNTL_MIN, FLOW_CNTL_MAX, FLOW_CNTL_DEF, "flow_control", devname);
velocity_set_bool_opt(&opts->flags, IP_byte_align[index], IP_ALIG_DEF, VELOCITY_FLAGS_IP_ALIGN, "IP_byte_align", devname);
/* Disable all CAMs */
memset(vptr->vCAMmask, 0, sizeof(u8) * 8);
memset(vptr->mCAMmask, 0, sizeof(u8) * 8);
- mac_set_cam_mask(regs, vptr->vCAMmask, VELOCITY_VLAN_ID_CAM);
- mac_set_cam_mask(regs, vptr->mCAMmask, VELOCITY_MULTICAST_CAM);
-
- /* Enable first VCAM */
- if (vptr->flags & VELOCITY_FLAGS_TAGGING) {
- /* If Tagging option is enabled and VLAN ID is not zero, then
- turn on MCFG_RTGOPT also */
- if (vptr->options.vid != 0)
+ mac_set_vlan_cam_mask(regs, vptr->vCAMmask);
+ mac_set_cam_mask(regs, vptr->mCAMmask);
+
+ /* Enable VCAMs */
+ if (vptr->vlgrp) {
+ unsigned int vid, i = 0;
+
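+ /* No device registered for VID 0, so turn on the receive tagging option */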
+ if (!vlan_group_get_device(vptr->vlgrp, 0))
WORD_REG_BITS_ON(MCFG_RTGOPT, &regs->MCFG);
- mac_set_cam(regs, 0, (u8 *) & (vptr->options.vid), VELOCITY_VLAN_ID_CAM);
- vptr->vCAMmask[0] |= 1;
- mac_set_cam_mask(regs, vptr->vCAMmask, VELOCITY_VLAN_ID_CAM);
- } else {
- u16 temp = 0;
- mac_set_cam(regs, 0, (u8 *) &temp, VELOCITY_VLAN_ID_CAM);
- temp = 1;
- mac_set_cam_mask(regs, (u8 *) &temp, VELOCITY_VLAN_ID_CAM);
+ for (vid = 1; (vid < VLAN_VID_MASK); vid++) {
+ if (vlan_group_get_device(vptr->vlgrp, vid)) {
+ mac_set_vlan_cam(regs, i, (u8 *) &vid);
+ vptr->vCAMmask[i / 8] |= 0x1 << (i % 8);
+ if (++i >= VCAM_SIZE)
+ break;
+ }
+ }
+ mac_set_vlan_cam_mask(regs, vptr->vCAMmask);
}
}
+static void velocity_vlan_rx_register(struct net_device *dev,
+ struct vlan_group *grp)
+{
+ struct velocity_info *vptr = netdev_priv(dev);
+
+ vptr->vlgrp = grp;
+}
+
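+/**
+ * velocity_vlan_rx_add_vid - add a VLAN id
+ * @dev: network device being configured
+ * @vid: 802.1Q VLAN id to accept
+ *
+ * Rebuild the hardware CAM filter under the device lock so that
+ * frames tagged with @vid pass the VLAN filter.
+ */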
+static void velocity_vlan_rx_add_vid(struct net_device *dev, unsigned short vid)
+{
+ struct velocity_info *vptr = netdev_priv(dev);
+
+ spin_lock_irq(&vptr->lock);
+ velocity_init_cam_filter(vptr);
+ spin_unlock_irq(&vptr->lock);
+}
+
+static void velocity_vlan_rx_kill_vid(struct net_device *dev, unsigned short vid)
+{
+ struct velocity_info *vptr = netdev_priv(dev);
+
+ spin_lock_irq(&vptr->lock);
+ vlan_group_set_device(vptr->vlgrp, vid, NULL);
+ velocity_init_cam_filter(vptr);
+ spin_unlock_irq(&vptr->lock);
+}
+
+
/**
* velocity_rx_reset - handle a receive reset
* @vptr: velocity we are resetting
* Init state, all RD entries belong to the NIC
*/
for (i = 0; i < vptr->options.numrx; ++i)
- vptr->rd_ring[i].rdesc0.owner = OWNED_BY_NIC;
+ vptr->rd_ring[i].rdesc0.len |= OWNED_BY_NIC;
writew(vptr->options.numrx, &regs->RBRDU);
writel(vptr->rd_pool_dma, &regs->RDBaseLo);
vptr->int_mask = INT_MASK_DEF;
- writel(cpu_to_le32(vptr->rd_pool_dma), &regs->RDBaseLo);
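+ /* writel() already converts to the NIC's little-endian byte order */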
+ writel(vptr->rd_pool_dma, &regs->RDBaseLo);
writew(vptr->options.numrx - 1, &regs->RDCSize);
mac_rx_queue_run(regs);
mac_rx_queue_wake(regs);
writew(vptr->options.numtx - 1, &regs->TDCSize);
for (i = 0; i < vptr->num_txq; i++) {
- writel(cpu_to_le32(vptr->td_pool_dma[i]), &(regs->TDBaseLo[i]));
+ writel(vptr->td_pool_dma[i], &regs->TDBaseLo[i]);
mac_tx_queue_run(regs, i);
}
/* Chain it all together */
- SET_MODULE_OWNER(dev);
SET_NETDEV_DEV(dev, &pdev->dev);
vptr = netdev_priv(dev);
dev->do_ioctl = velocity_ioctl;
dev->ethtool_ops = &velocity_ethtool_ops;
dev->change_mtu = velocity_change_mtu;
+
+ dev->vlan_rx_add_vid = velocity_vlan_rx_add_vid;
+ dev->vlan_rx_kill_vid = velocity_vlan_rx_kill_vid;
+ dev->vlan_rx_register = velocity_vlan_rx_register;
+
#ifdef VELOCITY_ZERO_COPY_SUPPORT
dev->features |= NETIF_F_SG;
#endif
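+ /* The chip can insert, filter and strip 802.1Q tags in hardware */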
+ dev->features |= NETIF_F_HW_VLAN_TX | NETIF_F_HW_VLAN_FILTER |
+ NETIF_F_HW_VLAN_RX;
- if (vptr->flags & VELOCITY_FLAGS_TX_CSUM) {
+ if (vptr->flags & VELOCITY_FLAGS_TX_CSUM)
dev->features |= NETIF_F_IP_CSUM;
- }
ret = register_netdev(dev);
if (ret < 0)
dirty = vptr->rd_dirty - unusable;
for (avail = vptr->rd_filled & 0xfffc; avail; avail--) {
dirty = (dirty > 0) ? dirty - 1 : vptr->options.numrx - 1;
- vptr->rd_ring[dirty].rdesc0.owner = OWNED_BY_NIC;
+ vptr->rd_ring[dirty].rdesc0.len |= OWNED_BY_NIC;
}
writew(vptr->rd_filled & 0xfffc, &regs->RBRDU);
struct rx_desc *rd = vptr->rd_ring + dirty;
/* Fine for an all zero Rx desc at init time as well */
- if (rd->rdesc0.owner == OWNED_BY_NIC)
+ if (rd->rdesc0.len & OWNED_BY_NIC)
break;
if (!vptr->rd_info[dirty].skb) {
static int velocity_init_rd_ring(struct velocity_info *vptr)
{
- int ret = -ENOMEM;
- unsigned int rsize = sizeof(struct velocity_rd_info) *
- vptr->options.numrx;
+ int ret;
+ int mtu = vptr->dev->mtu;
- vptr->rd_info = kmalloc(rsize, GFP_KERNEL);
- if(vptr->rd_info == NULL)
- goto out;
- memset(vptr->rd_info, 0, rsize);
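+ /* The default buffer size covers a standard MTU; jumbo MTUs get mtu + 32 bytes */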
+ vptr->rx_buf_sz = (mtu <= ETH_DATA_LEN) ? PKT_BUF_SZ : mtu + 32;
+
+ vptr->rd_info = kcalloc(vptr->options.numrx,
+ sizeof(struct velocity_rd_info), GFP_KERNEL);
+ if (!vptr->rd_info)
+ return -ENOMEM;
vptr->rd_filled = vptr->rd_dirty = vptr->rd_curr = 0;
"%s: failed to allocate RX buffer.\n", vptr->dev->name);
velocity_free_rd_ring(vptr);
}
-out:
+
return ret;
}
dma_addr_t curr;
struct tx_desc *td;
struct velocity_td_info *td_info;
- unsigned int tsize = sizeof(struct velocity_td_info) *
- vptr->options.numtx;
/* Init the TD ring entries */
for (j = 0; j < vptr->num_txq; j++) {
curr = vptr->td_pool_dma[j];
- vptr->td_infos[j] = kmalloc(tsize, GFP_KERNEL);
- if(vptr->td_infos[j] == NULL)
- {
+ vptr->td_infos[j] = kcalloc(vptr->options.numtx,
+ sizeof(struct velocity_td_info),
+ GFP_KERNEL);
+ if (!vptr->td_infos[j]) {
while(--j >= 0)
kfree(vptr->td_infos[j]);
return -ENOMEM;
}
- memset(vptr->td_infos[j], 0, tsize);
for (i = 0; i < vptr->options.numtx; i++, curr += sizeof(struct tx_desc)) {
td = &(vptr->td_rings[j][i]);
if (!vptr->rd_info[rd_curr].skb)
break;
- if (rd->rdesc0.owner == OWNED_BY_NIC)
+ if (rd->rdesc0.len & OWNED_BY_NIC)
break;
rmb();
/*
* Don't drop CE or RL error frame although RXOK is off
*/
- if ((rd->rdesc0.RSR & RSR_RXOK) || (!(rd->rdesc0.RSR & RSR_RXOK) && (rd->rdesc0.RSR & (RSR_CE | RSR_RL)))) {
+ if (rd->rdesc0.RSR & (RSR_RXOK | RSR_CE | RSR_RL)) {
if (velocity_receive_frame(vptr, rd_curr) < 0)
stats->rx_dropped++;
} else {
stats->rx_dropped++;
}
- rd->inten = 1;
+ rd->size |= RX_INTEN;
vptr->dev->last_rx = jiffies;
struct net_device_stats *stats = &vptr->stats;
struct velocity_rd_info *rd_info = &(vptr->rd_info[idx]);
struct rx_desc *rd = &(vptr->rd_ring[idx]);
- int pkt_len = rd->rdesc0.len;
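+ /* The low 14 bits of rdesc0.len hold the frame length; the upper bits carry status such as OWNED_BY_NIC */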
+ int pkt_len = le16_to_cpu(rd->rdesc0.len) & 0x3fff;
struct sk_buff *skb;
if (rd->rdesc0.RSR & (RSR_STP | RSR_EDP)) {
skb_put(skb, pkt_len - 4);
skb->protocol = eth_type_trans(skb, vptr->dev);
+ if (vptr->vlgrp && (rd->rdesc0.RSR & RSR_DETAG)) {
+ vlan_hwaccel_rx(skb, vptr->vlgrp,
+ swab16(le16_to_cpu(rd->rdesc1.PQTAG)));
+ } else
+ netif_rx(skb);
+
stats->rx_bytes += pkt_len;
- netif_rx(skb);
return 0;
}
*/
*((u32 *) & (rd->rdesc0)) = 0;
- rd->len = cpu_to_le32(vptr->rx_buf_sz);
- rd->inten = 1;
+ rd->size = cpu_to_le16(vptr->rx_buf_sz) | RX_INTEN;
rd->pa_low = cpu_to_le32(rd_info->skb_dma);
rd->pa_high = 0;
return 0;
*
* Scan the queues looking for transmitted packets that
* we can complete and clean up. Update any statistics as
- * neccessary/
+ * necessary.
*/
static int velocity_tx_srv(struct velocity_info *vptr, u32 status)
td = &(vptr->td_rings[qnum][idx]);
tdinfo = &(vptr->td_infos[qnum][idx]);
- if (td->tdesc0.owner == OWNED_BY_NIC)
+ if (td->tdesc0.len & OWNED_BY_NIC)
break;
if ((works++ > 15))
for (i = 0; i < tdinfo->nskb_dma; i++) {
#ifdef VELOCITY_ZERO_COPY_SUPPORT
- pci_unmap_single(vptr->pdev, tdinfo->skb_dma[i], td->tdesc1.len, PCI_DMA_TODEVICE);
+ pci_unmap_single(vptr->pdev, tdinfo->skb_dma[i], le16_to_cpu(td->tdesc1.len), PCI_DMA_TODEVICE);
#else
pci_unmap_single(vptr->pdev, tdinfo->skb_dma[i], skb->len, PCI_DMA_TODEVICE);
#endif
struct velocity_info *vptr = netdev_priv(dev);
int ret;
- vptr->rx_buf_sz = (dev->mtu <= 1504 ? PKT_BUF_SZ : dev->mtu + 32);
-
ret = velocity_init_rings(vptr);
if (ret < 0)
goto out;
return -EINVAL;
}
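+ /* Device is down: just record the new MTU; the ring and buffer
+ size are derived from it at the next open */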
+ if (!netif_running(dev)) {
+ dev->mtu = new_mtu;
+ return 0;
+ }
+
if (new_mtu != oldmtu) {
spin_lock_irqsave(&vptr->lock, flags);
velocity_free_rd_ring(vptr);
dev->mtu = new_mtu;
- if (new_mtu > 8192)
- vptr->rx_buf_sz = 9 * 1024;
- else if (new_mtu > 4096)
- vptr->rx_buf_sz = 8192;
- else
- vptr->rx_buf_sz = 4 * 1024;
ret = velocity_init_rd_ring(vptr);
if (ret < 0)
struct velocity_td_info *tdinfo;
unsigned long flags;
int index;
-
int pktlen = skb->len;
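+ /* Descriptor length fields are little-endian, so convert once up front */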
+ __le16 len = cpu_to_le16(pktlen);
#ifdef VELOCITY_ZERO_COPY_SUPPORT
if (skb_shinfo(skb)->nr_frags > 6 && __skb_linearize(skb)) {
td_ptr = &(vptr->td_rings[qnum][index]);
tdinfo = &(vptr->td_infos[qnum][index]);
- td_ptr->tdesc1.TCPLS = TCPLS_NORMAL;
td_ptr->tdesc1.TCR = TCR0_TIC;
- td_ptr->td_buf[0].queue = 0;
+ td_ptr->td_buf[0].size &= ~TD_QUEUE;
/*
* Pad short frames.
if (pktlen < ETH_ZLEN) {
/* Cannot occur until ZC support */
pktlen = ETH_ZLEN;
+ len = cpu_to_le16(ETH_ZLEN);
skb_copy_from_linear_data(skb, tdinfo->buf, skb->len);
memset(tdinfo->buf + skb->len, 0, ETH_ZLEN - skb->len);
tdinfo->skb = skb;
tdinfo->skb_dma[0] = tdinfo->buf_dma;
- td_ptr->tdesc0.pktsize = pktlen;
+ td_ptr->tdesc0.len = len;
td_ptr->td_buf[0].pa_low = cpu_to_le32(tdinfo->skb_dma[0]);
td_ptr->td_buf[0].pa_high = 0;
- td_ptr->td_buf[0].bufsize = td_ptr->tdesc0.pktsize;
+ td_ptr->td_buf[0].size = len; /* queue is 0 anyway */
tdinfo->nskb_dma = 1;
- td_ptr->tdesc1.CMDZ = 2;
} else
#ifdef VELOCITY_ZERO_COPY_SUPPORT
if (skb_shinfo(skb)->nr_frags > 0) {
if (nfrags > 6) {
skb_copy_from_linear_data(skb, tdinfo->buf, skb->len);
tdinfo->skb_dma[0] = tdinfo->buf_dma;
- td_ptr->tdesc0.pktsize = pktlen;
+ td_ptr->tdesc0.len = len;
td_ptr->td_buf[0].pa_low = cpu_to_le32(tdinfo->skb_dma[0]);
td_ptr->td_buf[0].pa_high = 0;
- td_ptr->td_buf[0].bufsize = td_ptr->tdesc0.pktsize;
+ td_ptr->td_buf[0].size = len; /* queue is 0 anyway */
tdinfo->nskb_dma = 1;
- td_ptr->tdesc1.CMDZ = 2;
} else {
int i = 0;
tdinfo->nskb_dma = 0;
- tdinfo->skb_dma[i] = pci_map_single(vptr->pdev, skb->data, skb->len - skb->data_len, PCI_DMA_TODEVICE);
+ tdinfo->skb_dma[i] = pci_map_single(vptr->pdev, skb->data,
+ skb_headlen(skb), PCI_DMA_TODEVICE);
- td_ptr->tdesc0.pktsize = pktlen;
+ td_ptr->tdesc0.len = len;
/* FIXME: support 48bit DMA later */
td_ptr->td_buf[i].pa_low = cpu_to_le32(tdinfo->skb_dma);
td_ptr->td_buf[i].pa_high = 0;
- td_ptr->td_buf[i].bufsize = skb->len - skb->data_len;
+ td_ptr->td_buf[i].size = cpu_to_le16(skb_headlen(skb));
for (i = 0; i < nfrags; i++) {
skb_frag_t *frag = &skb_shinfo(skb)->frags[i];
- void *addr = ((void *) page_address(frag->page + frag->page_offset));
+ void *addr = (void *)page_address(frag->page) + frag->page_offset;
tdinfo->skb_dma[i + 1] = pci_map_single(vptr->pdev, addr, frag->size, PCI_DMA_TODEVICE);
td_ptr->td_buf[i + 1].pa_low = cpu_to_le32(tdinfo->skb_dma[i + 1]);
td_ptr->td_buf[i + 1].pa_high = 0;
- td_ptr->td_buf[i + 1].bufsize = frag->size;
+ td_ptr->td_buf[i + 1].size = cpu_to_le16(frag->size);
}
tdinfo->nskb_dma = i - 1;
- td_ptr->tdesc1.CMDZ = i;
}
} else
*/
tdinfo->skb = skb;
tdinfo->skb_dma[0] = pci_map_single(vptr->pdev, skb->data, pktlen, PCI_DMA_TODEVICE);
- td_ptr->tdesc0.pktsize = pktlen;
+ td_ptr->tdesc0.len = len;
td_ptr->td_buf[0].pa_low = cpu_to_le32(tdinfo->skb_dma[0]);
td_ptr->td_buf[0].pa_high = 0;
- td_ptr->td_buf[0].bufsize = td_ptr->tdesc0.pktsize;
+ td_ptr->td_buf[0].size = len;
tdinfo->nskb_dma = 1;
- td_ptr->tdesc1.CMDZ = 2;
}
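+ /* tdesc1.cmd packs TCPLS in the low bits and the buffer count (CMDZ) in the high nibble, hence the multiply by 16 */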
+ td_ptr->tdesc1.cmd = TCPLS_NORMAL + (tdinfo->nskb_dma + 1) * 16;
- if (vptr->flags & VELOCITY_FLAGS_TAGGING) {
- td_ptr->tdesc1.pqinf.VID = (vptr->options.vid & 0xfff);
- td_ptr->tdesc1.pqinf.priority = 0;
- td_ptr->tdesc1.pqinf.CFI = 0;
+ if (vptr->vlgrp && vlan_tx_tag_present(skb)) {
+ td_ptr->tdesc1.vlan = cpu_to_le16(vlan_tx_tag_get(skb));
td_ptr->tdesc1.TCR |= TCR0_VETAG;
}
if (prev < 0)
prev = vptr->options.numtx - 1;
- td_ptr->tdesc0.owner = OWNED_BY_NIC;
+ td_ptr->tdesc0.len |= OWNED_BY_NIC;
vptr->td_used[qnum]++;
vptr->td_curr[qnum] = (index + 1) % vptr->options.numtx;
netif_stop_queue(dev);
td_ptr = &(vptr->td_rings[qnum][prev]);
- td_ptr->td_buf[0].queue = 1;
+ td_ptr->td_buf[0].size |= TD_QUEUE;
mac_tx_queue_wake(vptr->mac_regs, qnum);
}
dev->trans_start = jiffies;
rx_mode = (RCR_AM | RCR_AB);
} else {
int offset = MCAM_SIZE - vptr->multicast_limit;
- mac_get_cam_mask(regs, vptr->mCAMmask, VELOCITY_MULTICAST_CAM);
+ mac_get_cam_mask(regs, vptr->mCAMmask);
for (i = 0, mclist = dev->mc_list; mclist && i < dev->mc_count; i++, mclist = mclist->next) {
- mac_set_cam(regs, i + offset, mclist->dmi_addr, VELOCITY_MULTICAST_CAM);
+ mac_set_cam(regs, i + offset, mclist->dmi_addr);
vptr->mCAMmask[(offset + i) / 8] |= 1 << ((offset + i) & 7);
}
- mac_set_cam_mask(regs, vptr->mCAMmask, VELOCITY_MULTICAST_CAM);
+ mac_set_cam_mask(regs, vptr->mCAMmask);
rx_mode = (RCR_AM | RCR_AB);
}
if (dev->mtu > 1500)
velocity_save_context(vptr, &vptr->context);
velocity_shutdown(vptr);
velocity_set_wol(vptr);
- pci_enable_wake(pdev, 3, 1);
+ pci_enable_wake(pdev, PCI_D3hot, 1);
pci_set_power_state(pdev, PCI_D3hot);
} else {
velocity_save_context(vptr, &vptr->context);
static int velocity_netdev_event(struct notifier_block *nb, unsigned long notification, void *ptr)
{
struct in_ifaddr *ifa = (struct in_ifaddr *) ptr;
+ struct net_device *dev = ifa->ifa_dev->dev;
+ struct velocity_info *vptr;
+ unsigned long flags;
- if (ifa) {
- struct net_device *dev = ifa->ifa_dev->dev;
- struct velocity_info *vptr;
- unsigned long flags;
+ if (dev_net(dev) != &init_net)
+ return NOTIFY_DONE;
- spin_lock_irqsave(&velocity_dev_list_lock, flags);
- list_for_each_entry(vptr, &velocity_dev_list, list) {
- if (vptr->dev == dev) {
- velocity_get_ip(vptr);
- break;
- }
+ spin_lock_irqsave(&velocity_dev_list_lock, flags);
+ list_for_each_entry(vptr, &velocity_dev_list, list) {
+ if (vptr->dev == dev) {
+ velocity_get_ip(vptr);
+ break;
}
- spin_unlock_irqrestore(&velocity_dev_list_lock, flags);
}
+ spin_unlock_irqrestore(&velocity_dev_list_lock, flags);
+
return NOTIFY_DONE;
}