X-Git-Url: https://err.no/cgi-bin/gitweb.cgi?a=blobdiff_plain;f=drivers%2Fnet%2Fvia-velocity.c;h=bcbf2fa9b94abd4a700ba843e0148d18d7c58ef4;hb=c03571a3e22b821e5be7bda7b166c4554770f489;hp=1525e8a8984477a7f361dc82645c2394e356a575;hpb=577f99c1d08cf9cbdafd4e858dd13ff04d855090;p=linux-2.6

diff --git a/drivers/net/via-velocity.c b/drivers/net/via-velocity.c
index 1525e8a898..bcbf2fa9b9 100644
--- a/drivers/net/via-velocity.c
+++ b/drivers/net/via-velocity.c
@@ -605,7 +605,6 @@ static void __devinit velocity_get_options(struct velocity_opt *opts, int index,
 static void velocity_init_cam_filter(struct velocity_info *vptr)
 {
 	struct mac_regs __iomem * regs = vptr->mac_regs;
-	unsigned short vid;
 
 	/* Turn on MCFG_PQEN, turn off MCFG_RTGOPT */
 	WORD_REG_BITS_SET(MCFG_PQEN, MCFG_RTGOPT, &regs->MCFG);
@@ -617,29 +616,33 @@ static void velocity_init_cam_filter(struct velocity_info *vptr)
 	mac_set_vlan_cam_mask(regs, vptr->vCAMmask);
 	mac_set_cam_mask(regs, vptr->mCAMmask);
 
-	/* Enable first VCAM */
+	/* Enable VCAMs */
 	if (vptr->vlgrp) {
-		for (vid = 0; vid < VLAN_VID_MASK; vid++) {
-			if (vlan_group_get_device(vptr->vlgrp, vid)) {
-				/* If Tagging option is enabled and
-				   VLAN ID is not zero, then
-				   turn on MCFG_RTGOPT also */
-				if (vid != 0)
-					WORD_REG_BITS_ON(MCFG_RTGOPT, &regs->MCFG);
+		unsigned int vid, i = 0;
+
+		if (!vlan_group_get_device(vptr->vlgrp, 0))
+			WORD_REG_BITS_ON(MCFG_RTGOPT, &regs->MCFG);
 
-				mac_set_vlan_cam(regs, 0, (u8 *) &vid);
+		for (vid = 1; (vid < VLAN_VID_MASK); vid++) {
+			if (vlan_group_get_device(vptr->vlgrp, vid)) {
+				mac_set_vlan_cam(regs, i, (u8 *) &vid);
+				vptr->vCAMmask[i / 8] |= 0x1 << (i % 8);
+				if (++i >= VCAM_SIZE)
+					break;
 			}
 		}
-		vptr->vCAMmask[0] |= 1;
 		mac_set_vlan_cam_mask(regs, vptr->vCAMmask);
-	} else {
-		u16 temp = 0;
-		mac_set_vlan_cam(regs, 0, (u8 *) &temp);
-		temp = 1;
-		mac_set_vlan_cam_mask(regs, (u8 *) &temp);
 	}
 }
 
+static void velocity_vlan_rx_register(struct net_device *dev,
+				      struct vlan_group *grp)
+{
+	struct velocity_info *vptr = netdev_priv(dev);
+
+	vptr->vlgrp = grp;
+}
+
 static void velocity_vlan_rx_add_vid(struct net_device *dev, unsigned short vid)
 {
 	struct velocity_info *vptr = netdev_priv(dev);
@@ -959,11 +962,13 @@ static int __devinit velocity_found1(struct pci_dev *pdev, const struct pci_devi
 
 	dev->vlan_rx_add_vid = velocity_vlan_rx_add_vid;
 	dev->vlan_rx_kill_vid = velocity_vlan_rx_kill_vid;
+	dev->vlan_rx_register = velocity_vlan_rx_register;
 
 #ifdef VELOCITY_ZERO_COPY_SUPPORT
 	dev->features |= NETIF_F_SG;
 #endif
-	dev->features |= NETIF_F_HW_VLAN_TX | NETIF_F_HW_VLAN_FILTER;
+	dev->features |= NETIF_F_HW_VLAN_TX | NETIF_F_HW_VLAN_FILTER |
+		NETIF_F_HW_VLAN_RX;
 
 	if (vptr->flags & VELOCITY_FLAGS_TX_CSUM)
 		dev->features |= NETIF_F_IP_CSUM;
@@ -1490,24 +1495,18 @@ static inline void velocity_rx_csum(struct rx_desc *rd, struct sk_buff *skb)
  *	enough. This function returns a negative value if the received
  *	packet is too big or if memory is exhausted.
  */
-static inline int velocity_rx_copy(struct sk_buff **rx_skb, int pkt_size,
-				   struct velocity_info *vptr)
+static int velocity_rx_copy(struct sk_buff **rx_skb, int pkt_size,
+			    struct velocity_info *vptr)
 {
 	int ret = -1;
-
 	if (pkt_size < rx_copybreak) {
 		struct sk_buff *new_skb;
 
-		new_skb = dev_alloc_skb(pkt_size + 2);
+		new_skb = netdev_alloc_skb(vptr->dev, pkt_size + 2);
 		if (new_skb) {
-			new_skb->dev = vptr->dev;
 			new_skb->ip_summed = rx_skb[0]->ip_summed;
-
-			if (vptr->flags & VELOCITY_FLAGS_IP_ALIGN)
-				skb_reserve(new_skb, 2);
-
-			skb_copy_from_linear_data(rx_skb[0], new_skb->data,
-						  pkt_size);
+			skb_reserve(new_skb, 2);
+			skb_copy_from_linear_data(*rx_skb, new_skb->data, pkt_size);
 			*rx_skb = new_skb;
 			ret = 0;
 		}
@@ -1528,12 +1527,8 @@ static inline int velocity_rx_copy(struct sk_buff **rx_skb, int pkt_size,
 static inline void velocity_iph_realign(struct velocity_info *vptr,
 					struct sk_buff *skb, int pkt_size)
 {
-	/* FIXME - memmove ? */
 	if (vptr->flags & VELOCITY_FLAGS_IP_ALIGN) {
-		int i;
-
-		for (i = pkt_size; i >= 0; i--)
-			*(skb->data + i + 2) = *(skb->data + i);
+		memmove(skb->data + 2, skb->data, pkt_size);
 		skb_reserve(skb, 2);
 	}
 }
@@ -1597,8 +1592,13 @@ static int velocity_receive_frame(struct velocity_info *vptr, int idx)
 	skb_put(skb, pkt_len - 4);
 	skb->protocol = eth_type_trans(skb, vptr->dev);
 
+	if (vptr->vlgrp && (rd->rdesc0.RSR & RSR_DETAG)) {
+		vlan_hwaccel_rx(skb, vptr->vlgrp,
+				swab16(le16_to_cpu(rd->rdesc1.PQTAG)));
+	} else
+		netif_rx(skb);
+
 	stats->rx_bytes += pkt_len;
-	netif_rx(skb);
 
 	return 0;
 }
@@ -1619,7 +1619,7 @@ static int velocity_alloc_rx_buf(struct velocity_info *vptr, int idx)
 	struct rx_desc *rd = &(vptr->rd_ring[idx]);
 	struct velocity_rd_info *rd_info = &(vptr->rd_info[idx]);
 
-	rd_info->skb = dev_alloc_skb(vptr->rx_buf_sz + 64);
+	rd_info->skb = netdev_alloc_skb(vptr->dev, vptr->rx_buf_sz + 64);
 	if (rd_info->skb == NULL)
 		return -ENOMEM;
 
@@ -1628,7 +1628,6 @@ static int velocity_alloc_rx_buf(struct velocity_info *vptr, int idx)
 	 *	64byte alignment.
 	 */
 	skb_reserve(rd_info->skb, (unsigned long) rd_info->skb->data & 63);
-	rd_info->skb->dev = vptr->dev;
 	rd_info->skb_dma = pci_map_single(vptr->pdev, rd_info->skb->data, vptr->rx_buf_sz, PCI_DMA_FROMDEVICE);
 
 	/*
@@ -3464,7 +3463,7 @@ static int velocity_netdev_event(struct notifier_block *nb, unsigned long notifi
 	struct velocity_info *vptr;
 	unsigned long flags;
 
-	if (dev->nd_net != &init_net)
+	if (dev_net(dev) != &init_net)
 		return NOTIFY_DONE;
 
 	spin_lock_irqsave(&velocity_dev_list_lock, flags);