err.no Git - linux-2.6/blobdiff - net/core/dev.c
Pull bugzilla-7880 into release branch
index 36e9bf8ec4af24facfb95876f19ab55d8d1f57f3..ee4035571c21b55e119bbf4c6c2f6f1a4ee085ab 100644 (file)
@@ -98,6 +98,7 @@
 #include <linux/seq_file.h>
 #include <linux/stat.h>
 #include <linux/if_bridge.h>
+#include <linux/if_macvlan.h>
 #include <net/dst.h>
 #include <net/pkt_sched.h>
 #include <net/checksum.h>
@@ -151,9 +152,22 @@ static struct list_head ptype_base[16] __read_mostly;      /* 16 way hashed list */
 static struct list_head ptype_all __read_mostly;       /* Taps */
 
 #ifdef CONFIG_NET_DMA
-static struct dma_client *net_dma_client;
-static unsigned int net_dma_count;
-static spinlock_t net_dma_event_lock;
+struct net_dma {
+       struct dma_client client;
+       spinlock_t lock;
+       cpumask_t channel_mask;
+       struct dma_chan *channels[NR_CPUS];
+};
+
+static enum dma_state_client
+netdev_dma_event(struct dma_client *client, struct dma_chan *chan,
+       enum dma_state state);
+
+static struct net_dma net_dma = {
+       .client = {
+               .event_callback = netdev_dma_event,
+       },
+};
 #endif
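The channels[] slots declared above are parcelled out to CPUs by net_dma_rebalance() later in this patch, so a receive-path consumer only has to look at its own CPU's softnet_data. A minimal sketch of that lookup (the helper name is illustrative; the net_dma field is the one this patch assigns per CPU):

static inline struct dma_chan *net_dma_current_chan(void)
{
	/* net_dma is set per CPU by net_dma_rebalance() */
	return rcu_dereference(__get_cpu_var(softnet_data).net_dma);
}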
 
 /*
@@ -1429,7 +1443,9 @@ gso:
                        skb->next = nskb;
                        return rc;
                }
-               if (unlikely(netif_queue_stopped(dev) && skb->next))
+               if (unlikely((netif_queue_stopped(dev) ||
+                            netif_subqueue_stopped(dev, skb->queue_mapping)) &&
+                            skb->next))
                        return NETDEV_TX_BUSY;
        } while (skb->next);
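The subqueue state tested above is driven from the driver's own TX path. A rough sketch, assuming the netif_{stop,wake}_subqueue helpers that accompany the multiqueue series (the ring-accounting calls are hypothetical stand-ins):

	/* driver xmit path: close just this ring when it fills up */
	if (foo_tx_ring_full(priv, skb->queue_mapping))
		netif_stop_subqueue(dev, skb->queue_mapping);

	/* TX-completion path: reopen the ring once descriptors are reclaimed */
	netif_wake_subqueue(dev, ring_index);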
 
@@ -1509,11 +1525,11 @@ int dev_queue_xmit(struct sk_buff *skb)
                skb_set_transport_header(skb, skb->csum_start -
                                              skb_headroom(skb));
 
-               if (!(dev->features & NETIF_F_GEN_CSUM)
-                   || ((dev->features & NETIF_F_IP_CSUM)
-                       && skb->protocol == htons(ETH_P_IP))
-                   || ((dev->features & NETIF_F_IPV6_CSUM)
-                       && skb->protocol == htons(ETH_P_IPV6)))
+               if (!(dev->features & NETIF_F_GEN_CSUM) &&
+                   !((dev->features & NETIF_F_IP_CSUM) &&
+                     skb->protocol == htons(ETH_P_IP)) &&
+                   !((dev->features & NETIF_F_IPV6_CSUM) &&
+                     skb->protocol == htons(ETH_P_IPV6)))
                        if (skb_checksum_help(skb))
                                goto out_kfree_skb;
        }
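The rewritten test is simply the De Morgan negation of "this device can checksum this packet". Spelled out as a hypothetical helper for clarity (not part of the patch):

static bool dev_can_checksum_skb(const struct net_device *dev,
				 const struct sk_buff *skb)
{
	return (dev->features & NETIF_F_GEN_CSUM) ||
	       ((dev->features & NETIF_F_IP_CSUM) &&
		skb->protocol == htons(ETH_P_IP)) ||
	       ((dev->features & NETIF_F_IPV6_CSUM) &&
		skb->protocol == htons(ETH_P_IPV6));
}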
@@ -1547,6 +1563,8 @@ gso:
                spin_lock(&dev->queue_lock);
                q = dev->qdisc;
                if (q->enqueue) {
+                       /* reset queue_mapping to zero */
+                       skb->queue_mapping = 0;
                        rc = q->enqueue(skb, q);
                        qdisc_run(dev);
                        spin_unlock(&dev->queue_lock);
@@ -1576,7 +1594,8 @@ gso:
 
                        HARD_TX_LOCK(dev, cpu);
 
-                       if (!netif_queue_stopped(dev)) {
+                       if (!netif_queue_stopped(dev) &&
+                           !netif_subqueue_stopped(dev, skb->queue_mapping)) {
                                rc = 0;
                                if (!dev_hard_start_xmit(skb, dev)) {
                                        HARD_TX_UNLOCK(dev);
@@ -1795,6 +1814,28 @@ static inline struct sk_buff *handle_bridge(struct sk_buff *skb,
 #define handle_bridge(skb, pt_prev, ret, orig_dev)     (skb)
 #endif
 
+#if defined(CONFIG_MACVLAN) || defined(CONFIG_MACVLAN_MODULE)
+struct sk_buff *(*macvlan_handle_frame_hook)(struct sk_buff *skb) __read_mostly;
+EXPORT_SYMBOL_GPL(macvlan_handle_frame_hook);
+
+static inline struct sk_buff *handle_macvlan(struct sk_buff *skb,
+                                            struct packet_type **pt_prev,
+                                            int *ret,
+                                            struct net_device *orig_dev)
+{
+       if (skb->dev->macvlan_port == NULL)
+               return skb;
+
+       if (*pt_prev) {
+               *ret = deliver_skb(skb, *pt_prev, orig_dev);
+               *pt_prev = NULL;
+       }
+       return macvlan_handle_frame_hook(skb);
+}
+#else
+#define handle_macvlan(skb, pt_prev, ret, orig_dev)    (skb)
+#endif
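The hook pointer exported above is expected to be filled in by the macvlan module itself; a sketch of that module-side wiring, assuming an illustrative receive handler name:

static int __init macvlan_init_module(void)
{
	/* macvlan_handle_frame() is the module's rx handler (illustrative) */
	macvlan_handle_frame_hook = macvlan_handle_frame;
	return 0;
}

static void __exit macvlan_cleanup_module(void)
{
	macvlan_handle_frame_hook = NULL;
}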
+
 #ifdef CONFIG_NET_CLS_ACT
 /* TODO: Maybe we should just force sch_ingress to be compiled in
  * when CONFIG_NET_CLS_ACT is? otherwise some useless instructions
@@ -1900,6 +1941,9 @@ ncls:
 #endif
 
        skb = handle_bridge(skb, &pt_prev, &ret, orig_dev);
+       if (!skb)
+               goto out;
+       skb = handle_macvlan(skb, &pt_prev, &ret, orig_dev);
        if (!skb)
                goto out;
 
@@ -2017,12 +2061,13 @@ out:
         * There may not be any more sk_buffs coming right now, so push
         * any pending DMA copies to hardware
         */
-       if (net_dma_client) {
-               struct dma_chan *chan;
-               rcu_read_lock();
-               list_for_each_entry_rcu(chan, &net_dma_client->channels, client_node)
-                       dma_async_memcpy_issue_pending(chan);
-               rcu_read_unlock();
+       if (!cpus_empty(net_dma.channel_mask)) {
+               int chan_idx;
+               for_each_cpu_mask(chan_idx, net_dma.channel_mask) {
+                       struct dma_chan *chan = net_dma.channels[chan_idx];
+                       if (chan)
+                               dma_async_memcpy_issue_pending(chan);
+               }
        }
 #endif
        return;
@@ -2502,6 +2547,8 @@ static void __dev_set_promiscuity(struct net_device *dev, int inc)
 {
        unsigned short old_flags = dev->flags;
 
+       ASSERT_RTNL();
+
        if ((dev->promiscuity += inc) == 0)
                dev->flags &= ~IFF_PROMISC;
        else
@@ -2516,6 +2563,9 @@ static void __dev_set_promiscuity(struct net_device *dev, int inc)
                        dev->name, (dev->flags & IFF_PROMISC),
                        (old_flags & IFF_PROMISC),
                        audit_get_loginuid(current->audit_context));
+
+               if (dev->change_rx_flags)
+                       dev->change_rx_flags(dev, IFF_PROMISC);
        }
 }
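On the driver side, the new change_rx_flags callback is passed the flag that toggled and reads the current value from dev->flags. A sketch with a hypothetical driver (all foo_* names are placeholders):

static void foo_change_rx_flags(struct net_device *dev, int flags)
{
	struct foo_priv *priv = netdev_priv(dev);

	/* 'flags' names the bit that changed; dev->flags holds the new state */
	if (flags & IFF_PROMISC)
		foo_hw_set_promisc(priv, dev->flags & IFF_PROMISC);
	if (flags & IFF_ALLMULTI)
		foo_hw_set_allmulti(priv, dev->flags & IFF_ALLMULTI);
}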
 
@@ -2554,11 +2604,16 @@ void dev_set_allmulti(struct net_device *dev, int inc)
 {
        unsigned short old_flags = dev->flags;
 
+       ASSERT_RTNL();
+
        dev->flags |= IFF_ALLMULTI;
        if ((dev->allmulti += inc) == 0)
                dev->flags &= ~IFF_ALLMULTI;
-       if (dev->flags ^ old_flags)
+       if (dev->flags ^ old_flags) {
+               if (dev->change_rx_flags)
+                       dev->change_rx_flags(dev, IFF_ALLMULTI);
                dev_set_rx_mode(dev);
+       }
 }
 
 /*
@@ -2574,7 +2629,7 @@ void __dev_set_rx_mode(struct net_device *dev)
                return;
 
        if (!netif_device_present(dev))
-               return;
+               return;
 
        if (dev->set_rx_mode)
                dev->set_rx_mode(dev);
@@ -2602,8 +2657,8 @@ void dev_set_rx_mode(struct net_device *dev)
        netif_tx_unlock_bh(dev);
 }
 
-int __dev_addr_delete(struct dev_addr_list **list, void *addr, int alen,
-                     int glbl)
+int __dev_addr_delete(struct dev_addr_list **list, int *count,
+                     void *addr, int alen, int glbl)
 {
        struct dev_addr_list *da;
 
@@ -2621,13 +2676,15 @@ int __dev_addr_delete(struct dev_addr_list **list, void *addr, int alen,
 
                        *list = da->next;
                        kfree(da);
+                       (*count)--;
                        return 0;
                }
        }
        return -ENOENT;
 }
 
-int __dev_addr_add(struct dev_addr_list **list, void *addr, int alen, int glbl)
+int __dev_addr_add(struct dev_addr_list **list, int *count,
+                  void *addr, int alen, int glbl)
 {
        struct dev_addr_list *da;
 
@@ -2654,23 +2711,10 @@ int __dev_addr_add(struct dev_addr_list **list, void *addr, int alen, int glbl)
        da->da_gusers = glbl ? 1 : 0;
        da->next = *list;
        *list = da;
+       (*count)++;
        return 0;
 }
 
-void __dev_addr_discard(struct dev_addr_list **list)
-{
-       struct dev_addr_list *tmp;
-
-       while (*list != NULL) {
-               tmp = *list;
-               *list = tmp->next;
-               if (tmp->da_users > tmp->da_gusers)
-                       printk("__dev_addr_discard: address leakage! "
-                              "da_users=%d\n", tmp->da_users);
-               kfree(tmp);
-       }
-}
-
 /**
  *     dev_unicast_delete      - Release secondary unicast address.
  *     @dev: device
@@ -2687,11 +2731,9 @@ int dev_unicast_delete(struct net_device *dev, void *addr, int alen)
        ASSERT_RTNL();
 
        netif_tx_lock_bh(dev);
-       err = __dev_addr_delete(&dev->uc_list, addr, alen, 0);
-       if (!err) {
-               dev->uc_count--;
+       err = __dev_addr_delete(&dev->uc_list, &dev->uc_count, addr, alen, 0);
+       if (!err)
                __dev_set_rx_mode(dev);
-       }
        netif_tx_unlock_bh(dev);
        return err;
 }
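For context, a stacked driver (macvlan being the obvious consumer in this series) would pair these calls roughly as follows; lowerdev and the surrounding error handling are illustrative only:

	/* setup: expose the upper device's MAC on the underlying device */
	err = dev_unicast_add(lowerdev, dev->dev_addr, ETH_ALEN);
	if (err < 0)
		return err;

	/* teardown: drop it again */
	dev_unicast_delete(lowerdev, dev->dev_addr, ETH_ALEN);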
@@ -2713,21 +2755,38 @@ int dev_unicast_add(struct net_device *dev, void *addr, int alen)
        ASSERT_RTNL();
 
        netif_tx_lock_bh(dev);
-       err = __dev_addr_add(&dev->uc_list, addr, alen, 0);
-       if (!err) {
-               dev->uc_count++;
+       err = __dev_addr_add(&dev->uc_list, &dev->uc_count, addr, alen, 0);
+       if (!err)
                __dev_set_rx_mode(dev);
-       }
        netif_tx_unlock_bh(dev);
        return err;
 }
 EXPORT_SYMBOL(dev_unicast_add);
 
-static void dev_unicast_discard(struct net_device *dev)
+static void __dev_addr_discard(struct dev_addr_list **list)
+{
+       struct dev_addr_list *tmp;
+
+       while (*list != NULL) {
+               tmp = *list;
+               *list = tmp->next;
+               if (tmp->da_users > tmp->da_gusers)
+                       printk("__dev_addr_discard: address leakage! "
+                              "da_users=%d\n", tmp->da_users);
+               kfree(tmp);
+       }
+}
+
+static void dev_addr_discard(struct net_device *dev)
 {
        netif_tx_lock_bh(dev);
+
        __dev_addr_discard(&dev->uc_list);
        dev->uc_count = 0;
+
+       __dev_addr_discard(&dev->mc_list);
+       dev->mc_count = 0;
+
        netif_tx_unlock_bh(dev);
 }
 
@@ -2760,6 +2819,8 @@ int dev_change_flags(struct net_device *dev, unsigned flags)
        int ret, changes;
        int old_flags = dev->flags;
 
+       ASSERT_RTNL();
+
        /*
         *      Set the flags on our device.
         */
@@ -2774,6 +2835,9 @@ int dev_change_flags(struct net_device *dev, unsigned flags)
         *      Load in the correct multicast list now the flags have changed.
         */
 
+       if (dev->change_rx_flags && (dev->flags ^ flags) & IFF_MULTICAST)
+               dev->change_rx_flags(dev, IFF_MULTICAST);
+
        dev_set_rx_mode(dev);
 
        /*
@@ -3539,16 +3603,18 @@ static struct net_device_stats *internal_stats(struct net_device *dev)
 }
 
 /**
- *     alloc_netdev - allocate network device
+ *     alloc_netdev_mq - allocate network device
  *     @sizeof_priv:   size of private data to allocate space for
  *     @name:          device name format string
  *     @setup:         callback to initialize device
+ *     @queue_count:   the number of subqueues to allocate
  *
  *     Allocates a struct net_device with private data area for driver use
- *     and performs basic initialization.
+ *     and performs basic initialization.  Also allocates subqueue structs
+ *     for each queue on the device at the end of the netdevice.
  */
-struct net_device *alloc_netdev(int sizeof_priv, const char *name,
-               void (*setup)(struct net_device *))
+struct net_device *alloc_netdev_mq(int sizeof_priv, const char *name,
+               void (*setup)(struct net_device *), unsigned int queue_count)
 {
        void *p;
        struct net_device *dev;
@@ -3557,7 +3623,9 @@ struct net_device *alloc_netdev(int sizeof_priv, const char *name,
        BUG_ON(strlen(name) >= sizeof(dev->name));
 
        /* ensure 32-byte alignment of both the device and private area */
-       alloc_size = (sizeof(*dev) + NETDEV_ALIGN_CONST) & ~NETDEV_ALIGN_CONST;
+       alloc_size = (sizeof(*dev) + NETDEV_ALIGN_CONST +
+                    (sizeof(struct net_device_subqueue) * (queue_count - 1))) &
+                    ~NETDEV_ALIGN_CONST;
        alloc_size += sizeof_priv + NETDEV_ALIGN_CONST;
 
        p = kzalloc(alloc_size, GFP_KERNEL);
@@ -3570,15 +3638,22 @@ struct net_device *alloc_netdev(int sizeof_priv, const char *name,
                (((long)p + NETDEV_ALIGN_CONST) & ~NETDEV_ALIGN_CONST);
        dev->padded = (char *)dev - (char *)p;
 
-       if (sizeof_priv)
-               dev->priv = netdev_priv(dev);
+       if (sizeof_priv) {
+               dev->priv = ((char *)dev +
+                            ((sizeof(struct net_device) +
+                              (sizeof(struct net_device_subqueue) *
+                               (queue_count - 1)) + NETDEV_ALIGN_CONST)
+                             & ~NETDEV_ALIGN_CONST));
+       }
+
+       dev->egress_subqueue_count = queue_count;
 
        dev->get_stats = internal_stats;
        setup(dev);
        strcpy(dev->name, name);
        return dev;
 }
-EXPORT_SYMBOL(alloc_netdev);
+EXPORT_SYMBOL(alloc_netdev_mq);
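A driver wanting, say, four hardware TX queues would now allocate its netdev as below (foo_priv and foo_setup are placeholders); single-queue callers presumably keep using alloc_netdev() as a queue_count == 1 wrapper:

	dev = alloc_netdev_mq(sizeof(struct foo_priv), "foo%d",
			      foo_setup, 4);
	if (!dev)
		return -ENOMEM;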
 
 /**
  *     free_netdev - free network device
@@ -3669,8 +3744,7 @@ void unregister_netdevice(struct net_device *dev)
        /*
         *      Flush the unicast and multicast chains
         */
-       dev_unicast_discard(dev);
-       dev_mc_discard(dev);
+       dev_addr_discard(dev);
 
        if (dev->uninit)
                dev->uninit(dev);
@@ -3760,12 +3834,13 @@ static int dev_cpu_callback(struct notifier_block *nfb,
  * This is called when the number of channels allocated to the net_dma_client
  * changes.  The net_dma_client tries to have one DMA channel per CPU.
  */
-static void net_dma_rebalance(void)
+
+static void net_dma_rebalance(struct net_dma *net_dma)
 {
-       unsigned int cpu, i, n;
+       unsigned int cpu, i, n, chan_idx;
        struct dma_chan *chan;
 
-       if (net_dma_count == 0) {
+       if (cpus_empty(net_dma->channel_mask)) {
                for_each_online_cpu(cpu)
                        rcu_assign_pointer(per_cpu(softnet_data, cpu).net_dma, NULL);
                return;
@@ -3774,10 +3849,12 @@ static void net_dma_rebalance(void)
        i = 0;
        cpu = first_cpu(cpu_online_map);
 
-       rcu_read_lock();
-       list_for_each_entry(chan, &net_dma_client->channels, client_node) {
-               n = ((num_online_cpus() / net_dma_count)
-                  + (i < (num_online_cpus() % net_dma_count) ? 1 : 0));
+       for_each_cpu_mask(chan_idx, net_dma->channel_mask) {
+               chan = net_dma->channels[chan_idx];
+
+               n = ((num_online_cpus() / cpus_weight(net_dma->channel_mask))
+                  + (i < (num_online_cpus() %
+                       cpus_weight(net_dma->channel_mask)) ? 1 : 0));
 
                while(n) {
                        per_cpu(softnet_data, cpu).net_dma = chan;
@@ -3786,7 +3863,6 @@ static void net_dma_rebalance(void)
                }
                i++;
        }
-       rcu_read_unlock();
 }
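Worked example of the split above: with 8 online CPUs and 3 channels in the mask, cpus_weight() is 3, so the first two channels (i = 0 and 1, both less than 8 % 3 = 2) each serve 8/3 + 1 = 3 CPUs and the third serves 2, covering all 8.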
 
 /**
@@ -3795,23 +3871,53 @@ static void net_dma_rebalance(void)
  * @chan: DMA channel for the event
  * @event: event type
  */
-static void netdev_dma_event(struct dma_client *client, struct dma_chan *chan,
-       enum dma_event event)
-{
-       spin_lock(&net_dma_event_lock);
-       switch (event) {
-       case DMA_RESOURCE_ADDED:
-               net_dma_count++;
-               net_dma_rebalance();
+static enum dma_state_client
+netdev_dma_event(struct dma_client *client, struct dma_chan *chan,
+       enum dma_state state)
+{
+       int i, found = 0, pos = -1;
+       struct net_dma *net_dma =
+               container_of(client, struct net_dma, client);
+       enum dma_state_client ack = DMA_DUP; /* default: take no action */
+
+       spin_lock(&net_dma->lock);
+       switch (state) {
+       case DMA_RESOURCE_AVAILABLE:
+               for (i = 0; i < NR_CPUS; i++)
+                       if (net_dma->channels[i] == chan) {
+                               found = 1;
+                               break;
+                       } else if (net_dma->channels[i] == NULL && pos < 0)
+                               pos = i;
+
+               if (!found && pos >= 0) {
+                       ack = DMA_ACK;
+                       net_dma->channels[pos] = chan;
+                       cpu_set(pos, net_dma->channel_mask);
+                       net_dma_rebalance(net_dma);
+               }
                break;
        case DMA_RESOURCE_REMOVED:
-               net_dma_count--;
-               net_dma_rebalance();
+               for (i = 0; i < NR_CPUS; i++)
+                       if (net_dma->channels[i] == chan) {
+                               found = 1;
+                               pos = i;
+                               break;
+                       }
+
+               if (found) {
+                       ack = DMA_ACK;
+                       cpu_clear(pos, net_dma->channel_mask);
+                       net_dma->channels[i] = NULL;
+                       net_dma_rebalance(net_dma);
+               }
                break;
        default:
                break;
        }
-       spin_unlock(&net_dma_event_lock);
+       spin_unlock(&net_dma->lock);
+
+       return ack;
 }
 
 /**
@@ -3819,12 +3925,10 @@ static void netdev_dma_event(struct dma_client *client, struct dma_chan *chan,
  */
 static int __init netdev_dma_register(void)
 {
-       spin_lock_init(&net_dma_event_lock);
-       net_dma_client = dma_async_client_register(netdev_dma_event);
-       if (net_dma_client == NULL)
-               return -ENOMEM;
-
-       dma_async_client_chan_request(net_dma_client, num_online_cpus());
+       spin_lock_init(&net_dma.lock);
+       dma_cap_set(DMA_MEMCPY, net_dma.client.cap_mask);
+       dma_async_client_register(&net_dma.client);
+       dma_async_client_chan_request(&net_dma.client);
        return 0;
 }