X-Git-Url: https://err.no/cgi-bin/gitweb.cgi?a=blobdiff_plain;f=net%2Fcore%2Fdev.c;h=63d6bcddbf46d1b09388191da411d038b77adee6;hb=00e9028a95fb8a4d79f2fb695a853f33ea7d3b57;hp=e54acde839dac1ecefc360ddb5aa0f9dc12134ff;hpb=49997d75152b3d23c53b0fa730599f2f74c92c65;p=linux-2.6

diff --git a/net/core/dev.c b/net/core/dev.c
index e54acde839..63d6bcddbf 100644
--- a/net/core/dev.c
+++ b/net/core/dev.c
@@ -124,6 +124,8 @@
 #include <net/ip.h>
 #include <linux/ipv6.h>
 #include <linux/in.h>
+#include <linux/jhash.h>
+#include <linux/random.h>
 
 #include "net-sysfs.h"
 
@@ -259,7 +261,7 @@ static RAW_NOTIFIER_HEAD(netdev_chain);
 
 DEFINE_PER_CPU(struct softnet_data, softnet_data);
 
-#ifdef CONFIG_DEBUG_LOCK_ALLOC
+#ifdef CONFIG_LOCKDEP
 /*
  * register_netdevice() inits txq->_xmit_lock and sets lockdep class
  * according to dev->type
@@ -299,6 +301,7 @@ static const char *netdev_lock_name[] =
 	 "_xmit_NONE"};
 
 static struct lock_class_key netdev_xmit_lock_key[ARRAY_SIZE(netdev_lock_type)];
+static struct lock_class_key netdev_addr_lock_key[ARRAY_SIZE(netdev_lock_type)];
 
 static inline unsigned short netdev_lock_pos(unsigned short dev_type)
 {
@@ -311,8 +314,8 @@ static inline unsigned short netdev_lock_pos(unsigned short dev_type)
 	return ARRAY_SIZE(netdev_lock_type) - 1;
 }
 
-static inline void netdev_set_lockdep_class(spinlock_t *lock,
-					    unsigned short dev_type)
+static inline void netdev_set_xmit_lockdep_class(spinlock_t *lock,
+						 unsigned short dev_type)
 {
 	int i;
 
@@ -320,9 +323,22 @@ static inline void netdev_set_lockdep_class(spinlock_t *lock,
 	lockdep_set_class_and_name(lock, &netdev_xmit_lock_key[i],
 				   netdev_lock_name[i]);
 }
+
+static inline void netdev_set_addr_lockdep_class(struct net_device *dev)
+{
+	int i;
+
+	i = netdev_lock_pos(dev->type);
+	lockdep_set_class_and_name(&dev->addr_list_lock,
+				   &netdev_addr_lock_key[i],
+				   netdev_lock_name[i]);
+}
 #else
-static inline void netdev_set_lockdep_class(spinlock_t *lock,
-					    unsigned short dev_type)
+static inline void netdev_set_xmit_lockdep_class(spinlock_t *lock,
+						 unsigned short dev_type)
+{
+}
+static inline void netdev_set_addr_lockdep_class(struct net_device *dev)
 {
 }
 #endif
@@ -1325,8 +1341,6 @@ static void dev_queue_xmit_nit(struct sk_buff *skb, struct net_device *dev)
 
 void __netif_schedule(struct Qdisc *q)
 {
-	BUG_ON(q == &noop_qdisc);
-
 	if (!test_and_set_bit(__QDISC_STATE_SCHED, &q->state)) {
 		struct softnet_data *sd;
 		unsigned long flags;
@@ -1642,60 +1656,37 @@ out_kfree_skb:
 	return 0;
 }
 
-/**
- *	dev_queue_xmit - transmit a buffer
- *	@skb: buffer to transmit
- *
- *	Queue a buffer for transmission to a network device. The caller must
- *	have set the device and priority and built the buffer before calling
- *	this function. The function can be called from an interrupt.
- *
- *	A negative errno code is returned on a failure. A success does not
- *	guarantee the frame will be transmitted as it may be dropped due
- *	to congestion or traffic shaping.
- *
- * -----------------------------------------------------------------------------------
- *      I notice this method can also return errors from the queue disciplines,
- *      including NET_XMIT_DROP, which is a positive value.  So, errors can also
- *      be positive.
- *
- *      Regardless of the return value, the skb is consumed, so it is currently
- *      difficult to retry a send to this method.  (You can bump the ref count
- *      before sending to hold a reference for retry if you are careful.)
- *
- *      When calling this method, interrupts MUST be enabled.  This is because
- *      the BH enable code must have IRQs enabled so that it will not deadlock.
- *          --BLG
- */
+static u32 simple_tx_hashrnd;
+static int simple_tx_hashrnd_initialized = 0;
 
 static u16 simple_tx_hash(struct net_device *dev, struct sk_buff *skb)
 {
-	u32 *addr, *ports, hash, ihl;
+	u32 addr1, addr2, ports;
+	u32 hash, ihl;
 	u8 ip_proto;
-	int alen;
+
+	if (unlikely(!simple_tx_hashrnd_initialized)) {
+		get_random_bytes(&simple_tx_hashrnd, 4);
+		simple_tx_hashrnd_initialized = 1;
+	}
 
 	switch (skb->protocol) {
 	case __constant_htons(ETH_P_IP):
 		ip_proto = ip_hdr(skb)->protocol;
-		addr = &ip_hdr(skb)->saddr;
+		addr1 = ip_hdr(skb)->saddr;
+		addr2 = ip_hdr(skb)->daddr;
 		ihl = ip_hdr(skb)->ihl;
-		alen = 2;
 		break;
 	case __constant_htons(ETH_P_IPV6):
 		ip_proto = ipv6_hdr(skb)->nexthdr;
-		addr = &ipv6_hdr(skb)->saddr.s6_addr32[0];
+		addr1 = ipv6_hdr(skb)->saddr.s6_addr32[3];
+		addr2 = ipv6_hdr(skb)->daddr.s6_addr32[3];
 		ihl = (40 >> 2);
-		alen = 8;
 		break;
 	default:
 		return 0;
 	}
 
-	ports = (u32 *) (skb_network_header(skb) + (ihl * 4));
-
-	hash = 0;
-	while (alen--)
-		hash ^= *addr++;
 
 	switch (ip_proto) {
 	case IPPROTO_TCP:
@@ -1705,14 +1696,17 @@ static u16 simple_tx_hash(struct net_device *dev, struct sk_buff *skb)
 	case IPPROTO_AH:
 	case IPPROTO_SCTP:
 	case IPPROTO_UDPLITE:
-		hash ^= *ports;
+		ports = *((u32 *) (skb_network_header(skb) + (ihl * 4)));
 		break;
 
 	default:
+		ports = 0;
 		break;
 	}
 
-	return hash % dev->real_num_tx_queues;
+	hash = jhash_3words(addr1, addr2, ports, simple_tx_hashrnd);
+
+	return (u16) (((u64) hash * dev->real_num_tx_queues) >> 32);
 }
 
 static struct netdev_queue *dev_pick_tx(struct net_device *dev,
@@ -1729,6 +1723,31 @@ static struct netdev_queue *dev_pick_tx(struct net_device *dev,
 	return netdev_get_tx_queue(dev, queue_index);
 }
 
+/**
+ *	dev_queue_xmit - transmit a buffer
+ *	@skb: buffer to transmit
+ *
+ *	Queue a buffer for transmission to a network device. The caller must
+ *	have set the device and priority and built the buffer before calling
+ *	this function. The function can be called from an interrupt.
+ *
+ *	A negative errno code is returned on a failure. A success does not
+ *	guarantee the frame will be transmitted as it may be dropped due
+ *	to congestion or traffic shaping.
+ *
+ * -----------------------------------------------------------------------------------
+ *      I notice this method can also return errors from the queue disciplines,
+ *      including NET_XMIT_DROP, which is a positive value.  So, errors can also
+ *      be positive.
+ *
+ *      Regardless of the return value, the skb is consumed, so it is currently
+ *      difficult to retry a send to this method.  (You can bump the ref count
+ *      before sending to hold a reference for retry if you are careful.)
+ *
+ *      When calling this method, interrupts MUST be enabled.  This is because
+ *      the BH enable code must have IRQs enabled so that it will not deadlock.
+ *          --BLG
+ */
 int dev_queue_xmit(struct sk_buff *skb)
 {
 	struct net_device *dev = skb->dev;
@@ -1781,7 +1800,7 @@ gso:
 
 		spin_lock(root_lock);
 
-		rc = q->enqueue(skb, q);
+		rc = qdisc_enqueue_root(skb, q);
 		qdisc_run(q);
 
 		spin_unlock(root_lock);
@@ -1954,7 +1973,7 @@ static void net_tx_action(struct softirq_action *h)
 			struct sk_buff *skb = clist;
 			clist = clist->next;
 
-			BUG_TRAP(!atomic_read(&skb->users));
+			WARN_ON(atomic_read(&skb->users));
 			__kfree_skb(skb);
 		}
 	}
@@ -2081,9 +2100,9 @@ static int ing_filter(struct sk_buff *skb)
 	rxq = &dev->rx_queue;
 
 	q = rxq->qdisc;
-	if (q) {
+	if (q != &noop_qdisc) {
 		spin_lock(qdisc_lock(q));
-		result = q->enqueue(skb, q);
+		result = qdisc_enqueue_root(skb, q);
 		spin_unlock(qdisc_lock(q));
 	}
 
@@ -2094,7 +2113,7 @@ static inline struct sk_buff *handle_ing(struct sk_buff *skb,
 					 struct packet_type **pt_prev,
 					 int *ret, struct net_device *orig_dev)
 {
-	if (!skb->dev->rx_queue.qdisc)
+	if (skb->dev->rx_queue.qdisc == &noop_qdisc)
 		goto out;
 
 	if (*pt_prev) {
@@ -2376,7 +2395,7 @@ out:
 	 */
 	if (!cpus_empty(net_dma.channel_mask)) {
 		int chan_idx;
-		for_each_cpu_mask(chan_idx, net_dma.channel_mask) {
+		for_each_cpu_mask_nr(chan_idx, net_dma.channel_mask) {
 			struct dma_chan *chan = net_dma.channels[chan_idx];
 			if (chan)
 				dma_async_memcpy_issue_pending(chan);
@@ -3828,7 +3847,7 @@ static void rollback_registered(struct net_device *dev)
 		dev->uninit(dev);
 
 	/* Notifier chain MUST detach us from master device. */
-	BUG_TRAP(!dev->master);
+	WARN_ON(dev->master);
 
 	/* Remove entries from kobject tree */
 	netdev_unregister_kobject(dev);
@@ -3843,7 +3862,7 @@ static void __netdev_init_queue_locks_one(struct net_device *dev,
 					  void *_unused)
 {
 	spin_lock_init(&dev_queue->_xmit_lock);
-	netdev_set_lockdep_class(&dev_queue->_xmit_lock, dev->type);
+	netdev_set_xmit_lockdep_class(&dev_queue->_xmit_lock, dev->type);
 	dev_queue->xmit_lock_owner = -1;
 }
 
@@ -3888,8 +3907,7 @@ int register_netdevice(struct net_device *dev)
 	net = dev_net(dev);
 
 	spin_lock_init(&dev->addr_list_lock);
-	spin_lock_init(&dev->qdisc_list_lock);
-	INIT_LIST_HEAD(&dev->qdisc_list);
+	netdev_set_addr_lockdep_class(dev);
 	netdev_init_queue_locks(dev);
 
 	dev->iflink = -1;
@@ -4151,9 +4169,9 @@ void netdev_run_todo(void)
 
 		/* paranoia */
 		BUG_ON(atomic_read(&dev->refcnt));
-		BUG_TRAP(!dev->ip_ptr);
-		BUG_TRAP(!dev->ip6_ptr);
-		BUG_TRAP(!dev->dn_ptr);
+		WARN_ON(dev->ip_ptr);
+		WARN_ON(dev->ip6_ptr);
+		WARN_ON(dev->dn_ptr);
 
 		if (dev->destructor)
 			dev->destructor(dev);
@@ -4200,7 +4218,7 @@ struct net_device *alloc_netdev_mq(int sizeof_priv, const char *name,
 {
 	struct netdev_queue *tx;
 	struct net_device *dev;
-	int alloc_size;
+	size_t alloc_size;
 	void *p;
 
 	BUG_ON(strlen(name) >= sizeof(dev->name));
@@ -4220,7 +4238,7 @@ struct net_device *alloc_netdev_mq(int sizeof_priv, const char *name,
 		return NULL;
 	}
 
-	tx = kzalloc(sizeof(struct netdev_queue) * queue_count, GFP_KERNEL);
+	tx = kcalloc(queue_count, sizeof(struct netdev_queue), GFP_KERNEL);
 	if (!tx) {
 		printk(KERN_ERR "alloc_netdev: Unable to allocate "
 		       "tx qdiscs.\n");
@@ -4512,7 +4530,7 @@ static void net_dma_rebalance(struct net_dma *net_dma)
 	i = 0;
 	cpu = first_cpu(cpu_online_map);
 
-	for_each_cpu_mask(chan_idx, net_dma->channel_mask) {
+	for_each_cpu_mask_nr(chan_idx, net_dma->channel_mask) {
 		chan = net_dma->channels[chan_idx];
 
 		n = ((num_online_cpus() / cpus_weight(net_dma->channel_mask))
@@ -4679,6 +4697,26 @@ err_name:
 	return -ENOMEM;
 }
 
+char *netdev_drivername(struct net_device *dev, char *buffer, int len)
+{
+	struct device_driver *driver;
+	struct device *parent;
+
+	if (len <= 0 || !buffer)
+		return buffer;
+	buffer[0] = 0;
+
+	parent = dev->dev.parent;
+
+	if (!parent)
+		return buffer;
+
+	driver = parent->driver;
+	if (driver && driver->name)
+		strlcpy(buffer, driver->name, len);
+	return buffer;
+}
+
 static void __net_exit netdev_exit(struct net *net)
 {
 	kfree(net->dev_name_head);
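
A note on the simple_tx_hash() change above: the old code XOR-folded the raw
addresses and ports and reduced the result with
"hash % dev->real_num_tx_queues".  The new code mixes the source address,
the destination address (for IPv6, the low 32 bits of each) and the first
four bytes of the transport header (the TCP/UDP port pair) through
jhash_3words() with a one-time random seed, so the flow-to-queue mapping
cannot be predicted and deliberately collided from off-box, and then maps the
32-bit hash onto a queue with "((u64) hash * real_num_tx_queues) >> 32".
That multiply-and-shift form avoids an integer division on the transmit fast
path and is driven by the high-order bits of the hash rather than only the
low-order ones.  The sketch below is a minimal userspace illustration of the
mapping step only, not kernel code; the input hash values are hypothetical
stand-ins for jhash_3words() output.

	#include <stdint.h>
	#include <stdio.h>

	/* Scale a 32-bit hash into [0, num_queues) by multiply-and-shift,
	 * mirroring "((u64) hash * dev->real_num_tx_queues) >> 32". */
	static uint16_t hash_to_queue(uint32_t hash, uint16_t num_queues)
	{
		return (uint16_t)(((uint64_t)hash * num_queues) >> 32);
	}

	int main(void)
	{
		/* Hypothetical hash values standing in for jhash_3words(). */
		uint32_t hashes[] = { 0x00000000u, 0x40000000u,
				      0x80000000u, 0xffffffffu };
		unsigned int i;

		for (i = 0; i < sizeof(hashes) / sizeof(hashes[0]); i++)
			printf("hash 0x%08x -> queue %u\n", hashes[i],
			       hash_to_queue(hashes[i], 8));
		return 0;
	}

With 8 queues the four hashes above land on queues 0, 2, 4 and 7: the mapping
is monotonic in the hash value and spreads the full 32-bit range evenly
across the queues.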
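The netdev_drivername() helper added at the end of the diff copies the name
of the driver bound to the device's parent struct device into a
caller-supplied buffer and always returns that buffer, so it can be used
directly as a printk() argument.  A sketch of a caller, modeled on the
transmit-watchdog timeout message in net/sched/sch_generic.c (outside this
diff); "dev" is assumed to be a valid struct net_device pointer:

	char drivername[64];

	printk(KERN_INFO "NETDEV WATCHDOG: %s (%s): transmit timed out\n",
	       dev->name, netdev_drivername(dev, drivername, 64));

Returning the buffer even on failure (len <= 0, no parent device, no bound
driver) means the caller never has to test for an error before printing.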