X-Git-Url: https://err.no/cgi-bin/gitweb.cgi?a=blobdiff_plain;f=net%2Fcore%2Fdev.c;h=01993ad74e760235267e0ade106bdcac16ae6890;hb=0016fe9d8a1ca99a4642735f41a7753d7c1f7e2b;hp=cbc34c0db3762dd0962ff250078e22679e8d4820;hpb=8725f25acc656c1522d48a6746055099efdaca4c;p=linux-2.6 diff --git a/net/core/dev.c b/net/core/dev.c index cbc34c0db3..01993ad74e 100644 --- a/net/core/dev.c +++ b/net/core/dev.c @@ -261,7 +261,7 @@ static RAW_NOTIFIER_HEAD(netdev_chain); DEFINE_PER_CPU(struct softnet_data, softnet_data); -#ifdef CONFIG_DEBUG_LOCK_ALLOC +#ifdef CONFIG_LOCKDEP /* * register_netdevice() inits txq->_xmit_lock and sets lockdep class * according to dev->type @@ -301,6 +301,7 @@ static const char *netdev_lock_name[] = "_xmit_NONE"}; static struct lock_class_key netdev_xmit_lock_key[ARRAY_SIZE(netdev_lock_type)]; +static struct lock_class_key netdev_addr_lock_key[ARRAY_SIZE(netdev_lock_type)]; static inline unsigned short netdev_lock_pos(unsigned short dev_type) { @@ -313,8 +314,8 @@ static inline unsigned short netdev_lock_pos(unsigned short dev_type) return ARRAY_SIZE(netdev_lock_type) - 1; } -static inline void netdev_set_lockdep_class(spinlock_t *lock, - unsigned short dev_type) +static inline void netdev_set_xmit_lockdep_class(spinlock_t *lock, + unsigned short dev_type) { int i; @@ -322,9 +323,22 @@ static inline void netdev_set_lockdep_class(spinlock_t *lock, lockdep_set_class_and_name(lock, &netdev_xmit_lock_key[i], netdev_lock_name[i]); } + +static inline void netdev_set_addr_lockdep_class(struct net_device *dev) +{ + int i; + + i = netdev_lock_pos(dev->type); + lockdep_set_class_and_name(&dev->addr_list_lock, + &netdev_addr_lock_key[i], + netdev_lock_name[i]); +} #else -static inline void netdev_set_lockdep_class(spinlock_t *lock, - unsigned short dev_type) +static inline void netdev_set_xmit_lockdep_class(spinlock_t *lock, + unsigned short dev_type) +{ +} +static inline void netdev_set_addr_lockdep_class(struct net_device *dev) { } #endif @@ -1327,9 +1341,6 @@ static void dev_queue_xmit_nit(struct sk_buff *skb, struct net_device *dev) void __netif_schedule(struct Qdisc *q) { - if (WARN_ON_ONCE(q == &noop_qdisc)) - return; - if (!test_and_set_bit(__QDISC_STATE_SCHED, &q->state)) { struct softnet_data *sd; unsigned long flags; @@ -1645,32 +1656,6 @@ out_kfree_skb: return 0; } -/** - * dev_queue_xmit - transmit a buffer - * @skb: buffer to transmit - * - * Queue a buffer for transmission to a network device. The caller must - * have set the device and priority and built the buffer before calling - * this function. The function can be called from an interrupt. - * - * A negative errno code is returned on a failure. A success does not - * guarantee the frame will be transmitted as it may be dropped due - * to congestion or traffic shaping. - * - * ----------------------------------------------------------------------------------- - * I notice this method can also return errors from the queue disciplines, - * including NET_XMIT_DROP, which is a positive value. So, errors can also - * be positive. - * - * Regardless of the return value, the skb is consumed, so it is currently - * difficult to retry a send to this method. (You can bump the ref count - * before sending to hold a reference for retry if you are careful.) - * - * When calling this method, interrupts MUST be enabled. This is because - * the BH enable code must have IRQs enabled so that it will not deadlock. 
- * --BLG - */ - static u32 simple_tx_hashrnd; static int simple_tx_hashrnd_initialized = 0; @@ -1738,6 +1723,31 @@ static struct netdev_queue *dev_pick_tx(struct net_device *dev, return netdev_get_tx_queue(dev, queue_index); } +/** + * dev_queue_xmit - transmit a buffer + * @skb: buffer to transmit + * + * Queue a buffer for transmission to a network device. The caller must + * have set the device and priority and built the buffer before calling + * this function. The function can be called from an interrupt. + * + * A negative errno code is returned on a failure. A success does not + * guarantee the frame will be transmitted as it may be dropped due + * to congestion or traffic shaping. + * + * ----------------------------------------------------------------------------------- + * I notice this method can also return errors from the queue disciplines, + * including NET_XMIT_DROP, which is a positive value. So, errors can also + * be positive. + * + * Regardless of the return value, the skb is consumed, so it is currently + * difficult to retry a send to this method. (You can bump the ref count + * before sending to hold a reference for retry if you are careful.) + * + * When calling this method, interrupts MUST be enabled. This is because + * the BH enable code must have IRQs enabled so that it will not deadlock. + * --BLG + */ int dev_queue_xmit(struct sk_buff *skb) { struct net_device *dev = skb->dev; @@ -1786,7 +1796,7 @@ gso: skb->tc_verd = SET_TC_AT(skb->tc_verd,AT_EGRESS); #endif if (q->enqueue) { - spinlock_t *root_lock = qdisc_root_lock(q); + spinlock_t *root_lock = qdisc_lock(q); spin_lock(root_lock); @@ -1795,7 +1805,6 @@ gso: spin_unlock(root_lock); - rc = rc == NET_XMIT_BYPASS ? NET_XMIT_SUCCESS : rc; goto out; } @@ -1899,7 +1908,6 @@ int netif_rx(struct sk_buff *skb) if (queue->input_pkt_queue.qlen <= netdev_max_backlog) { if (queue->input_pkt_queue.qlen) { enqueue: - dev_hold(skb->dev); __skb_queue_tail(&queue->input_pkt_queue, skb); local_irq_restore(flags); return NET_RX_SUCCESS; @@ -1963,7 +1971,7 @@ static void net_tx_action(struct softirq_action *h) struct sk_buff *skb = clist; clist = clist->next; - BUG_TRAP(!atomic_read(&skb->users)); + WARN_ON(atomic_read(&skb->users)); __kfree_skb(skb); } } @@ -1985,7 +1993,7 @@ static void net_tx_action(struct softirq_action *h) smp_mb__before_clear_bit(); clear_bit(__QDISC_STATE_SCHED, &q->state); - root_lock = qdisc_root_lock(q); + root_lock = qdisc_lock(q); if (spin_trylock(root_lock)) { qdisc_run(q); spin_unlock(root_lock); @@ -2090,7 +2098,7 @@ static int ing_filter(struct sk_buff *skb) rxq = &dev->rx_queue; q = rxq->qdisc; - if (q) { + if (q != &noop_qdisc) { spin_lock(qdisc_lock(q)); result = qdisc_enqueue_root(skb, q); spin_unlock(qdisc_lock(q)); @@ -2103,7 +2111,7 @@ static inline struct sk_buff *handle_ing(struct sk_buff *skb, struct packet_type **pt_prev, int *ret, struct net_device *orig_dev) { - if (!skb->dev->rx_queue.qdisc) + if (skb->dev->rx_queue.qdisc == &noop_qdisc) goto out; if (*pt_prev) { @@ -2260,6 +2268,20 @@ out: return ret; } +/* Network device is going away, flush any packets still pending */ +static void flush_backlog(void *arg) +{ + struct net_device *dev = arg; + struct softnet_data *queue = &__get_cpu_var(softnet_data); + struct sk_buff *skb, *tmp; + + skb_queue_walk_safe(&queue->input_pkt_queue, skb, tmp) + if (skb->dev == dev) { + __skb_unlink(skb, &queue->input_pkt_queue); + kfree_skb(skb); + } +} + static int process_backlog(struct napi_struct *napi, int quota) { int work = 0; @@ -2269,7 +2291,6 
@@ static int process_backlog(struct napi_struct *napi, int quota) napi->weight = weight_p; do { struct sk_buff *skb; - struct net_device *dev; local_irq_disable(); skb = __skb_dequeue(&queue->input_pkt_queue); @@ -2278,14 +2299,9 @@ static int process_backlog(struct napi_struct *napi, int quota) local_irq_enable(); break; } - local_irq_enable(); - dev = skb->dev; - netif_receive_skb(skb); - - dev_put(dev); } while (++work < quota && jiffies == start_time); return work; @@ -2385,7 +2401,7 @@ out: */ if (!cpus_empty(net_dma.channel_mask)) { int chan_idx; - for_each_cpu_mask(chan_idx, net_dma.channel_mask) { + for_each_cpu_mask_nr(chan_idx, net_dma.channel_mask) { struct dma_chan *chan = net_dma.channels[chan_idx]; if (chan) dma_async_memcpy_issue_pending(chan); @@ -3837,7 +3853,7 @@ static void rollback_registered(struct net_device *dev) dev->uninit(dev); /* Notifier chain MUST detach us from master device. */ - BUG_TRAP(!dev->master); + WARN_ON(dev->master); /* Remove entries from kobject tree */ netdev_unregister_kobject(dev); @@ -3852,7 +3868,7 @@ static void __netdev_init_queue_locks_one(struct net_device *dev, void *_unused) { spin_lock_init(&dev_queue->_xmit_lock); - netdev_set_lockdep_class(&dev_queue->_xmit_lock, dev->type); + netdev_set_xmit_lockdep_class(&dev_queue->_xmit_lock, dev->type); dev_queue->xmit_lock_owner = -1; } @@ -3897,6 +3913,7 @@ int register_netdevice(struct net_device *dev) net = dev_net(dev); spin_lock_init(&dev->addr_list_lock); + netdev_set_addr_lockdep_class(dev); netdev_init_queue_locks(dev); dev->iflink = -1; @@ -3977,6 +3994,10 @@ int register_netdevice(struct net_device *dev) } } + /* Enable software GSO if SG is supported. */ + if (dev->features & NETIF_F_SG) + dev->features |= NETIF_F_GSO; + netdev_initialize_kobject(dev); ret = netdev_register_kobject(dev); if (ret) @@ -4154,13 +4175,15 @@ void netdev_run_todo(void) dev->reg_state = NETREG_UNREGISTERED; + on_each_cpu(flush_backlog, dev, 1); + netdev_wait_allrefs(dev); /* paranoia */ BUG_ON(atomic_read(&dev->refcnt)); - BUG_TRAP(!dev->ip_ptr); - BUG_TRAP(!dev->ip6_ptr); - BUG_TRAP(!dev->dn_ptr); + WARN_ON(dev->ip_ptr); + WARN_ON(dev->ip6_ptr); + WARN_ON(dev->dn_ptr); if (dev->destructor) dev->destructor(dev); @@ -4189,6 +4212,7 @@ static void netdev_init_queues(struct net_device *dev) { netdev_init_one_queue(dev, &dev->rx_queue, NULL); netdev_for_each_tx_queue(dev, netdev_init_one_queue, NULL); + spin_lock_init(&dev->tx_global_lock); } /** @@ -4207,7 +4231,7 @@ struct net_device *alloc_netdev_mq(int sizeof_priv, const char *name, { struct netdev_queue *tx; struct net_device *dev; - int alloc_size; + size_t alloc_size; void *p; BUG_ON(strlen(name) >= sizeof(dev->name)); @@ -4227,7 +4251,7 @@ struct net_device *alloc_netdev_mq(int sizeof_priv, const char *name, return NULL; } - tx = kzalloc(sizeof(struct netdev_queue) * queue_count, GFP_KERNEL); + tx = kcalloc(queue_count, sizeof(struct netdev_queue), GFP_KERNEL); if (!tx) { printk(KERN_ERR "alloc_netdev: Unable to allocate " "tx qdiscs.\n"); @@ -4519,7 +4543,7 @@ static void net_dma_rebalance(struct net_dma *net_dma) i = 0; cpu = first_cpu(cpu_online_map); - for_each_cpu_mask(chan_idx, net_dma->channel_mask) { + for_each_cpu_mask_nr(chan_idx, net_dma->channel_mask) { chan = net_dma->channels[chan_idx]; n = ((num_online_cpus() / cpus_weight(net_dma->channel_mask)) @@ -4686,6 +4710,26 @@ err_name: return -ENOMEM; } +char *netdev_drivername(struct net_device *dev, char *buffer, int len) +{ + struct device_driver *driver; + struct device *parent; + + if (len 
<= 0 || !buffer) + return buffer; + buffer[0] = 0; + + parent = dev->dev.parent; + + if (!parent) + return buffer; + + driver = parent->driver; + if (driver && driver->name) + strlcpy(buffer, driver->name, len); + return buffer; +} + static void __net_exit netdev_exit(struct net *net) { kfree(net->dev_name_head);
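
A note on the lockdep hunks near the top of the diff: the single netdev_set_lockdep_class() helper is split into netdev_set_xmit_lockdep_class() and netdev_set_addr_lockdep_class(), so that both the per-queue _xmit_lock and the new dev->addr_list_lock get a lock class keyed by dev->type. Below is a minimal, hedged sketch of that technique in isolation; the key array, names, and function are illustrative and not taken from the patch. Giving locks of the same C type distinct classes lets stacked devices nest them without lockdep reporting false recursive locking.

#include <linux/spinlock.h>
#include <linux/lockdep.h>

/* One lock class per "kind" of lock user; the two kinds here are
 * purely illustrative (the patch keys its classes by dev->type). */
static struct lock_class_key demo_lock_keys[2];
static const char *demo_lock_names[2] = { "_demo_lower", "_demo_upper" };

static void demo_classify(spinlock_t *lock, int kind)
{
	spin_lock_init(lock);
	/* Re-key the lock so lockdep tracks it under a per-kind class
	 * instead of the single class it got from spin_lock_init(). */
	lockdep_set_class_and_name(lock, &demo_lock_keys[kind],
				   demo_lock_names[kind]);
}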
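
The flush_backlog() and netdev_run_todo() hunks replace per-packet reference counting with an explicit flush: netif_rx() no longer calls dev_hold() for every skb it leaves on a per-CPU input_pkt_queue (and process_backlog() no longer drops that reference); instead, when a device is unregistered, on_each_cpu(flush_backlog, dev, 1) makes each CPU discard any queued skbs still pointing at the dying device. A rough sketch of the same walk-and-unlink pattern follows, with hypothetical names and without the per-CPU softnet_data context the real code runs in.

#include <linux/skbuff.h>
#include <linux/netdevice.h>

/* Drop every queued skb that still points at @dead. Illustrative only;
 * the in-tree flush_backlog() is invoked on every CPU via on_each_cpu()
 * so that each CPU cleans its own softnet_data input_pkt_queue. */
static void drop_skbs_for_dev(struct sk_buff_head *queue,
			      struct net_device *dead)
{
	struct sk_buff *skb, *tmp;

	skb_queue_walk_safe(queue, skb, tmp) {
		if (skb->dev == dead) {
			__skb_unlink(skb, queue);
			kfree_skb(skb);
		}
	}
}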
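
The new netdev_drivername() helper at the end of the diff copies the parent device driver's name into a caller-supplied buffer and returns that buffer (left empty when the device has no parent or no driver). A possible usage sketch is below; the calling function and message are hypothetical, not part of this diff, and it assumes a prototype for netdev_drivername() is visible to the caller (the hunk shown adds only the definition in net/core/dev.c).

#include <linux/kernel.h>
#include <linux/netdevice.h>

/* Hypothetical caller: name the driver behind a misbehaving device
 * in a diagnostic printk. */
static void report_tx_trouble(struct net_device *dev)
{
	char drivername[64];

	printk(KERN_WARNING "%s (%s): transmit queue appears stuck\n",
	       dev->name,
	       netdev_drivername(dev, drivername, sizeof(drivername)));
}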