/*
 * 	NET3	Protocol independent device support routines.
 *
 *		This program is free software; you can redistribute it and/or
 *		modify it under the terms of the GNU General Public License
 *		as published by the Free Software Foundation; either version
 *		2 of the License, or (at your option) any later version.
 *
 *	Derived from the non IP parts of dev.c 1.0.19
 *		Authors:	Ross Biro
 *				Fred N. van Kempen, <waltje@uWalt.NL.Mugnet.ORG>
 *				Mark Evans, <evansmp@uhura.aston.ac.uk>
 *
 *	Additional Authors:
 *		Florian la Roche <rzsfl@rz.uni-sb.de>
 *		Alan Cox <gw4pts@gw4pts.ampr.org>
 *		David Hinds <dahinds@users.sourceforge.net>
 *		Alexey Kuznetsov <kuznet@ms2.inr.ac.ru>
 *		Adam Sulmicki <adam@cfar.umd.edu>
 *		Pekka Riikonen <priikone@poesidon.pspt.fi>
 *
 *	Changes:
 *		D.J. Barrow	:	Fixed bug where dev->refcnt gets set
 *					to 2 if register_netdev gets called
 *					before net_dev_init & also removed a
 *					few lines of code in the process.
 *		Alan Cox	:	device private ioctl copies fields back.
 *		Alan Cox	:	Transmit queue code does relevant
 *					stunts to keep the queue safe.
 *		Alan Cox	:	Fixed double lock.
 *		Alan Cox	:	Fixed promisc NULL pointer trap
 *		????????	:	Support the full private ioctl range
 *		Alan Cox	:	Moved ioctl permission check into
 *					drivers
 *		Tim Kordas	:	SIOCADDMULTI/SIOCDELMULTI
 *		Alan Cox	:	100 backlog just doesn't cut it when
 *					you start doing multicast video 8)
 *		Alan Cox	:	Rewrote net_bh and list manager.
 *		Alan Cox	:	Fix ETH_P_ALL echoback lengths.
 *		Alan Cox	:	Took out transmit every packet pass
 *					Saved a few bytes in the ioctl handler
 *		Alan Cox	:	Network driver sets packet type before
 *					calling netif_rx. Saves a function
 *					call a packet.
 *		Alan Cox	:	Hashed net_bh()
 *		Richard Kooijman:	Timestamp fixes.
 *		Alan Cox	:	Wrong field in SIOCGIFDSTADDR
 *		Alan Cox	:	Device lock protection.
 *		Alan Cox	:	Fixed nasty side effect of device close
 *					changes.
 *		Rudi Cilibrasi	:	Pass the right thing to
 *					set_mac_address()
 *		Dave Miller	:	32bit quantity for the device lock to
 *					make it work out on a Sparc.
 *		Bjorn Ekwall	:	Added KERNELD hack.
 *		Alan Cox	:	Cleaned up the backlog initialise.
 *		Craig Metz	:	SIOCGIFCONF fix if space for under
 *					1 device.
 *	    Thomas Bogendoerfer :	Return ENODEV for dev_open, if there
 *					is no device open function.
 *		Andi Kleen	:	Fix error reporting for SIOCGIFCONF
 *	    Michael Chastain	:	Fix signed/unsigned for SIOCGIFCONF
 *		Cyrus Durgin	:	Cleaned for KMOD
 *		Adam Sulmicki	:	Bug Fix : Network Device Unload
 *					A network device unload needs to purge
 *					the backlog queue.
 *	Paul Rusty Russell	:	SIOCSIFNAME
 *		Pekka Riikonen	:	Netdev boot-time settings code
 *		Andrew Morton	:	Make unregister_netdevice wait
 *					indefinitely on dev->refcnt
 *	J Hadi Salim		:	- Backlog queue sampling
 *					- netif_rx() feedback
 */
#include <asm/uaccess.h>
#include <asm/system.h>
#include <linux/bitops.h>
#include <linux/capability.h>
#include <linux/cpu.h>
#include <linux/types.h>
#include <linux/kernel.h>
#include <linux/sched.h>
#include <linux/mutex.h>
#include <linux/string.h>
#include <linux/mm.h>
#include <linux/socket.h>
#include <linux/sockios.h>
#include <linux/errno.h>
#include <linux/interrupt.h>
#include <linux/if_ether.h>
#include <linux/netdevice.h>
#include <linux/etherdevice.h>
#include <linux/ethtool.h>
#include <linux/notifier.h>
#include <linux/skbuff.h>
#include <net/net_namespace.h>
#include <net/sock.h>
#include <linux/rtnetlink.h>
#include <linux/proc_fs.h>
#include <linux/seq_file.h>
#include <linux/stat.h>
#include <linux/if_bridge.h>
#include <linux/if_macvlan.h>
#include <net/dst.h>
#include <net/pkt_sched.h>
#include <net/checksum.h>
#include <linux/highmem.h>
#include <linux/init.h>
#include <linux/kmod.h>
#include <linux/module.h>
#include <linux/kallsyms.h>
#include <linux/netpoll.h>
#include <linux/rcupdate.h>
#include <linux/delay.h>
#include <net/wext.h>
#include <net/iw_handler.h>
#include <asm/current.h>
#include <linux/audit.h>
#include <linux/dmaengine.h>
#include <linux/err.h>
#include <linux/ctype.h>
#include <linux/if_arp.h>
#include <linux/if_vlan.h>

#include "net-sysfs.h"
/*
 *	The list of packet types we will receive (as opposed to discard)
 *	and the routines to invoke.
 *
 *	Why 16. Because with 16 the only overlap we get on a hash of the
 *	low nibble of the protocol value is RARP/SNAP/X.25.
 *
 *	NOTE:  That is no longer true with the addition of VLAN tags.  Not
 *	       sure which should go first, but I bet it won't make much
 *	       difference if we are running VLANs.  The good news is that
 *	       this protocol won't be in the list unless compiled in, so
 *	       the average user (w/out VLANs) will not be adversely affected.
 *	       --BLG
 */

#define PTYPE_HASH_SIZE	(16)
#define PTYPE_HASH_MASK	(PTYPE_HASH_SIZE - 1)

static DEFINE_SPINLOCK(ptype_lock);
static struct list_head ptype_base[PTYPE_HASH_SIZE] __read_mostly;
static struct list_head ptype_all __read_mostly;	/* Taps */
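
/*
 * Worked example of the hashing above (illustrative, not in the original
 * source): the bucket is the low nibble of the protocol value, so RARP
 * (0x8035), SNAP (0x0005) and X.25 (0x0805) all map to bucket 0x5,
 * which is exactly the overlap mentioned above, while IP (0x0800) maps
 * to bucket 0x0 and ARP (0x0806) to bucket 0x6.
 */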
#ifdef CONFIG_NET_DMA
struct net_dma {
	struct dma_client client;
	spinlock_t lock;
	cpumask_t channel_mask;
	struct dma_chan **channels;
};

static enum dma_state_client
netdev_dma_event(struct dma_client *client, struct dma_chan *chan,
	enum dma_state state);

static struct net_dma net_dma = {
	.client = {
		.event_callback = netdev_dma_event,
	},
};
#endif

/*
 * The @dev_base_head list is protected by @dev_base_lock and the rtnl
 * semaphore.
 *
 * Pure readers hold dev_base_lock for reading.
 *
 * Writers must hold the rtnl semaphore while they loop through the
 * dev_base_head list, and hold dev_base_lock for writing when they do the
 * actual updates.  This allows pure readers to access the list even
 * while a writer is preparing to update it.
 *
 * To put it another way, dev_base_lock is held for writing only to
 * protect against pure readers; the rtnl semaphore provides the
 * protection against other writers.
 *
 * See, for example usages, register_netdevice() and
 * unregister_netdevice(), which must be called with the rtnl
 * semaphore held.
 */
DEFINE_RWLOCK(dev_base_lock);

EXPORT_SYMBOL(dev_base_lock);
#define NETDEV_HASHBITS	8
#define NETDEV_HASHENTRIES (1 << NETDEV_HASHBITS)

static inline struct hlist_head *dev_name_hash(struct net *net, const char *name)
{
	unsigned hash = full_name_hash(name, strnlen(name, IFNAMSIZ));
	return &net->dev_name_head[hash & ((1 << NETDEV_HASHBITS) - 1)];
}

static inline struct hlist_head *dev_index_hash(struct net *net, int ifindex)
{
	return &net->dev_index_head[ifindex & ((1 << NETDEV_HASHBITS) - 1)];
}

/* Device list insertion */
static int list_netdevice(struct net_device *dev)
{
	struct net *net = dev_net(dev);

	ASSERT_RTNL();

	write_lock_bh(&dev_base_lock);
	list_add_tail(&dev->dev_list, &net->dev_base_head);
	hlist_add_head(&dev->name_hlist, dev_name_hash(net, dev->name));
	hlist_add_head(&dev->index_hlist, dev_index_hash(net, dev->ifindex));
	write_unlock_bh(&dev_base_lock);
	return 0;
}

/* Device list removal */
static void unlist_netdevice(struct net_device *dev)
{
	ASSERT_RTNL();

	/* Unlink dev from the device chain */
	write_lock_bh(&dev_base_lock);
	list_del(&dev->dev_list);
	hlist_del(&dev->name_hlist);
	hlist_del(&dev->index_hlist);
	write_unlock_bh(&dev_base_lock);
}

/*
 *	Our notifier list
 */

static RAW_NOTIFIER_HEAD(netdev_chain);

/*
 *	Device drivers call our routines to queue packets here. We empty the
 *	queue in the local softnet handler.
 */

DEFINE_PER_CPU(struct softnet_data, softnet_data);
#ifdef CONFIG_DEBUG_LOCK_ALLOC
/*
 * register_netdevice() inits txq->_xmit_lock and sets lockdep class
 * according to dev->type
 */
static const unsigned short netdev_lock_type[] =
	{ARPHRD_NETROM, ARPHRD_ETHER, ARPHRD_EETHER, ARPHRD_AX25,
	 ARPHRD_PRONET, ARPHRD_CHAOS, ARPHRD_IEEE802, ARPHRD_ARCNET,
	 ARPHRD_APPLETLK, ARPHRD_DLCI, ARPHRD_ATM, ARPHRD_METRICOM,
	 ARPHRD_IEEE1394, ARPHRD_EUI64, ARPHRD_INFINIBAND, ARPHRD_SLIP,
	 ARPHRD_CSLIP, ARPHRD_SLIP6, ARPHRD_CSLIP6, ARPHRD_RSRVD,
	 ARPHRD_ADAPT, ARPHRD_ROSE, ARPHRD_X25, ARPHRD_HWX25,
	 ARPHRD_PPP, ARPHRD_CISCO, ARPHRD_LAPB, ARPHRD_DDCMP,
	 ARPHRD_RAWHDLC, ARPHRD_TUNNEL, ARPHRD_TUNNEL6, ARPHRD_FRAD,
	 ARPHRD_SKIP, ARPHRD_LOOPBACK, ARPHRD_LOCALTLK, ARPHRD_FDDI,
	 ARPHRD_BIF, ARPHRD_SIT, ARPHRD_IPDDP, ARPHRD_IPGRE,
	 ARPHRD_PIMREG, ARPHRD_HIPPI, ARPHRD_ASH, ARPHRD_ECONET,
	 ARPHRD_IRDA, ARPHRD_FCPP, ARPHRD_FCAL, ARPHRD_FCPL,
	 ARPHRD_FCFABRIC, ARPHRD_IEEE802_TR, ARPHRD_IEEE80211,
	 ARPHRD_IEEE80211_PRISM, ARPHRD_IEEE80211_RADIOTAP, ARPHRD_VOID,
	 ARPHRD_NONE};

static const char *netdev_lock_name[] =
	{"_xmit_NETROM", "_xmit_ETHER", "_xmit_EETHER", "_xmit_AX25",
	 "_xmit_PRONET", "_xmit_CHAOS", "_xmit_IEEE802", "_xmit_ARCNET",
	 "_xmit_APPLETLK", "_xmit_DLCI", "_xmit_ATM", "_xmit_METRICOM",
	 "_xmit_IEEE1394", "_xmit_EUI64", "_xmit_INFINIBAND", "_xmit_SLIP",
	 "_xmit_CSLIP", "_xmit_SLIP6", "_xmit_CSLIP6", "_xmit_RSRVD",
	 "_xmit_ADAPT", "_xmit_ROSE", "_xmit_X25", "_xmit_HWX25",
	 "_xmit_PPP", "_xmit_CISCO", "_xmit_LAPB", "_xmit_DDCMP",
	 "_xmit_RAWHDLC", "_xmit_TUNNEL", "_xmit_TUNNEL6", "_xmit_FRAD",
	 "_xmit_SKIP", "_xmit_LOOPBACK", "_xmit_LOCALTLK", "_xmit_FDDI",
	 "_xmit_BIF", "_xmit_SIT", "_xmit_IPDDP", "_xmit_IPGRE",
	 "_xmit_PIMREG", "_xmit_HIPPI", "_xmit_ASH", "_xmit_ECONET",
	 "_xmit_IRDA", "_xmit_FCPP", "_xmit_FCAL", "_xmit_FCPL",
	 "_xmit_FCFABRIC", "_xmit_IEEE802_TR", "_xmit_IEEE80211",
	 "_xmit_IEEE80211_PRISM", "_xmit_IEEE80211_RADIOTAP", "_xmit_VOID",
	 "_xmit_NONE"};

static struct lock_class_key netdev_xmit_lock_key[ARRAY_SIZE(netdev_lock_type)];

static inline unsigned short netdev_lock_pos(unsigned short dev_type)
{
	int i;

	for (i = 0; i < ARRAY_SIZE(netdev_lock_type); i++)
		if (netdev_lock_type[i] == dev_type)
			return i;
	/* the last key is used by default */
	return ARRAY_SIZE(netdev_lock_type) - 1;
}

static inline void netdev_set_lockdep_class(spinlock_t *lock,
					    unsigned short dev_type)
{
	int i;

	i = netdev_lock_pos(dev_type);
	lockdep_set_class_and_name(lock, &netdev_xmit_lock_key[i],
				   netdev_lock_name[i]);
}
#else
static inline void netdev_set_lockdep_class(spinlock_t *lock,
					    unsigned short dev_type)
{
}
#endif
/*******************************************************************************

		Protocol management and registration routines

*******************************************************************************/

/*
 *	Add a protocol ID to the list. Now that the input handler is
 *	smarter we can dispense with all the messy stuff that used to be
 *	here.
 *
 *	BEWARE!!! Protocol handlers, mangling input packets,
 *	MUST BE last in hash buckets and checking protocol handlers
 *	MUST start from promiscuous ptype_all chain in net_bh.
 *	It is true now, do not change it.
 *	Explanation follows: if protocol handler, mangling packet, will
 *	be the first on list, it is not able to sense, that packet
 *	is cloned and should be copied-on-write, so that it will
 *	change it and subsequent readers will get broken packet.
 *							--ANK (980803)
 */

/**
 *	dev_add_pack - add packet handler
 *	@pt: packet type declaration
 *
 *	Add a protocol handler to the networking stack. The passed &packet_type
 *	is linked into kernel lists and may not be freed until it has been
 *	removed from the kernel lists.
 *
 *	This call does not sleep, therefore it can not
 *	guarantee that all CPUs that are in the middle of receiving packets
 *	will see the new packet type (until the next received packet).
 */

void dev_add_pack(struct packet_type *pt)
{
	int hash;

	spin_lock_bh(&ptype_lock);
	if (pt->type == htons(ETH_P_ALL))
		list_add_rcu(&pt->list, &ptype_all);
	else {
		hash = ntohs(pt->type) & PTYPE_HASH_MASK;
		list_add_rcu(&pt->list, &ptype_base[hash]);
	}
	spin_unlock_bh(&ptype_lock);
}
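
/*
 * Illustrative sketch (not part of this file): how a module would use
 * dev_add_pack()/dev_remove_pack() to tap every received frame.  The
 * function and variable names here are hypothetical; only the
 * packet_type API is real.  Guarded by #if 0 so it is never built.
 */
#if 0
static int example_rcv(struct sk_buff *skb, struct net_device *dev,
		       struct packet_type *pt, struct net_device *orig_dev)
{
	/* The handler owns the skb reference it is given. */
	kfree_skb(skb);
	return 0;
}

static struct packet_type example_pt __read_mostly = {
	.type	= __constant_htons(ETH_P_ALL),	/* tap: goes on ptype_all */
	.func	= example_rcv,			/* .dev == NULL: all devices */
};

static int __init example_init(void)
{
	dev_add_pack(&example_pt);
	return 0;
}

static void __exit example_exit(void)
{
	/* Sleeps until no CPU can still be walking the old list entry. */
	dev_remove_pack(&example_pt);
}
#endif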
/**
 *	__dev_remove_pack - remove packet handler
 *	@pt: packet type declaration
 *
 *	Remove a protocol handler that was previously added to the kernel
 *	protocol handlers by dev_add_pack(). The passed &packet_type is removed
 *	from the kernel lists and can be freed or reused once this function
 *	returns.
 *
 *	The packet type might still be in use by receivers
 *	and must not be freed until after all the CPU's have gone
 *	through a quiescent state.
 */
void __dev_remove_pack(struct packet_type *pt)
{
	struct list_head *head;
	struct packet_type *pt1;

	spin_lock_bh(&ptype_lock);

	if (pt->type == htons(ETH_P_ALL))
		head = &ptype_all;
	else
		head = &ptype_base[ntohs(pt->type) & PTYPE_HASH_MASK];

	list_for_each_entry(pt1, head, list) {
		if (pt == pt1) {
			list_del_rcu(&pt->list);
			goto out;
		}
	}

	printk(KERN_WARNING "dev_remove_pack: %p not found.\n", pt);
out:
	spin_unlock_bh(&ptype_lock);
}

/**
 *	dev_remove_pack - remove packet handler
 *	@pt: packet type declaration
 *
 *	Remove a protocol handler that was previously added to the kernel
 *	protocol handlers by dev_add_pack(). The passed &packet_type is removed
 *	from the kernel lists and can be freed or reused once this function
 *	returns.
 *
 *	This call sleeps to guarantee that no CPU is looking at the packet
 *	type after return.
 */
void dev_remove_pack(struct packet_type *pt)
{
	__dev_remove_pack(pt);

	synchronize_net();
}
/******************************************************************************

		      Device Boot-time Settings Routines

 *******************************************************************************/

/* Boot time configuration table */
static struct netdev_boot_setup dev_boot_setup[NETDEV_BOOT_SETUP_MAX];

/**
 *	netdev_boot_setup_add - add new setup entry
 *	@name: name of the device
 *	@map: configured settings for the device
 *
 *	Adds new setup entry to the dev_boot_setup list.  The function
 *	returns 0 on error and 1 on success.  This is a generic routine to
 *	all netdevices.
 */
static int netdev_boot_setup_add(char *name, struct ifmap *map)
{
	struct netdev_boot_setup *s;
	int i;

	s = dev_boot_setup;
	for (i = 0; i < NETDEV_BOOT_SETUP_MAX; i++) {
		if (s[i].name[0] == '\0' || s[i].name[0] == ' ') {
			memset(s[i].name, 0, sizeof(s[i].name));
			strlcpy(s[i].name, name, IFNAMSIZ);
			memcpy(&s[i].map, map, sizeof(s[i].map));
			break;
		}
	}

	return i >= NETDEV_BOOT_SETUP_MAX ? 0 : 1;
}

/**
 *	netdev_boot_setup_check - check boot time settings
 *	@dev: the netdevice
 *
 *	Check boot time settings for the device.
 *	The found settings are set for the device to be used
 *	later in the device probing.
 *	Returns 0 if no settings found, 1 if they are.
 */
int netdev_boot_setup_check(struct net_device *dev)
{
	struct netdev_boot_setup *s = dev_boot_setup;
	int i;

	for (i = 0; i < NETDEV_BOOT_SETUP_MAX; i++) {
		if (s[i].name[0] != '\0' && s[i].name[0] != ' ' &&
		    !strcmp(dev->name, s[i].name)) {
			dev->irq	= s[i].map.irq;
			dev->base_addr	= s[i].map.base_addr;
			dev->mem_start	= s[i].map.mem_start;
			dev->mem_end	= s[i].map.mem_end;
			return 1;
		}
	}
	return 0;
}

/**
 *	netdev_boot_base - get address from boot time settings
 *	@prefix: prefix for network device
 *	@unit: id for network device
 *
 *	Check boot time settings for the base address of device.
 *	The found settings are set for the device to be used
 *	later in the device probing.
 *	Returns 0 if no settings found.
 */
unsigned long netdev_boot_base(const char *prefix, int unit)
{
	const struct netdev_boot_setup *s = dev_boot_setup;
	char name[IFNAMSIZ];
	int i;

	sprintf(name, "%s%d", prefix, unit);

	/*
	 * If device already registered then return base of 1
	 * to indicate not to probe for this interface
	 */
	if (__dev_get_by_name(&init_net, name))
		return 1;

	for (i = 0; i < NETDEV_BOOT_SETUP_MAX; i++)
		if (!strcmp(name, s[i].name))
			return s[i].map.base_addr;
	return 0;
}

/*
 * Saves at boot time configured settings for any netdevice.
 */
int __init netdev_boot_setup(char *str)
{
	int ints[5];
	struct ifmap map;

	str = get_options(str, ARRAY_SIZE(ints), ints);
	if (!str || !*str)
		return 0;

	/* Save settings */
	memset(&map, 0, sizeof(map));
	if (ints[0] > 0)
		map.irq = ints[1];
	if (ints[0] > 1)
		map.base_addr = ints[2];
	if (ints[0] > 2)
		map.mem_start = ints[3];
	if (ints[0] > 3)
		map.mem_end = ints[4];

	/* Add new entry to the list */
	return netdev_boot_setup_add(str, &map);
}

__setup("netdev=", netdev_boot_setup);
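
/*
 * Illustrative usage (kernel command line; the concrete values below are
 * hypothetical).  get_options() above parses up to four leading numbers
 * into ints[1..4] (irq, base_addr, mem_start, mem_end), stores the count
 * in ints[0], and leaves the trailing device name in str:
 *
 *	netdev=9,0x300,0,0,eth0
 */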
/*******************************************************************************

			    Device Interface Subroutines

*******************************************************************************/

/**
 *	__dev_get_by_name - find a device by its name
 *	@net: the applicable net namespace
 *	@name: name to find
 *
 *	Find an interface by name. Must be called under RTNL semaphore
 *	or @dev_base_lock. If the name is found a pointer to the device
 *	is returned. If the name is not found then %NULL is returned. The
 *	reference counters are not incremented so the caller must be
 *	careful with locks.
 */

struct net_device *__dev_get_by_name(struct net *net, const char *name)
{
	struct hlist_node *p;

	hlist_for_each(p, dev_name_hash(net, name)) {
		struct net_device *dev
			= hlist_entry(p, struct net_device, name_hlist);
		if (!strncmp(dev->name, name, IFNAMSIZ))
			return dev;
	}
	return NULL;
}

/**
 *	dev_get_by_name - find a device by its name
 *	@net: the applicable net namespace
 *	@name: name to find
 *
 *	Find an interface by name. This can be called from any
 *	context and does its own locking. The returned handle has
 *	the usage count incremented and the caller must use dev_put() to
 *	release it when it is no longer needed. %NULL is returned if no
 *	matching device is found.
 */

struct net_device *dev_get_by_name(struct net *net, const char *name)
{
	struct net_device *dev;

	read_lock(&dev_base_lock);
	dev = __dev_get_by_name(net, name);
	if (dev)
		dev_hold(dev);
	read_unlock(&dev_base_lock);
	return dev;
}
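
/*
 * Illustrative caller (sketch, hypothetical device name): the reference
 * taken by dev_get_by_name() must be balanced with dev_put().
 */
#if 0
	struct net_device *dev = dev_get_by_name(&init_net, "eth0");

	if (dev) {
		/* ... safely use dev here ... */
		dev_put(dev);
	}
#endif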
/**
 *	__dev_get_by_index - find a device by its ifindex
 *	@net: the applicable net namespace
 *	@ifindex: index of device
 *
 *	Search for an interface by index. Returns %NULL if the device
 *	is not found or a pointer to the device. The device has not
 *	had its reference counter increased so the caller must be careful
 *	about locking. The caller must hold either the RTNL semaphore
 *	or @dev_base_lock.
 */

struct net_device *__dev_get_by_index(struct net *net, int ifindex)
{
	struct hlist_node *p;

	hlist_for_each(p, dev_index_hash(net, ifindex)) {
		struct net_device *dev
			= hlist_entry(p, struct net_device, index_hlist);
		if (dev->ifindex == ifindex)
			return dev;
	}
	return NULL;
}

/**
 *	dev_get_by_index - find a device by its ifindex
 *	@net: the applicable net namespace
 *	@ifindex: index of device
 *
 *	Search for an interface by index. Returns NULL if the device
 *	is not found or a pointer to the device. The device returned has
 *	had a reference added and the pointer is safe until the user calls
 *	dev_put to indicate they have finished with it.
 */

struct net_device *dev_get_by_index(struct net *net, int ifindex)
{
	struct net_device *dev;

	read_lock(&dev_base_lock);
	dev = __dev_get_by_index(net, ifindex);
	if (dev)
		dev_hold(dev);
	read_unlock(&dev_base_lock);
	return dev;
}

/**
 *	dev_getbyhwaddr - find a device by its hardware address
 *	@net: the applicable net namespace
 *	@type: media type of device
 *	@ha: hardware address
 *
 *	Search for an interface by MAC address. Returns NULL if the device
 *	is not found or a pointer to the device. The caller must hold the
 *	rtnl semaphore. The returned device has not had its ref count increased
 *	and the caller must therefore be careful about locking.
 *
 *	BUGS:
 *	If the API was consistent this would be __dev_get_by_hwaddr
 */

struct net_device *dev_getbyhwaddr(struct net *net, unsigned short type, char *ha)
{
	struct net_device *dev;

	ASSERT_RTNL();

	for_each_netdev(net, dev)
		if (dev->type == type &&
		    !memcmp(dev->dev_addr, ha, dev->addr_len))
			return dev;

	return NULL;
}

EXPORT_SYMBOL(dev_getbyhwaddr);

struct net_device *__dev_getfirstbyhwtype(struct net *net, unsigned short type)
{
	struct net_device *dev;

	ASSERT_RTNL();
	for_each_netdev(net, dev)
		if (dev->type == type)
			return dev;

	return NULL;
}

EXPORT_SYMBOL(__dev_getfirstbyhwtype);

struct net_device *dev_getfirstbyhwtype(struct net *net, unsigned short type)
{
	struct net_device *dev;

	rtnl_lock();
	dev = __dev_getfirstbyhwtype(net, type);
	if (dev)
		dev_hold(dev);
	rtnl_unlock();
	return dev;
}

EXPORT_SYMBOL(dev_getfirstbyhwtype);

/**
 *	dev_get_by_flags - find any device with given flags
 *	@net: the applicable net namespace
 *	@if_flags: IFF_* values
 *	@mask: bitmask of bits in if_flags to check
 *
 *	Search for any interface with the given flags. Returns NULL if a device
 *	is not found or a pointer to the device. The device returned has
 *	had a reference added and the pointer is safe until the user calls
 *	dev_put to indicate they have finished with it.
 */

struct net_device * dev_get_by_flags(struct net *net, unsigned short if_flags, unsigned short mask)
{
	struct net_device *dev, *ret;

	ret = NULL;
	read_lock(&dev_base_lock);
	for_each_netdev(net, dev) {
		if (((dev->flags ^ if_flags) & mask) == 0) {
			dev_hold(dev);
			ret = dev;
			break;
		}
	}
	read_unlock(&dev_base_lock);
	return ret;
}

/**
 *	dev_valid_name - check if name is okay for network device
 *	@name: name string
 *
 *	Network device names need to be valid file names to
 *	allow sysfs to work.  We also disallow any kind of
 *	whitespace.
 */
int dev_valid_name(const char *name)
{
	if (*name == '\0')
		return 0;
	if (strlen(name) >= IFNAMSIZ)
		return 0;
	if (!strcmp(name, ".") || !strcmp(name, ".."))
		return 0;

	while (*name) {
		if (*name == '/' || isspace(*name))
			return 0;
		name++;
	}
	return 1;
}
/**
 *	__dev_alloc_name - allocate a name for a device
 *	@net: network namespace to allocate the device name in
 *	@name: name format string
 *	@buf:  scratch buffer and result name string
 *
 *	Passed a format string - eg "lt%d" it will try and find a suitable
 *	id. It scans list of devices to build up a free map, then chooses
 *	the first empty slot. The caller must hold the dev_base or rtnl lock
 *	while allocating the name and adding the device in order to avoid
 *	duplicates.
 *	Limited to bits_per_byte * page size devices (ie 32K on most platforms).
 *	Returns the number of the unit assigned or a negative errno code.
 */

static int __dev_alloc_name(struct net *net, const char *name, char *buf)
{
	int i = 0;
	const char *p;
	const int max_netdevices = 8*PAGE_SIZE;
	unsigned long *inuse;
	struct net_device *d;

	p = strnchr(name, IFNAMSIZ-1, '%');
	if (p) {
		/*
		 * Verify the string as this thing may have come from
		 * the user.  There must be either one "%d" and no other "%"
		 * characters.
		 */
		if (p[1] != 'd' || strchr(p + 2, '%'))
			return -EINVAL;

		/* Use one page as a bit array of possible slots */
		inuse = (unsigned long *) get_zeroed_page(GFP_ATOMIC);
		if (!inuse)
			return -ENOMEM;

		for_each_netdev(net, d) {
			if (!sscanf(d->name, name, &i))
				continue;
			if (i < 0 || i >= max_netdevices)
				continue;

			/* avoid cases where sscanf is not exact inverse of printf */
			snprintf(buf, IFNAMSIZ, name, i);
			if (!strncmp(buf, d->name, IFNAMSIZ))
				set_bit(i, inuse);
		}

		i = find_first_zero_bit(inuse, max_netdevices);
		free_page((unsigned long) inuse);
	}

	snprintf(buf, IFNAMSIZ, name, i);
	if (!__dev_get_by_name(net, buf))
		return i;

	/* It is possible to run out of possible slots
	 * when the name is long and there isn't enough space left
	 * for the digits, or if all bits are used.
	 */
	return -ENFILE;
}

/**
 *	dev_alloc_name - allocate a name for a device
 *	@dev: device
 *	@name: name format string
 *
 *	Passed a format string - eg "lt%d" it will try and find a suitable
 *	id. It scans list of devices to build up a free map, then chooses
 *	the first empty slot. The caller must hold the dev_base or rtnl lock
 *	while allocating the name and adding the device in order to avoid
 *	duplicates.
 *	Limited to bits_per_byte * page size devices (ie 32K on most platforms).
 *	Returns the number of the unit assigned or a negative errno code.
 */

int dev_alloc_name(struct net_device *dev, const char *name)
{
	char buf[IFNAMSIZ];
	struct net *net;
	int ret;

	BUG_ON(!dev_net(dev));
	net = dev_net(dev);
	ret = __dev_alloc_name(net, name, buf);
	if (ret >= 0)
		strlcpy(dev->name, buf, IFNAMSIZ);
	return ret;
}
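
/*
 * Illustrative caller (sketch, hypothetical driver code): a driver whose
 * devices are named "lt0", "lt1", ... would do something like this
 * before registering the device.
 */
#if 0
	if (dev_alloc_name(dev, "lt%d") < 0)
		goto fail;	/* no free unit number or bad format string */
#endif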
/**
 *	dev_change_name - change name of a device
 *	@dev: device
 *	@newname: name (or format string) must be at least IFNAMSIZ
 *
 *	Change name of a device, can pass format strings "eth%d".
 *	for wildcarding.
 */
int dev_change_name(struct net_device *dev, char *newname)
{
	char oldname[IFNAMSIZ];
	int err = 0;
	int ret;
	struct net *net;

	ASSERT_RTNL();
	BUG_ON(!dev_net(dev));

	net = dev_net(dev);
	if (dev->flags & IFF_UP)
		return -EBUSY;

	if (!dev_valid_name(newname))
		return -EINVAL;

	if (strncmp(newname, dev->name, IFNAMSIZ) == 0)
		return 0;

	memcpy(oldname, dev->name, IFNAMSIZ);

	if (strchr(newname, '%')) {
		err = dev_alloc_name(dev, newname);
		if (err < 0)
			return err;
		strcpy(newname, dev->name);
	}
	else if (__dev_get_by_name(net, newname))
		return -EEXIST;
	else
		strlcpy(dev->name, newname, IFNAMSIZ);

rollback:
	err = device_rename(&dev->dev, dev->name);
	if (err) {
		memcpy(dev->name, oldname, IFNAMSIZ);
		return err;
	}

	write_lock_bh(&dev_base_lock);
	hlist_del(&dev->name_hlist);
	hlist_add_head(&dev->name_hlist, dev_name_hash(net, dev->name));
	write_unlock_bh(&dev_base_lock);

	ret = call_netdevice_notifiers(NETDEV_CHANGENAME, dev);
	ret = notifier_to_errno(ret);

	if (ret) {
		if (err) {
			printk(KERN_ERR
			       "%s: name change rollback failed: %d.\n",
			       dev->name, ret);
		} else {
			err = ret;
			memcpy(dev->name, oldname, IFNAMSIZ);
			goto rollback;
		}
	}

	return err;
}

/**
 *	netdev_features_change - device changes features
 *	@dev: device to cause notification
 *
 *	Called to indicate a device has changed features.
 */
void netdev_features_change(struct net_device *dev)
{
	call_netdevice_notifiers(NETDEV_FEAT_CHANGE, dev);
}
EXPORT_SYMBOL(netdev_features_change);

/**
 *	netdev_state_change - device changes state
 *	@dev: device to cause notification
 *
 *	Called to indicate a device has changed state. This function calls
 *	the notifier chains for netdev_chain and sends a NEWLINK message
 *	to the routing socket.
 */
void netdev_state_change(struct net_device *dev)
{
	if (dev->flags & IFF_UP) {
		call_netdevice_notifiers(NETDEV_CHANGE, dev);
		rtmsg_ifinfo(RTM_NEWLINK, dev, 0);
	}
}

void netdev_bonding_change(struct net_device *dev)
{
	call_netdevice_notifiers(NETDEV_BONDING_FAILOVER, dev);
}
EXPORT_SYMBOL(netdev_bonding_change);

/**
 *	dev_load - load a network module
 *	@net: the applicable net namespace
 *	@name: name of interface
 *
 *	If a network interface is not present and the process has suitable
 *	privileges this function loads the module. If module loading is not
 *	available in this kernel then it becomes a nop.
 */

void dev_load(struct net *net, const char *name)
{
	struct net_device *dev;

	read_lock(&dev_base_lock);
	dev = __dev_get_by_name(net, name);
	read_unlock(&dev_base_lock);

	if (!dev && capable(CAP_SYS_MODULE))
		request_module("%s", name);
}
/**
 *	dev_open - prepare an interface for use.
 *	@dev: device to open
 *
 *	Takes a device from down to up state. The device's private open
 *	function is invoked and then the multicast lists are loaded. Finally
 *	the device is moved into the up state and a %NETDEV_UP message is
 *	sent to the netdev notifier chain.
 *
 *	Calling this function on an active interface is a nop. On a failure
 *	a negative errno code is returned.
 */
int dev_open(struct net_device *dev)
{
	int ret = 0;

	ASSERT_RTNL();

	/*
	 *	Is it already up?
	 */
	if (dev->flags & IFF_UP)
		return 0;

	/*
	 *	Is it even present?
	 */
	if (!netif_device_present(dev))
		return -ENODEV;

	/*
	 *	Call device private open method
	 */
	set_bit(__LINK_STATE_START, &dev->state);

	if (dev->validate_addr)
		ret = dev->validate_addr(dev);

	if (!ret && dev->open)
		ret = dev->open(dev);

	/*
	 *	If it went open OK then:
	 */
	if (ret)
		clear_bit(__LINK_STATE_START, &dev->state);
	else {
		/*
		 *	Set the flags.
		 */
		dev->flags |= IFF_UP;

		/*
		 *	Initialize multicasting status
		 */
		dev_set_rx_mode(dev);

		/*
		 *	Wakeup transmit queue engine
		 */
		dev_activate(dev);

		/*
		 *	... and announce new interface.
		 */
		call_netdevice_notifiers(NETDEV_UP, dev);
	}

	return ret;
}

/**
 *	dev_close - shutdown an interface.
 *	@dev: device to shutdown
 *
 *	This function moves an active device into down state. A
 *	%NETDEV_GOING_DOWN is sent to the netdev notifier chain. The device
 *	is then deactivated and finally a %NETDEV_DOWN is sent to the notifier
 *	chain.
 */
int dev_close(struct net_device *dev)
{
	ASSERT_RTNL();

	might_sleep();

	if (!(dev->flags & IFF_UP))
		return 0;

	/*
	 *	Tell people we are going down, so that they can
	 *	prepare for death while the device is still operating.
	 */
	call_netdevice_notifiers(NETDEV_GOING_DOWN, dev);

	clear_bit(__LINK_STATE_START, &dev->state);

	/* Synchronize to scheduled poll. We cannot touch poll list,
	 * it can be even on different cpu. So just clear netif_running().
	 *
	 * dev->stop() will invoke napi_disable() on all of its
	 * napi_struct instances on this device.
	 */
	smp_mb__after_clear_bit(); /* Commit netif_running(). */

	dev_deactivate(dev);

	/*
	 *	Call the device specific close. This cannot fail.
	 *	Only if device is UP
	 *
	 *	We allow it to be called even after a DETACH hot-plug
	 *	event.
	 */
	if (dev->stop)
		dev->stop(dev);

	/*
	 *	Device is now down.
	 */
	dev->flags &= ~IFF_UP;

	/*
	 *	Tell people we are down
	 */
	call_netdevice_notifiers(NETDEV_DOWN, dev);

	return 0;
}
/**
 *	dev_disable_lro - disable Large Receive Offload on a device
 *	@dev: device
 *
 *	Disable Large Receive Offload (LRO) on a net device.  Must be
 *	called under RTNL.  This is needed if received packets may be
 *	forwarded to another interface.
 */
void dev_disable_lro(struct net_device *dev)
{
	if (dev->ethtool_ops && dev->ethtool_ops->get_flags &&
	    dev->ethtool_ops->set_flags) {
		u32 flags = dev->ethtool_ops->get_flags(dev);
		if (flags & ETH_FLAG_LRO) {
			flags &= ~ETH_FLAG_LRO;
			dev->ethtool_ops->set_flags(dev, flags);
		}
	}
	WARN_ON(dev->features & NETIF_F_LRO);
}
EXPORT_SYMBOL(dev_disable_lro);


static int dev_boot_phase = 1;

/*
 *	Device change register/unregister. These are not inline or static
 *	as we export them to the world.
 */

/**
 *	register_netdevice_notifier - register a network notifier block
 *	@nb: notifier
 *
 *	Register a notifier to be called when network device events occur.
 *	The notifier passed is linked into the kernel structures and must
 *	not be reused until it has been unregistered. A negative errno code
 *	is returned on a failure.
 *
 *	When registered, all registration and up events are replayed
 *	to the new notifier to allow the caller to have a race-free
 *	view of the network device list.
 */

int register_netdevice_notifier(struct notifier_block *nb)
{
	struct net_device *dev;
	struct net_device *last;
	struct net *net;
	int err;

	rtnl_lock();
	err = raw_notifier_chain_register(&netdev_chain, nb);
	if (err)
		goto unlock;
	if (dev_boot_phase)
		goto unlock;
	for_each_net(net) {
		for_each_netdev(net, dev) {
			err = nb->notifier_call(nb, NETDEV_REGISTER, dev);
			err = notifier_to_errno(err);
			if (err)
				goto rollback;

			if (!(dev->flags & IFF_UP))
				continue;

			nb->notifier_call(nb, NETDEV_UP, dev);
		}
	}

unlock:
	rtnl_unlock();
	return err;

rollback:
	last = dev;
	for_each_net(net) {
		for_each_netdev(net, dev) {
			if (dev == last)
				break;

			if (dev->flags & IFF_UP) {
				nb->notifier_call(nb, NETDEV_GOING_DOWN, dev);
				nb->notifier_call(nb, NETDEV_DOWN, dev);
			}
			nb->notifier_call(nb, NETDEV_UNREGISTER, dev);
		}
	}

	raw_notifier_chain_unregister(&netdev_chain, nb);
	goto unlock;
}

/**
 *	unregister_netdevice_notifier - unregister a network notifier block
 *	@nb: notifier
 *
 *	Unregister a notifier previously registered by
 *	register_netdevice_notifier(). The notifier is unlinked from the
 *	kernel structures and may then be reused. A negative errno code
 *	is returned on a failure.
 */

int unregister_netdevice_notifier(struct notifier_block *nb)
{
	int err;

	rtnl_lock();
	err = raw_notifier_chain_unregister(&netdev_chain, nb);
	rtnl_unlock();
	return err;
}

/**
 *	call_netdevice_notifiers - call all network notifier blocks
 *	@val: value passed unmodified to notifier function
 *	@dev: net_device pointer passed unmodified to notifier function
 *
 *	Call all network notifier blocks.  Parameters and return value
 *	are as for raw_notifier_call_chain().
 */

int call_netdevice_notifiers(unsigned long val, struct net_device *dev)
{
	return raw_notifier_call_chain(&netdev_chain, val, dev);
}
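
/*
 * Illustrative notifier (sketch, hypothetical names): how a subsystem
 * would watch device state through the chain maintained above.
 */
#if 0
static int example_netdev_event(struct notifier_block *nb,
				unsigned long event, void *ptr)
{
	struct net_device *dev = ptr;

	if (event == NETDEV_UP)
		printk(KERN_DEBUG "%s: device is up\n", dev->name);
	return NOTIFY_DONE;
}

static struct notifier_block example_nb = {
	.notifier_call = example_netdev_event,
};

/* register_netdevice_notifier(&example_nb) replays REGISTER/UP events */
#endif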
/* When > 0 there are consumers of rx skb time stamps */
static atomic_t netstamp_needed = ATOMIC_INIT(0);

void net_enable_timestamp(void)
{
	atomic_inc(&netstamp_needed);
}

void net_disable_timestamp(void)
{
	atomic_dec(&netstamp_needed);
}

static inline void net_timestamp(struct sk_buff *skb)
{
	if (atomic_read(&netstamp_needed))
		__net_timestamp(skb);
	else
		skb->tstamp.tv64 = 0;
}
/*
 *	Support routine. Sends outgoing frames to any network
 *	taps currently in use.
 */

static void dev_queue_xmit_nit(struct sk_buff *skb, struct net_device *dev)
{
	struct packet_type *ptype;

	net_timestamp(skb);

	rcu_read_lock();
	list_for_each_entry_rcu(ptype, &ptype_all, list) {
		/* Never send packets back to the socket
		 * they originated from - MvS (miquels@drinkel.ow.org)
		 */
		if ((ptype->dev == dev || !ptype->dev) &&
		    (ptype->af_packet_priv == NULL ||
		     (struct sock *)ptype->af_packet_priv != skb->sk)) {
			struct sk_buff *skb2 = skb_clone(skb, GFP_ATOMIC);
			if (!skb2)
				break;

			/* skb->nh should be correctly
			   set by sender, so that the second statement is
			   just protection against buggy protocols.
			 */
			skb_reset_mac_header(skb2);

			if (skb_network_header(skb2) < skb2->data ||
			    skb2->network_header > skb2->tail) {
				if (net_ratelimit())
					printk(KERN_CRIT "protocol %04x is "
					       "buggy, dev %s\n",
					       skb2->protocol, dev->name);
				skb_reset_network_header(skb2);
			}

			skb2->transport_header = skb2->network_header;
			skb2->pkt_type = PACKET_OUTGOING;
			ptype->func(skb2, skb->dev, ptype, skb->dev);
		}
	}
	rcu_read_unlock();
}


void __netif_schedule(struct netdev_queue *txq)
{
	struct net_device *dev = txq->dev;

	if (!test_and_set_bit(__LINK_STATE_SCHED, &dev->state)) {
		struct softnet_data *sd;
		unsigned long flags;

		local_irq_save(flags);
		sd = &__get_cpu_var(softnet_data);
		txq->next_sched = sd->output_queue;
		sd->output_queue = txq;
		raise_softirq_irqoff(NET_TX_SOFTIRQ);
		local_irq_restore(flags);
	}
}
EXPORT_SYMBOL(__netif_schedule);

void dev_kfree_skb_irq(struct sk_buff *skb)
{
	if (atomic_dec_and_test(&skb->users)) {
		struct softnet_data *sd;
		unsigned long flags;

		local_irq_save(flags);
		sd = &__get_cpu_var(softnet_data);
		skb->next = sd->completion_queue;
		sd->completion_queue = skb;
		raise_softirq_irqoff(NET_TX_SOFTIRQ);
		local_irq_restore(flags);
	}
}
EXPORT_SYMBOL(dev_kfree_skb_irq);

void dev_kfree_skb_any(struct sk_buff *skb)
{
	if (in_irq() || irqs_disabled())
		dev_kfree_skb_irq(skb);
	else
		dev_kfree_skb(skb);
}
EXPORT_SYMBOL(dev_kfree_skb_any);
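
/*
 * Illustrative caller (sketch, hypothetical driver code): a TX-complete
 * interrupt handler must not call dev_kfree_skb() directly; it frees
 * through the softirq completion queue instead.
 */
#if 0
static irqreturn_t example_tx_irq(int irq, void *dev_id)
{
	struct sk_buff *skb = NULL;	/* the skb whose DMA just completed */

	dev_kfree_skb_irq(skb);		/* queued, freed in net_tx_action() */
	return IRQ_HANDLED;
}
#endif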
/**
 * netif_device_detach - mark device as removed
 * @dev: network device
 *
 * Mark device as removed from system and therefore no longer available.
 */
void netif_device_detach(struct net_device *dev)
{
	if (test_and_clear_bit(__LINK_STATE_PRESENT, &dev->state) &&
	    netif_running(dev)) {
		netif_stop_queue(dev);
	}
}
EXPORT_SYMBOL(netif_device_detach);

/**
 * netif_device_attach - mark device as attached
 * @dev: network device
 *
 * Mark device as attached to the system and restart if needed.
 */
void netif_device_attach(struct net_device *dev)
{
	if (!test_and_set_bit(__LINK_STATE_PRESENT, &dev->state) &&
	    netif_running(dev)) {
		netif_wake_queue(dev);
		__netdev_watchdog_up(dev);
	}
}
EXPORT_SYMBOL(netif_device_attach);

static bool can_checksum_protocol(unsigned long features, __be16 protocol)
{
	return ((features & NETIF_F_GEN_CSUM) ||
		((features & NETIF_F_IP_CSUM) &&
		 protocol == htons(ETH_P_IP)) ||
		((features & NETIF_F_IPV6_CSUM) &&
		 protocol == htons(ETH_P_IPV6)));
}

static bool dev_can_checksum(struct net_device *dev, struct sk_buff *skb)
{
	if (can_checksum_protocol(dev->features, skb->protocol))
		return true;

	if (skb->protocol == htons(ETH_P_8021Q)) {
		struct vlan_ethhdr *veh = (struct vlan_ethhdr *)skb->data;
		if (can_checksum_protocol(dev->features & dev->vlan_features,
					  veh->h_vlan_encapsulated_proto))
			return true;
	}

	return false;
}

/*
 * Invalidate hardware checksum when packet is to be mangled, and
 * complete checksum manually on outgoing path.
 */
int skb_checksum_help(struct sk_buff *skb)
{
	__wsum csum;
	int ret = 0, offset;

	if (skb->ip_summed == CHECKSUM_COMPLETE)
		goto out_set_summed;

	if (unlikely(skb_shinfo(skb)->gso_size)) {
		/* Let GSO fix up the checksum. */
		goto out_set_summed;
	}

	offset = skb->csum_start - skb_headroom(skb);
	BUG_ON(offset >= skb_headlen(skb));
	csum = skb_checksum(skb, offset, skb->len - offset, 0);

	offset += skb->csum_offset;
	BUG_ON(offset + sizeof(__sum16) > skb_headlen(skb));

	if (skb_cloned(skb) &&
	    !skb_clone_writable(skb, offset + sizeof(__sum16))) {
		ret = pskb_expand_head(skb, 0, 0, GFP_ATOMIC);
		if (ret)
			goto out;
	}

	*(__sum16 *)(skb->data + offset) = csum_fold(csum);
out_set_summed:
	skb->ip_summed = CHECKSUM_NONE;
out:
	return ret;
}
/**
 *	skb_gso_segment - Perform segmentation on skb.
 *	@skb: buffer to segment
 *	@features: features for the output path (see dev->features)
 *
 *	This function segments the given skb and returns a list of segments.
 *
 *	It may return NULL if the skb requires no segmentation.  This is
 *	only possible when GSO is used for verifying header integrity.
 */
struct sk_buff *skb_gso_segment(struct sk_buff *skb, int features)
{
	struct sk_buff *segs = ERR_PTR(-EPROTONOSUPPORT);
	struct packet_type *ptype;
	__be16 type = skb->protocol;
	int err;

	BUG_ON(skb_shinfo(skb)->frag_list);

	skb_reset_mac_header(skb);
	skb->mac_len = skb->network_header - skb->mac_header;
	__skb_pull(skb, skb->mac_len);

	if (WARN_ON(skb->ip_summed != CHECKSUM_PARTIAL)) {
		if (skb_header_cloned(skb) &&
		    (err = pskb_expand_head(skb, 0, 0, GFP_ATOMIC)))
			return ERR_PTR(err);
	}

	rcu_read_lock();
	list_for_each_entry_rcu(ptype,
			&ptype_base[ntohs(type) & PTYPE_HASH_MASK], list) {
		if (ptype->type == type && !ptype->dev && ptype->gso_segment) {
			if (unlikely(skb->ip_summed != CHECKSUM_PARTIAL)) {
				err = ptype->gso_send_check(skb);
				segs = ERR_PTR(err);
				if (err || skb_gso_ok(skb, features))
					break;
				__skb_push(skb, (skb->data -
						 skb_network_header(skb)));
			}
			segs = ptype->gso_segment(skb, features);
			break;
		}
	}
	rcu_read_unlock();

	__skb_push(skb, skb->data - skb_mac_header(skb));

	return segs;
}

EXPORT_SYMBOL(skb_gso_segment);

/* Take action when hardware reception checksum errors are detected. */
#ifdef CONFIG_BUG
void netdev_rx_csum_fault(struct net_device *dev)
{
	if (net_ratelimit()) {
		printk(KERN_ERR "%s: hw csum failure.\n",
			dev ? dev->name : "<unknown>");
		dump_stack();
	}
}
EXPORT_SYMBOL(netdev_rx_csum_fault);
#endif

/* Actually, we should eliminate this check as soon as we know, that:
 * 1. IOMMU is present and allows to map all the memory.
 * 2. No high memory really exists on this machine.
 */

static inline int illegal_highdma(struct net_device *dev, struct sk_buff *skb)
{
#ifdef CONFIG_HIGHMEM
	int i;

	if (dev->features & NETIF_F_HIGHDMA)
		return 0;

	for (i = 0; i < skb_shinfo(skb)->nr_frags; i++)
		if (PageHighMem(skb_shinfo(skb)->frags[i].page))
			return 1;

#endif
	return 0;
}

struct dev_gso_cb {
	void (*destructor)(struct sk_buff *skb);
};

#define DEV_GSO_CB(skb) ((struct dev_gso_cb *)(skb)->cb)

static void dev_gso_skb_destructor(struct sk_buff *skb)
{
	struct dev_gso_cb *cb;

	do {
		struct sk_buff *nskb = skb->next;

		skb->next = nskb->next;
		nskb->next = NULL;
		kfree_skb(nskb);
	} while (skb->next);

	cb = DEV_GSO_CB(skb);
	if (cb->destructor)
		cb->destructor(skb);
}

/**
 *	dev_gso_segment - Perform emulated hardware segmentation on skb.
 *	@skb: buffer to segment
 *
 *	This function segments the given skb and stores the list of segments
 *	in skb->next.
 */
static int dev_gso_segment(struct sk_buff *skb)
{
	struct net_device *dev = skb->dev;
	struct sk_buff *segs;
	int features = dev->features & ~(illegal_highdma(dev, skb) ?
					 NETIF_F_SG : 0);

	segs = skb_gso_segment(skb, features);

	/* Verifying header integrity only. */
	if (!segs)
		return 0;

	if (IS_ERR(segs))
		return PTR_ERR(segs);

	skb->next = segs;
	DEV_GSO_CB(skb)->destructor = skb->destructor;
	skb->destructor = dev_gso_skb_destructor;

	return 0;
}
int dev_hard_start_xmit(struct sk_buff *skb, struct net_device *dev)
{
	if (likely(!skb->next)) {
		if (!list_empty(&ptype_all))
			dev_queue_xmit_nit(skb, dev);

		if (netif_needs_gso(dev, skb)) {
			if (unlikely(dev_gso_segment(skb)))
				goto out_kfree_skb;
			if (skb->next)
				goto gso;
		}

		return dev->hard_start_xmit(skb, dev);
	}

gso:
	do {
		struct sk_buff *nskb = skb->next;
		int rc;

		skb->next = nskb->next;
		nskb->next = NULL;
		rc = dev->hard_start_xmit(nskb, dev);
		if (unlikely(rc)) {
			nskb->next = skb->next;
			skb->next = nskb;
			return rc;
		}
		if (unlikely((netif_queue_stopped(dev) ||
			      netif_subqueue_stopped(dev, skb)) &&
			      skb->next))
			return NETDEV_TX_BUSY;
	} while (skb->next);

	skb->destructor = DEV_GSO_CB(skb)->destructor;

out_kfree_skb:
	kfree_skb(skb);
	return 0;
}

/**
 *	dev_queue_xmit - transmit a buffer
 *	@skb: buffer to transmit
 *
 *	Queue a buffer for transmission to a network device. The caller must
 *	have set the device and priority and built the buffer before calling
 *	this function. The function can be called from an interrupt.
 *
 *	A negative errno code is returned on a failure. A success does not
 *	guarantee the frame will be transmitted as it may be dropped due
 *	to congestion or traffic shaping.
 *
 * -----------------------------------------------------------------------------------
 *      I notice this method can also return errors from the queue disciplines,
 *      including NET_XMIT_DROP, which is a positive value.  So, errors can also
 *      be positive.
 *
 *      Regardless of the return value, the skb is consumed, so it is currently
 *      difficult to retry a send to this method.  (You can bump the ref count
 *      before sending to hold a reference for retry if you are careful.)
 *
 *      When calling this method, interrupts MUST be enabled.  This is because
 *      the BH enable code must have IRQs enabled so that it will not deadlock.
 *          --BLG
 */

static struct netdev_queue *dev_pick_tx(struct net_device *dev,
					struct sk_buff *skb)
{
	return netdev_get_tx_queue(dev, 0);
}
int dev_queue_xmit(struct sk_buff *skb)
{
	struct net_device *dev = skb->dev;
	struct netdev_queue *txq;
	struct Qdisc *q;
	int rc = -ENOMEM;

	/* GSO will handle the following emulations directly. */
	if (netif_needs_gso(dev, skb))
		goto gso;

	if (skb_shinfo(skb)->frag_list &&
	    !(dev->features & NETIF_F_FRAGLIST) &&
	    __skb_linearize(skb))
		goto out_kfree_skb;

	/* Fragmented skb is linearized if device does not support SG,
	 * or if at least one of fragments is in highmem and device
	 * does not support DMA from it.
	 */
	if (skb_shinfo(skb)->nr_frags &&
	    (!(dev->features & NETIF_F_SG) || illegal_highdma(dev, skb)) &&
	    __skb_linearize(skb))
		goto out_kfree_skb;

	/* If packet is not checksummed and device does not support
	 * checksumming for this protocol, complete checksumming here.
	 */
	if (skb->ip_summed == CHECKSUM_PARTIAL) {
		skb_set_transport_header(skb, skb->csum_start -
					      skb_headroom(skb));
		if (!dev_can_checksum(dev, skb) && skb_checksum_help(skb))
			goto out_kfree_skb;
	}

gso:
	txq = dev_pick_tx(dev, skb);
	spin_lock_prefetch(&txq->lock);

	/* Disable soft irqs for various locks below. Also
	 * stops preemption for RCU.
	 */
	rcu_read_lock_bh();

	/* Updates of qdisc are serialized by queue->lock.
	 * The struct Qdisc which is pointed to by qdisc is now a
	 * rcu structure - it may be accessed without acquiring
	 * a lock (but the structure may be stale.) The freeing of the
	 * qdisc will be deferred until it's known that there are no
	 * more references to it.
	 *
	 * If the qdisc has an enqueue function, we still need to
	 * hold the queue->lock before calling it, since queue->lock
	 * also serializes access to the device queue.
	 */
	q = rcu_dereference(txq->qdisc);
#ifdef CONFIG_NET_CLS_ACT
	skb->tc_verd = SET_TC_AT(skb->tc_verd, AT_EGRESS);
#endif
	if (q->enqueue) {
		/* Grab device queue */
		spin_lock(&txq->lock);
		q = txq->qdisc;
		if (q->enqueue) {
			/* reset queue_mapping to zero */
			skb_set_queue_mapping(skb, 0);
			rc = q->enqueue(skb, q);
			qdisc_run(txq);
			spin_unlock(&txq->lock);

			rc = rc == NET_XMIT_BYPASS ? NET_XMIT_SUCCESS : rc;
			goto out;
		}
		spin_unlock(&txq->lock);
	}

	/* The device has no queue. Common case for software devices:
	   loopback, all the sorts of tunnels...

	   Really, it is unlikely that netif_tx_lock protection is necessary
	   here.  (f.e. loopback and IP tunnels are clean ignoring statistics
	   counters.)
	   However, it is possible, that they rely on protection
	   made by us here.

	   Check this and shoot the lock. It is not prone to deadlocks.
	   Either shoot the noqueue qdisc, it is even simpler 8)
	 */
	if (dev->flags & IFF_UP) {
		int cpu = smp_processor_id(); /* ok because BHs are off */

		if (txq->xmit_lock_owner != cpu) {

			HARD_TX_LOCK(dev, txq, cpu);

			if (!netif_queue_stopped(dev) &&
			    !netif_subqueue_stopped(dev, skb)) {
				rc = 0;
				if (!dev_hard_start_xmit(skb, dev)) {
					HARD_TX_UNLOCK(dev, txq);
					goto out;
				}
			}
			HARD_TX_UNLOCK(dev, txq);
			if (net_ratelimit())
				printk(KERN_CRIT "Virtual device %s asks to "
				       "queue packet!\n", dev->name);
		} else {
			/* Recursion is detected! It is possible,
			 * unfortunately */
			if (net_ratelimit())
				printk(KERN_CRIT "Dead loop on virtual device "
				       "%s, fix it urgently!\n", dev->name);
		}
	}

	rc = -ENETDOWN;
	rcu_read_unlock_bh();

out_kfree_skb:
	kfree_skb(skb);
	return rc;
out:
	rcu_read_unlock_bh();
	return rc;
}
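
/*
 * Illustrative caller (sketch, hypothetical values): a minimal transmit
 * of a frame from kernel code.  Error handling and header construction
 * are elided; dev_queue_xmit() always consumes the skb.
 */
#if 0
	struct net_device *dev = dev_get_by_name(&init_net, "eth0");
	struct sk_buff *skb = alloc_skb(ETH_FRAME_LEN, GFP_ATOMIC);

	if (dev && skb) {
		skb->dev = dev;
		skb->protocol = htons(ETH_P_IP);
		/* ... build headers and payload, set skb->ip_summed ... */
		dev_queue_xmit(skb);	/* consumes the skb */
	}
	if (dev)
		dev_put(dev);
#endif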
/*=======================================================================
			Receiver routines
  =======================================================================*/

int netdev_max_backlog __read_mostly = 1000;
int netdev_budget __read_mostly = 300;
int weight_p __read_mostly = 64;	/* old backlog weight */

DEFINE_PER_CPU(struct netif_rx_stats, netdev_rx_stat) = { 0, };


/**
 *	netif_rx	-	post buffer to the network code
 *	@skb: buffer to post
 *
 *	This function receives a packet from a device driver and queues it for
 *	the upper (protocol) levels to process.  It always succeeds. The buffer
 *	may be dropped during processing for congestion control or by the
 *	protocol layers.
 *
 *	return values:
 *	NET_RX_SUCCESS	(no congestion)
 *	NET_RX_DROP     (packet was dropped)
 *
 */

int netif_rx(struct sk_buff *skb)
{
	struct softnet_data *queue;
	unsigned long flags;

	/* if netpoll wants it, pretend we never saw it */
	if (netpoll_rx(skb))
		return NET_RX_DROP;

	if (!skb->tstamp.tv64)
		net_timestamp(skb);

	/*
	 * The code is rearranged so that the path is shortest
	 * when the CPU is congested, but is still operating.
	 */
	local_irq_save(flags);
	queue = &__get_cpu_var(softnet_data);

	__get_cpu_var(netdev_rx_stat).total++;
	if (queue->input_pkt_queue.qlen <= netdev_max_backlog) {
		if (queue->input_pkt_queue.qlen) {
enqueue:
			__skb_queue_tail(&queue->input_pkt_queue, skb);
			local_irq_restore(flags);
			return NET_RX_SUCCESS;
		}

		napi_schedule(&queue->backlog);
		goto enqueue;
	}

	__get_cpu_var(netdev_rx_stat).dropped++;
	local_irq_restore(flags);

	kfree_skb(skb);
	return NET_RX_DROP;
}

int netif_rx_ni(struct sk_buff *skb)
{
	int err;

	preempt_disable();
	err = netif_rx(skb);
	if (local_softirq_pending())
		do_softirq();
	preempt_enable();

	return err;
}

EXPORT_SYMBOL(netif_rx_ni);
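
/*
 * Illustrative caller (sketch, hypothetical driver code): a non-NAPI
 * driver's receive interrupt hands each frame to netif_rx().
 */
#if 0
	skb->protocol = eth_type_trans(skb, dev);	/* also sets skb->dev */
	netif_rx(skb);					/* queue for softirq */
#endif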
static inline struct net_device *skb_bond(struct sk_buff *skb)
{
	struct net_device *dev = skb->dev;

	if (dev->master) {
		if (skb_bond_should_drop(skb)) {
			kfree_skb(skb);
			return NULL;
		}
		skb->dev = dev->master;
	}

	return dev;
}


static void net_tx_action(struct softirq_action *h)
{
	struct softnet_data *sd = &__get_cpu_var(softnet_data);

	if (sd->completion_queue) {
		struct sk_buff *clist;

		local_irq_disable();
		clist = sd->completion_queue;
		sd->completion_queue = NULL;
		local_irq_enable();

		while (clist) {
			struct sk_buff *skb = clist;
			clist = clist->next;

			BUG_TRAP(!atomic_read(&skb->users));
			__kfree_skb(skb);
		}
	}

	if (sd->output_queue) {
		struct netdev_queue *head;

		local_irq_disable();
		head = sd->output_queue;
		sd->output_queue = NULL;
		local_irq_enable();

		while (head) {
			struct netdev_queue *txq = head;
			struct net_device *dev = txq->dev;
			head = head->next_sched;

			smp_mb__before_clear_bit();
			clear_bit(__LINK_STATE_SCHED, &dev->state);

			if (spin_trylock(&txq->lock)) {
				qdisc_run(txq);
				spin_unlock(&txq->lock);
			} else {
				netif_schedule_queue(txq);
			}
		}
	}
}

static inline int deliver_skb(struct sk_buff *skb,
			      struct packet_type *pt_prev,
			      struct net_device *orig_dev)
{
	atomic_inc(&skb->users);
	return pt_prev->func(skb, skb->dev, pt_prev, orig_dev);
}

#if defined(CONFIG_BRIDGE) || defined (CONFIG_BRIDGE_MODULE)
/* These hooks defined here for ATM */
struct net_bridge_fdb_entry *(*br_fdb_get_hook)(struct net_bridge *br,
						unsigned char *addr);
void (*br_fdb_put_hook)(struct net_bridge_fdb_entry *ent) __read_mostly;

/*
 * If bridge module is loaded call bridging hook.
 *  returns NULL if packet was consumed.
 */
struct sk_buff *(*br_handle_frame_hook)(struct net_bridge_port *p,
					struct sk_buff *skb) __read_mostly;
static inline struct sk_buff *handle_bridge(struct sk_buff *skb,
					    struct packet_type **pt_prev, int *ret,
					    struct net_device *orig_dev)
{
	struct net_bridge_port *port;

	if (skb->pkt_type == PACKET_LOOPBACK ||
	    (port = rcu_dereference(skb->dev->br_port)) == NULL)
		return skb;

	if (*pt_prev) {
		*ret = deliver_skb(skb, *pt_prev, orig_dev);
		*pt_prev = NULL;
	}

	return br_handle_frame_hook(port, skb);
}
#else
#define handle_bridge(skb, pt_prev, ret, orig_dev)	(skb)
#endif

#if defined(CONFIG_MACVLAN) || defined(CONFIG_MACVLAN_MODULE)
struct sk_buff *(*macvlan_handle_frame_hook)(struct sk_buff *skb) __read_mostly;
EXPORT_SYMBOL_GPL(macvlan_handle_frame_hook);

static inline struct sk_buff *handle_macvlan(struct sk_buff *skb,
					     struct packet_type **pt_prev,
					     int *ret,
					     struct net_device *orig_dev)
{
	if (skb->dev->macvlan_port == NULL)
		return skb;

	if (*pt_prev) {
		*ret = deliver_skb(skb, *pt_prev, orig_dev);
		*pt_prev = NULL;
	}
	return macvlan_handle_frame_hook(skb);
}
#else
#define handle_macvlan(skb, pt_prev, ret, orig_dev)	(skb)
#endif
#ifdef CONFIG_NET_CLS_ACT
/* TODO: Maybe we should just force sch_ingress to be compiled in
 * when CONFIG_NET_CLS_ACT is?  Otherwise we pay for some useless
 * instructions (a compare and 2 stores) right now if we don't have it
 * compiled in but do have CONFIG_NET_CLS_ACT.
 * NOTE: This doesn't stop any functionality; if you don't have
 * the ingress scheduler, you just can't add policies on ingress.
 *
 */
static int ing_filter(struct sk_buff *skb)
{
	struct net_device *dev = skb->dev;
	u32 ttl = G_TC_RTTL(skb->tc_verd);
	struct netdev_queue *rxq;
	int result = TC_ACT_OK;
	struct Qdisc *q;

	if (MAX_RED_LOOP < ttl++) {
		printk(KERN_WARNING
		       "Redir loop detected Dropping packet (%d->%d)\n",
		       skb->iif, dev->ifindex);
		return TC_ACT_SHOT;
	}

	skb->tc_verd = SET_TC_RTTL(skb->tc_verd, ttl);
	skb->tc_verd = SET_TC_AT(skb->tc_verd, AT_INGRESS);

	rxq = &dev->rx_queue;

	spin_lock(&rxq->lock);
	if ((q = rxq->qdisc) != NULL)
		result = q->enqueue(skb, q);
	spin_unlock(&rxq->lock);

	return result;
}

static inline struct sk_buff *handle_ing(struct sk_buff *skb,
					 struct packet_type **pt_prev,
					 int *ret, struct net_device *orig_dev)
{
	if (!skb->dev->rx_queue.qdisc)
		goto out;

	if (*pt_prev) {
		*ret = deliver_skb(skb, *pt_prev, orig_dev);
		*pt_prev = NULL;
	} else {
		/* Huh? Why does turning on AF_PACKET affect this? */
		skb->tc_verd = SET_TC_OK2MUNGE(skb->tc_verd);
	}

	switch (ing_filter(skb)) {
	case TC_ACT_SHOT:
	case TC_ACT_STOLEN:
		kfree_skb(skb);
		return NULL;
	}

out:
	skb->tc_verd = 0;
	return skb;
}
#endif

/**
 *	netif_nit_deliver - deliver received packets to network taps
 *	@skb: buffer
 *
 *	This function is used to deliver incoming packets to network
 *	taps. It should be used when the normal netif_receive_skb path
 *	is bypassed, for example because of VLAN acceleration.
 */
void netif_nit_deliver(struct sk_buff *skb)
{
	struct packet_type *ptype;

	if (list_empty(&ptype_all))
		return;

	skb_reset_network_header(skb);
	skb_reset_transport_header(skb);
	skb->mac_len = skb->network_header - skb->mac_header;

	rcu_read_lock();
	list_for_each_entry_rcu(ptype, &ptype_all, list) {
		if (!ptype->dev || ptype->dev == skb->dev)
			deliver_skb(skb, ptype, skb->dev);
	}
	rcu_read_unlock();
}
/**
 *	netif_receive_skb - process receive buffer from network
 *	@skb: buffer to process
 *
 *	netif_receive_skb() is the main receive data processing function.
 *	It always succeeds. The buffer may be dropped during processing
 *	for congestion control or by the protocol layers.
 *
 *	This function may only be called from softirq context and interrupts
 *	should be enabled.
 *
 *	Return values (usually ignored):
 *	NET_RX_SUCCESS: no congestion
 *	NET_RX_DROP: packet was dropped
 */
int netif_receive_skb(struct sk_buff *skb)
{
	struct packet_type *ptype, *pt_prev;
	struct net_device *orig_dev;
	int ret = NET_RX_DROP;
	__be16 type;

	/* if we've gotten here through NAPI, check netpoll */
	if (netpoll_receive_skb(skb))
		return NET_RX_DROP;

	if (!skb->tstamp.tv64)
		net_timestamp(skb);

	if (!skb->iif)
		skb->iif = skb->dev->ifindex;

	orig_dev = skb_bond(skb);

	if (!orig_dev)
		return NET_RX_DROP;

	__get_cpu_var(netdev_rx_stat).total++;

	skb_reset_network_header(skb);
	skb_reset_transport_header(skb);
	skb->mac_len = skb->network_header - skb->mac_header;

	pt_prev = NULL;

	rcu_read_lock();

	/* Don't receive packets in an exiting network namespace */
	if (!net_alive(dev_net(skb->dev)))
		goto out;

#ifdef CONFIG_NET_CLS_ACT
	if (skb->tc_verd & TC_NCLS) {
		skb->tc_verd = CLR_TC_NCLS(skb->tc_verd);
		goto ncls;
	}
#endif

	list_for_each_entry_rcu(ptype, &ptype_all, list) {
		if (!ptype->dev || ptype->dev == skb->dev) {
			if (pt_prev)
				ret = deliver_skb(skb, pt_prev, orig_dev);
			pt_prev = ptype;
		}
	}

#ifdef CONFIG_NET_CLS_ACT
	skb = handle_ing(skb, &pt_prev, &ret, orig_dev);
	if (!skb)
		goto out;
ncls:
#endif

	skb = handle_bridge(skb, &pt_prev, &ret, orig_dev);
	if (!skb)
		goto out;
	skb = handle_macvlan(skb, &pt_prev, &ret, orig_dev);
	if (!skb)
		goto out;

	type = skb->protocol;
	list_for_each_entry_rcu(ptype,
			&ptype_base[ntohs(type) & PTYPE_HASH_MASK], list) {
		if (ptype->type == type &&
		    (!ptype->dev || ptype->dev == skb->dev)) {
			if (pt_prev)
				ret = deliver_skb(skb, pt_prev, orig_dev);
			pt_prev = ptype;
		}
	}

	if (pt_prev) {
		ret = pt_prev->func(skb, skb->dev, pt_prev, orig_dev);
	} else {
		kfree_skb(skb);
		/* Jamal, now you will not be able to escape explaining
		 * to me how you were going to use this. :-)
		 */
		ret = NET_RX_DROP;
	}

out:
	rcu_read_unlock();
	return ret;
}

static int process_backlog(struct napi_struct *napi, int quota)
{
	int work = 0;
	struct softnet_data *queue = &__get_cpu_var(softnet_data);
	unsigned long start_time = jiffies;

	napi->weight = weight_p;
	do {
		struct sk_buff *skb;
		struct net_device *dev;

		local_irq_disable();
		skb = __skb_dequeue(&queue->input_pkt_queue);
		if (!skb) {
			__napi_complete(napi);
			local_irq_enable();
			break;
		}
		local_irq_enable();

		dev = skb->dev;

		netif_receive_skb(skb);

		dev_put(dev);
	} while (++work < quota && jiffies == start_time);

	return work;
}
/**
 * __napi_schedule - schedule for receive
 * @n: entry to schedule
 *
 * The entry's receive function will be scheduled to run.
 */
void __napi_schedule(struct napi_struct *n)
{
	unsigned long flags;

	local_irq_save(flags);
	list_add_tail(&n->poll_list, &__get_cpu_var(softnet_data).poll_list);
	__raise_softirq_irqoff(NET_RX_SOFTIRQ);
	local_irq_restore(flags);
}
EXPORT_SYMBOL(__napi_schedule);
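
/*
 * Illustrative NAPI driver shape (sketch, hypothetical names): the IRQ
 * handler schedules the NAPI instance; the poll callback later completes
 * it once work drops below the budget.
 */
#if 0
struct example_priv {
	struct napi_struct napi;
	/* ... device state ... */
};

static irqreturn_t example_irq(int irq, void *dev_id)
{
	struct example_priv *priv = dev_id;

	/* ... mask the device's RX interrupts ... */
	napi_schedule(&priv->napi);
	return IRQ_HANDLED;
}

static int example_poll(struct napi_struct *napi, int budget)
{
	int work_done = 0;

	/* ... receive up to budget packets via netif_receive_skb() ... */
	if (work_done < budget) {
		napi_complete(napi);
		/* ... re-enable the device's RX interrupts ... */
	}
	return work_done;
}
#endif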
static void net_rx_action(struct softirq_action *h)
{
	struct list_head *list = &__get_cpu_var(softnet_data).poll_list;
	unsigned long start_time = jiffies;
	int budget = netdev_budget;
	void *have;

	local_irq_disable();

	while (!list_empty(list)) {
		struct napi_struct *n;
		int work, weight;

		/* If softirq window is exhausted then punt.
		 *
		 * Note that this is a slight policy change from the
		 * previous NAPI code, which would allow up to 2
		 * jiffies to pass before breaking out.  The test
		 * used to be "jiffies - start_time > 1".
		 */
		if (unlikely(budget <= 0 || jiffies != start_time))
			goto softnet_break;

		local_irq_enable();

		/* Even though interrupts have been re-enabled, this
		 * access is safe because interrupts can only add new
		 * entries to the tail of this list, and only ->poll()
		 * calls can remove this head entry from the list.
		 */
		n = list_entry(list->next, struct napi_struct, poll_list);

		have = netpoll_poll_lock(n);

		weight = n->weight;

		/* This NAPI_STATE_SCHED test is for avoiding a race
		 * with netpoll's poll_napi().  Only the entity which
		 * obtains the lock and sees NAPI_STATE_SCHED set will
		 * actually make the ->poll() call.  Therefore we avoid
		 * accidentally calling ->poll() when NAPI is not scheduled.
		 */
		work = 0;
		if (test_bit(NAPI_STATE_SCHED, &n->state))
			work = n->poll(n, weight);

		WARN_ON_ONCE(work > weight);

		budget -= work;

		local_irq_disable();

		/* Drivers must not modify the NAPI state if they
		 * consume the entire weight.  In such cases this code
		 * still "owns" the NAPI instance and therefore can
		 * move the instance around on the list at-will.
		 */
		if (unlikely(work == weight)) {
			if (unlikely(napi_disable_pending(n)))
				__napi_complete(n);
			else
				list_move_tail(&n->poll_list, list);
		}

		netpoll_poll_unlock(have);
	}
out:
	local_irq_enable();

#ifdef CONFIG_NET_DMA
	/*
	 * There may not be any more sk_buffs coming right now, so push
	 * any pending DMA copies to hardware
	 */
	if (!cpus_empty(net_dma.channel_mask)) {
		int chan_idx;
		for_each_cpu_mask(chan_idx, net_dma.channel_mask) {
			struct dma_chan *chan = net_dma.channels[chan_idx];
			if (chan)
				dma_async_memcpy_issue_pending(chan);
		}
	}
#endif

	return;

softnet_break:
	__get_cpu_var(netdev_rx_stat).time_squeeze++;
	__raise_softirq_irqoff(NET_RX_SOFTIRQ);
	goto out;
}
static gifconf_func_t * gifconf_list [NPROTO];

/**
 *	register_gifconf - register a SIOCGIF handler
 *	@family: Address family
 *	@gifconf: Function handler
 *
 *	Register protocol dependent address dumping routines. The handler
 *	that is passed must not be freed or reused until it has been replaced
 *	by another handler.
 */
int register_gifconf(unsigned int family, gifconf_func_t * gifconf)
{
	if (family >= NPROTO)
		return -EINVAL;
	gifconf_list[family] = gifconf;
	return 0;
}


/*
 *	Map an interface index to its name (SIOCGIFNAME)
 */

/*
 *	We need this ioctl for efficient implementation of the
 *	if_indextoname() function required by the IPv6 API.  Without
 *	it, we would have to search all the interfaces to find a
 *	match.  --pb
 */

static int dev_ifname(struct net *net, struct ifreq __user *arg)
{
	struct net_device *dev;
	struct ifreq ifr;

	/*
	 *	Fetch the caller's info block.
	 */

	if (copy_from_user(&ifr, arg, sizeof(struct ifreq)))
		return -EFAULT;

	read_lock(&dev_base_lock);
	dev = __dev_get_by_index(net, ifr.ifr_ifindex);
	if (!dev) {
		read_unlock(&dev_base_lock);
		return -ENODEV;
	}

	strcpy(ifr.ifr_name, dev->name);
	read_unlock(&dev_base_lock);

	if (copy_to_user(arg, &ifr, sizeof(struct ifreq)))
		return -EFAULT;
	return 0;
}

/*
 *	Perform a SIOCGIFCONF call. This structure will change
 *	size eventually, and there is nothing I can do about it.
 *	Thus we will need a 'compatibility mode'.
 */

static int dev_ifconf(struct net *net, char __user *arg)
{
	struct ifconf ifc;
	struct net_device *dev;
	char __user *pos;
	int len;
	int total;
	int i;

	/*
	 *	Fetch the caller's info block.
	 */

	if (copy_from_user(&ifc, arg, sizeof(struct ifconf)))
		return -EFAULT;

	pos = ifc.ifc_buf;
	len = ifc.ifc_len;

	/*
	 *	Loop over the interfaces, and write an info block for each.
	 */

	total = 0;
	for_each_netdev(net, dev) {
		for (i = 0; i < NPROTO; i++) {
			if (gifconf_list[i]) {
				int done;
				if (!pos)
					done = gifconf_list[i](dev, NULL, 0);
				else
					done = gifconf_list[i](dev, pos + total,
							       len - total);
				if (done < 0)
					return -EFAULT;
				total += done;
			}
		}
	}

	/*
	 *	All done.  Write the updated control block back to the caller.
	 */
	ifc.ifc_len = total;

	/*
	 *	Both BSD and Solaris return 0 here, so we do too.
	 */
	return copy_to_user(arg, &ifc, sizeof(struct ifconf)) ? -EFAULT : 0;
}
2466 #ifdef CONFIG_PROC_FS
2468 * This is invoked by the /proc filesystem handler to display a device in detail.
2471 void *dev_seq_start(struct seq_file *seq, loff_t *pos)
2472 __acquires(dev_base_lock)
2474 struct net *net = seq_file_net(seq);
2476 struct net_device *dev;
2478 read_lock(&dev_base_lock);
2480 return SEQ_START_TOKEN;
2483 for_each_netdev(net, dev)
2490 void *dev_seq_next(struct seq_file *seq, void *v, loff_t *pos)
2492 struct net *net = seq_file_net(seq);
2494 return v == SEQ_START_TOKEN ?
2495 first_net_device(net) : next_net_device((struct net_device *)v);
2498 void dev_seq_stop(struct seq_file *seq, void *v)
2499 __releases(dev_base_lock)
2501 read_unlock(&dev_base_lock);
2504 static void dev_seq_printf_stats(struct seq_file *seq, struct net_device *dev)
2506 struct net_device_stats *stats = dev->get_stats(dev);
2508 seq_printf(seq, "%6s:%8lu %7lu %4lu %4lu %4lu %5lu %10lu %9lu "
2509 "%8lu %7lu %4lu %4lu %4lu %5lu %7lu %10lu\n",
2510 dev->name, stats->rx_bytes, stats->rx_packets,
2512 stats->rx_dropped + stats->rx_missed_errors,
2513 stats->rx_fifo_errors,
2514 stats->rx_length_errors + stats->rx_over_errors +
2515 stats->rx_crc_errors + stats->rx_frame_errors,
2516 stats->rx_compressed, stats->multicast,
2517 stats->tx_bytes, stats->tx_packets,
2518 stats->tx_errors, stats->tx_dropped,
2519 stats->tx_fifo_errors, stats->collisions,
2520 stats->tx_carrier_errors +
2521 stats->tx_aborted_errors +
2522 stats->tx_window_errors +
2523 stats->tx_heartbeat_errors,
2524 stats->tx_compressed);
2528 * Called from the PROCfs module. This now uses the new arbitrary sized
2529 * /proc/net interface to create /proc/net/dev
2531 static int dev_seq_show(struct seq_file *seq, void *v)
2533 if (v == SEQ_START_TOKEN)
2534 seq_puts(seq, "Inter-| Receive "
2536 " face |bytes packets errs drop fifo frame "
2537 "compressed multicast|bytes packets errs "
2538 "drop fifo colls carrier compressed\n");
2540 dev_seq_printf_stats(seq, v);
2544 static struct netif_rx_stats *softnet_get_online(loff_t *pos)
2546 struct netif_rx_stats *rc = NULL;
2548 while (*pos < nr_cpu_ids)
2549 if (cpu_online(*pos)) {
2550 rc = &per_cpu(netdev_rx_stat, *pos);
2557 static void *softnet_seq_start(struct seq_file *seq, loff_t *pos)
2559 return softnet_get_online(pos);
2562 static void *softnet_seq_next(struct seq_file *seq, void *v, loff_t *pos)
2565 return softnet_get_online(pos);
2568 static void softnet_seq_stop(struct seq_file *seq, void *v)
2572 static int softnet_seq_show(struct seq_file *seq, void *v)
2574 struct netif_rx_stats *s = v;
2576 seq_printf(seq, "%08x %08x %08x %08x %08x %08x %08x %08x %08x\n",
2577 s->total, s->dropped, s->time_squeeze, 0,
2578 0, 0, 0, 0, /* was fastroute */
2583 static const struct seq_operations dev_seq_ops = {
2584 .start = dev_seq_start,
2585 .next = dev_seq_next,
2586 .stop = dev_seq_stop,
2587 .show = dev_seq_show,
2590 static int dev_seq_open(struct inode *inode, struct file *file)
2592 return seq_open_net(inode, file, &dev_seq_ops,
2593 sizeof(struct seq_net_private));
2596 static const struct file_operations dev_seq_fops = {
2597 .owner = THIS_MODULE,
2598 .open = dev_seq_open,
2600 .llseek = seq_lseek,
2601 .release = seq_release_net,
2604 static const struct seq_operations softnet_seq_ops = {
2605 .start = softnet_seq_start,
2606 .next = softnet_seq_next,
2607 .stop = softnet_seq_stop,
2608 .show = softnet_seq_show,
2611 static int softnet_seq_open(struct inode *inode, struct file *file)
2613 return seq_open(file, &softnet_seq_ops);
2616 static const struct file_operations softnet_seq_fops = {
2617 .owner = THIS_MODULE,
2618 .open = softnet_seq_open,
2620 .llseek = seq_lseek,
2621 .release = seq_release,
2624 static void *ptype_get_idx(loff_t pos)
2626 struct packet_type *pt = NULL;
2630 list_for_each_entry_rcu(pt, &ptype_all, list) {
2636 for (t = 0; t < PTYPE_HASH_SIZE; t++) {
2637 list_for_each_entry_rcu(pt, &ptype_base[t], list) {
2646 static void *ptype_seq_start(struct seq_file *seq, loff_t *pos)
2650 return *pos ? ptype_get_idx(*pos - 1) : SEQ_START_TOKEN;
2653 static void *ptype_seq_next(struct seq_file *seq, void *v, loff_t *pos)
2655 struct packet_type *pt;
2656 struct list_head *nxt;
2660 if (v == SEQ_START_TOKEN)
2661 return ptype_get_idx(0);
2664 nxt = pt->list.next;
2665 if (pt->type == htons(ETH_P_ALL)) {
2666 if (nxt != &ptype_all)
2669 nxt = ptype_base[0].next;
2671 hash = ntohs(pt->type) & PTYPE_HASH_MASK;
2673 while (nxt == &ptype_base[hash]) {
2674 if (++hash >= PTYPE_HASH_SIZE)
2676 nxt = ptype_base[hash].next;
2679 return list_entry(nxt, struct packet_type, list);
2682 static void ptype_seq_stop(struct seq_file *seq, void *v)
2688 static void ptype_seq_decode(struct seq_file *seq, void *sym)
2690 #ifdef CONFIG_KALLSYMS
2691 unsigned long offset = 0, symsize;
2692 const char *symname;
2696 symname = kallsyms_lookup((unsigned long)sym, &symsize, &offset,
2703 modname = delim = "";
2704 seq_printf(seq, "%s%s%s%s+0x%lx", delim, modname, delim,
2710 seq_printf(seq, "[%p]", sym);
2713 static int ptype_seq_show(struct seq_file *seq, void *v)
2715 struct packet_type *pt = v;
2717 if (v == SEQ_START_TOKEN)
2718 seq_puts(seq, "Type Device Function\n");
2719 else if (pt->dev == NULL || dev_net(pt->dev) == seq_file_net(seq)) {
2720 if (pt->type == htons(ETH_P_ALL))
2721 seq_puts(seq, "ALL ");
2723 seq_printf(seq, "%04x", ntohs(pt->type));
2725 seq_printf(seq, " %-8s ",
2726 pt->dev ? pt->dev->name : "");
2727 ptype_seq_decode(seq, pt->func);
2728 seq_putc(seq, '\n');
2734 static const struct seq_operations ptype_seq_ops = {
2735 .start = ptype_seq_start,
2736 .next = ptype_seq_next,
2737 .stop = ptype_seq_stop,
2738 .show = ptype_seq_show,
2741 static int ptype_seq_open(struct inode *inode, struct file *file)
2743 return seq_open_net(inode, file, &ptype_seq_ops,
2744 sizeof(struct seq_net_private));
2747 static const struct file_operations ptype_seq_fops = {
2748 .owner = THIS_MODULE,
2749 .open = ptype_seq_open,
2751 .llseek = seq_lseek,
2752 .release = seq_release_net,
2756 static int __net_init dev_proc_net_init(struct net *net)
2760 if (!proc_net_fops_create(net, "dev", S_IRUGO, &dev_seq_fops))
2762 if (!proc_net_fops_create(net, "softnet_stat", S_IRUGO, &softnet_seq_fops))
2764 if (!proc_net_fops_create(net, "ptype", S_IRUGO, &ptype_seq_fops))
2767 if (wext_proc_init(net))
2773 proc_net_remove(net, "ptype");
2775 proc_net_remove(net, "softnet_stat");
2777 proc_net_remove(net, "dev");
2781 static void __net_exit dev_proc_net_exit(struct net *net)
2783 wext_proc_exit(net);
2785 proc_net_remove(net, "ptype");
2786 proc_net_remove(net, "softnet_stat");
2787 proc_net_remove(net, "dev");
2790 static struct pernet_operations __net_initdata dev_proc_ops = {
2791 .init = dev_proc_net_init,
2792 .exit = dev_proc_net_exit,
2795 static int __init dev_proc_init(void)
2797 return register_pernet_subsys(&dev_proc_ops);
2800 #define dev_proc_init() 0
2801 #endif /* CONFIG_PROC_FS */
2805 * netdev_set_master - set up master/slave pair
2806 * @slave: slave device
2807 * @master: new master device
2809 * Changes the master device of the slave. Pass %NULL to break the
2810 * bonding. The caller must hold the RTNL semaphore. On a failure
2811 * a negative errno code is returned. On success the reference counts
2812 * are adjusted, %RTM_NEWLINK is sent to the routing socket and the
2813 * function returns zero.
2815 int netdev_set_master(struct net_device *slave, struct net_device *master)
2817 struct net_device *old = slave->master;
2827 slave->master = master;
2835 slave->flags |= IFF_SLAVE;
2837 slave->flags &= ~IFF_SLAVE;
2839 rtmsg_ifinfo(RTM_NEWLINK, slave, IFF_SLAVE);
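/* Usage sketch (illustrative only): how a bonding-style driver might pair
 * and unpair devices with the helper above.  example_enslave() and
 * example_release() are hypothetical; the RTNL rule is the one stated in
 * the kerneldoc.
 */
#if 0
static int example_enslave(struct net_device *bond_dev,
			   struct net_device *slave_dev)
{
	ASSERT_RTNL();
	return netdev_set_master(slave_dev, bond_dev);
}

static void example_release(struct net_device *slave_dev)
{
	ASSERT_RTNL();
	netdev_set_master(slave_dev, NULL);	/* break the bonding */
}
#endif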
2843 static int __dev_set_promiscuity(struct net_device *dev, int inc)
2845 unsigned short old_flags = dev->flags;
2849 dev->flags |= IFF_PROMISC;
2850 dev->promiscuity += inc;
2851 if (dev->promiscuity == 0) {
2854 * If inc causes an overflow, leave promiscuity untouched and return an error.
2857 dev->flags &= ~IFF_PROMISC;
2859 dev->promiscuity -= inc;
2860 printk(KERN_WARNING "%s: promiscuity counter overflowed, "
2861 "promiscuous mode could not be set; promiscuity "
2862 "handling on this device may be broken.\n", dev->name);
2866 if (dev->flags != old_flags) {
2867 printk(KERN_INFO "device %s %s promiscuous mode\n",
2868 dev->name, (dev->flags & IFF_PROMISC) ? "entered" :
2871 audit_log(current->audit_context, GFP_ATOMIC,
2872 AUDIT_ANOM_PROMISCUOUS,
2873 "dev=%s prom=%d old_prom=%d auid=%u uid=%u gid=%u ses=%u",
2874 dev->name, (dev->flags & IFF_PROMISC),
2875 (old_flags & IFF_PROMISC),
2876 audit_get_loginuid(current),
2877 current->uid, current->gid,
2878 audit_get_sessionid(current));
2880 if (dev->change_rx_flags)
2881 dev->change_rx_flags(dev, IFF_PROMISC);
2887 * dev_set_promiscuity - update promiscuity count on a device
2891 * Add or remove promiscuity from a device. While the count in the device
2892 * remains above zero the interface remains promiscuous. Once it hits zero
2893 * the device reverts to normal filtering operation. A negative @inc
2894 * value is used to drop promiscuity on the device.
2895 * Return 0 if successful or a negative errno code on error.
2897 int dev_set_promiscuity(struct net_device *dev, int inc)
2899 unsigned short old_flags = dev->flags;
2902 err = __dev_set_promiscuity(dev, inc);
2905 if (dev->flags != old_flags)
2906 dev_set_rx_mode(dev);
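/* Usage sketch (illustrative only): a packet-capture style caller taking
 * and dropping one reference on promiscuous mode.  Checking the return
 * value matters because the counter can overflow, as handled above.
 */
#if 0
static int example_start_capture(struct net_device *dev)
{
	int err;

	rtnl_lock();
	err = dev_set_promiscuity(dev, 1);	/* +1 reference */
	rtnl_unlock();
	return err;
}

static void example_stop_capture(struct net_device *dev)
{
	rtnl_lock();
	dev_set_promiscuity(dev, -1);		/* drop our reference */
	rtnl_unlock();
}
#endif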
2911 * dev_set_allmulti - update allmulti count on a device
2915 * Add or remove reception of all multicast frames to a device. While the
2916 * count in the device remains above zero the interface keeps listening
2917 * to all multicast frames. Once it hits zero the device reverts to normal
2918 * filtering operation. A negative @inc value is used to drop the counter
2919 * when releasing a resource needing all multicasts.
2920 * Return 0 if successful or a negative errno code on error.
2923 int dev_set_allmulti(struct net_device *dev, int inc)
2925 unsigned short old_flags = dev->flags;
2929 dev->flags |= IFF_ALLMULTI;
2930 dev->allmulti += inc;
2931 if (dev->allmulti == 0) {
2934 * If inc causes an overflow, leave allmulti untouched and return an error.
2937 dev->flags &= ~IFF_ALLMULTI;
2939 dev->allmulti -= inc;
2940 printk(KERN_WARNING "%s: allmulti counter overflowed, "
2941 "allmulti mode could not be set; allmulti "
2942 "handling on this device may be broken.\n", dev->name);
2946 if (dev->flags ^ old_flags) {
2947 if (dev->change_rx_flags)
2948 dev->change_rx_flags(dev, IFF_ALLMULTI);
2949 dev_set_rx_mode(dev);
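/* Usage sketch (illustrative only): a multicast-routing style user
 * holding the interface in all-multicast mode while it needs every
 * multicast frame; the reference is dropped again with inc == -1.
 */
#if 0
static int example_enable_mrouting(struct net_device *dev)
{
	return dev_set_allmulti(dev, 1);
}

static void example_disable_mrouting(struct net_device *dev)
{
	dev_set_allmulti(dev, -1);
}
#endif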
2955 * Upload unicast and multicast address lists to device and
2956 * configure RX filtering. When the device doesn't support unicast
2957 * filtering it is put in promiscuous mode while unicast addresses are present.
2960 void __dev_set_rx_mode(struct net_device *dev)
2962 /* dev_open will call this function so the list will stay sane. */
2963 if (!(dev->flags&IFF_UP))
2966 if (!netif_device_present(dev))
2969 if (dev->set_rx_mode)
2970 dev->set_rx_mode(dev);
2972 /* Unicast address changes may only happen under the rtnl,
2973 * therefore calling __dev_set_promiscuity here is safe.
2975 if (dev->uc_count > 0 && !dev->uc_promisc) {
2976 __dev_set_promiscuity(dev, 1);
2977 dev->uc_promisc = 1;
2978 } else if (dev->uc_count == 0 && dev->uc_promisc) {
2979 __dev_set_promiscuity(dev, -1);
2980 dev->uc_promisc = 0;
2983 if (dev->set_multicast_list)
2984 dev->set_multicast_list(dev);
2988 void dev_set_rx_mode(struct net_device *dev)
2990 netif_addr_lock_bh(dev);
2991 __dev_set_rx_mode(dev);
2992 netif_addr_unlock_bh(dev);
2995 int __dev_addr_delete(struct dev_addr_list **list, int *count,
2996 void *addr, int alen, int glbl)
2998 struct dev_addr_list *da;
3000 for (; (da = *list) != NULL; list = &da->next) {
3001 if (memcmp(da->da_addr, addr, da->da_addrlen) == 0 &&
3002 alen == da->da_addrlen) {
3004 int old_glbl = da->da_gusers;
3021 int __dev_addr_add(struct dev_addr_list **list, int *count,
3022 void *addr, int alen, int glbl)
3024 struct dev_addr_list *da;
3026 for (da = *list; da != NULL; da = da->next) {
3027 if (memcmp(da->da_addr, addr, da->da_addrlen) == 0 &&
3028 da->da_addrlen == alen) {
3030 int old_glbl = da->da_gusers;
3040 da = kzalloc(sizeof(*da), GFP_ATOMIC);
3043 memcpy(da->da_addr, addr, alen);
3044 da->da_addrlen = alen;
3046 da->da_gusers = glbl ? 1 : 0;
3054 * dev_unicast_delete - Release secondary unicast address.
3056 * @addr: address to delete
3057 * @alen: length of @addr
3059 * Release reference to a secondary unicast address and remove it
3060 * from the device if the reference count drops to zero.
3062 * The caller must hold the rtnl_mutex.
3064 int dev_unicast_delete(struct net_device *dev, void *addr, int alen)
3070 netif_addr_lock_bh(dev);
3071 err = __dev_addr_delete(&dev->uc_list, &dev->uc_count, addr, alen, 0);
3073 __dev_set_rx_mode(dev);
3074 netif_addr_unlock_bh(dev);
3077 EXPORT_SYMBOL(dev_unicast_delete);
3080 * dev_unicast_add - add a secondary unicast address
3082 * @addr: address to add
3083 * @alen: length of @addr
3085 * Add a secondary unicast address to the device or increase
3086 * the reference count if it already exists.
3088 * The caller must hold the rtnl_mutex.
3090 int dev_unicast_add(struct net_device *dev, void *addr, int alen)
3096 netif_addr_lock_bh(dev);
3097 err = __dev_addr_add(&dev->uc_list, &dev->uc_count, addr, alen, 0);
3099 __dev_set_rx_mode(dev);
3100 netif_addr_unlock_bh(dev);
3103 EXPORT_SYMBOL(dev_unicast_add);
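/* Usage sketch (illustrative only): maintaining a secondary unicast MAC
 * on a device, as a virtualisation layer might.  example_mac is a
 * hypothetical locally administered address; the rtnl_mutex rule is the
 * one documented above.
 */
#if 0
static u8 example_mac[ETH_ALEN] = { 0x02, 0x00, 0x00, 0x00, 0x00, 0x01 };

static int example_claim_mac(struct net_device *dev)
{
	ASSERT_RTNL();
	return dev_unicast_add(dev, example_mac, ETH_ALEN);
}

static void example_drop_mac(struct net_device *dev)
{
	ASSERT_RTNL();
	dev_unicast_delete(dev, example_mac, ETH_ALEN);
}
#endif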
3105 int __dev_addr_sync(struct dev_addr_list **to, int *to_count,
3106 struct dev_addr_list **from, int *from_count)
3108 struct dev_addr_list *da, *next;
3112 while (da != NULL) {
3114 if (!da->da_synced) {
3115 err = __dev_addr_add(to, to_count,
3116 da->da_addr, da->da_addrlen, 0);
3121 } else if (da->da_users == 1) {
3122 __dev_addr_delete(to, to_count,
3123 da->da_addr, da->da_addrlen, 0);
3124 __dev_addr_delete(from, from_count,
3125 da->da_addr, da->da_addrlen, 0);
3132 void __dev_addr_unsync(struct dev_addr_list **to, int *to_count,
3133 struct dev_addr_list **from, int *from_count)
3135 struct dev_addr_list *da, *next;
3138 while (da != NULL) {
3140 if (da->da_synced) {
3141 __dev_addr_delete(to, to_count,
3142 da->da_addr, da->da_addrlen, 0);
3144 __dev_addr_delete(from, from_count,
3145 da->da_addr, da->da_addrlen, 0);
3152 * dev_unicast_sync - Synchronize device's unicast list to another device
3153 * @to: destination device
3154 * @from: source device
3156 * Add newly added addresses to the destination device and release
3157 * addresses that have no users left. The source device must be
3158 * locked by netif_addr_lock_bh.
3160 * This function is intended to be called from the dev->set_rx_mode
3161 * function of layered software devices.
3163 int dev_unicast_sync(struct net_device *to, struct net_device *from)
3167 netif_addr_lock_bh(to);
3168 err = __dev_addr_sync(&to->uc_list, &to->uc_count,
3169 &from->uc_list, &from->uc_count);
3171 __dev_set_rx_mode(to);
3172 netif_addr_unlock_bh(to);
3175 EXPORT_SYMBOL(dev_unicast_sync);
3178 * dev_unicast_unsync - Remove synchronized addresses from the destination device
3179 * @to: destination device
3180 * @from: source device
3182 * Remove all addresses that were added to the destination device by
3183 * dev_unicast_sync(). This function is intended to be called from the
3184 * dev->stop function of layered software devices.
3186 void dev_unicast_unsync(struct net_device *to, struct net_device *from)
3188 netif_addr_lock_bh(from);
3189 netif_addr_lock(to);
3191 __dev_addr_unsync(&to->uc_list, &to->uc_count,
3192 &from->uc_list, &from->uc_count);
3193 __dev_set_rx_mode(to);
3195 netif_addr_unlock(to);
3196 netif_addr_unlock_bh(from);
3198 EXPORT_SYMBOL(dev_unicast_unsync);
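/* Usage sketch (illustrative only): a layered device such as a VLAN
 * propagating its unicast list to the underlying device from its
 * ->set_rx_mode handler and tearing it down from ->stop.  The
 * example_vlan_priv structure and its real_dev field are hypothetical.
 */
#if 0
struct example_vlan_priv {
	struct net_device *real_dev;
};

static void example_set_rx_mode(struct net_device *dev)
{
	struct example_vlan_priv *p = netdev_priv(dev);

	dev_unicast_sync(p->real_dev, dev);
}

static int example_stop(struct net_device *dev)
{
	struct example_vlan_priv *p = netdev_priv(dev);

	dev_unicast_unsync(p->real_dev, dev);
	return 0;
}
#endif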
3200 static void __dev_addr_discard(struct dev_addr_list **list)
3202 struct dev_addr_list *tmp;
3204 while (*list != NULL) {
3207 if (tmp->da_users > tmp->da_gusers)
3208 printk("__dev_addr_discard: address leakage! "
3209 "da_users=%d\n", tmp->da_users);
3214 static void dev_addr_discard(struct net_device *dev)
3216 netif_addr_lock_bh(dev);
3218 __dev_addr_discard(&dev->uc_list);
3221 __dev_addr_discard(&dev->mc_list);
3224 netif_addr_unlock_bh(dev);
3227 unsigned dev_get_flags(const struct net_device *dev)
3231 flags = (dev->flags & ~(IFF_PROMISC |
3236 (dev->gflags & (IFF_PROMISC |
3239 if (netif_running(dev)) {
3240 if (netif_oper_up(dev))
3241 flags |= IFF_RUNNING;
3242 if (netif_carrier_ok(dev))
3243 flags |= IFF_LOWER_UP;
3244 if (netif_dormant(dev))
3245 flags |= IFF_DORMANT;
3251 int dev_change_flags(struct net_device *dev, unsigned flags)
3254 int old_flags = dev->flags;
3259 * Set the flags on our device.
3262 dev->flags = (flags & (IFF_DEBUG | IFF_NOTRAILERS | IFF_NOARP |
3263 IFF_DYNAMIC | IFF_MULTICAST | IFF_PORTSEL |
3265 (dev->flags & (IFF_UP | IFF_VOLATILE | IFF_PROMISC |
3269 * Load in the correct multicast list now that the flags have changed.
3272 if (dev->change_rx_flags && (old_flags ^ flags) & IFF_MULTICAST)
3273 dev->change_rx_flags(dev, IFF_MULTICAST);
3275 dev_set_rx_mode(dev);
3278 * Have we downed the interface? We handle IFF_UP ourselves
3279 * according to user attempts to set it, rather than blindly setting it.
3284 if ((old_flags ^ flags) & IFF_UP) { /* Bit is different ? */
3285 ret = ((old_flags & IFF_UP) ? dev_close : dev_open)(dev);
3288 dev_set_rx_mode(dev);
3291 if (dev->flags & IFF_UP &&
3292 ((old_flags ^ dev->flags) &~ (IFF_UP | IFF_PROMISC | IFF_ALLMULTI |
3294 call_netdevice_notifiers(NETDEV_CHANGE, dev);
3296 if ((flags ^ dev->gflags) & IFF_PROMISC) {
3297 int inc = (flags & IFF_PROMISC) ? +1 : -1;
3298 dev->gflags ^= IFF_PROMISC;
3299 dev_set_promiscuity(dev, inc);
3302 /* NOTE: order of synchronization of IFF_PROMISC and IFF_ALLMULTI
3303 is important. Some (broken) drivers set IFF_PROMISC when
3304 IFF_ALLMULTI is requested, without asking us and without reporting it.
3306 if ((flags ^ dev->gflags) & IFF_ALLMULTI) {
3307 int inc = (flags & IFF_ALLMULTI) ? +1 : -1;
3308 dev->gflags ^= IFF_ALLMULTI;
3309 dev_set_allmulti(dev, inc);
3312 /* Exclude state transition flags, already notified */
3313 changes = (old_flags ^ dev->flags) & ~(IFF_UP | IFF_RUNNING);
3315 rtmsg_ifinfo(RTM_NEWLINK, dev, changes);
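/* Usage sketch (illustrative only): bringing an interface up by name the
 * way the SIOCSIFFLAGS path below does, under the RTNL.
 */
#if 0
static int example_bring_up(struct net *net, const char *name)
{
	struct net_device *dev;
	int err = -ENODEV;

	rtnl_lock();
	dev = __dev_get_by_name(net, name);
	if (dev)
		err = dev_change_flags(dev, dev->flags | IFF_UP);
	rtnl_unlock();
	return err;
}
#endif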
3320 int dev_set_mtu(struct net_device *dev, int new_mtu)
3324 if (new_mtu == dev->mtu)
3327 /* MTU must be positive. */
3331 if (!netif_device_present(dev))
3335 if (dev->change_mtu)
3336 err = dev->change_mtu(dev, new_mtu);
3339 if (!err && dev->flags & IFF_UP)
3340 call_netdevice_notifiers(NETDEV_CHANGEMTU, dev);
3344 int dev_set_mac_address(struct net_device *dev, struct sockaddr *sa)
3348 if (!dev->set_mac_address)
3350 if (sa->sa_family != dev->type)
3352 if (!netif_device_present(dev))
3354 err = dev->set_mac_address(dev, sa);
3356 call_netdevice_notifiers(NETDEV_CHANGEADDR, dev);
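/* Usage sketch (illustrative only): reconfiguring a device under the
 * RTNL, mirroring what the SIOCSIFMTU and SIOCSIFHWADDR handlers below
 * do with the two helpers above.  example_reconfigure() is hypothetical.
 */
#if 0
static int example_reconfigure(struct net_device *dev, int mtu,
			       const u8 *mac)
{
	struct sockaddr sa;
	int err;

	ASSERT_RTNL();
	err = dev_set_mtu(dev, mtu);
	if (err)
		return err;
	sa.sa_family = dev->type;	/* must match the device type */
	memcpy(sa.sa_data, mac, dev->addr_len);
	return dev_set_mac_address(dev, &sa);
}
#endif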
3361 * Perform the SIOCxIFxxx calls, inside read_lock(dev_base_lock)
3363 static int dev_ifsioc_locked(struct net *net, struct ifreq *ifr, unsigned int cmd)
3366 struct net_device *dev = __dev_get_by_name(net, ifr->ifr_name);
3372 case SIOCGIFFLAGS: /* Get interface flags */
3373 ifr->ifr_flags = dev_get_flags(dev);
3376 case SIOCGIFMETRIC: /* Get the metric on the interface
3377 (currently unused) */
3378 ifr->ifr_metric = 0;
3381 case SIOCGIFMTU: /* Get the MTU of a device */
3382 ifr->ifr_mtu = dev->mtu;
3387 memset(ifr->ifr_hwaddr.sa_data, 0, sizeof ifr->ifr_hwaddr.sa_data);
3389 memcpy(ifr->ifr_hwaddr.sa_data, dev->dev_addr,
3390 min(sizeof ifr->ifr_hwaddr.sa_data, (size_t) dev->addr_len));
3391 ifr->ifr_hwaddr.sa_family = dev->type;
3399 ifr->ifr_map.mem_start = dev->mem_start;
3400 ifr->ifr_map.mem_end = dev->mem_end;
3401 ifr->ifr_map.base_addr = dev->base_addr;
3402 ifr->ifr_map.irq = dev->irq;
3403 ifr->ifr_map.dma = dev->dma;
3404 ifr->ifr_map.port = dev->if_port;
3408 ifr->ifr_ifindex = dev->ifindex;
3412 ifr->ifr_qlen = dev->tx_queue_len;
3416 /* dev_ioctl() should ensure this case is never reached */
3428 * Perform the SIOCxIFxxx calls, inside rtnl_lock()
3430 static int dev_ifsioc(struct net *net, struct ifreq *ifr, unsigned int cmd)
3433 struct net_device *dev = __dev_get_by_name(net, ifr->ifr_name);
3439 case SIOCSIFFLAGS: /* Set interface flags */
3440 return dev_change_flags(dev, ifr->ifr_flags);
3442 case SIOCSIFMETRIC: /* Set the metric on the interface
3443 (currently unused) */
3446 case SIOCSIFMTU: /* Set the MTU of a device */
3447 return dev_set_mtu(dev, ifr->ifr_mtu);
3450 return dev_set_mac_address(dev, &ifr->ifr_hwaddr);
3452 case SIOCSIFHWBROADCAST:
3453 if (ifr->ifr_hwaddr.sa_family != dev->type)
3455 memcpy(dev->broadcast, ifr->ifr_hwaddr.sa_data,
3456 min(sizeof ifr->ifr_hwaddr.sa_data, (size_t) dev->addr_len));
3457 call_netdevice_notifiers(NETDEV_CHANGEADDR, dev);
3461 if (dev->set_config) {
3462 if (!netif_device_present(dev))
3464 return dev->set_config(dev, &ifr->ifr_map);
3469 if ((!dev->set_multicast_list && !dev->set_rx_mode) ||
3470 ifr->ifr_hwaddr.sa_family != AF_UNSPEC)
3472 if (!netif_device_present(dev))
3474 return dev_mc_add(dev, ifr->ifr_hwaddr.sa_data,
3478 if ((!dev->set_multicast_list && !dev->set_rx_mode) ||
3479 ifr->ifr_hwaddr.sa_family != AF_UNSPEC)
3481 if (!netif_device_present(dev))
3483 return dev_mc_delete(dev, ifr->ifr_hwaddr.sa_data,
3487 if (ifr->ifr_qlen < 0)
3489 dev->tx_queue_len = ifr->ifr_qlen;
3493 ifr->ifr_newname[IFNAMSIZ-1] = '\0';
3494 return dev_change_name(dev, ifr->ifr_newname);
3497 * Unknown or private ioctl
3501 if ((cmd >= SIOCDEVPRIVATE &&
3502 cmd <= SIOCDEVPRIVATE + 15) ||
3503 cmd == SIOCBONDENSLAVE ||
3504 cmd == SIOCBONDRELEASE ||
3505 cmd == SIOCBONDSETHWADDR ||
3506 cmd == SIOCBONDSLAVEINFOQUERY ||
3507 cmd == SIOCBONDINFOQUERY ||
3508 cmd == SIOCBONDCHANGEACTIVE ||
3509 cmd == SIOCGMIIPHY ||
3510 cmd == SIOCGMIIREG ||
3511 cmd == SIOCSMIIREG ||
3512 cmd == SIOCBRADDIF ||
3513 cmd == SIOCBRDELIF ||
3514 cmd == SIOCWANDEV) {
3516 if (dev->do_ioctl) {
3517 if (netif_device_present(dev))
3518 err = dev->do_ioctl(dev, ifr,
3531 * This function handles all "interface"-type I/O control requests. The actual
3532 * 'doing' part of this is dev_ifsioc above.
3536 * dev_ioctl - network device ioctl
3537 * @net: the applicable net namespace
3538 * @cmd: command to issue
3539 * @arg: pointer to a struct ifreq in user space
3541 * Issue ioctl functions to devices. This is normally called by the
3542 * user space syscall interfaces but can sometimes be useful for
3543 * other purposes. The return value is the return from the syscall if
3544 * positive or a negative errno code on error.
3547 int dev_ioctl(struct net *net, unsigned int cmd, void __user *arg)
3553 /* One special case: SIOCGIFCONF takes ifconf argument
3554 and requires shared lock, because it sleeps writing the data.
3558 if (cmd == SIOCGIFCONF) {
3560 ret = dev_ifconf(net, (char __user *) arg);
3564 if (cmd == SIOCGIFNAME)
3565 return dev_ifname(net, (struct ifreq __user *)arg);
3567 if (copy_from_user(&ifr, arg, sizeof(struct ifreq)))
3570 ifr.ifr_name[IFNAMSIZ-1] = 0;
3572 colon = strchr(ifr.ifr_name, ':');
3577 * See which interface the caller is talking about.
3582 * These ioctl calls:
3583 * - can be done by all.
3584 * - atomic and do not require locking.
3595 dev_load(net, ifr.ifr_name);
3596 read_lock(&dev_base_lock);
3597 ret = dev_ifsioc_locked(net, &ifr, cmd);
3598 read_unlock(&dev_base_lock);
3602 if (copy_to_user(arg, &ifr,
3603 sizeof(struct ifreq)))
3609 dev_load(net, ifr.ifr_name);
3611 ret = dev_ethtool(net, &ifr);
3616 if (copy_to_user(arg, &ifr,
3617 sizeof(struct ifreq)))
3623 * These ioctl calls:
3624 * - require superuser power.
3625 * - require strict serialization.
3631 if (!capable(CAP_NET_ADMIN))
3633 dev_load(net, ifr.ifr_name);
3635 ret = dev_ifsioc(net, &ifr, cmd);
3640 if (copy_to_user(arg, &ifr,
3641 sizeof(struct ifreq)))
3647 * These ioctl calls:
3648 * - require superuser power.
3649 * - require strict serialization.
3650 * - do not return a value
3660 case SIOCSIFHWBROADCAST:
3663 case SIOCBONDENSLAVE:
3664 case SIOCBONDRELEASE:
3665 case SIOCBONDSETHWADDR:
3666 case SIOCBONDCHANGEACTIVE:
3669 if (!capable(CAP_NET_ADMIN))
3672 case SIOCBONDSLAVEINFOQUERY:
3673 case SIOCBONDINFOQUERY:
3674 dev_load(net, ifr.ifr_name);
3676 ret = dev_ifsioc(net, &ifr, cmd);
3681 /* Get the per device memory space. We can add this but
3682 * currently do not support it */
3684 /* Set the per device memory buffer space.
3685 * Not applicable in our case */
3690 * Unknown or private ioctl.
3693 if (cmd == SIOCWANDEV ||
3694 (cmd >= SIOCDEVPRIVATE &&
3695 cmd <= SIOCDEVPRIVATE + 15)) {
3696 dev_load(net, ifr.ifr_name);
3698 ret = dev_ifsioc(net, &ifr, cmd);
3700 if (!ret && copy_to_user(arg, &ifr,
3701 sizeof(struct ifreq)))
3705 /* Take care of Wireless Extensions */
3706 if (cmd >= SIOCIWFIRST && cmd <= SIOCIWLAST)
3707 return wext_handle_ioctl(net, &ifr, cmd, arg);
3714 * dev_new_index - allocate an ifindex
3715 * @net: the applicable net namespace
3717 * Returns a suitable unique value for a new device interface
3718 * number. The caller must hold the rtnl semaphore or the
3719 * dev_base_lock to be sure it remains unique.
3721 static int dev_new_index(struct net *net)
3727 if (!__dev_get_by_index(net, ifindex))
3732 /* Delayed registration/unregistration */
3733 static DEFINE_SPINLOCK(net_todo_list_lock);
3734 static LIST_HEAD(net_todo_list);
3736 static void net_set_todo(struct net_device *dev)
3738 spin_lock(&net_todo_list_lock);
3739 list_add_tail(&dev->todo_list, &net_todo_list);
3740 spin_unlock(&net_todo_list_lock);
3743 static void rollback_registered(struct net_device *dev)
3745 BUG_ON(dev_boot_phase);
3748 /* Some devices call this without ever having registered, to unwind a failed initialization. */
3749 if (dev->reg_state == NETREG_UNINITIALIZED) {
3750 printk(KERN_DEBUG "unregister_netdevice: device %s/%p never "
3751 "was registered\n", dev->name, dev);
3757 BUG_ON(dev->reg_state != NETREG_REGISTERED);
3759 /* If device is running, close it first. */
3762 /* And unlink it from device chain. */
3763 unlist_netdevice(dev);
3765 dev->reg_state = NETREG_UNREGISTERING;
3769 /* Shutdown queueing discipline. */
3773 /* Notify protocols that we are about to destroy
3774 this device. They should clean all the things.
3776 call_netdevice_notifiers(NETDEV_UNREGISTER, dev);
3779 * Flush the unicast and multicast chains
3781 dev_addr_discard(dev);
3786 /* Notifier chain MUST detach us from master device. */
3787 BUG_TRAP(!dev->master);
3789 /* Remove entries from kobject tree */
3790 netdev_unregister_kobject(dev);
3797 static void __netdev_init_queue_locks_one(struct net_device *dev,
3798 struct netdev_queue *dev_queue,
3801 spin_lock_init(&dev_queue->_xmit_lock);
3802 netdev_set_lockdep_class(&dev_queue->_xmit_lock, dev->type);
3803 dev_queue->xmit_lock_owner = -1;
3806 static void netdev_init_queue_locks(struct net_device *dev)
3808 netdev_for_each_tx_queue(dev, __netdev_init_queue_locks_one, NULL);
3809 __netdev_init_queue_locks_one(dev, &dev->rx_queue, NULL);
3813 * register_netdevice - register a network device
3814 * @dev: device to register
3816 * Take a completed network device structure and add it to the kernel
3817 * interfaces. A %NETDEV_REGISTER message is sent to the netdev notifier
3818 * chain. 0 is returned on success. A negative errno code is returned
3819 * on a failure to set up the device, or if the name is a duplicate.
3821 * Callers must hold the rtnl semaphore. You may want
3822 * register_netdev() instead of this.
3825 * The locking appears insufficient to guarantee two parallel registers
3826 * will not get the same name.
3829 int register_netdevice(struct net_device *dev)
3831 struct hlist_head *head;
3832 struct hlist_node *p;
3836 BUG_ON(dev_boot_phase);
3841 /* When net_devices are persistent, this will be fatal. */
3842 BUG_ON(dev->reg_state != NETREG_UNINITIALIZED);
3843 BUG_ON(!dev_net(dev));
3846 spin_lock_init(&dev->addr_list_lock);
3847 netdev_init_queue_locks(dev);
3851 /* Init, if this function is available */
3853 ret = dev->init(dev);
3861 if (!dev_valid_name(dev->name)) {
3866 dev->ifindex = dev_new_index(net);
3867 if (dev->iflink == -1)
3868 dev->iflink = dev->ifindex;
3870 /* Check for existence of name */
3871 head = dev_name_hash(net, dev->name);
3872 hlist_for_each(p, head) {
3873 struct net_device *d
3874 = hlist_entry(p, struct net_device, name_hlist);
3875 if (!strncmp(d->name, dev->name, IFNAMSIZ)) {
3881 /* Fix illegal checksum combinations */
3882 if ((dev->features & NETIF_F_HW_CSUM) &&
3883 (dev->features & (NETIF_F_IP_CSUM|NETIF_F_IPV6_CSUM))) {
3884 printk(KERN_NOTICE "%s: mixed HW and IP checksum settings.\n",
3886 dev->features &= ~(NETIF_F_IP_CSUM|NETIF_F_IPV6_CSUM);
3889 if ((dev->features & NETIF_F_NO_CSUM) &&
3890 (dev->features & (NETIF_F_HW_CSUM|NETIF_F_IP_CSUM|NETIF_F_IPV6_CSUM))) {
3891 printk(KERN_NOTICE "%s: mixed no checksumming and other settings.\n",
3893 dev->features &= ~(NETIF_F_IP_CSUM|NETIF_F_IPV6_CSUM|NETIF_F_HW_CSUM);
3897 /* Fix illegal SG+CSUM combinations. */
3898 if ((dev->features & NETIF_F_SG) &&
3899 !(dev->features & NETIF_F_ALL_CSUM)) {
3900 printk(KERN_NOTICE "%s: Dropping NETIF_F_SG since no checksum feature.\n",
3902 dev->features &= ~NETIF_F_SG;
3905 /* TSO requires that SG is present as well. */
3906 if ((dev->features & NETIF_F_TSO) &&
3907 !(dev->features & NETIF_F_SG)) {
3908 printk(KERN_NOTICE "%s: Dropping NETIF_F_TSO since no SG feature.\n",
3910 dev->features &= ~NETIF_F_TSO;
3912 if (dev->features & NETIF_F_UFO) {
3913 if (!(dev->features & NETIF_F_HW_CSUM)) {
3914 printk(KERN_ERR "%s: Dropping NETIF_F_UFO since no "
3915 "NETIF_F_HW_CSUM feature.\n",
3917 dev->features &= ~NETIF_F_UFO;
3919 if (!(dev->features & NETIF_F_SG)) {
3920 printk(KERN_ERR "%s: Dropping NETIF_F_UFO since no "
3921 "NETIF_F_SG feature.\n",
3923 dev->features &= ~NETIF_F_UFO;
3927 netdev_initialize_kobject(dev);
3928 ret = netdev_register_kobject(dev);
3931 dev->reg_state = NETREG_REGISTERED;
3934 * Default initial state at registration is that the
3935 * device is present.
3938 set_bit(__LINK_STATE_PRESENT, &dev->state);
3940 dev_init_scheduler(dev);
3942 list_netdevice(dev);
3944 /* Notify protocols that a new device appeared. */
3945 ret = call_netdevice_notifiers(NETDEV_REGISTER, dev);
3946 ret = notifier_to_errno(ret);
3948 rollback_registered(dev);
3949 dev->reg_state = NETREG_UNREGISTERED;
3962 * register_netdev - register a network device
3963 * @dev: device to register
3965 * Take a completed network device structure and add it to the kernel
3966 * interfaces. A %NETDEV_REGISTER message is sent to the netdev notifier
3967 * chain. 0 is returned on success. A negative errno code is returned
3968 * on a failure to set up the device, or if the name is a duplicate.
3970 * This is a wrapper around register_netdevice that takes the rtnl semaphore
3971 * and expands the device name if you passed a format string to
3974 int register_netdev(struct net_device *dev)
3981 * If the name is a format string the caller wants us to do a name allocation.
3984 if (strchr(dev->name, '%')) {
3985 err = dev_alloc_name(dev, dev->name);
3990 err = register_netdevice(dev);
3995 EXPORT_SYMBOL(register_netdev);
3998 * netdev_wait_allrefs - wait until all references are gone.
4000 * This is called when unregistering network devices.
4002 * Any protocol or device that holds a reference should register
4003 * for netdevice notification, and cleanup and put back the
4004 * reference if they receive an UNREGISTER event.
4005 * We can get stuck here if buggy protocols don't correctly call dev_put.
4008 static void netdev_wait_allrefs(struct net_device *dev)
4010 unsigned long rebroadcast_time, warning_time;
4012 rebroadcast_time = warning_time = jiffies;
4013 while (atomic_read(&dev->refcnt) != 0) {
4014 if (time_after(jiffies, rebroadcast_time + 1 * HZ)) {
4017 /* Rebroadcast unregister notification */
4018 call_netdevice_notifiers(NETDEV_UNREGISTER, dev);
4020 if (test_bit(__LINK_STATE_LINKWATCH_PENDING,
4022 /* We must not have linkwatch events
4023 * pending on unregister. If this
4024 * happens, we simply run the queue
4025 * unscheduled, resulting in a noop for this device.
4028 linkwatch_run_queue();
4033 rebroadcast_time = jiffies;
4038 if (time_after(jiffies, warning_time + 10 * HZ)) {
4039 printk(KERN_EMERG "unregister_netdevice: "
4040 "waiting for %s to become free. Usage "
4042 dev->name, atomic_read(&dev->refcnt));
4043 warning_time = jiffies;
4052 * register_netdevice(x1);
4053 * register_netdevice(x2);
4055 * unregister_netdevice(y1);
4056 * unregister_netdevice(y2);
4062 * We are invoked by rtnl_unlock() after it drops the semaphore.
4063 * This allows us to deal with problems:
4064 * 1) We can delete sysfs objects which invoke hotplug
4065 * without deadlocking with linkwatch via keventd.
4066 * 2) Since we run with the RTNL semaphore not held, we can sleep
4067 * safely in order to wait for the netdev refcnt to drop to zero.
4069 static DEFINE_MUTEX(net_todo_run_mutex);
4070 void netdev_run_todo(void)
4072 struct list_head list;
4074 /* Need to guard against multiple CPUs getting out of order. */
4075 mutex_lock(&net_todo_run_mutex);
4077 /* Not safe to do outside the semaphore. We must not return
4078 * until all unregister events invoked by the local processor
4079 * have been completed (either by this todo run, or one on another cpu).
4082 if (list_empty(&net_todo_list))
4085 /* Snapshot list, allow later requests */
4086 spin_lock(&net_todo_list_lock);
4087 list_replace_init(&net_todo_list, &list);
4088 spin_unlock(&net_todo_list_lock);
4090 while (!list_empty(&list)) {
4091 struct net_device *dev
4092 = list_entry(list.next, struct net_device, todo_list);
4093 list_del(&dev->todo_list);
4095 if (unlikely(dev->reg_state != NETREG_UNREGISTERING)) {
4096 printk(KERN_ERR "network todo '%s' but state %d\n",
4097 dev->name, dev->reg_state);
4102 dev->reg_state = NETREG_UNREGISTERED;
4104 netdev_wait_allrefs(dev);
4107 BUG_ON(atomic_read(&dev->refcnt));
4108 BUG_TRAP(!dev->ip_ptr);
4109 BUG_TRAP(!dev->ip6_ptr);
4110 BUG_TRAP(!dev->dn_ptr);
4112 if (dev->destructor)
4113 dev->destructor(dev);
4115 /* Free network device */
4116 kobject_put(&dev->dev.kobj);
4120 mutex_unlock(&net_todo_run_mutex);
4123 static struct net_device_stats *internal_stats(struct net_device *dev)
4128 static void netdev_init_one_queue(struct net_device *dev,
4129 struct netdev_queue *queue,
4132 spin_lock_init(&queue->lock);
4136 static void netdev_init_queues(struct net_device *dev)
4138 netdev_init_one_queue(dev, &dev->rx_queue, NULL);
4139 netdev_for_each_tx_queue(dev, netdev_init_one_queue, NULL);
4143 * alloc_netdev_mq - allocate network device
4144 * @sizeof_priv: size of private data to allocate space for
4145 * @name: device name format string
4146 * @setup: callback to initialize device
4147 * @queue_count: the number of subqueues to allocate
4149 * Allocates a struct net_device with private data area for driver use
4150 * and performs basic initialization. Also allocates subqueue structs
4151 * for each queue on the device at the end of the netdevice.
4153 struct net_device *alloc_netdev_mq(int sizeof_priv, const char *name,
4154 void (*setup)(struct net_device *), unsigned int queue_count)
4156 struct netdev_queue *tx;
4157 struct net_device *dev;
4161 BUG_ON(strlen(name) >= sizeof(dev->name));
4163 alloc_size = sizeof(struct net_device) +
4164 sizeof(struct net_device_subqueue) * (queue_count - 1);
4166 /* ensure 32-byte alignment of private area */
4167 alloc_size = (alloc_size + NETDEV_ALIGN_CONST) & ~NETDEV_ALIGN_CONST;
4168 alloc_size += sizeof_priv;
4170 /* ensure 32-byte alignment of whole construct */
4171 alloc_size += NETDEV_ALIGN_CONST;
4173 p = kzalloc(alloc_size, GFP_KERNEL);
4175 printk(KERN_ERR "alloc_netdev: Unable to allocate device.\n");
4179 tx = kzalloc(sizeof(struct netdev_queue) * queue_count, GFP_KERNEL);
4181 printk(KERN_ERR "alloc_netdev: Unable to allocate "
4187 dev = (struct net_device *)
4188 (((long)p + NETDEV_ALIGN_CONST) & ~NETDEV_ALIGN_CONST);
4189 dev->padded = (char *)dev - (char *)p;
4190 dev_net_set(dev, &init_net);
4193 dev->num_tx_queues = queue_count;
4196 dev->priv = ((char *)dev +
4197 ((sizeof(struct net_device) +
4198 (sizeof(struct net_device_subqueue) *
4199 (queue_count - 1)) + NETDEV_ALIGN_CONST)
4200 & ~NETDEV_ALIGN_CONST));
4203 dev->egress_subqueue_count = queue_count;
4204 dev->gso_max_size = GSO_MAX_SIZE;
4206 netdev_init_queues(dev);
4208 dev->get_stats = internal_stats;
4209 netpoll_netdev_init(dev);
4211 strcpy(dev->name, name);
4214 EXPORT_SYMBOL(alloc_netdev_mq);
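/* Usage sketch (illustrative only): the usual driver lifecycle built on
 * the allocator above.  struct example_drv_priv is hypothetical;
 * ether_setup() is the common Ethernet setup callback, and
 * register_netdev() expands the "%d" in the name template.
 */
#if 0
struct example_drv_priv {
	int example_state;
};

static struct net_device *example_create(void)
{
	struct net_device *dev;

	dev = alloc_netdev_mq(sizeof(struct example_drv_priv), "ex%d",
			      ether_setup, 1);
	if (!dev)
		return NULL;
	if (register_netdev(dev)) {
		free_netdev(dev);	/* never registered: free directly */
		return NULL;
	}
	return dev;
}

static void example_destroy(struct net_device *dev)
{
	unregister_netdev(dev);		/* takes the rtnl itself */
	free_netdev(dev);		/* drop the final reference */
}
#endif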
4217 * free_netdev - free network device
4220 * This function does the last stage of destroying an allocated device
4221 * interface. The reference to the device object is released.
4222 * If this is the last reference then it will be freed.
4224 void free_netdev(struct net_device *dev)
4226 release_net(dev_net(dev));
4230 /* Compatibility with error handling in drivers */
4231 if (dev->reg_state == NETREG_UNINITIALIZED) {
4232 kfree((char *)dev - dev->padded);
4236 BUG_ON(dev->reg_state != NETREG_UNREGISTERED);
4237 dev->reg_state = NETREG_RELEASED;
4239 /* will free via device release */
4240 put_device(&dev->dev);
4243 /* Synchronize with packet receive processing. */
4244 void synchronize_net(void)
4251 * unregister_netdevice - remove device from the kernel
4254 * This function shuts down a device interface and removes it
4255 * from the kernel tables.
4257 * Callers must hold the rtnl semaphore. You may want
4258 * unregister_netdev() instead of this.
4261 void unregister_netdevice(struct net_device *dev)
4265 rollback_registered(dev);
4266 /* Finish processing unregister after unlock */
4271 * unregister_netdev - remove device from the kernel
4274 * This function shuts down a device interface and removes it
4275 * from the kernel tables.
4277 * This is just a wrapper for unregister_netdevice that takes
4278 * the rtnl semaphore. In general you want to use this and not
4279 * unregister_netdevice.
4281 void unregister_netdev(struct net_device *dev)
4284 unregister_netdevice(dev);
4288 EXPORT_SYMBOL(unregister_netdev);
4291 * dev_change_net_namespace - move device to a different network namespace
4293 * @net: network namespace
4294 * @pat: If not NULL name pattern to try if the current device name
4295 * is already taken in the destination network namespace.
4297 * This function shuts down a device interface and moves it
4298 * to a new network namespace. On success 0 is returned, on
4299 * a failure a negative errno code is returned.
4301 * Callers must hold the rtnl semaphore.
4304 int dev_change_net_namespace(struct net_device *dev, struct net *net, const char *pat)
4307 const char *destname;
4312 /* Don't allow namespace local devices to be moved. */
4314 if (dev->features & NETIF_F_NETNS_LOCAL)
4317 /* Ensure the device has been registered */
4319 if (dev->reg_state != NETREG_REGISTERED)
4322 /* Get out if there is nothing to do */
4324 if (net_eq(dev_net(dev), net))
4327 /* Pick the destination device name, and ensure
4328 * we can use it in the destination network namespace.
4331 destname = dev->name;
4332 if (__dev_get_by_name(net, destname)) {
4333 /* We get here if we can't use the current device name */
4336 if (!dev_valid_name(pat))
4338 if (strchr(pat, '%')) {
4339 if (__dev_alloc_name(net, pat, buf) < 0)
4344 if (__dev_get_by_name(net, destname))
4349 * And now a mini version of register_netdevice and unregister_netdevice.
4352 /* If device is running close it first. */
4355 /* And unlink it from device chain */
4357 unlist_netdevice(dev);
4361 /* Shutdown queueing discipline. */
4364 /* Notify protocols that we are about to destroy
4365 this device. They should clean all the things.
4367 call_netdevice_notifiers(NETDEV_UNREGISTER, dev);
4370 * Flush the unicast and multicast chains
4372 dev_addr_discard(dev);
4374 /* Actually switch the network namespace */
4375 dev_net_set(dev, net);
4377 /* Assign the new device name */
4378 if (destname != dev->name)
4379 strcpy(dev->name, destname);
4381 /* If there is an ifindex conflict assign a new one */
4382 if (__dev_get_by_index(net, dev->ifindex)) {
4383 int iflink = (dev->iflink == dev->ifindex);
4384 dev->ifindex = dev_new_index(net);
4386 dev->iflink = dev->ifindex;
4389 /* Fixup kobjects */
4390 netdev_unregister_kobject(dev);
4391 err = netdev_register_kobject(dev);
4394 /* Add the device back in the hashes */
4395 list_netdevice(dev);
4397 /* Notify protocols that a new device appeared. */
4398 call_netdevice_notifiers(NETDEV_REGISTER, dev);
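/* Usage sketch (illustrative only): moving a device into another network
 * namespace under the RTNL, supplying a "%d" pattern in case the current
 * name is already taken there.  example_move() is hypothetical.
 */
#if 0
static int example_move(struct net_device *dev, struct net *target)
{
	int err;

	rtnl_lock();
	err = dev_change_net_namespace(dev, target, "moved%d");
	rtnl_unlock();
	return err;
}
#endif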
4406 static int dev_cpu_callback(struct notifier_block *nfb,
4407 unsigned long action,
4410 struct sk_buff **list_skb;
4411 struct netdev_queue **list_net;
4412 struct sk_buff *skb;
4413 unsigned int cpu, oldcpu = (unsigned long)ocpu;
4414 struct softnet_data *sd, *oldsd;
4416 if (action != CPU_DEAD && action != CPU_DEAD_FROZEN)
4419 local_irq_disable();
4420 cpu = smp_processor_id();
4421 sd = &per_cpu(softnet_data, cpu);
4422 oldsd = &per_cpu(softnet_data, oldcpu);
4424 /* Find end of our completion_queue. */
4425 list_skb = &sd->completion_queue;
4427 list_skb = &(*list_skb)->next;
4428 /* Append completion queue from offline CPU. */
4429 *list_skb = oldsd->completion_queue;
4430 oldsd->completion_queue = NULL;
4432 /* Find end of our output_queue. */
4433 list_net = &sd->output_queue;
4435 list_net = &(*list_net)->next_sched;
4436 /* Append output queue from offline CPU. */
4437 *list_net = oldsd->output_queue;
4438 oldsd->output_queue = NULL;
4440 raise_softirq_irqoff(NET_TX_SOFTIRQ);
4443 /* Process offline CPU's input_pkt_queue */
4444 while ((skb = __skb_dequeue(&oldsd->input_pkt_queue)))
4450 #ifdef CONFIG_NET_DMA
4452 * net_dma_rebalance - try to maintain one DMA channel per CPU
4453 * @net_dma: DMA client and associated data (lock, channels, channel_mask)
4455 * This is called when the number of channels allocated to the net_dma client
4456 * changes. The net_dma client tries to have one DMA channel per CPU.
4459 static void net_dma_rebalance(struct net_dma *net_dma)
4461 unsigned int cpu, i, n, chan_idx;
4462 struct dma_chan *chan;
4464 if (cpus_empty(net_dma->channel_mask)) {
4465 for_each_online_cpu(cpu)
4466 rcu_assign_pointer(per_cpu(softnet_data, cpu).net_dma, NULL);
4471 cpu = first_cpu(cpu_online_map);
4473 for_each_cpu_mask(chan_idx, net_dma->channel_mask) {
4474 chan = net_dma->channels[chan_idx];
4476 n = ((num_online_cpus() / cpus_weight(net_dma->channel_mask))
4477 + (i < (num_online_cpus() %
4478 cpus_weight(net_dma->channel_mask)) ? 1 : 0));
4481 per_cpu(softnet_data, cpu).net_dma = chan;
4482 cpu = next_cpu(cpu, cpu_online_map);
4490 * netdev_dma_event - event callback for the net_dma_client
4491 * @client: should always be net_dma_client
4492 * @chan: DMA channel for the event
4493 * @state: DMA state to be handled
4495 static enum dma_state_client
4496 netdev_dma_event(struct dma_client *client, struct dma_chan *chan,
4497 enum dma_state state)
4499 int i, found = 0, pos = -1;
4500 struct net_dma *net_dma =
4501 container_of(client, struct net_dma, client);
4502 enum dma_state_client ack = DMA_DUP; /* default: take no action */
4504 spin_lock(&net_dma->lock);
4506 case DMA_RESOURCE_AVAILABLE:
4507 for (i = 0; i < nr_cpu_ids; i++)
4508 if (net_dma->channels[i] == chan) {
4511 } else if (net_dma->channels[i] == NULL && pos < 0)
4514 if (!found && pos >= 0) {
4516 net_dma->channels[pos] = chan;
4517 cpu_set(pos, net_dma->channel_mask);
4518 net_dma_rebalance(net_dma);
4521 case DMA_RESOURCE_REMOVED:
4522 for (i = 0; i < nr_cpu_ids; i++)
4523 if (net_dma->channels[i] == chan) {
4531 cpu_clear(pos, net_dma->channel_mask);
4532 net_dma->channels[i] = NULL;
4533 net_dma_rebalance(net_dma);
4539 spin_unlock(&net_dma->lock);
4545 * netdev_dma_register - register the networking subsystem as a DMA client
4547 static int __init netdev_dma_register(void)
4549 net_dma.channels = kzalloc(nr_cpu_ids * sizeof(struct dma_chan *),
4551 if (unlikely(!net_dma.channels)) {
4553 "netdev_dma: no memory for net_dma.channels\n");
4556 spin_lock_init(&net_dma.lock);
4557 dma_cap_set(DMA_MEMCPY, net_dma.client.cap_mask);
4558 dma_async_client_register(&net_dma.client);
4559 dma_async_client_chan_request(&net_dma.client);
4564 static int __init netdev_dma_register(void) { return -ENODEV; }
4565 #endif /* CONFIG_NET_DMA */
4568 * netdev_compute_features - compute conjunction of two feature sets
4569 * @all: first feature set
4570 * @one: second feature set
4572 * Computes a new feature set after adding a device with feature set
4573 * @one to the master device with current feature set @all. Returns
4574 * the new feature set.
4576 int netdev_compute_features(unsigned long all, unsigned long one)
4578 /* if device needs checksumming, downgrade to hw checksumming */
4579 if (all & NETIF_F_NO_CSUM && !(one & NETIF_F_NO_CSUM))
4580 all ^= NETIF_F_NO_CSUM | NETIF_F_HW_CSUM;
4582 /* if device can't do all checksum, downgrade to ipv4/ipv6 */
4583 if (all & NETIF_F_HW_CSUM && !(one & NETIF_F_HW_CSUM))
4584 all ^= NETIF_F_HW_CSUM
4585 | NETIF_F_IP_CSUM | NETIF_F_IPV6_CSUM;
4587 if (one & NETIF_F_GSO)
4588 one |= NETIF_F_GSO_SOFTWARE;
4591 /* If even one device supports robust GSO, enable it for all. */
4592 if (one & NETIF_F_GSO_ROBUST)
4593 all |= NETIF_F_GSO_ROBUST;
4595 all &= one | NETIF_F_LLTX;
4597 if (!(all & NETIF_F_ALL_CSUM))
4599 if (!(all & NETIF_F_SG))
4600 all &= ~NETIF_F_GSO_MASK;
4604 EXPORT_SYMBOL(netdev_compute_features);
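/* Usage sketch (illustrative only): how a master device such as bonding
 * might fold a newly added slave's feature set into its own, which is
 * the purpose of the helper above.  example_update_features() is
 * hypothetical.
 */
#if 0
static void example_update_features(struct net_device *master,
				    struct net_device *new_slave)
{
	master->features = netdev_compute_features(master->features,
						   new_slave->features);
}
#endif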
4606 static struct hlist_head *netdev_create_hash(void)
4609 struct hlist_head *hash;
4611 hash = kmalloc(sizeof(*hash) * NETDEV_HASHENTRIES, GFP_KERNEL);
4613 for (i = 0; i < NETDEV_HASHENTRIES; i++)
4614 INIT_HLIST_HEAD(&hash[i]);
4619 /* Initialize per network namespace state */
4620 static int __net_init netdev_init(struct net *net)
4622 INIT_LIST_HEAD(&net->dev_base_head);
4624 net->dev_name_head = netdev_create_hash();
4625 if (net->dev_name_head == NULL)
4628 net->dev_index_head = netdev_create_hash();
4629 if (net->dev_index_head == NULL)
4635 kfree(net->dev_name_head);
4640 static void __net_exit netdev_exit(struct net *net)
4642 kfree(net->dev_name_head);
4643 kfree(net->dev_index_head);
4646 static struct pernet_operations __net_initdata netdev_net_ops = {
4647 .init = netdev_init,
4648 .exit = netdev_exit,
4651 static void __net_exit default_device_exit(struct net *net)
4653 struct net_device *dev, *next;
4655 * Push all migratable network devices back to the
4656 * initial network namespace
4659 for_each_netdev_safe(net, dev, next) {
4661 char fb_name[IFNAMSIZ];
4663 /* Ignore unmovable devices (e.g. loopback) */
4664 if (dev->features & NETIF_F_NETNS_LOCAL)
4667 /* Push remaining network devices to init_net */
4668 snprintf(fb_name, IFNAMSIZ, "dev%d", dev->ifindex);
4669 err = dev_change_net_namespace(dev, &init_net, fb_name);
4671 printk(KERN_EMERG "%s: failed to move %s to init_net: %d\n",
4672 __func__, dev->name, err);
4679 static struct pernet_operations __net_initdata default_device_ops = {
4680 .exit = default_device_exit,
4684 * Initialize the DEV module. At boot time this walks the device list and
4685 * unhooks any devices that fail to initialise (normally hardware not
4686 * present) and leaves us with a valid list of present and active devices.
4691 * This is called single threaded during boot, so no need
4692 * to take the rtnl semaphore.
4694 static int __init net_dev_init(void)
4696 int i, rc = -ENOMEM;
4698 BUG_ON(!dev_boot_phase);
4700 if (dev_proc_init())
4703 if (netdev_kobject_init())
4706 INIT_LIST_HEAD(&ptype_all);
4707 for (i = 0; i < PTYPE_HASH_SIZE; i++)
4708 INIT_LIST_HEAD(&ptype_base[i]);
4710 if (register_pernet_subsys(&netdev_net_ops))
4713 if (register_pernet_device(&default_device_ops))
4717 * Initialise the packet receive queues.
4720 for_each_possible_cpu(i) {
4721 struct softnet_data *queue;
4723 queue = &per_cpu(softnet_data, i);
4724 skb_queue_head_init(&queue->input_pkt_queue);
4725 queue->completion_queue = NULL;
4726 INIT_LIST_HEAD(&queue->poll_list);
4728 queue->backlog.poll = process_backlog;
4729 queue->backlog.weight = weight_p;
4732 netdev_dma_register();
4736 open_softirq(NET_TX_SOFTIRQ, net_tx_action, NULL);
4737 open_softirq(NET_RX_SOFTIRQ, net_rx_action, NULL);
4739 hotcpu_notifier(dev_cpu_callback, 0);
4747 subsys_initcall(net_dev_init);
4749 EXPORT_SYMBOL(__dev_get_by_index);
4750 EXPORT_SYMBOL(__dev_get_by_name);
4751 EXPORT_SYMBOL(__dev_remove_pack);
4752 EXPORT_SYMBOL(dev_valid_name);
4753 EXPORT_SYMBOL(dev_add_pack);
4754 EXPORT_SYMBOL(dev_alloc_name);
4755 EXPORT_SYMBOL(dev_close);
4756 EXPORT_SYMBOL(dev_get_by_flags);
4757 EXPORT_SYMBOL(dev_get_by_index);
4758 EXPORT_SYMBOL(dev_get_by_name);
4759 EXPORT_SYMBOL(dev_open);
4760 EXPORT_SYMBOL(dev_queue_xmit);
4761 EXPORT_SYMBOL(dev_remove_pack);
4762 EXPORT_SYMBOL(dev_set_allmulti);
4763 EXPORT_SYMBOL(dev_set_promiscuity);
4764 EXPORT_SYMBOL(dev_change_flags);
4765 EXPORT_SYMBOL(dev_set_mtu);
4766 EXPORT_SYMBOL(dev_set_mac_address);
4767 EXPORT_SYMBOL(free_netdev);
4768 EXPORT_SYMBOL(netdev_boot_setup_check);
4769 EXPORT_SYMBOL(netdev_set_master);
4770 EXPORT_SYMBOL(netdev_state_change);
4771 EXPORT_SYMBOL(netif_receive_skb);
4772 EXPORT_SYMBOL(netif_rx);
4773 EXPORT_SYMBOL(register_gifconf);
4774 EXPORT_SYMBOL(register_netdevice);
4775 EXPORT_SYMBOL(register_netdevice_notifier);
4776 EXPORT_SYMBOL(skb_checksum_help);
4777 EXPORT_SYMBOL(synchronize_net);
4778 EXPORT_SYMBOL(unregister_netdevice);
4779 EXPORT_SYMBOL(unregister_netdevice_notifier);
4780 EXPORT_SYMBOL(net_enable_timestamp);
4781 EXPORT_SYMBOL(net_disable_timestamp);
4782 EXPORT_SYMBOL(dev_get_flags);
4784 #if defined(CONFIG_BRIDGE) || defined(CONFIG_BRIDGE_MODULE)
4785 EXPORT_SYMBOL(br_handle_frame_hook);
4786 EXPORT_SYMBOL(br_fdb_get_hook);
4787 EXPORT_SYMBOL(br_fdb_put_hook);
4791 EXPORT_SYMBOL(dev_load);
4794 EXPORT_PER_CPU_SYMBOL(softnet_data);