X-Git-Url: https://err.no/cgi-bin/gitweb.cgi?a=blobdiff_plain;f=include%2Flinux%2Fnetdevice.h;h=7c1d4466583b74e05ea09a16f86ea96a98651e8f;hb=dd32f7effdd2f3f348ef91ca1649d78a0ab2b103;hp=b0813c3286b1bbfd035415c86cf592f39a52245b;hpb=2d94dfc8c38edf63e91e48fd55c3a8822b6a9ced;p=linux-2.6

diff --git a/include/linux/netdevice.h b/include/linux/netdevice.h
index b0813c3286..7c1d446658 100644
--- a/include/linux/netdevice.h
+++ b/include/linux/netdevice.h
@@ -322,7 +322,7 @@ enum
 	NAPI_STATE_DISABLE,	/* Disable pending */
 };
 
-extern void FASTCALL(__napi_schedule(struct napi_struct *n));
+extern void __napi_schedule(struct napi_struct *n);
 
 static inline int napi_disable_pending(struct napi_struct *n)
 {
@@ -383,9 +383,11 @@ static inline void __napi_complete(struct napi_struct *n)
 
 static inline void napi_complete(struct napi_struct *n)
 {
-	local_irq_disable();
+	unsigned long flags;
+
+	local_irq_save(flags);
 	__napi_complete(n);
-	local_irq_enable();
+	local_irq_restore(flags);
 }
 
 /**
@@ -604,6 +606,10 @@ struct net_device
 	unsigned char		broadcast[MAX_ADDR_LEN];	/* hw bcast add	*/
 
+	/* ingress path synchronizer */
+	spinlock_t		ingress_lock;
+	struct Qdisc		*qdisc_ingress;
+
 	/*
 	 * Cache line mostly used on queue transmit path (qdisc)
 	 */
@@ -617,10 +623,6 @@ struct net_device
 	/* Partially transmitted GSO packet. */
 	struct sk_buff		*gso_skb;
 
-	/* ingress path synchronizer */
-	spinlock_t		ingress_lock;
-	struct Qdisc		*qdisc_ingress;
-
 	/*
 	 * One part is mostly used on xmit path (device)
 	 */
@@ -708,8 +710,10 @@ struct net_device
 	void			(*poll_controller)(struct net_device *dev);
 #endif
 
+#ifdef CONFIG_NET_NS
 	/* Network namespace this network device is inside */
 	struct net		*nd_net;
+#endif
 
 	/* bridge stuff */
 	struct net_bridge_port	*br_port;
@@ -724,6 +728,10 @@ struct net_device
 	/* rtnetlink link ops */
 	const struct rtnl_link_ops *rtnl_link_ops;
 
+	/* for setting kernel sock attribute on TCP connection setup */
+#define GSO_MAX_SIZE		65536
+	unsigned int		gso_max_size;
+
 	/* The TX queue control structures */
 	unsigned int			egress_subqueue_count;
 	struct net_device_subqueue	egress_subqueue[1];
@@ -733,6 +741,28 @@ struct net_device
 #define NETDEV_ALIGN		32
 #define NETDEV_ALIGN_CONST	(NETDEV_ALIGN - 1)
 
+/*
+ * Net namespace inlines
+ */
+static inline
+struct net *dev_net(const struct net_device *dev)
+{
+#ifdef CONFIG_NET_NS
+	return dev->nd_net;
+#else
+	return &init_net;
+#endif
+}
+
+static inline
+void dev_net_set(struct net_device *dev, struct net *net)
+{
+#ifdef CONFIG_NET_NS
+	release_net(dev->nd_net);
+	dev->nd_net = hold_net(net);
+#endif
+}
+
 /**
  *	netdev_priv - access network device private data
  *	@dev: network device
@@ -809,7 +839,7 @@ static inline struct net_device *next_net_device(struct net_device *dev)
 	struct list_head *lh;
 	struct net *net;
 
-	net = dev->nd_net;
+	net = dev_net(dev);
 	lh = dev->dev_list.next;
 	return lh == &net->dev_base_head ? NULL : net_device_entry(lh);
 }
@@ -1072,12 +1102,14 @@ static inline int netif_is_multiqueue(const struct net_device *dev)
 }
 
 /* Use this variant when it is known for sure that it
- * is executing from interrupt context.
+ * is executing from hardware interrupt context or with hardware interrupts
+ * disabled.
  */
 extern void dev_kfree_skb_irq(struct sk_buff *skb);
 
 /* Use this variant in places where it could be invoked
- * either from interrupt or non-interrupt context.
+ * from either hardware interrupt or other context, with hardware interrupts
+ * either disabled or enabled.
  */
 extern void dev_kfree_skb_any(struct sk_buff *skb);
 
@@ -1414,12 +1446,16 @@ extern void		dev_set_rx_mode(struct net_device *dev);
 extern void		__dev_set_rx_mode(struct net_device *dev);
 extern int		dev_unicast_delete(struct net_device *dev, void *addr, int alen);
 extern int		dev_unicast_add(struct net_device *dev, void *addr, int alen);
+extern int		dev_unicast_sync(struct net_device *to, struct net_device *from);
+extern void		dev_unicast_unsync(struct net_device *to, struct net_device *from);
 extern int		dev_mc_delete(struct net_device *dev, void *addr, int alen, int all);
 extern int		dev_mc_add(struct net_device *dev, void *addr, int alen, int newonly);
 extern int		dev_mc_sync(struct net_device *to, struct net_device *from);
 extern void		dev_mc_unsync(struct net_device *to, struct net_device *from);
 extern int		__dev_addr_delete(struct dev_addr_list **list, int *count, void *addr, int alen, int all);
 extern int		__dev_addr_add(struct dev_addr_list **list, int *count, void *addr, int alen, int newonly);
+extern int		__dev_addr_sync(struct dev_addr_list **to, int *to_count, struct dev_addr_list **from, int *from_count);
+extern void		__dev_addr_unsync(struct dev_addr_list **to, int *to_count, struct dev_addr_list **from, int *from_count);
 extern void		dev_set_promiscuity(struct net_device *dev, int inc);
 extern void		dev_set_allmulti(struct net_device *dev, int inc);
 extern void		netdev_state_change(struct net_device *dev);
@@ -1471,6 +1507,12 @@ static inline int netif_needs_gso(struct net_device *dev, struct sk_buff *skb)
 		 unlikely(skb->ip_summed != CHECKSUM_PARTIAL));
 }
 
+static inline void netif_set_gso_max_size(struct net_device *dev,
+					  unsigned int size)
+{
+	dev->gso_max_size = size;
+}
+
 /* On bonding slaves other than the currently active slave, suppress
  * duplicates except for 802.3ad ETH_P_SLOW, alb non-mcast/bcast, and
  * ARP on active-backup slaves with arp_validate enabled.
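
For reference, a minimal usage sketch of the helpers this diff introduces. Only dev_net(), dev_net_set(), netif_set_gso_max_size(), GSO_MAX_SIZE and the nd_net/gso_max_size fields come from the patch; example_setup() and its surroundings are hypothetical. The intent shown is that callers go through dev_net() instead of dereferencing dev->nd_net, which only exists under CONFIG_NET_NS (without it, dev_net() returns &init_net).

#include <linux/netdevice.h>

/* Hypothetical driver setup routine; it only illustrates the new helpers. */
static void example_setup(struct net_device *dev)
{
	/* Look up the device's namespace through the accessor rather than
	 * touching dev->nd_net directly, so this builds with and without
	 * CONFIG_NET_NS. */
	struct net *net = dev_net(dev);

	/* Advertise the largest GSO payload this device can handle;
	 * GSO_MAX_SIZE (65536) is the default upper bound. */
	netif_set_gso_max_size(dev, GSO_MAX_SIZE);

	(void)net;	/* placeholder use in this sketch */
}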