X-Git-Url: https://err.no/cgi-bin/gitweb.cgi?a=blobdiff_plain;f=include%2Flinux%2Fskbuff.h;h=2220b9e2dab095b6c2c0bea11fe1e0f4417145b4;hb=4497b0763cb1afae463f5e144c28b5d806e28b60;hp=c2116200580ab9ca545a19dce868d2fc74a99892;hpb=f525c06d12b72cddb085df7f6f348c3c5a39b3ce;p=linux-2.6

diff --git a/include/linux/skbuff.h b/include/linux/skbuff.h
index c211620058..2220b9e2da 100644
--- a/include/linux/skbuff.h
+++ b/include/linux/skbuff.h
@@ -242,6 +242,7 @@ typedef unsigned char *sk_buff_data_t;
  *	@queue_mapping: Queue mapping for multiqueue devices
  *	@tc_index: Traffic control index
  *	@tc_verd: traffic control verdict
+ *	@ndisc_nodetype: router type (from link layer)
  *	@dma_cookie: a cookie to one of several possible DMA operations
  *		done by skb DMA functions
  *	@secmark: security marking
@@ -663,11 +664,21 @@ static inline void skb_queue_head_init_class(struct sk_buff_head *list,
 }
 
 /*
- *	Insert an sk_buff at the start of a list.
+ *	Insert an sk_buff on a list.
  *
  *	The "__skb_xxxx()" functions are the non-atomic ones that
  *	can only be called with interrupts disabled.
  */
+extern void skb_insert(struct sk_buff *old, struct sk_buff *newsk, struct sk_buff_head *list);
+static inline void __skb_insert(struct sk_buff *newsk,
+				struct sk_buff *prev, struct sk_buff *next,
+				struct sk_buff_head *list)
+{
+	newsk->next = next;
+	newsk->prev = prev;
+	next->prev = prev->next = newsk;
+	list->qlen++;
+}
 
 /**
  *	__skb_queue_after - queue a buffer at the list head
@@ -684,13 +695,17 @@ static inline void __skb_queue_after(struct sk_buff_head *list,
 				     struct sk_buff *prev,
 				     struct sk_buff *newsk)
 {
-	struct sk_buff *next;
-	list->qlen++;
+	__skb_insert(newsk, prev, prev->next, list);
+}
 
-	next = prev->next;
-	newsk->next = next;
-	newsk->prev = prev;
-	next->prev = prev->next = newsk;
+extern void skb_append(struct sk_buff *old, struct sk_buff *newsk,
+		       struct sk_buff_head *list);
+
+static inline void __skb_queue_before(struct sk_buff_head *list,
+				      struct sk_buff *next,
+				      struct sk_buff *newsk)
+{
+	__skb_insert(newsk, next->prev, next, list);
 }
 
 /**
@@ -724,37 +739,7 @@ extern void skb_queue_tail(struct sk_buff_head *list, struct sk_buff *newsk);
 static inline void __skb_queue_tail(struct sk_buff_head *list,
 				    struct sk_buff *newsk)
 {
-	struct sk_buff *prev, *next;
-
-	list->qlen++;
-	next = (struct sk_buff *)list;
-	prev = next->prev;
-	newsk->next = next;
-	newsk->prev = prev;
-	next->prev = prev->next = newsk;
-}
-
-/*
- *	Insert a packet on a list.
- */
-extern void skb_insert(struct sk_buff *old, struct sk_buff *newsk, struct sk_buff_head *list);
-static inline void __skb_insert(struct sk_buff *newsk,
-				struct sk_buff *prev, struct sk_buff *next,
-				struct sk_buff_head *list)
-{
-	newsk->next = next;
-	newsk->prev = prev;
-	next->prev = prev->next = newsk;
-	list->qlen++;
-}
-
-/*
- *	Place a packet after a given packet in a list.
- */
-extern void skb_append(struct sk_buff *old, struct sk_buff *newsk, struct sk_buff_head *list);
-static inline void __skb_append(struct sk_buff *old, struct sk_buff *newsk, struct sk_buff_head *list)
-{
-	__skb_insert(newsk, old, old->next, list);
+	__skb_queue_before(list, (struct sk_buff *)list, newsk);
 }
 
 /*
@@ -1717,6 +1702,20 @@ static inline int skb_is_gso_v6(const struct sk_buff *skb)
 	return skb_shinfo(skb)->gso_type & SKB_GSO_TCPV6;
 }
 
+extern void __skb_warn_lro_forwarding(const struct sk_buff *skb);
+
+static inline bool skb_warn_if_lro(const struct sk_buff *skb)
+{
+	/* LRO sets gso_size but not gso_type, whereas if GSO is really
+	 * wanted then gso_type will be set. */
+	struct skb_shared_info *shinfo = skb_shinfo(skb);
+	if (shinfo->gso_size != 0 && unlikely(shinfo->gso_type == 0)) {
+		__skb_warn_lro_forwarding(skb);
+		return true;
+	}
+	return false;
+}
+
 static inline void skb_forward_csum(struct sk_buff *skb)
 {
 	/* Unfortunately we don't support this one. Any brave souls? */
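
The queueing hunks above fold four copies of the same pointer surgery into one primitive: __skb_insert() splices a buffer between prev and next on a circular doubly linked list whose sentinel is the sk_buff_head itself, which is why the rewritten __skb_queue_tail() reduces to "insert before the sentinel". The following is a minimal, self-contained userspace sketch of that idea; the mock types and helper names are illustrative stand-ins, not the kernel's sk_buff structures.

#include <assert.h>
#include <stdio.h>

struct node {
	struct node *next, *prev;
};

/* Field order mirrors sk_buff_head: next/prev come first, so the head
 * can be cast to a node and serve as the ring's sentinel element. */
struct head_mock {
	struct node *next, *prev;
	unsigned int qlen;
};

/* Mirrors the new __skb_insert(): link newsk in between prev and next. */
static void insert(struct node *newsk, struct node *prev, struct node *next,
		   struct head_mock *list)
{
	newsk->next = next;
	newsk->prev = prev;
	next->prev = prev->next = newsk;
	list->qlen++;
}

/* Mirrors __skb_queue_before(): insert in front of "next". */
static void queue_before(struct head_mock *list, struct node *next,
			 struct node *newsk)
{
	insert(newsk, next->prev, next, list);
}

/* Mirrors the rewritten __skb_queue_tail(): inserting before the
 * sentinel is the same as appending at the tail of the ring. */
static void queue_tail(struct head_mock *list, struct node *newsk)
{
	queue_before(list, (struct node *)list, newsk);
}

int main(void)
{
	struct head_mock list;
	struct node a, b;

	list.next = list.prev = (struct node *)&list;	/* empty ring */
	list.qlen = 0;

	queue_tail(&list, &a);
	queue_tail(&list, &b);

	assert(list.next == &a && a.next == &b);
	assert(list.prev == &b && list.qlen == 2);
	printf("a -> b appended via insert-before-sentinel\n");
	return 0;
}

Because the head shares its first two fields with the nodes, empty lists, head inserts, and tail appends all go through the same splice with no special cases; that invariant is what lets the patch delete the open-coded bodies.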
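The final hunk adds skb_warn_if_lro() for the forwarding path: an LRO-aggregated buffer carries a nonzero gso_size with gso_type left zero, whereas genuine GSO sets both, so that combination flags traffic that must not be forwarded as-is. Below is a hedged userspace sketch of the same predicate; the mock structure, the field values, and the warning text are assumptions for illustration, not the kernel's definitions.

#include <stdbool.h>
#include <stdio.h>

/* Illustrative stand-in for the two skb_shared_info fields the test
 * reads; the real structure has many more members. */
struct shinfo_mock {
	unsigned short gso_size;
	unsigned short gso_type;
};

/* Same test as skb_warn_if_lro(): gso_size set with gso_type clear
 * marks an LRO aggregate, so warn and tell the caller to drop it. */
static bool warn_if_lro(const struct shinfo_mock *shinfo)
{
	if (shinfo->gso_size != 0 && shinfo->gso_type == 0) {
		fprintf(stderr, "warning: LRO packet on forwarding path\n");
		return true;
	}
	return false;
}

int main(void)
{
	struct shinfo_mock lro   = { .gso_size = 1448, .gso_type = 0 };
	struct shinfo_mock gso   = { .gso_size = 1448, .gso_type = 1 };
	struct shinfo_mock plain = { .gso_size = 0,    .gso_type = 0 };

	printf("lro:   drop=%d\n", warn_if_lro(&lro));		/* drop=1 */
	printf("gso:   drop=%d\n", warn_if_lro(&gso));		/* drop=0 */
	printf("plain: drop=%d\n", warn_if_lro(&plain));	/* drop=0 */
	return 0;
}

A caller on a forwarding path would be expected to drop the packet when this returns true, along the lines of "if (skb_warn_if_lro(skb)) goto drop;".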