From: Al Viro
Date: Wed, 15 Nov 2006 05:24:49 +0000 (-0800)
Subject: [NET]: Annotate callers of csum_fold() in net/*
X-Git-Tag: v2.6.20-rc1~34^2~40^2~320
X-Git-Url: https://err.no/cgi-bin/gitweb.cgi?a=commitdiff_plain;h=d3bc23e7ee9db8023dff5a86bb3b0069ed018789;p=linux-2.6

[NET]: Annotate callers of csum_fold() in net/*

Signed-off-by: Al Viro
Signed-off-by: David S. Miller
---

diff --git a/net/core/datagram.c b/net/core/datagram.c
index f558c61aec..e5a05a046f 100644
--- a/net/core/datagram.c
+++ b/net/core/datagram.c
@@ -413,9 +413,9 @@ fault:
 
 unsigned int __skb_checksum_complete(struct sk_buff *skb)
 {
-	unsigned int sum;
+	__sum16 sum;
 
-	sum = (u16)csum_fold(skb_checksum(skb, 0, skb->len, skb->csum));
+	sum = csum_fold(skb_checksum(skb, 0, skb->len, skb->csum));
 	if (likely(!sum)) {
 		if (unlikely(skb->ip_summed == CHECKSUM_COMPLETE))
 			netdev_rx_csum_fault(skb->dev);
@@ -441,7 +441,7 @@ EXPORT_SYMBOL(__skb_checksum_complete);
 int skb_copy_and_csum_datagram_iovec(struct sk_buff *skb,
 				     int hlen, struct iovec *iov)
 {
-	unsigned int csum;
+	__wsum csum;
 	int chunk = skb->len - hlen;
 
 	/* Skip filled elements.
@@ -460,7 +460,7 @@ int skb_copy_and_csum_datagram_iovec(struct sk_buff *skb,
 		if (skb_copy_and_csum_datagram(skb, hlen, iov->iov_base,
 					       chunk, &csum))
 			goto fault;
-		if ((unsigned short)csum_fold(csum))
+		if (csum_fold(csum))
 			goto csum_error;
 		if (unlikely(skb->ip_summed == CHECKSUM_COMPLETE))
 			netdev_rx_csum_fault(skb->dev);
diff --git a/net/core/dev.c b/net/core/dev.c
index a7be106d0f..1a36b17f4b 100644
--- a/net/core/dev.c
+++ b/net/core/dev.c
@@ -1169,7 +1169,7 @@ EXPORT_SYMBOL(netif_device_attach);
  */
 int skb_checksum_help(struct sk_buff *skb)
 {
-	unsigned int csum;
+	__wsum csum;
 	int ret = 0, offset = skb->h.raw - skb->data;
 
 	if (skb->ip_summed == CHECKSUM_COMPLETE)
@@ -1193,7 +1193,7 @@ int skb_checksum_help(struct sk_buff *skb)
 	BUG_ON(offset <= 0);
 	BUG_ON(skb->csum + 2 > offset);
 
-	*(u16*)(skb->h.raw + skb->csum) = csum_fold(csum);
+	*(__sum16*)(skb->h.raw + skb->csum) = csum_fold(csum);
 
 out_set_summed:
 	skb->ip_summed = CHECKSUM_NONE;
diff --git a/net/core/netpoll.c b/net/core/netpoll.c
index 523141ee92..edd3246873 100644
--- a/net/core/netpoll.c
+++ b/net/core/netpoll.c
@@ -88,7 +88,7 @@ static int checksum_udp(struct sk_buff *skb, struct udphdr *uh,
 	psum = csum_tcpudp_nofold(saddr, daddr, ulen, IPPROTO_UDP, 0);
 
 	if (skb->ip_summed == CHECKSUM_COMPLETE &&
-	    !(u16)csum_fold(csum_add(psum, skb->csum)))
+	    !csum_fold(csum_add(psum, skb->csum)))
 		return 0;
 
 	skb->csum = psum;
diff --git a/net/core/skbuff.c b/net/core/skbuff.c
index b3dea1ef95..dfa02cc8d6 100644
--- a/net/core/skbuff.c
+++ b/net/core/skbuff.c
@@ -1396,7 +1396,7 @@ unsigned int skb_copy_and_csum_bits(const struct sk_buff *skb, int offset,
 
 void skb_copy_and_csum_dev(const struct sk_buff *skb, u8 *to)
 {
-	unsigned int csum;
+	__wsum csum;
 	long csstart;
 
 	if (skb->ip_summed == CHECKSUM_PARTIAL)
@@ -1416,7 +1416,7 @@ void skb_copy_and_csum_dev(const struct sk_buff *skb, u8 *to)
 
 	if (skb->ip_summed == CHECKSUM_PARTIAL) {
 		long csstuff = csstart + skb->csum;
 
-		*((unsigned short *)(to + csstuff)) = csum_fold(csum);
+		*((__sum16 *)(to + csstuff)) = csum_fold(csum);
 	}
 }
diff --git a/net/ipv4/icmp.c b/net/ipv4/icmp.c
index b39a37a475..cb9da0842b 100644
--- a/net/ipv4/icmp.c
+++ b/net/ipv4/icmp.c
@@ -356,7 +356,7 @@ static void icmp_push_reply(struct icmp_bxm *icmp_param,
 		ip_flush_pending_frames(icmp_socket->sk);
 	else if ((skb = skb_peek(&icmp_socket->sk->sk_write_queue)) != NULL) {
 		struct icmphdr *icmph = skb->h.icmph;
-		unsigned int csum = 0;
+		__wsum csum = 0;
 		struct sk_buff *skb1;
 
 		skb_queue_walk(&icmp_socket->sk->sk_write_queue, skb1) {
@@ -931,7 +931,7 @@ int icmp_rcv(struct sk_buff *skb)
 
 	switch (skb->ip_summed) {
 	case CHECKSUM_COMPLETE:
-		if (!(u16)csum_fold(skb->csum))
+		if (!csum_fold(skb->csum))
 			break;
 		/* fall through */
 	case CHECKSUM_NONE:
diff --git a/net/ipv4/igmp.c b/net/ipv4/igmp.c
index 6eee71647b..0017ccb01d 100644
--- a/net/ipv4/igmp.c
+++ b/net/ipv4/igmp.c
@@ -932,7 +932,7 @@ int igmp_rcv(struct sk_buff *skb)
 
 	switch (skb->ip_summed) {
 	case CHECKSUM_COMPLETE:
-		if (!(u16)csum_fold(skb->csum))
+		if (!csum_fold(skb->csum))
 			break;
 		/* fall through */
 	case CHECKSUM_NONE:
diff --git a/net/ipv4/ip_gre.c b/net/ipv4/ip_gre.c
index 25221146de..2bf54adee8 100644
--- a/net/ipv4/ip_gre.c
+++ b/net/ipv4/ip_gre.c
@@ -557,7 +557,7 @@ static int ipgre_rcv(struct sk_buff *skb)
 	struct iphdr *iph;
 	u8 *h;
 	__be16 flags;
-	u16 csum = 0;
+	__sum16 csum = 0;
 	__be32 key = 0;
 	u32 seqno = 0;
 	struct ip_tunnel *tunnel;
@@ -580,7 +580,7 @@ static int ipgre_rcv(struct sk_buff *skb)
 	if (flags&GRE_CSUM) {
 		switch (skb->ip_summed) {
 		case CHECKSUM_COMPLETE:
-			csum = (u16)csum_fold(skb->csum);
+			csum = csum_fold(skb->csum);
 			if (!csum)
 				break;
 			/* fall through */
diff --git a/net/ipv4/ip_output.c b/net/ipv4/ip_output.c
index 90942a384a..5f3e35c036 100644
--- a/net/ipv4/ip_output.c
+++ b/net/ipv4/ip_output.c
@@ -1384,7 +1384,7 @@ void ip_send_reply(struct sock *sk, struct sk_buff *skb, struct ip_reply_arg *ar
 		   &ipc, rt, MSG_DONTWAIT);
 	if ((skb = skb_peek(&sk->sk_write_queue)) != NULL) {
 		if (arg->csumoffset >= 0)
-			*((u16 *)skb->h.raw + arg->csumoffset) = csum_fold(csum_add(skb->csum, arg->csum));
+			*((__sum16 *)skb->h.raw + arg->csumoffset) = csum_fold(csum_add(skb->csum, arg->csum));
 		skb->ip_summed = CHECKSUM_NONE;
 		ip_push_pending_frames(sk);
 	}
diff --git a/net/ipv4/ipmr.c b/net/ipv4/ipmr.c
index 97cfa97c8a..efcf45ecc8 100644
--- a/net/ipv4/ipmr.c
+++ b/net/ipv4/ipmr.c
@@ -1493,7 +1493,7 @@ static int pim_rcv(struct sk_buff * skb)
 	if (pim->type != ((PIM_VERSION<<4)|(PIM_REGISTER)) ||
 	    (pim->flags&PIM_NULL_REGISTER) ||
 	    (ip_compute_csum((void *)pim, sizeof(*pim)) != 0 &&
-	     (u16)csum_fold(skb_checksum(skb, 0, skb->len, 0))))
+	     csum_fold(skb_checksum(skb, 0, skb->len, 0))))
 		goto drop;
 
 	/* check if the inner packet is destined to mcast group */
diff --git a/net/ipv4/ipvs/ip_vs_core.c b/net/ipv4/ipvs/ip_vs_core.c
index 1445bb47fe..fac2dffd66 100644
--- a/net/ipv4/ipvs/ip_vs_core.c
+++ b/net/ipv4/ipvs/ip_vs_core.c
@@ -538,7 +538,7 @@ static unsigned int ip_vs_post_routing(unsigned int hooknum,
 
 u16 ip_vs_checksum_complete(struct sk_buff *skb, int offset)
 {
-	return (u16) csum_fold(skb_checksum(skb, offset, skb->len - offset, 0));
+	return csum_fold(skb_checksum(skb, offset, skb->len - offset, 0));
 }
 
 static inline struct sk_buff *
diff --git a/net/ipv4/netfilter.c b/net/ipv4/netfilter.c
index e49441ac35..b797a37c01 100644
--- a/net/ipv4/netfilter.c
+++ b/net/ipv4/netfilter.c
@@ -172,7 +172,7 @@ unsigned int nf_ip_checksum(struct sk_buff *skb, unsigned int hook,
 	case CHECKSUM_COMPLETE:
 		if (hook != NF_IP_PRE_ROUTING && hook != NF_IP_LOCAL_IN)
 			break;
-		if ((protocol == 0 && !(u16)csum_fold(skb->csum)) ||
+		if ((protocol == 0 && !csum_fold(skb->csum)) ||
 		    !csum_tcpudp_magic(iph->saddr, iph->daddr,
 				       skb->len - dataoff, protocol,
 				       skb->csum)) {
diff --git a/net/ipv4/tcp.c b/net/ipv4/tcp.c
index dadef867a3..168f9de906 100644
--- a/net/ipv4/tcp.c
+++ b/net/ipv4/tcp.c
@@ -2162,7 +2162,7 @@ struct sk_buff *tcp_tso_segment(struct sk_buff *skb, int features)
 	struct tcphdr *th;
 	unsigned thlen;
 	unsigned int seq;
-	unsigned int delta;
+	__be32 delta;
 	unsigned int oldlen;
 	unsigned int len;
 
@@ -2215,7 +2215,8 @@ struct sk_buff *tcp_tso_segment(struct sk_buff *skb, int features)
 	do {
 		th->fin = th->psh = 0;
 
-		th->check = ~csum_fold(th->check + delta);
+		th->check = ~csum_fold((__force __wsum)((__force u32)th->check +
+				       (__force u32)delta));
 		if (skb->ip_summed != CHECKSUM_PARTIAL)
 			th->check = csum_fold(csum_partial(skb->h.raw, thlen,
 							   skb->csum));
@@ -2229,7 +2230,8 @@ struct sk_buff *tcp_tso_segment(struct sk_buff *skb, int features)
 	} while (skb->next);
 
 	delta = htonl(oldlen + (skb->tail - skb->h.raw) + skb->data_len);
-	th->check = ~csum_fold(th->check + delta);
+	th->check = ~csum_fold((__force __wsum)((__force u32)th->check +
+			       (__force u32)delta));
 	if (skb->ip_summed != CHECKSUM_PARTIAL)
 		th->check = csum_fold(csum_partial(skb->h.raw, thlen,
 						   skb->csum));
diff --git a/net/sunrpc/socklib.c b/net/sunrpc/socklib.c
index 6f17527b9e..61a038fc30 100644
--- a/net/sunrpc/socklib.c
+++ b/net/sunrpc/socklib.c
@@ -166,7 +166,7 @@ int csum_partial_copy_to_xdr(struct xdr_buf *xdr, struct sk_buff *skb)
 	}
 	if (desc.count)
 		return -1;
-	if ((unsigned short)csum_fold(desc.csum))
+	if (csum_fold(desc.csum))
 		return -1;
 	if (unlikely(skb->ip_summed == CHECKSUM_COMPLETE))
 		netdev_rx_csum_fault(skb->dev);
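
A note on the types involved: __wsum is the 32-bit partial sum that
csum_partial()/skb_checksum() accumulate, and __sum16 is the folded 16-bit
value that csum_fold() returns. That is why the (u16)/(unsigned short) casts
at the call sites above can simply go away, and why sparse can flag places
where the two kinds of checksum are mixed. A minimal sketch of the pattern,
assuming the __wsum-annotated skb_checksum() prototype; the helper name
verify_csum() is hypothetical and only mirrors the __skb_checksum_complete()
hunk above:

	#include <linux/skbuff.h>
	#include <net/checksum.h>

	/*
	 * Fold a full-packet checksum, seeded with skb->csum, the way
	 * __skb_checksum_complete() does.  Returns 0 when it verifies.
	 */
	static int verify_csum(struct sk_buff *skb)
	{
		/* skb_checksum() yields a 32-bit partial sum (__wsum)... */
		__wsum partial = skb_checksum(skb, 0, skb->len, skb->csum);

		/*
		 * ...and csum_fold() folds it into the final 16-bit __sum16;
		 * a folded value of zero means the checksum is valid.
		 */
		__sum16 folded = csum_fold(partial);

		return folded ? -1 : 0;
	}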