From: Al Viro
Date: Wed, 15 Nov 2006 05:14:53 +0000 (-0800)
Subject: [NET]: Alpha checksum annotations and cleanups.
X-Git-Tag: v2.6.20-rc1~34^2~40^2~346
X-Git-Url: https://err.no/cgi-bin/gitweb.cgi?a=commitdiff_plain;h=9be259aae5264511fe0a8b5e3d6711e0fd1d55df;p=linux-2.6

[NET]: Alpha checksum annotations and cleanups.

* sanitize prototypes and annotate
* kill useless access_ok() in csum_partial_copy_from_user() (the only
  caller checks it already).
* do_csum_partial_copy_from_user() is not needed now
* replace htons(len) with len << 8 - they are the same wrt checksums
  on little-endian.

Signed-off-by: Al Viro
Signed-off-by: David S. Miller
---
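
A quick way to convince yourself of the last bullet above: in the pseudo-header
sum the old term, (unsigned long) ntohs(len) << 16, and the new term, len << 8,
are congruent mod 0xffff, and congruence mod 0xffff is all that survives the
final from64to16() fold (the proto contribution, proto << 8, appears in both
versions). The stand-alone program below is only an illustration, not part of
the patch; it assumes a little-endian host (as Alpha is) and uses a local
bswap16() in place of htons()/ntohs().

#include <stdint.h>
#include <stdio.h>

/* what htons()/ntohs() amount to on a little-endian host */
static uint16_t bswap16(uint16_t x)
{
	return (uint16_t)((x << 8) | (x >> 8));
}

int main(void)
{
	uint32_t len;

	for (len = 0; len <= 0xffff; len++) {
		/* old pseudo-header term: (unsigned long) ntohs(len) << 16 */
		uint64_t t_old = (uint64_t)bswap16((uint16_t)len) << 16;
		/* new pseudo-header term: len << 8 */
		uint64_t t_new = (uint64_t)len << 8;

		/* from64to16() preserves the value modulo 0xffff */
		if (t_old % 0xffff != t_new % 0xffff) {
			printf("mismatch at len=%u\n", (unsigned)len);
			return 1;
		}
	}
	printf("htons(len) << 16 and len << 8 fold to the same checksum\n");
	return 0;
}
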
diff --git a/arch/alpha/lib/checksum.c b/arch/alpha/lib/checksum.c
index 89044e6385..ab3761c437 100644
--- a/arch/alpha/lib/checksum.c
+++ b/arch/alpha/lib/checksum.c
@@ -41,28 +41,25 @@ static inline unsigned short from64to16(unsigned long x)
  * computes the checksum of the TCP/UDP pseudo-header
  * returns a 16-bit checksum, already complemented.
  */
-unsigned short int csum_tcpudp_magic(unsigned long saddr,
-				   unsigned long daddr,
+__sum16 csum_tcpudp_magic(__be32 saddr, __be32 daddr,
 				   unsigned short len,
 				   unsigned short proto,
-				   unsigned int sum)
+				   __wsum sum)
 {
-	return ~from64to16(saddr + daddr + sum +
-		((unsigned long) ntohs(len) << 16) +
-		((unsigned long) proto << 8));
+	return (__force __sum16)~from64to16(
+		(__force u64)saddr + (__force u64)daddr +
+		(__force u64)sum + ((len + proto) << 8));
 }
 
-unsigned int csum_tcpudp_nofold(unsigned long saddr,
-				   unsigned long daddr,
+__wsum csum_tcpudp_nofold(__be32 saddr, __be32 daddr,
 				   unsigned short len,
 				   unsigned short proto,
-				   unsigned int sum)
+				   __wsum sum)
 {
 	unsigned long result;
 
-	result = (saddr + daddr + sum +
-		 ((unsigned long) ntohs(len) << 16) +
-		 ((unsigned long) proto << 8));
+	result = (__force u64)saddr + (__force u64)daddr +
+		 (__force u64)sum + ((len + proto) << 8);
 
 	/* Fold down to 32-bits so we don't lose in the typedef-less
 	   network stack.  */
@@ -70,7 +67,7 @@ unsigned int csum_tcpudp_nofold(unsigned long saddr,
 	result = (result & 0xffffffff) + (result >> 32);
 	/* 33 to 32 */
 	result = (result & 0xffffffff) + (result >> 32);
-	return result;
+	return (__force __wsum)result;
 }
 
 /*
@@ -146,9 +143,9 @@ out:
  * This is a version of ip_compute_csum() optimized for IP headers,
  * which always checksum on 4 octet boundaries.
  */
-unsigned short ip_fast_csum(unsigned char * iph, unsigned int ihl)
+__sum16 ip_fast_csum(const void *iph, unsigned int ihl)
 {
-	return ~do_csum(iph,ihl*4);
+	return (__force __sum16)~do_csum(iph,ihl*4);
 }
 
 /*
@@ -163,15 +160,15 @@ unsigned short ip_fast_csum(unsigned char * iph, unsigned int ihl)
  *
  * it's best to have buff aligned on a 32-bit boundary
  */
-unsigned int csum_partial(const unsigned char * buff, int len, unsigned int sum)
+__wsum csum_partial(const void *buff, int len, __wsum sum)
 {
 	unsigned long result = do_csum(buff, len);
 
 	/* add in old sum, and carry.. */
-	result += sum;
+	result += (__force u32)sum;
 	/* 32+c bits -> 32 bits */
 	result = (result & 0xffffffff) + (result >> 32);
-	return result;
+	return (__force __wsum)result;
 }
 
 EXPORT_SYMBOL(csum_partial);
@@ -180,7 +177,7 @@ EXPORT_SYMBOL(csum_partial);
  * this routine is used for miscellaneous IP-like checksums, mainly
  * in icmp.c
  */
-unsigned short ip_compute_csum(unsigned char * buff, int len)
+__sum16 ip_compute_csum(const void *buff, int len)
 {
-	return ~from64to16(do_csum(buff,len));
+	return (__force __sum16)~from64to16(do_csum(buff,len));
 }
diff --git a/arch/alpha/lib/csum_partial_copy.c b/arch/alpha/lib/csum_partial_copy.c
index a37948f303..4ca75c74ce 100644
--- a/arch/alpha/lib/csum_partial_copy.c
+++ b/arch/alpha/lib/csum_partial_copy.c
@@ -329,11 +329,11 @@ csum_partial_cfu_unaligned(const unsigned long __user * src,
 	return checksum;
 }
 
-static unsigned int
-do_csum_partial_copy_from_user(const char __user *src, char *dst, int len,
-			       unsigned int sum, int *errp)
+__wsum
+csum_partial_copy_from_user(const void __user *src, void *dst, int len,
+			       __wsum sum, int *errp)
 {
-	unsigned long checksum = (unsigned) sum;
+	unsigned long checksum = (__force u32) sum;
 	unsigned long soff = 7 & (unsigned long) src;
 	unsigned long doff = 7 & (unsigned long) dst;
 
@@ -367,25 +367,12 @@ do_csum_partial_copy_from_user(const char __user *src, char *dst, int len,
 		}
 		checksum = from64to16 (checksum);
 	}
-	return checksum;
-}
-
-unsigned int
-csum_partial_copy_from_user(const char __user *src, char *dst, int len,
-			    unsigned int sum, int *errp)
-{
-	if (!access_ok(VERIFY_READ, src, len)) {
-		*errp = -EFAULT;
-		memset(dst, 0, len);
-		return sum;
-	}
-
-	return do_csum_partial_copy_from_user(src, dst, len, sum, errp);
+	return (__force __wsum)checksum;
 }
 
-unsigned int
-csum_partial_copy_nocheck(const char __user *src, char *dst, int len,
-			  unsigned int sum)
+__wsum
+csum_partial_copy_nocheck(const void *src, void *dst, int len, __wsum sum)
 {
-	return do_csum_partial_copy_from_user(src, dst, len, sum, NULL);
+	return csum_partial_copy_from_user((__force const void __user *)src,
+					   dst, len, sum, NULL);
 }
diff --git a/include/asm-alpha/checksum.h b/include/asm-alpha/checksum.h
index a5c9f08447..d3854bbf0a 100644
--- a/include/asm-alpha/checksum.h
+++ b/include/asm-alpha/checksum.h
@@ -7,21 +7,20 @@
  * This is a version of ip_compute_csum() optimized for IP headers,
  * which always checksum on 4 octet boundaries.
  */
-extern unsigned short ip_fast_csum(unsigned char * iph, unsigned int ihl);
+extern __sum16 ip_fast_csum(const void *iph, unsigned int ihl);
 
 /*
  * computes the checksum of the TCP/UDP pseudo-header
  * returns a 16-bit checksum, already complemented
  */
-extern unsigned short int csum_tcpudp_magic(unsigned long saddr,
-					   unsigned long daddr,
+extern __sum16 csum_tcpudp_magic(__be32 saddr, __be32 daddr,
 					   unsigned short len,
 					   unsigned short proto,
-					   unsigned int sum);
+					   __wsum sum);
 
-unsigned int csum_tcpudp_nofold(unsigned long saddr, unsigned long daddr,
+__wsum csum_tcpudp_nofold(__be32 saddr, __be32 daddr,
 					unsigned short len, unsigned short proto,
-					unsigned int sum);
+					__wsum sum);
 
 /*
  * computes the checksum of a memory block at buff, length len,
@@ -35,7 +34,7 @@ unsigned int csum_tcpudp_nofold(unsigned long saddr, unsigned long daddr,
  *
  * it's best to have buff aligned on a 32-bit boundary
  */
-extern unsigned int csum_partial(const unsigned char * buff, int len, unsigned int sum);
+extern __wsum csum_partial(const void *buff, int len, __wsum sum);
 
 /*
  * the same as csum_partial, but copies from src while it
@@ -44,9 +43,9 @@ extern unsigned int csum_partial(const unsigned char * buff, int len, unsigned i
  * here even more important to align src and dst on a 32-bit (or even
  * better 64-bit) boundary
  */
-unsigned int csum_partial_copy_from_user(const char __user *src, char *dst, int len, unsigned int sum, int *errp);
+__wsum csum_partial_copy_from_user(const void __user *src, void *dst, int len, __wsum sum, int *errp);
 
-unsigned int csum_partial_copy_nocheck(const char *src, char *dst, int len, unsigned int sum);
+__wsum csum_partial_copy_nocheck(const void *src, void *dst, int len, __wsum sum);
 
 
 /*
@@ -54,24 +53,23 @@ unsigned int csum_partial_copy_nocheck(const char *src, char *dst, int len, unsi
  * in icmp.c
  */
 
-extern unsigned short ip_compute_csum(unsigned char * buff, int len);
+extern __sum16 ip_compute_csum(const void *buff, int len);
 
 /*
  * Fold a partial checksum without adding pseudo headers
  */
-static inline unsigned short csum_fold(unsigned int sum)
+static inline __sum16 csum_fold(__wsum csum)
 {
+	u32 sum = (__force u32)csum;
 	sum = (sum & 0xffff) + (sum >> 16);
 	sum = (sum & 0xffff) + (sum >> 16);
-	return ~sum;
+	return (__force __sum16)~sum;
 }
 
 #define _HAVE_ARCH_IPV6_CSUM
-extern unsigned short int csum_ipv6_magic(struct in6_addr *saddr,
-					  struct in6_addr *daddr,
-					  __u32 len,
-					  unsigned short proto,
-					  unsigned int sum);
-
+extern __sum16 csum_ipv6_magic(const struct in6_addr *saddr,
+			       const struct in6_addr *daddr,
+			       __u32 len, unsigned short proto,
+			       __wsum sum);
 #endif
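
For readers not yet used to the typedefs this patch converts to: __wsum, __sum16
and __be32 are "bitwise" types - identical to plain integers at run time, but
treated as incompatible with them (and with each other) by sparse, so accidental
mixing is caught at static-check time; a (__force ...) cast marks the places
where the raw bit pattern is reinterpreted on purpose, as in the hunks above.
The sketch below is a simplified stand-in for the real definitions in
include/linux/types.h and include/linux/compiler.h, just to show the pattern;
demo_fold() mirrors the shape of the new csum_fold() and is not kernel code.

#ifdef __CHECKER__			/* defined only when sparse is running */
#define __bitwise	__attribute__((bitwise))
#define __force		__attribute__((force))
#else
#define __bitwise
#define __force
#endif

typedef unsigned int   __bitwise __wsum;	/* 32-bit partial checksum */
typedef unsigned short __bitwise __sum16;	/* 16-bit folded checksum */

static inline __sum16 demo_fold(__wsum csum)
{
	unsigned int sum = (__force unsigned int)csum;	/* deliberate reinterpretation */

	sum = (sum & 0xffff) + (sum >> 16);
	sum = (sum & 0xffff) + (sum >> 16);
	return (__force __sum16)~sum;	/* without __force, sparse would warn here */
}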