From: Andi Kleen
Date: Thu, 7 Dec 2006 01:14:07 +0000 (+0100)
Subject: [PATCH] x86-64: Don't force inlining of do_csum
X-Git-Tag: v2.6.20-rc1~145^2^2~8^2~80
X-Git-Url: https://err.no/cgi-bin/gitweb.cgi?a=commitdiff_plain;h=b6bcc4bb1cdfbc3c8612aad63a8703ac3d59f61a;p=linux-2.6

[PATCH] x86-64: Don't force inlining of do_csum

It's too big and used by two callers. Calls should be cheap enough anyway.

Signed-off-by: Andi Kleen
---

diff --git a/arch/x86_64/lib/csum-partial.c b/arch/x86_64/lib/csum-partial.c
index 06ae630de8..bc503f5069 100644
--- a/arch/x86_64/lib/csum-partial.c
+++ b/arch/x86_64/lib/csum-partial.c
@@ -9,8 +9,6 @@
 #include <linux/module.h>
 #include <asm/checksum.h>
 
-#define __force_inline inline __attribute__((always_inline))
-
 static inline unsigned short from32to16(unsigned a)
 {
 	unsigned short b = a >> 16;
@@ -33,7 +31,7 @@ static inline unsigned short from32to16(unsigned a)
  * Unrolling to an 128 bytes inner loop.
  * Using interleaving with more registers to break the carry chains.
  */
-static __force_inline unsigned do_csum(const unsigned char *buff, unsigned len)
+static unsigned do_csum(const unsigned char *buff, unsigned len)
 {
 	unsigned odd, count;
 	unsigned long result = 0;
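
For reference, a minimal C sketch of the "interleaving with more registers to break the carry chains" idea mentioned in the comment above the changed function. This is not the kernel's do_csum() (which uses 64-bit add-with-carry assembly and a 128-byte unrolled inner loop); the names csum_sketch() and fold_to_32() are made up for illustration, and the sketch assumes len is a multiple of 16 and small enough that the 64-bit accumulators cannot overflow.

#include <stdint.h>
#include <string.h>

/* Fold a 64-bit partial sum down to 32 bits with end-around carry. */
static uint32_t fold_to_32(uint64_t sum)
{
	while (sum >> 32)
		sum = (sum & 0xffffffffULL) + (sum >> 32);
	return (uint32_t)sum;
}

/*
 * Toy partial checksum: 16 bytes per iteration, summed into four
 * independent accumulators so consecutive additions do not all wait
 * on the same register. Loads go through memcpy to avoid alignment
 * assumptions.
 */
static uint32_t csum_sketch(const unsigned char *buff, size_t len)
{
	uint64_t r0 = 0, r1 = 0, r2 = 0, r3 = 0;
	uint32_t w0, w1, w2, w3;

	while (len >= 16) {
		memcpy(&w0, buff +  0, 4);
		memcpy(&w1, buff +  4, 4);
		memcpy(&w2, buff +  8, 4);
		memcpy(&w3, buff + 12, 4);
		r0 += w0;
		r1 += w1;
		r2 += w2;
		r3 += w3;
		buff += 16;
		len  -= 16;
	}
	/* Merge the partial sums and fold to a 32-bit result. */
	return fold_to_32(r0 + r1 + r2 + r3);
}

The point of the multiple accumulators is that the additions in one iteration are independent of each other, so the CPU can overlap them instead of serializing on a single running sum's dependency chain; the real routine applies the same idea with more registers and add-with-carry instructions.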