From: Heiko Carstens
Date: Thu, 29 Jun 2006 12:56:13 +0000 (+0200)
Subject: [S390] cleanup bitops.h.
X-Git-Tag: v2.6.18-rc1~360^2~27
X-Git-Url: https://err.no/cgi-bin/gitweb.cgi?a=commitdiff_plain;h=c406abd3a6d0cf5ce8db4db155a729a28fb98c4f;p=linux-2.6

[S390] cleanup bitops.h.

Encapsulate complete bitops.h with #ifdef __KERNEL__ and remove the now
superfluous ALIGN_CS define and its users.
This patch is needed for compiling klibc.

Signed-off-by: Heiko Carstens
Signed-off-by: Martin Schwidefsky
---

diff --git a/include/asm-s390/bitops.h b/include/asm-s390/bitops.h
index 4d2b126ba1..0ddcdba79e 100644
--- a/include/asm-s390/bitops.h
+++ b/include/asm-s390/bitops.h
@@ -12,6 +12,9 @@
  * Copyright (C) 1992, Linus Torvalds
  *
  */
+
+#ifdef __KERNEL__
+
 #include 
 
 /*
@@ -50,19 +53,6 @@
  * with operation of the form "set_bit(bitnr, flags)".
  */
 
-/* set ALIGN_CS to 1 if the SMP safe bit operations should
- * align the address to 4 byte boundary. It seems to work
- * without the alignment.
- */
-#ifdef __KERNEL__
-#define ALIGN_CS 0
-#else
-#define ALIGN_CS 1
-#ifndef CONFIG_SMP
-#error "bitops won't work without CONFIG_SMP"
-#endif
-#endif
-
 /* bitmap tables from arch/S390/kernel/bitmap.S */
 extern const char _oi_bitmap[];
 extern const char _ni_bitmap[];
@@ -121,10 +111,6 @@ static inline void set_bit_cs(unsigned long nr, volatile unsigned long *ptr)
 	unsigned long addr, old, new, mask;
 
 	addr = (unsigned long) ptr;
-#if ALIGN_CS == 1
-	nr += (addr & __BITOPS_ALIGN) << 3;	/* add alignment to bit number */
-	addr ^= addr & __BITOPS_ALIGN;		/* align address to 8 */
-#endif
 	/* calculate address for CS */
 	addr += (nr ^ (nr & (__BITOPS_WORDSIZE - 1))) >> 3;
 	/* make OR mask */
@@ -141,10 +127,6 @@ static inline void clear_bit_cs(unsigned long nr, volatile unsigned long *ptr)
 	unsigned long addr, old, new, mask;
 
 	addr = (unsigned long) ptr;
-#if ALIGN_CS == 1
-	nr += (addr & __BITOPS_ALIGN) << 3;	/* add alignment to bit number */
-	addr ^= addr & __BITOPS_ALIGN;		/* align address to 8 */
-#endif
 	/* calculate address for CS */
 	addr += (nr ^ (nr & (__BITOPS_WORDSIZE - 1))) >> 3;
 	/* make AND mask */
@@ -161,10 +143,6 @@ static inline void change_bit_cs(unsigned long nr, volatile unsigned long *ptr)
 	unsigned long addr, old, new, mask;
 
 	addr = (unsigned long) ptr;
-#if ALIGN_CS == 1
-	nr += (addr & __BITOPS_ALIGN) << 3;	/* add alignment to bit number */
-	addr ^= addr & __BITOPS_ALIGN;		/* align address to 8 */
-#endif
 	/* calculate address for CS */
 	addr += (nr ^ (nr & (__BITOPS_WORDSIZE - 1))) >> 3;
 	/* make XOR mask */
@@ -182,10 +160,6 @@ test_and_set_bit_cs(unsigned long nr, volatile unsigned long *ptr)
 	unsigned long addr, old, new, mask;
 
 	addr = (unsigned long) ptr;
-#if ALIGN_CS == 1
-	nr += (addr & __BITOPS_ALIGN) << 3;	/* add alignment to bit number */
-	addr ^= addr & __BITOPS_ALIGN;		/* align address to 8 */
-#endif
 	/* calculate address for CS */
 	addr += (nr ^ (nr & (__BITOPS_WORDSIZE - 1))) >> 3;
 	/* make OR/test mask */
@@ -205,10 +179,6 @@ test_and_clear_bit_cs(unsigned long nr, volatile unsigned long *ptr)
 	unsigned long addr, old, new, mask;
 
 	addr = (unsigned long) ptr;
-#if ALIGN_CS == 1
-	nr += (addr & __BITOPS_ALIGN) << 3;	/* add alignment to bit number */
-	addr ^= addr & __BITOPS_ALIGN;		/* align address to 8 */
-#endif
 	/* calculate address for CS */
 	addr += (nr ^ (nr & (__BITOPS_WORDSIZE - 1))) >> 3;
 	/* make AND/test mask */
@@ -228,10 +198,6 @@ test_and_change_bit_cs(unsigned long nr, volatile unsigned long *ptr)
 	unsigned long addr, old, new, mask;
 
 	addr = (unsigned long) ptr;
-#if ALIGN_CS == 1
-	nr += (addr & __BITOPS_ALIGN) << 3;	/* add alignment to bit number */
-	addr ^= addr & __BITOPS_ALIGN;		/* align address to 8 */
-#endif
 	/* calculate address for CS */
 	addr += (nr ^ (nr & (__BITOPS_WORDSIZE - 1))) >> 3;
 	/* make XOR/test mask */
@@ -834,8 +800,6 @@ static inline int sched_find_first_bit(unsigned long *b)
 
 #include 
 
-#ifdef __KERNEL__
-
 /*
  * ATTENTION: intel byte ordering convention for ext2 and minix !!
  * bit 0 is the LSB of addr; bit 31 is the MSB of addr;
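For context only (not part of the patch above): the *_bit_cs helpers touched
by these hunks all start from the same address/mask calculation that the
removed ALIGN_CS code used to precede. The sketch below is a minimal,
standalone illustration of that calculation, assuming a 64-bit word size
standing in for __BITOPS_WORDSIZE; the mask expression and the names
WORDSIZE/locate_bit are illustrative assumptions, not taken from the diff.

	/*
	 * Illustrative sketch: find the word and OR mask for bit 'nr',
	 * using the same byte-offset expression that appears in the hunks
	 * above ("calculate address for CS").
	 */
	#include <stdio.h>

	#define WORDSIZE 64	/* hypothetical stand-in for __BITOPS_WORDSIZE */

	static void locate_bit(unsigned long nr, volatile unsigned long *ptr)
	{
		unsigned long addr, mask;

		addr = (unsigned long) ptr;
		/* byte address of the word that contains bit 'nr' */
		addr += (nr ^ (nr & (WORDSIZE - 1))) >> 3;
		/* OR mask for the bit within that word (assumed form) */
		mask = 1UL << (nr & (WORDSIZE - 1));
		printf("word at %#lx, mask %#lx\n", addr, mask);
	}

	int main(void)
	{
		unsigned long bitmap[2] = { 0, 0 };

		/* bit 70 falls into bitmap[1]; mask is 1UL << 6 */
		locate_bit(70, bitmap);
		return 0;
	}

In the kernel helpers this word address and mask then feed a compare-and-swap
retry loop; the patch leaves that logic untouched and only drops the optional
ALIGN_CS pre-alignment of the address.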