X-Git-Url: https://err.no/cgi-bin/gitweb.cgi?a=blobdiff_plain;f=include%2Fasm-powerpc%2Fio.h;h=e44cdfc8493a8f30a2b4a4644884c429f5b1f966;hb=07232b971577442de0821afa0abafb6531b6192e;hp=affba7052fb6dc935cc9fa41594306b9dcf13f0a;hpb=ab3e975e7c8b5efb452bdb0d06c1cb7399f83979;p=linux-2.6

diff --git a/include/asm-powerpc/io.h b/include/asm-powerpc/io.h
index affba7052f..e44cdfc849 100644
--- a/include/asm-powerpc/io.h
+++ b/include/asm-powerpc/io.h
@@ -138,12 +138,12 @@ DEF_MMIO_IN_BE(in_be64, 64, ld);
 /* There is no asm instructions for 64 bits reverse loads and stores */
 static inline u64 in_le64(const volatile u64 __iomem *addr)
 {
-	return le64_to_cpu(in_be64(addr));
+	return swab64(in_be64(addr));
 }
 
 static inline void out_le64(volatile u64 __iomem *addr, u64 val)
 {
-	out_be64(addr, cpu_to_le64(val));
+	out_be64(addr, swab64(val));
 }
 #endif /* __powerpc64__ */
 
@@ -498,23 +498,6 @@ static inline void name at \
 #define writeq writeq
 #endif
 
-#ifdef CONFIG_NOT_COHERENT_CACHE
-
-#define dma_cache_inv(_start,_size) \
-	invalidate_dcache_range(_start, (_start + _size))
-#define dma_cache_wback(_start,_size) \
-	clean_dcache_range(_start, (_start + _size))
-#define dma_cache_wback_inv(_start,_size) \
-	flush_dcache_range(_start, (_start + _size))
-
-#else /* CONFIG_NOT_COHERENT_CACHE */
-
-#define dma_cache_inv(_start,_size) do { } while (0)
-#define dma_cache_wback(_start,_size) do { } while (0)
-#define dma_cache_wback_inv(_start,_size) do { } while (0)
-
-#endif /* !CONFIG_NOT_COHERENT_CACHE */
-
 /*
  * Convert a physical pointer to a virtual kernel pointer for /dev/mem
  * access
@@ -539,7 +522,7 @@ static inline void name at \
 #else
 /*
  * Enforce synchronisation of stores vs. spin_unlock
- * (this does it explicitely, though our implementation of spin_unlock
+ * (this does it explicitly, though our implementation of spin_unlock
 * does it implicitely too)
 */
 static inline void mmiowb(void)
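
Note on the first hunk (not part of the patch itself): on big-endian powerpc, le64_to_cpu() and cpu_to_le64() both reduce to an unconditional 64-bit byte swap, so expressing in_le64()/out_le64() with swab64() keeps the same behaviour while stating the intent directly. The user-space sketch below illustrates the swap those accessors wrap around the big-endian MMIO access; swab64_demo() is a hypothetical stand-in for the kernel's swab64() and is not taken from this file.

/*
 * Illustration only, not part of the patch: an unconditional 64-bit
 * byte swap, equivalent to what le64_to_cpu()/cpu_to_le64() perform
 * on a big-endian kernel and to what swab64() performs everywhere.
 */
#include <stdint.h>
#include <stdio.h>

static uint64_t swab64_demo(uint64_t x)
{
	return ((x & 0x00000000000000ffULL) << 56) |
	       ((x & 0x000000000000ff00ULL) << 40) |
	       ((x & 0x0000000000ff0000ULL) << 24) |
	       ((x & 0x00000000ff000000ULL) <<  8) |
	       ((x & 0x000000ff00000000ULL) >>  8) |
	       ((x & 0x0000ff0000000000ULL) >> 24) |
	       ((x & 0x00ff000000000000ULL) >> 40) |
	       ((x & 0xff00000000000000ULL) >> 56);
}

int main(void)
{
	uint64_t v = 0x0102030405060708ULL;

	/* A little-endian device register read with a big-endian load
	 * arrives byte-reversed; one swap restores CPU byte order. */
	printf("%016llx -> %016llx\n",
	       (unsigned long long)v,
	       (unsigned long long)swab64_demo(v));
	return 0;
}

Built with any hosted C compiler, this should print 0102030405060708 -> 0807060504030201, the ordering a little-endian register presents to a big-endian load.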