X-Git-Url: https://err.no/cgi-bin/gitweb.cgi?a=blobdiff_plain;f=include%2Fasm-mips%2Fio.h;h=e62058b0d28c37c48d7a71c40c7271322fed5e63;hb=8912858bcb9e314549a4abc15db6b5841a96fa2c;hp=92ec2618560c984355e653d33d5dc935e5e1488c;hpb=e0cc09e295f346b7921e921f385fe5213472316a;p=linux-2.6

diff --git a/include/asm-mips/io.h b/include/asm-mips/io.h
index 92ec261856..e62058b0d2 100644
--- a/include/asm-mips/io.h
+++ b/include/asm-mips/io.h
@@ -40,11 +40,11 @@
  * hardware. An example use would be for flash memory that's used for
  * execute in place.
  */
-# define __raw_ioswabb(a,x) (x)
-# define __raw_ioswabw(a,x) (x)
-# define __raw_ioswabl(a,x) (x)
-# define __raw_ioswabq(a,x) (x)
-# define ____raw_ioswabq(a,x) (x)
+# define __raw_ioswabb(a, x) (x)
+# define __raw_ioswabw(a, x) (x)
+# define __raw_ioswabl(a, x) (x)
+# define __raw_ioswabq(a, x) (x)
+# define ____raw_ioswabq(a, x) (x)
 
 /* ioswab[bwlq], __mem_ioswab[bwlq] are defined in mangle-port.h */
 
@@ -178,6 +178,11 @@ extern void __iounmap(const volatile void __iomem *addr);
 static inline void __iomem * __ioremap_mode(phys_t offset, unsigned long size,
 	unsigned long flags)
 {
+	void __iomem *addr = plat_ioremap(offset, size, flags);
+
+	if (addr)
+		return addr;
+
 #define __IS_LOW512(addr) (!((phys_t)(addr) & (phys_t) ~0x1fffffffULL))
 
 	if (cpu_has_64bit_addresses) {
@@ -207,7 +212,8 @@ static inline void __iomem * __ioremap_mode(phys_t offset, unsigned long size,
 		 */
 		if (__IS_LOW512(phys_addr) && __IS_LOW512(last_addr) &&
 		    flags == _CACHE_UNCACHED)
-			return (void __iomem *)CKSEG1ADDR(phys_addr);
+			return (void __iomem *)
+				(unsigned long)CKSEG1ADDR(phys_addr);
 	}
 
 	return __ioremap(offset, size, flags);
@@ -282,6 +288,9 @@ static inline void __iomem * __ioremap_mode(phys_t offset, unsigned long size,
 
 static inline void iounmap(const volatile void __iomem *addr)
 {
+	if (plat_iounmap(addr))
+		return;
+
 #define __IS_KSEG1(addr) (((unsigned long)(addr) & ~0x1fffffffUL) == CKSEG1)
 
 	if (cpu_has_64bit_addresses ||
@@ -545,6 +554,8 @@ static inline void memcpy_toio(volatile void __iomem *dst, const void *src, int
  * caches. Dirty lines of the caches may be written back or simply
  * be discarded. This operation is necessary before dma operations
  * to the memory.
+ *
+ * This API used to be exported; it is now for arch code internal use only.
  */
 
 #ifdef CONFIG_DMA_NONCOHERENT
@@ -552,9 +563,9 @@ extern void (*_dma_cache_wback_inv)(unsigned long start, unsigned long size);
 extern void (*_dma_cache_wback)(unsigned long start, unsigned long size);
 extern void (*_dma_cache_inv)(unsigned long start, unsigned long size);
 
-#define dma_cache_wback_inv(start, size) _dma_cache_wback_inv(start,size)
-#define dma_cache_wback(start, size) _dma_cache_wback(start,size)
-#define dma_cache_inv(start, size) _dma_cache_inv(start,size)
+#define dma_cache_wback_inv(start, size) _dma_cache_wback_inv(start, size)
+#define dma_cache_wback(start, size) _dma_cache_wback(start, size)
+#define dma_cache_inv(start, size) _dma_cache_inv(start, size)
 
 #else /* Sane hardware */
 
@@ -578,7 +589,7 @@ extern void (*_dma_cache_inv)(unsigned long start, unsigned long size);
 #define __CSR_32_ADJUST 0
 #endif
 
-#define csr_out32(v,a) (*(volatile u32 *)((unsigned long)(a) + __CSR_32_ADJUST) = (v))
+#define csr_out32(v, a) (*(volatile u32 *)((unsigned long)(a) + __CSR_32_ADJUST) = (v))
 #define csr_in32(a) (*(volatile u32 *)((unsigned long)(a) + __CSR_32_ADJUST))
 
 /*
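
A note on the __raw_ioswab*() hunk above: those macros are deliberately identity operations, so the __raw_read*/__raw_write* accessors pass data through untouched, which suits byte-invariant hardware such as execute-in-place flash, while the ordinary read*/write* accessors go through the ioswab*/__mem_ioswab* helpers from mangle-port.h and apply whatever swapping the port needs. A minimal, purely illustrative driver-style sketch of the difference; FLASH_PHYS, FLASH_SIZE and the offset are made-up placeholders, not anything defined in this header:

#include <linux/kernel.h>
#include <linux/io.h>

/* Hypothetical flash window, for illustration only. */
#define FLASH_PHYS	0x1c000000UL
#define FLASH_SIZE	0x01000000UL

static void peek_flash_word(unsigned long off)
{
	void __iomem *base = ioremap(FLASH_PHYS, FLASH_SIZE);
	u32 raw, le;

	if (!base)
		return;

	raw = __raw_readl(base + off);	/* bytes exactly as stored (identity ioswab) */
	le = readl(base + off);		/* little-endian device semantics, may byte-swap */

	printk(KERN_INFO "raw %08x, le %08x\n", raw, le);
	iounmap(base);
}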
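
The __ioremap_mode() and iounmap() hunks add a platform hook: plat_ioremap() is tried first and a non-NULL return is used as the mapping, while a nonzero return from plat_iounmap() means the platform has already dealt with the unmap, so the generic KSEG1/TLB paths are skipped. Platforms that do not need the hook can supply trivial stubs; the bodies below are only a sketch inferred from the call sites in this patch, and the header fragment they would live in is not part of this diff:

/* Do-nothing defaults a platform could provide; returning NULL or 0 means
 * "fall through to the generic __ioremap_mode()/iounmap() code". */
static inline void __iomem *plat_ioremap(phys_t offset, unsigned long size,
	unsigned long flags)
{
	return NULL;
}

static inline int plat_iounmap(const volatile void __iomem *addr)
{
	return 0;
}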
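
For the CKSEG1ADDR() cast hunk, the surrounding fast path is worth spelling out: on a 32-bit kernel, an uncached request that fits entirely in the low 512 MB of physical address space is satisfied straight from the fixed KSEG1 window, with no page tables involved. A small hypothetical example, assuming plat_ioremap() declines and using the traditional boot-flash address; the size is made up:

#include <linux/io.h>

/* Illustration of the low-512MB uncached fast path: 0x1fc00000 (the usual
 * MIPS boot flash) is below 512MB, so on a 32-bit kernel this returns
 * CKSEG1ADDR(0x1fc00000) == 0xbfc00000 directly, no TLB entries needed. */
static void __iomem *map_boot_flash(void)
{
	return ioremap(0x1fc00000, 0x100000);	/* hypothetical size */
}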
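
On the dma_cache_*() comment change: with the API no longer exported, drivers on DMA-noncoherent MIPS systems are expected to get the same cache maintenance through the generic DMA mapping layer instead of calling dma_cache_wback()/dma_cache_inv() themselves. A hedged sketch of the driver-side pattern; the device pointer, buffer and length are placeholders:

#include <linux/dma-mapping.h>

/* Map a driver-owned buffer for a device read (DMA_TO_DEVICE); on a
 * noncoherent platform this is where the cache writeback happens. */
static dma_addr_t prepare_tx(struct device *dev, void *buf, size_t len)
{
	return dma_map_single(dev, buf, len, DMA_TO_DEVICE);
}

The matching dma_unmap_single() after the transfer completes, and DMA_FROM_DEVICE mappings, cover the invalidate side that dma_cache_inv() used to provide to drivers.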