#define __nocache_fix(VADDR) __va(__nocache_pa(VADDR))
/* Accessing the MMU control register. */
-extern __inline__ unsigned int srmmu_get_mmureg(void)
+static inline unsigned int srmmu_get_mmureg(void)
{
unsigned int retval;
	__asm__ __volatile__("lda [%%g0] %1, %0\n\t" :
			     "=r" (retval) :
			     "i" (ASI_M_MMUREGS));
return retval;
}
-extern __inline__ void srmmu_set_mmureg(unsigned long regval)
+static inline void srmmu_set_mmureg(unsigned long regval)
{
__asm__ __volatile__("sta %0, [%%g0] %1\n\t" : :
"r" (regval), "i" (ASI_M_MMUREGS) : "memory");
}
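/* The context table pointer register holds the table's physical address shifted right by four bits. */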
-extern __inline__ void srmmu_set_ctable_ptr(unsigned long paddr)
+static inline void srmmu_set_ctable_ptr(unsigned long paddr)
{
paddr = ((paddr >> 4) & SRMMU_CTX_PMASK);
__asm__ __volatile__("sta %0, [%1] %2\n\t" : :
"memory");
}
-extern __inline__ unsigned long srmmu_get_ctable_ptr(void)
+static inline unsigned long srmmu_get_ctable_ptr(void)
{
	unsigned int retval;

	__asm__ __volatile__("lda [%1] %2, %0\n\t" :
			     "=r" (retval) :
			     "r" (SRMMU_CTXTBL_PTR),
			     "i" (ASI_M_MMUREGS));
	return (retval & SRMMU_CTX_PMASK) << 4;
}
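/* Select (or read back) the current MMU context number. */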
-extern __inline__ void srmmu_set_context(int context)
+static inline void srmmu_set_context(int context)
{
__asm__ __volatile__("sta %0, [%1] %2\n\t" : :
"r" (context), "r" (SRMMU_CTX_REG),
"i" (ASI_M_MMUREGS) : "memory");
}
-extern __inline__ int srmmu_get_context(void)
+static inline int srmmu_get_context(void)
{
register int retval;
	__asm__ __volatile__("lda [%1] %2, %0\n\t" :
			     "=r" (retval) :
			     "r" (SRMMU_CTX_REG),
			     "i" (ASI_M_MMUREGS));
return retval;
}
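/* Read the synchronous fault status register. */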
-extern __inline__ unsigned int srmmu_get_fstatus(void)
+static inline unsigned int srmmu_get_fstatus(void)
{
	unsigned int retval;

	__asm__ __volatile__("lda [%1] %2, %0\n\t" :
			     "=r" (retval) :
			     "r" (SRMMU_FAULT_STATUS), "i" (ASI_M_MMUREGS));
	return retval;
}
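/* Read the synchronous fault address register. */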
-extern __inline__ unsigned int srmmu_get_faddr(void)
+static inline unsigned int srmmu_get_faddr(void)
{
	unsigned int retval;

	__asm__ __volatile__("lda [%1] %2, %0\n\t" :
			     "=r" (retval) :
			     "r" (SRMMU_FAULT_ADDR), "i" (ASI_M_MMUREGS));
	return retval;
}
/* This is guaranteed on all SRMMU's. */
-extern __inline__ void srmmu_flush_whole_tlb(void)
+static inline void srmmu_flush_whole_tlb(void)
{
__asm__ __volatile__("sta %%g0, [%0] %1\n\t": :
"r" (0x400), /* Flush entire TLB!! */
}
/* These flush types are not available on all chips... */
-extern __inline__ void srmmu_flush_tlb_ctx(void)
+static inline void srmmu_flush_tlb_ctx(void)
{
__asm__ __volatile__("sta %%g0, [%0] %1\n\t": :
"r" (0x300), /* Flush TLB ctx.. */
}
-extern __inline__ void srmmu_flush_tlb_region(unsigned long addr)
+static inline void srmmu_flush_tlb_region(unsigned long addr)
{
addr &= SRMMU_PGDIR_MASK;
	__asm__ __volatile__("sta %%g0, [%0] %1\n\t": :
			     "r" (addr | 0x200), /* Flush TLB region.. */
			     "i" (ASI_M_FLUSH_PROBE) : "memory");
}
-extern __inline__ void srmmu_flush_tlb_segment(unsigned long addr)
+static inline void srmmu_flush_tlb_segment(unsigned long addr)
{
addr &= SRMMU_REAL_PMD_MASK;
	__asm__ __volatile__("sta %%g0, [%0] %1\n\t": :
			     "r" (addr | 0x100), /* Flush TLB segment.. */
			     "i" (ASI_M_FLUSH_PROBE) : "memory");
}
-extern __inline__ void srmmu_flush_tlb_page(unsigned long page)
+static inline void srmmu_flush_tlb_page(unsigned long page)
{
page &= PAGE_MASK;
	__asm__ __volatile__("sta %%g0, [%0] %1\n\t": :
			     "r" (page),         /* Flush TLB page.. */
			     "i" (ASI_M_FLUSH_PROBE) : "memory");
}
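/* Ask the MMU hardware to look up the PTE for vaddr (probe entire); zero means no valid translation. */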
-extern __inline__ unsigned long srmmu_hwprobe(unsigned long vaddr)
+static inline unsigned long srmmu_hwprobe(unsigned long vaddr)
{
	unsigned long retval;

	vaddr &= PAGE_MASK;
	__asm__ __volatile__("lda [%1] %2, %0\n\t" :
			     "=r" (retval) :
			     "r" (vaddr | 0x400), "i" (ASI_M_FLUSH_PROBE));

	return retval;
}
-extern __inline__ int
+static inline int
srmmu_get_pte (unsigned long addr)
{
register unsigned long entry;