#include <asm/proc-fns.h>
#include <asm/arch/vmalloc.h>
+/*
+ * Just any arbitrary offset to the start of the vmalloc VM area: the
+ * current 8MB value just means that there will be an 8MB "hole" after the
+ * physical memory until the kernel virtual memory starts. That means that
+ * any out-of-bounds memory accesses will hopefully be caught.
+ * The vmalloc() routines leave a hole of 4kB between each vmalloced
+ * area for the same reason. ;)
+ *
+ * Note that platforms may override VMALLOC_START, but they must provide
+ * VMALLOC_END. VMALLOC_END defines the (exclusive) limit of this space,
+ * which may not overlap IO space.
+ */
+#ifndef VMALLOC_START
+#define VMALLOC_OFFSET (8*1024*1024)
+#define VMALLOC_START (((unsigned long)high_memory + VMALLOC_OFFSET) & ~(VMALLOC_OFFSET-1))
+#endif
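+
+/*
+ * Worked example of the rounding above (the figures are purely
+ * illustrative, not required by this header): on a machine with 128MB
+ * of RAM mapped at 0xc0000000, high_memory is 0xc8000000, so
+ *
+ *   high_memory + VMALLOC_OFFSET   == 0xc8800000
+ *   ... & ~(VMALLOC_OFFSET - 1)    == 0xc8800000
+ *
+ * and VMALLOC_START lands on an 8MB boundary, leaving the hole
+ * described above between the end of lowmem and the vmalloc area.
+ */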
+
/*
* Hardware-wise, we have a two level page table structure, where the first
* level has 4096 entries, and the second level has 256 entries. Each entry
#define PMD_SECT_WB (PMD_SECT_CACHEABLE | PMD_SECT_BUFFERABLE)
#define PMD_SECT_MINICACHE (PMD_SECT_TEX(1) | PMD_SECT_CACHEABLE)
#define PMD_SECT_WBWA (PMD_SECT_TEX(1) | PMD_SECT_CACHEABLE | PMD_SECT_BUFFERABLE)
+#define PMD_SECT_NONSHARED_DEV (PMD_SECT_TEX(2))
/*
* - coarse table (not used)
/*
* - extended small page/tiny page
*/
+#define PTE_EXT_XN (1 << 0) /* v6 */
#define PTE_EXT_AP_MASK (3 << 4)
+#define PTE_EXT_AP0 (1 << 4)
+#define PTE_EXT_AP1 (2 << 4)
#define PTE_EXT_AP_UNO_SRO (0 << 4)
-#define PTE_EXT_AP_UNO_SRW (1 << 4)
-#define PTE_EXT_AP_URO_SRW (2 << 4)
-#define PTE_EXT_AP_URW_SRW (3 << 4)
+#define PTE_EXT_AP_UNO_SRW (PTE_EXT_AP0)
+#define PTE_EXT_AP_URO_SRW (PTE_EXT_AP1)
+#define PTE_EXT_AP_URW_SRW (PTE_EXT_AP1|PTE_EXT_AP0)
#define PTE_EXT_TEX(x) ((x) << 6) /* v5 */
+#define PTE_EXT_APX (1 << 9) /* v6 */
+#define PTE_EXT_SHARED (1 << 10) /* v6 */
+#define PTE_EXT_NG (1 << 11) /* v6 */
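+
+/*
+ * For reference, the access-permission encodings above expand to the
+ * same values as the literal forms they replace:
+ *
+ *   PTE_EXT_AP_UNO_SRO                              == (0 << 4) == 0x00
+ *   PTE_EXT_AP_UNO_SRW == PTE_EXT_AP0               == (1 << 4) == 0x10
+ *   PTE_EXT_AP_URO_SRW == PTE_EXT_AP1               == (2 << 4) == 0x20
+ *   PTE_EXT_AP_URW_SRW == (PTE_EXT_AP1|PTE_EXT_AP0) == (3 << 4) == 0x30
+ */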
/*
* - small page
#define L_PTE_WRITE (1 << 5)
#define L_PTE_EXEC (1 << 6)
#define L_PTE_DIRTY (1 << 7)
+#define L_PTE_SHARED (1 << 10) /* shared between CPUs (v6) */
+#define L_PTE_ASID (1 << 11) /* non-global (use ASID, v6) */
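+
+/*
+ * Note that these two Linux PTE bits sit in the same positions as the
+ * v6 hardware bits above: L_PTE_SHARED matches PTE_EXT_SHARED (bit 10)
+ * and L_PTE_ASID matches PTE_EXT_NG (bit 11).  A hypothetical
+ * compile-time check (illustrative only) would catch any divergence:
+ *
+ *   BUILD_BUG_ON(L_PTE_SHARED != PTE_EXT_SHARED);
+ *   BUILD_BUG_ON(L_PTE_ASID != PTE_EXT_NG);
+ */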
#ifndef __ASSEMBLY__
#define pgd_clear(pgdp) do { } while (0)
#define set_pgd(pgd,pgdp) do { } while (0)
-#define page_pte_prot(page,prot) mk_pte(page, prot)
-#define page_pte(page) mk_pte(page, __pgprot(0))
-
/* to find an entry in a page-table-directory */
#define pgd_index(addr) ((addr) >> PGDIR_SHIFT)
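+
+/*
+ * Worked example, assuming PGDIR_SHIFT is 21 so that each pgd entry
+ * covers 2MB:
+ *
+ *   pgd_index(0xc0000000) == 0xc0000000 >> 21 == 1536
+ *
+ * i.e. the kernel direct mapping starts at entry 1536 of the directory.
+ */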
#define HAVE_ARCH_UNMAPPED_AREA
/*
- * remap a physical address `phys' of size `size' with page protection `prot'
+ * remap a physical page `pfn' of size `size' with page protection `prot'
* into virtual address `from'
*/
-#define io_remap_page_range(vma,from,phys,size,prot) \
- remap_pfn_range(vma, from, (phys) >> PAGE_SHIFT, size, prot)
-
#define io_remap_pfn_range(vma,from,pfn,size,prot) \
remap_pfn_range(vma, from, pfn, size, prot)
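+
+/*
+ * Typical use is from a driver's mmap() method; a minimal sketch
+ * (the driver-specific names foo_mmap and FOO_REGS_PHYS are
+ * assumptions for illustration only):
+ *
+ *   static int foo_mmap(struct file *file, struct vm_area_struct *vma)
+ *   {
+ *           return io_remap_pfn_range(vma, vma->vm_start,
+ *                                     FOO_REGS_PHYS >> PAGE_SHIFT,
+ *                                     vma->vm_end - vma->vm_start,
+ *                                     vma->vm_page_prot);
+ *   }
+ */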