err.no Git - linux-2.6/commitdiff
x86: map UV chipset space - pagetable
author Jack Steiner <steiner@sgi.com>
Tue, 1 Jul 2008 19:45:32 +0000 (14:45 -0500)
committer Ingo Molnar <mingo@elte.hu>
Wed, 9 Jul 2008 05:43:23 +0000 (07:43 +0200)
Add a boot-time function for creating additional 2MB page table entries for
mapping chipset-specific cached/uncached ranges.

Signed-off-by: Jack Steiner <steiner@sgi.com>
Cc: linux-mm@kvack.org
Signed-off-by: Ingo Molnar <mingo@elte.hu>
arch/x86/mm/init_64.c
include/asm-x86/page_64.h
include/asm-x86/pgtable.h

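For orientation, here is a hedged sketch (not part of this patch) of how UV boot code might call the two new helpers. The names init_extra_mapping_uc()/init_extra_mapping_wb() come from the patch below; uv_map_chipset_space() and the base/size constants are hypothetical placeholders, chosen only to satisfy the 2MB alignment the code enforces.

/* Hypothetical caller, e.g. from UV platform setup code (illustrative only). */
#define UV_MMR_BASE     0x00000000f8000000UL    /* hypothetical chipset MMR base  */
#define UV_MMR_SIZE     (64UL << 20)            /* 64 MB window, 2MB aligned      */
#define UV_GRU_BASE     0x0000000100000000UL    /* hypothetical cached range base */
#define UV_GRU_SIZE     (128UL << 20)           /* 128 MB window, 2MB aligned     */

static void __init uv_map_chipset_space(void)
{
        /* Memory-mapped registers must not be cached. */
        init_extra_mapping_uc(UV_MMR_BASE, UV_MMR_SIZE);

        /* Coherent chipset memory can use normal write-back caching. */
        init_extra_mapping_wb(UV_GRU_BASE, UV_GRU_SIZE);
}
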
index 77d129d62c974904fec4bf924c58f51b8a3d9d3c..57d5eff754c9709e9572ddee84b2a7212cc269c9 100644 (file)
@@ -201,6 +201,46 @@ set_pte_vaddr(unsigned long vaddr, pte_t pteval)
        set_pte_vaddr_pud(pud_page, vaddr, pteval);
 }
 
+/*
+ * Create large page table mappings for a range of physical addresses.
+ */
+static void __init __init_extra_mapping(unsigned long phys, unsigned long size,
+                                               pgprot_t prot)
+{
+       pgd_t *pgd;
+       pud_t *pud;
+       pmd_t *pmd;
+
+       BUG_ON((phys & ~PMD_MASK) || (size & ~PMD_MASK));
+       for (; size; phys += PMD_SIZE, size -= PMD_SIZE) {
+               pgd = pgd_offset_k((unsigned long)__va(phys));
+               if (pgd_none(*pgd)) {
+                       pud = (pud_t *) spp_getpage();
+                       set_pgd(pgd, __pgd(__pa(pud) | _KERNPG_TABLE |
+                                               _PAGE_USER));
+               }
+               pud = pud_offset(pgd, (unsigned long)__va(phys));
+               if (pud_none(*pud)) {
+                       pmd = (pmd_t *) spp_getpage();
+                       set_pud(pud, __pud(__pa(pmd) | _KERNPG_TABLE |
+                                               _PAGE_USER));
+               }
+               pmd = pmd_offset(pud, phys);
+               BUG_ON(!pmd_none(*pmd));
+               set_pmd(pmd, __pmd(phys | pgprot_val(prot)));
+       }
+}
+
+void __init init_extra_mapping_wb(unsigned long phys, unsigned long size)
+{
+       __init_extra_mapping(phys, size, PAGE_KERNEL_LARGE);
+}
+
+void __init init_extra_mapping_uc(unsigned long phys, unsigned long size)
+{
+       __init_extra_mapping(phys, size, PAGE_KERNEL_LARGE_NOCACHE);
+}
+
 /*
  * The head.S code sets up the kernel high mapping:
  *
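The added __init_extra_mapping() walks pgd -> pud -> pmd for every 2MB step of the range, allocating missing intermediate tables with spp_getpage() and installing one large-page PMD entry per step. The following standalone userspace sketch only illustrates the address arithmetic of that loop (the 2MB alignment check and the number of entries installed); the PMD_* constants mirror the x86-64 values, and the phys/size figures are made-up examples.

/* Standalone illustration of the 2MB stepping in __init_extra_mapping(). */
#include <stdio.h>
#include <stdint.h>

#define PMD_SHIFT 21
#define PMD_SIZE  (1UL << PMD_SHIFT)            /* 2 MB large page */
#define PMD_MASK  (~(PMD_SIZE - 1))

int main(void)
{
        uint64_t phys = 0x00000000f8000000ULL;  /* hypothetical chipset base */
        uint64_t size = 64ULL << 20;            /* 64 MB window */
        unsigned long entries = 0;

        /* Same alignment requirement the kernel code enforces with BUG_ON(). */
        if ((phys & ~PMD_MASK) || (size & ~PMD_MASK)) {
                fprintf(stderr, "range is not 2MB aligned\n");
                return 1;
        }

        /* One PMD entry is installed per PMD_SIZE step. */
        for (; size; phys += PMD_SIZE, size -= PMD_SIZE)
                entries++;

        printf("%lu large-page PMD entries needed\n", entries);
        return 0;
}
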
index 010d12db80dc720b1b917f2defa626274f789e7e..c6916c83e6b1aee454e8a8d55ccd787652bee88e 100644 (file)
@@ -91,6 +91,10 @@ extern unsigned long init_memory_mapping(unsigned long start,
                                         unsigned long end);
 
 extern void initmem_init(unsigned long start_pfn, unsigned long end_pfn);
+
+extern void init_extra_mapping_uc(unsigned long phys, unsigned long size);
+extern void init_extra_mapping_wb(unsigned long phys, unsigned long size);
+
 #endif /* !__ASSEMBLY__ */
 
 #ifdef CONFIG_FLATMEM
index 64de07e253299e17be370fe74a1113452990ed07..49cbd76b9547ed3d0bf3075f4f6980a0143858c7 100644 (file)
@@ -91,6 +91,7 @@
 #define __PAGE_KERNEL_VSYSCALL         (__PAGE_KERNEL_RX | _PAGE_USER)
 #define __PAGE_KERNEL_VSYSCALL_NOCACHE (__PAGE_KERNEL_VSYSCALL | _PAGE_PCD | _PAGE_PWT)
 #define __PAGE_KERNEL_LARGE            (__PAGE_KERNEL | _PAGE_PSE)
+#define __PAGE_KERNEL_LARGE_NOCACHE    (__PAGE_KERNEL | _PAGE_CACHE_UC | _PAGE_PSE)
 #define __PAGE_KERNEL_LARGE_EXEC       (__PAGE_KERNEL_EXEC | _PAGE_PSE)
 
 #define PAGE_KERNEL                    __pgprot(__PAGE_KERNEL)
@@ -102,6 +103,7 @@
 #define PAGE_KERNEL_UC_MINUS           __pgprot(__PAGE_KERNEL_UC_MINUS)
 #define PAGE_KERNEL_EXEC_NOCACHE       __pgprot(__PAGE_KERNEL_EXEC_NOCACHE)
 #define PAGE_KERNEL_LARGE              __pgprot(__PAGE_KERNEL_LARGE)
+#define PAGE_KERNEL_LARGE_NOCACHE      __pgprot(__PAGE_KERNEL_LARGE_NOCACHE)
 #define PAGE_KERNEL_LARGE_EXEC         __pgprot(__PAGE_KERNEL_LARGE_EXEC)
 #define PAGE_KERNEL_VSYSCALL           __pgprot(__PAGE_KERNEL_VSYSCALL)
 #define PAGE_KERNEL_VSYSCALL_NOCACHE   __pgprot(__PAGE_KERNEL_VSYSCALL_NOCACHE)
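
The pgtable.h change only adds an uncached variant of the existing large-page protection: __PAGE_KERNEL plus the PSE (2MB page) bit plus the _PAGE_CACHE_UC bits. Below is a small standalone sketch of that bit delta, assuming the usual x86 PTE bit positions (PWT=3, PCD=4, PSE=7) and _PAGE_CACHE_UC = PCD|PWT; __PAGE_KERNEL itself is left out since it is unchanged by this patch.

/* Standalone sketch of the flag delta introduced by the new define. */
#include <stdio.h>

#define _PAGE_PWT       (1UL << 3)
#define _PAGE_PCD       (1UL << 4)
#define _PAGE_PSE       (1UL << 7)
#define _PAGE_CACHE_UC  (_PAGE_PCD | _PAGE_PWT)         /* strong uncached */

int main(void)
{
        unsigned long large         = _PAGE_PSE;                  /* ..._LARGE         */
        unsigned long large_nocache = _PAGE_PSE | _PAGE_CACHE_UC; /* ..._LARGE_NOCACHE */

        printf("extra bits in __PAGE_KERNEL_LARGE:         0x%lx\n", large);
        printf("extra bits in __PAGE_KERNEL_LARGE_NOCACHE: 0x%lx\n", large_nocache);
        return 0;
}

In __init_extra_mapping() the chosen pgprot value is simply OR-ed into the physical address when the 2MB PMD entry is written with set_pmd().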