X-Git-Url: https://err.no/cgi-bin/gitweb.cgi?a=blobdiff_plain;f=arch%2Fx86%2Fmm%2Finit_32.c;h=4d115654519442babfa3e0d2a3f4d4fc3749a9b6;hb=3c1df68b848b39270752ff8d4b956cc4a4dce0f6;hp=3c76d194fd2ccef4df106a2f47fe9b02a3057264;hpb=3568834e813e0dd7547035b3148b2f2a2b48ee4e;p=linux-2.6

diff --git a/arch/x86/mm/init_32.c b/arch/x86/mm/init_32.c
index 3c76d194fd..4d11565451 100644
--- a/arch/x86/mm/init_32.c
+++ b/arch/x86/mm/init_32.c
@@ -27,7 +27,6 @@
 #include <linux/bootmem.h>
 #include <linux/slab.h>
 #include <linux/proc_fs.h>
-#include <linux/efi.h>
 #include <linux/memory_hotplug.h>
 #include <linux/initrd.h>
 #include <linux/cpumask.h>
@@ -42,6 +41,7 @@
 #include <asm/apic.h>
 #include <asm/tlb.h>
 #include <asm/tlbflush.h>
+#include <asm/pgalloc.h>
 #include <asm/sections.h>
 #include <asm/paravirt.h>
 #include <asm/setup.h>
@@ -66,7 +66,7 @@ static pmd_t * __init one_md_table_init(pgd_t *pgd)
 	if (!(pgd_val(*pgd) & _PAGE_PRESENT)) {
 		pmd_table = (pmd_t *) alloc_bootmem_low_pages(PAGE_SIZE);
 
-		paravirt_alloc_pd(__pa(pmd_table) >> PAGE_SHIFT);
+		paravirt_alloc_pd(&init_mm, __pa(pmd_table) >> PAGE_SHIFT);
 		set_pgd(pgd, __pgd(__pa(pmd_table) | _PAGE_PRESENT));
 		pud = pud_offset(pgd, 0);
 		if (pmd_table != pmd_offset(pud, 0))
@@ -165,16 +165,25 @@ static void __init kernel_physical_mapping_init(pgd_t *pgd_base)
 		pmd = one_md_table_init(pgd);
 		if (pfn >= max_low_pfn)
 			continue;
-		for (pmd_idx = 0; pmd_idx < PTRS_PER_PMD && pfn < max_low_pfn; pmd++, pmd_idx++) {
+		for (pmd_idx = 0;
+		     pmd_idx < PTRS_PER_PMD && pfn < max_low_pfn;
+		     pmd++, pmd_idx++) {
 			unsigned int address = pfn * PAGE_SIZE + PAGE_OFFSET;
 
-			/* Map with big pages if possible, otherwise create normal page tables. */
+			/* Map with big pages if possible, otherwise
+			   create normal page tables. */
 			if (cpu_has_pse) {
-				unsigned int address2 = (pfn + PTRS_PER_PTE - 1) * PAGE_SIZE + PAGE_OFFSET + PAGE_SIZE-1;
-				if (is_kernel_text(address) || is_kernel_text(address2))
-					set_pmd(pmd, pfn_pmd(pfn, PAGE_KERNEL_LARGE_EXEC));
-				else
-					set_pmd(pmd, pfn_pmd(pfn, PAGE_KERNEL_LARGE));
+				unsigned int address2;
+				pgprot_t prot = PAGE_KERNEL_LARGE;
+
+				address2 = (pfn + PTRS_PER_PTE - 1) * PAGE_SIZE +
+					PAGE_OFFSET + PAGE_SIZE-1;
+
+				if (is_kernel_text(address) ||
+				    is_kernel_text(address2))
+					prot = PAGE_KERNEL_LARGE_EXEC;
+
+				set_pmd(pmd, pfn_pmd(pfn, prot));
 				pfn += PTRS_PER_PTE;
 			} else {
 				pte = one_page_table_init(pmd);
@@ -183,10 +192,12 @@ static void __init kernel_physical_mapping_init(pgd_t *pgd_base)
 				for (pte_ofs = 0;
 				     pte_ofs < PTRS_PER_PTE && pfn < max_low_pfn;
 				     pte++, pfn++, pte_ofs++, address += PAGE_SIZE) {
+					pgprot_t prot = PAGE_KERNEL;
+
 					if (is_kernel_text(address))
-						set_pte(pte, pfn_pte(pfn, PAGE_KERNEL_EXEC));
-					else
-						set_pte(pte, pfn_pte(pfn, PAGE_KERNEL));
+						prot = PAGE_KERNEL_EXEC;
+
+					set_pte(pte, pfn_pte(pfn, prot));
 				}
 			}
 		}
@@ -200,45 +211,6 @@ static inline int page_kills_ppro(unsigned long pagenr)
 	return 0;
 }
 
-int page_is_ram(unsigned long pagenr)
-{
-	int i;
-	unsigned long addr, end;
-
-	if (efi_enabled) {
-		efi_memory_desc_t *md;
-		void *p;
-
-		for (p = memmap.map; p < memmap.map_end; p += memmap.desc_size) {
-			md = p;
-			if (!is_available_memory(md))
-				continue;
-			addr = (md->phys_addr+PAGE_SIZE-1) >> PAGE_SHIFT;
-			end = (md->phys_addr + (md->num_pages << EFI_PAGE_SHIFT)) >> PAGE_SHIFT;
-
-			if ((pagenr >= addr) && (pagenr < end))
-				return 1;
-		}
-		return 0;
-	}
-
-	for (i = 0; i < e820.nr_map; i++) {
-
-		if (e820.map[i].type != E820_RAM)	/* not usable memory */
-			continue;
-		/*
-		 * !!!FIXME!!! Some BIOSen report areas as RAM that
-		 * are not. Notably the 640->1Mb area. We need a sanity
-		 * check here.
-		 */
-		addr = (e820.map[i].addr+PAGE_SIZE-1) >> PAGE_SHIFT;
-		end = (e820.map[i].addr+e820.map[i].size) >> PAGE_SHIFT;
-		if ((pagenr >= addr) && (pagenr < end))
-			return 1;
-	}
-	return 0;
-}
-
 #ifdef CONFIG_HIGHMEM
 pte_t *kmap_pte;
 pgprot_t kmap_prot;
@@ -338,9 +310,9 @@ static void __init set_highmem_pages_init(int bad_ppro)
 #define set_highmem_pages_init(bad_ppro) do { } while (0)
 #endif /* CONFIG_HIGHMEM */
 
-unsigned long long __PAGE_KERNEL = _PAGE_KERNEL;
+pteval_t __PAGE_KERNEL = _PAGE_KERNEL;
 EXPORT_SYMBOL(__PAGE_KERNEL);
-unsigned long long __PAGE_KERNEL_EXEC = _PAGE_KERNEL_EXEC;
+pteval_t __PAGE_KERNEL_EXEC = _PAGE_KERNEL_EXEC;
 
 #ifdef CONFIG_NUMA
 extern void __init remap_numa_kva(void);
@@ -372,7 +344,7 @@ void __init native_pagetable_setup_start(pgd_t *base)
 	memset(&base[USER_PTRS_PER_PGD], 0,
 	       KERNEL_PGD_PTRS * sizeof(pgd_t));
 #else
-	paravirt_alloc_pd(__pa(swapper_pg_dir) >> PAGE_SHIFT);
+	paravirt_alloc_pd(&init_mm, __pa(base) >> PAGE_SHIFT);
 #endif
 }
 
@@ -435,9 +407,11 @@ static void __init pagetable_init (void)
 	/*
 	 * Fixed mappings, only the page table structure has to be
	 * created - mappings will be set by set_fixmap():
 	 */
+	early_ioremap_clear();
 	vaddr = __fix_to_virt(__end_of_fixed_addresses - 1) & PMD_MASK;
 	end = (FIXADDR_TOP + PMD_SIZE - 1) & PMD_MASK;
 	page_table_range_init(vaddr, end, pgd_base);
+	early_ioremap_reset();
 
 	permanent_kmaps_init(pgd_base);
@@ -485,11 +459,12 @@ void zap_low_mappings (void)
 
 int nx_enabled = 0;
 
+pteval_t __supported_pte_mask __read_mostly = ~_PAGE_NX;
+EXPORT_SYMBOL_GPL(__supported_pte_mask);
+
 #ifdef CONFIG_X86_PAE
 
 static int disable_nx __initdata = 0;
-u64 __supported_pte_mask __read_mostly = ~_PAGE_NX;
-EXPORT_SYMBOL_GPL(__supported_pte_mask);
 
 /*
  * noexec = on|off
@@ -532,34 +507,6 @@ static void __init set_nx(void)
 	}
 }
 
-/*
- * Enables/disables executability of a given kernel page and
- * returns the previous setting.
- */
-int __init set_kernel_exec(unsigned long vaddr, int enable)
-{
-	pte_t *pte;
-	int ret = 1;
-
-	if (!nx_enabled)
-		goto out;
-
-	pte = lookup_address(vaddr);
-	BUG_ON(!pte);
-
-	if (!pte_exec_kernel(*pte))
-		ret = 0;
-
-	if (enable)
-		pte->pte_high &= ~(1 << (_PAGE_BIT_NX - 32));
-	else
-		pte->pte_high |= 1 << (_PAGE_BIT_NX - 32);
-	pte_update_defer(&init_mm, vaddr, pte);
-	__flush_tlb_all();
-out:
-	return ret;
-}
-
 #endif
 
 /*
@@ -801,25 +748,31 @@ void mark_rodata_ro(void)
 	if (num_possible_cpus() <= 1)
 #endif
 	{
-		change_page_attr(virt_to_page(start),
-				 size >> PAGE_SHIFT, PAGE_KERNEL_RX);
+		set_pages_ro(virt_to_page(start), size >> PAGE_SHIFT);
 		printk("Write protecting the kernel text: %luk\n",
 			size >> 10);
+
+#ifdef CONFIG_CPA_DEBUG
+		printk("Testing CPA: Reverting %lx-%lx\n", start, start+size);
+		set_pages_rw(virt_to_page(start), size>>PAGE_SHIFT);
+
+		printk("Testing CPA: write protecting again\n");
+		set_pages_ro(virt_to_page(start), size>>PAGE_SHIFT);
+#endif
 	}
 #endif
 	start += size;
 	size = (unsigned long)__end_rodata - start;
-	change_page_attr(virt_to_page(start),
-			 size >> PAGE_SHIFT, PAGE_KERNEL_RO);
+	set_pages_ro(virt_to_page(start), size >> PAGE_SHIFT);
 	printk("Write protecting the kernel read-only data: %luk\n",
 		size >> 10);
-	/*
-	 * change_page_attr() requires a global_flush_tlb() call after it.
-	 * We do this after the printk so that if something went wrong in the
-	 * change, the printk gets out at least to give a better debug hint
-	 * of who is the culprit.
-	 */
-	global_flush_tlb();
+
+#ifdef CONFIG_CPA_DEBUG
+	printk("Testing CPA: undo %lx-%lx\n", start, start + size);
+	set_pages_rw(virt_to_page(start), size >> PAGE_SHIFT);
+
+	printk("Testing CPA: write protecting again\n");
+	set_pages_ro(virt_to_page(start), size >> PAGE_SHIFT);
+#endif
 }
 #endif
 
@@ -827,6 +780,13 @@ void free_init_pages(char *what, unsigned long begin, unsigned long end)
 {
 	unsigned long addr;
 
+	/*
+	 * We just marked the kernel text read only above, now that
+	 * we are going to free part of that, we need to make that
+	 * writeable first.
+	 */
+	set_memory_rw(begin, (end - begin) >> PAGE_SHIFT);
+
 	for (addr = begin; addr < end; addr += PAGE_SIZE) {
 		ClearPageReserved(virt_to_page(addr));
 		init_page_count(virt_to_page(addr));