free_page(addr);
totalram_pages++;
}
- if (addr > __START_KERNEL_map)
- global_flush_tlb();
+#ifdef CONFIG_DEBUG_RODATA
+ /*
+ * This will make the __init pages not present and
+ * not executable, so that any attempt to use a
+ * __init function from now on will fault immediately
+ * rather than supriously later when memory gets reused.
+ * rather than spuriously later when memory gets reused.
+ *
+ * We only do this for DEBUG_RODATA to not break up the
+ * 2Mb kernel mapping just for this debug feature.
+ */
+ if (begin >= __START_KERNEL_map) {
+ set_memory_np(begin, (end - begin)/PAGE_SIZE);
+ set_memory_nx(begin, (end - begin)/PAGE_SIZE);
+ }
+#endif
}
void free_initmem(void)
return change_page_attr_addr(addr, numpages, prot);
}
-
-
int set_memory_uc(unsigned long addr, int numpages)
{
pgprot_t uncached;
pgprot_val(rw) = _PAGE_RW;
return change_page_attr_clear(addr, numpages, rw);
}
-EXPORT_SYMBOL(set_memory_ro);
int set_memory_rw(unsigned long addr, int numpages)
{
pgprot_val(rw) = _PAGE_RW;
return change_page_attr_set(addr, numpages, rw);
}
-EXPORT_SYMBOL(set_memory_rw);
+
+/*
+ * Mark numpages pages starting at virtual address addr not-present by
+ * clearing _PAGE_PRESENT through change_page_attr_clear().  Any
+ * subsequent access to the range will fault.
+ * NOTE(review): whether a TLB flush is done internally or left to the
+ * caller depends on change_page_attr_clear() — not visible here.
+ */
+int set_memory_np(unsigned long addr, int numpages)
+{
+ pgprot_t present;
+
+ pgprot_val(present) = _PAGE_PRESENT;
+ return change_page_attr_clear(addr, numpages, present);
+}
int set_pages_uc(struct page *page, int numpages)
{
pgprot_val(rw) = _PAGE_RW;
return change_page_attr_clear(addr, numpages, rw);
}
-EXPORT_SYMBOL(set_pages_ro);
int set_pages_rw(struct page *page, int numpages)
{
pgprot_val(rw) = _PAGE_RW;
return change_page_attr_set(addr, numpages, rw);
}
-EXPORT_SYMBOL(set_pages_rw);
-
void clflush_cache_range(void *addr, int size)
{
EXPORT_SYMBOL(global_flush_tlb);
#ifdef CONFIG_DEBUG_PAGEALLOC
+
+/*
+ * CONFIG_DEBUG_PAGEALLOC helper: make numpages pages starting at
+ * @page present and writable again (sets _PAGE_PRESENT | _PAGE_RW
+ * on the kernel direct mapping of the pages).
+ */
+static int __set_pages_p(struct page *page, int numpages)
+{
+ unsigned long addr = (unsigned long)page_address(page);
+ return change_page_attr_set(addr, numpages,
+ __pgprot(_PAGE_PRESENT | _PAGE_RW));
+}
+
+/*
+ * CONFIG_DEBUG_PAGEALLOC helper: clear _PAGE_PRESENT on numpages
+ * pages starting at @page, so any use of a freed page faults
+ * immediately instead of silently reading stale data.
+ */
+static int __set_pages_np(struct page *page, int numpages)
+{
+ unsigned long addr = (unsigned long)page_address(page);
+ return change_page_attr_clear(addr, numpages, __pgprot(_PAGE_PRESENT));
+}
+
void kernel_map_pages(struct page *page, int numpages, int enable)
{
if (PageHighMem(page))
* The return value is ignored - the calls cannot fail,
* large pages are disabled at boot time:
*/
- change_page_attr(page, numpages, enable ? PAGE_KERNEL : __pgprot(0));
+ if (enable)
+ __set_pages_p(page, numpages);
+ else
+ __set_pages_np(page, numpages);
/*
* We should perform an IPI and flush all tlbs,