#include <linux/slab.h>
#include <linux/mm.h>
#include <asm/processor.h>
#include <asm/tlbflush.h>
+#include <asm/sections.h>
#include <asm/uaccess.h>
-#include <asm/io.h>
+#include <asm/pgalloc.h>
+
+/*
+ * Flush the cache lines backing [addr, addr+size); the loop strides by
+ * the CPU's cache line size (boot_cpu_data.x86_clflush_size).
+ */
+void clflush_cache_range(void *addr, int size)
+{
+ int i;
+
+ for (i = 0; i < size; i += boot_cpu_data.x86_clflush_size)
+ clflush(addr+i);
+}
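For reference, a minimal caller sketch (the helper and buffer are hypothetical, not part of this patch; real users also order clflush with a memory barrier):

/* Hypothetical caller: write a buffer, then flush its cache lines
 * before the mapping's caching attribute is changed. */
static void example_flush_buffer(void *buf, int len)
{
	memset(buf, 0, len);		/* dirty the lines */
	clflush_cache_range(buf, len);	/* one clflush per line */
}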
pte_t *lookup_address(unsigned long address, int *level)
{
}
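The body of lookup_address() is unchanged and elided by the diff; it walks the page tables and reports the depth of the final entry through @level. A hedged caller sketch, using this file's convention (visible in __change_page_attr() below) that level 4 means a 4k pte, so anything shallower is a large mapping; the helper name is hypothetical:

/* Hypothetical helper: true if @address is backed by a large page. */
static int mapped_by_large_page(unsigned long address)
{
	int level;
	pte_t *kpte = lookup_address(address, &level);

	return kpte && level != 4;
}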
static struct page *
-split_large_page(unsigned long address, pgprot_t prot, pgprot_t ref_prot)
+split_large_page(unsigned long address, pgprot_t ref_prot)
{
unsigned long addr;
struct page *base;
base = alloc_pages(GFP_KERNEL, 0);
if (!base)
return NULL;
- /*
- * page_private is used to track the number of entries in
- * the page table page have non standard attributes.
- */
- SetPagePrivate(base);
- page_private(base) = 0;
address = __pa(address);
addr = address & LARGE_PAGE_MASK;
pbase = (pte_t *)page_address(base);
- for (i = 0; i < PTRS_PER_PTE; i++, addr += PAGE_SIZE) {
- pbase[i] = pfn_pte(addr >> PAGE_SHIFT,
- addr == address ? prot : ref_prot);
- }
- return base;
-}
-
-void clflush_cache_range(void *addr, int size)
-{
- int i;
-
- for (i = 0; i < size; i += boot_cpu_data.x86_clflush_size)
- clflush(addr+i);
-}
+ for (i = 0; i < PTRS_PER_PTE; i++, addr += PAGE_SIZE)
+ pbase[i] = pfn_pte(addr >> PAGE_SHIFT, ref_prot);
-static void flush_kernel_map(void *arg)
-{
- struct list_head *l = (struct list_head *)arg;
- struct page *pg;
-
- __flush_tlb_all();
-
- /* When clflush is available always use it because it is
- much cheaper than WBINVD. */
- /* clflush is still broken. Disable for now. */
- if (1 || !cpu_has_clflush) {
- wbinvd();
- } else {
- list_for_each_entry(pg, l, lru) {
- void *addr = page_address(pg);
-
- clflush_cache_range(addr, PAGE_SIZE);
- }
- }
-}
-
-static inline void flush_map(struct list_head *l)
-{
- on_each_cpu(flush_kernel_map, l, 1, 1);
-}
-
-static LIST_HEAD(deferred_pages); /* protected by init_mm.mmap_sem */
-
-static inline void save_page(struct page *fpage)
-{
- if (!test_and_set_bit(PG_arch_1, &fpage->flags))
- list_add(&fpage->lru, &deferred_pages);
-}
-
-/*
- * No more special protections in this 2/4MB area - revert to a
- * large page again.
- */
-static void revert_page(unsigned long address, pgprot_t ref_prot)
-{
- unsigned long pfn;
- pgd_t *pgd;
- pud_t *pud;
- pmd_t *pmd;
- pte_t large_pte;
-
- pgd = pgd_offset_k(address);
- BUG_ON(pgd_none(*pgd));
- pud = pud_offset(pgd, address);
- BUG_ON(pud_none(*pud));
- pmd = pmd_offset(pud, address);
- BUG_ON(pmd_val(*pmd) & _PAGE_PSE);
- pfn = (__pa(address) & LARGE_PAGE_MASK) >> PAGE_SHIFT;
- large_pte = pfn_pte(pfn, ref_prot);
- large_pte = pte_mkhuge(large_pte);
-
- set_pte((pte_t *)pmd, large_pte);
+ return base;
}
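A worked example of the masking above, with a hypothetical physical address:

/*
 * LARGE_PAGE_MASK clears the offset bits within a 2MB frame:
 *   address = 0x1234567  ->  addr = address & LARGE_PAGE_MASK = 0x1200000
 * The loop then installs PTRS_PER_PTE (512) ptes covering
 * 0x1200000, 0x1201000, ..., 0x13ff000, all with the same ref_prot.
 */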
static int
-__change_page_attr(unsigned long address, unsigned long pfn, pgprot_t prot,
- pgprot_t ref_prot)
+__change_page_attr(unsigned long address, unsigned long pfn, pgprot_t prot)
{
struct page *kpte_page;
- pgprot_t ref_prot2;
pte_t *kpte;
+ pgprot_t ref_prot2, oldprot;
int level;
+repeat:
kpte = lookup_address(address, &level);
if (!kpte)
return 0;
kpte_page = virt_to_page(kpte);
+ oldprot = pte_pgprot(*kpte);
BUG_ON(PageLRU(kpte_page));
BUG_ON(PageCompound(kpte_page));
- if (pgprot_val(prot) != pgprot_val(ref_prot)) {
- if (level == 4) {
- set_pte(kpte, pfn_pte(pfn, prot));
- } else {
- /*
- * split_large_page will take the reference for this
- * change_page_attr on the split page.
- */
- struct page *split;
+ prot = canon_pgprot(prot);
- ref_prot2 = pte_pgprot(pte_clrhuge(*kpte));
- split = split_large_page(address, prot, ref_prot2);
- if (!split)
- return -ENOMEM;
- pgprot_val(ref_prot2) &= ~_PAGE_NX;
- set_pte(kpte, mk_pte(split, ref_prot2));
- kpte_page = split;
- }
- page_private(kpte_page)++;
+ if (level == 4) {
+ set_pte(kpte, pfn_pte(pfn, prot));
} else {
- if (level == 4) {
- set_pte(kpte, pfn_pte(pfn, ref_prot));
- BUG_ON(page_private(kpte_page) == 0);
- page_private(kpte_page)--;
- } else
- BUG();
+ /*
+ * No 4k pte for this address yet: split the large page into
+ * 4k pages carrying the old protections, install the split
+ * page, then retry so the 4k pte for this address gets @prot.
+ */
+ struct page *split;
+
+ ref_prot2 = pte_pgprot(pte_clrhuge(*kpte));
+ split = split_large_page(address, ref_prot2);
+ if (!split)
+ return -ENOMEM;
+ pgprot_val(ref_prot2) &= ~_PAGE_NX;
+ set_pte(kpte, mk_pte(split, ref_prot2));
+ goto repeat;
}
- /* on x86-64 the direct mapping set at boot is not using 4k pages */
- BUG_ON(PageReserved(kpte_page));
-
- save_page(kpte_page);
- if (page_private(kpte_page) == 0)
- revert_page(address, ref_prot);
return 0;
}
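To spell out the new control flow, a hedged trace for an address still covered by a large page (level numbering as used above):

/*
 * pass 1: lookup_address() returns the large-page entry (level != 4);
 *         split_large_page() builds 512 4k ptes carrying the old
 *         protections, set_pte() installs the split page, goto repeat.
 * pass 2: lookup_address() now finds a 4k pte (level == 4), and
 *         set_pte(kpte, pfn_pte(pfn, prot)) applies the new attribute.
 */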
-/*
- * Change the page attributes of an page in the linear mapping.
- *
- * This should be used when a page is mapped with a different caching policy
- * than write-back somewhere - some CPUs do not like it when mappings with
- * different caching policies exist. This changes the page attributes of the
- * in kernel linear mapping too.
+/**
+ * change_page_attr_addr - Change page table attributes in linear mapping
+ * @address: Virtual address in linear mapping
+ * @numpages: Number of pages to change
+ * @prot: New page table attribute (PAGE_*)
*
- * The caller needs to ensure that there are no conflicting mappings elsewhere.
- * This function only deals with the kernel linear map.
+ * Change page attributes of a page in the direct mapping. This is a variant
+ * of change_page_attr() that also works on memory holes that do not have a
+ * mem_map entry (pfn_valid() is false).
*
- * Caller must call global_flush_tlb() after this.
+ * See change_page_attr() documentation for more details.
*/
int change_page_attr_addr(unsigned long address, int numpages, pgprot_t prot)
{
int err = 0, kernel_map = 0, i;
unsigned long pfn = __pa(address) >> PAGE_SHIFT;
if (!kernel_map || pte_present(pfn_pte(0, prot))) {
- err = __change_page_attr(address, pfn, prot,
- PAGE_KERNEL);
+ err = __change_page_attr(address, pfn, prot);
if (err)
break;
}
addr2 = __START_KERNEL_map + __pa(address);
/* Make sure the kernel mappings stay executable */
prot2 = pte_pgprot(pte_mkexec(pfn_pte(0, prot)));
- err = __change_page_attr(addr2, pfn, prot2,
- PAGE_KERNEL_EXEC);
+ err = __change_page_attr(addr2, pfn, prot2);
}
}
up_write(&init_mm.mmap_sem);
return err;
}
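A hedged usage sketch mirroring the kernel-doc above (the wrapper is hypothetical; PAGE_KERNEL_NOCACHE is the usual uncached kernel protection):

/* Hypothetical wrapper: make @numpages pages at @vaddr uncacheable.
 * Unlike change_page_attr(), this also works for addresses that have
 * no mem_map entry. */
static int set_addr_uncached(unsigned long vaddr, int numpages)
{
	int err = change_page_attr_addr(vaddr, numpages, PAGE_KERNEL_NOCACHE);

	if (!err)
		global_flush_tlb();	/* make the change active */
	return err;
}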
-/* Don't call this for MMIO areas that may not have a mem_map entry */
+/**
+ * change_page_attr - Change page table attributes in the linear mapping.
+ * @page: First page to change
+ * @numpages: Number of pages to change
+ * @prot: New protection/caching type (PAGE_*)
+ *
+ * Returns 0 on success, otherwise a negated errno.
+ *
+ * This should be used when a page is mapped with a different caching policy
+ * than write-back somewhere - some CPUs do not like it when mappings with
+ * different caching policies exist. This changes the page attributes of the
+ * in kernel linear mapping too.
+ *
+ * Caller must call global_flush_tlb() later to make the changes active.
+ *
+ * The caller needs to ensure that there are no conflicting mappings elsewhere
+ * (e.g. in user space). This function only deals with the kernel linear map.
+ *
+ * For MMIO areas without mem_map use change_page_attr_addr() instead.
+ */
int change_page_attr(struct page *page, int numpages, pgprot_t prot)
{
unsigned long addr = (unsigned long)page_address(page);
}
EXPORT_SYMBOL(change_page_attr);
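The struct page based variant, again as a hedged sketch (helper hypothetical), showing the documented pairing with global_flush_tlb() and the restore path:

/* Hypothetical helper: switch one page to uncached, use it, restore. */
static int with_page_uncached(struct page *page)
{
	int err = change_page_attr(page, 1, PAGE_KERNEL_NOCACHE);

	if (err)
		return err;
	global_flush_tlb();
	/* ... access the now-uncached linear mapping ... */
	err = change_page_attr(page, 1, PAGE_KERNEL);	/* revert */
	global_flush_tlb();
	return err;
}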
-void global_flush_tlb(void)
+static void flush_kernel_map(void *arg)
{
- struct page *pg, *next;
- struct list_head l;
-
/*
- * Write-protect the semaphore, to exclude two contexts
- * doing a list_replace_init() call in parallel and to
- * exclude new additions to the deferred_pages list:
+ * Flush all to work around errata in early Athlons regarding
+ * large page flushing.
*/
- down_write(&init_mm.mmap_sem);
- list_replace_init(&deferred_pages, &l);
- up_write(&init_mm.mmap_sem);
+ __flush_tlb_all();
+
+ if (boot_cpu_data.x86_model >= 4)
+ wbinvd();
+}
- flush_map(&l);
+void global_flush_tlb(void)
+{
+ BUG_ON(irqs_disabled());
- list_for_each_entry_safe(pg, next, &l, lru) {
- list_del(&pg->lru);
- clear_bit(PG_arch_1, &pg->flags);
- if (page_private(pg) != 0)
- continue;
- ClearPagePrivate(pg);
- __free_page(pg);
- }
+ on_each_cpu(flush_kernel_map, NULL, 1, 1);
}
EXPORT_SYMBOL(global_flush_tlb);
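Because flush_kernel_map() now runs on every CPU and may wbinvd(), callers should batch attribute changes and flush once; a hedged sketch (array and helper hypothetical):

/* Hypothetical batching helper: one global flush for many pages. */
static int uncache_pages(struct page **pages, int n)
{
	int i, err = 0;

	for (i = 0; i < n; i++) {
		err = change_page_attr(pages[i], 1, PAGE_KERNEL_NOCACHE);
		if (err)
			break;
	}
	global_flush_tlb();	/* one round of IPIs, not one per page */
	return err;
}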