/*
 * Copyright 2002 Andi Kleen, SuSE Labs.
 * Thanks to Ben LaHaise for precious feedback.
 */

#include <linux/highmem.h>
#include <linux/module.h>
#include <linux/sched.h>
#include <linux/slab.h>
#include <linux/mm.h>

#include <asm/processor.h>
#include <asm/tlbflush.h>
#include <asm/uaccess.h>
#include <asm/io.h>

pte_t *lookup_address(unsigned long address, int *level)
{
	pgd_t *pgd = pgd_offset_k(address);
	pud_t *pud;
	pmd_t *pmd;
	pte_t *pte;

	if (pgd_none(*pgd))
		return NULL;
	pud = pud_offset(pgd, address);
	if (!pud_present(*pud))
		return NULL;
	pmd = pmd_offset(pud, address);
	if (!pmd_present(*pmd))
		return NULL;
	*level = 3;
	/* A 2MB mapping ends the walk at the PMD level. */
	if (pmd_large(*pmd))
		return (pte_t *)pmd;
	*level = 4;

	pte = pte_offset_kernel(pmd, address);
	if (pte && !pte_present(*pte))
		pte = NULL;

	return pte;
}

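/*
 * Illustrative sketch (not part of the original file): how a caller
 * can use lookup_address() and the returned level - 3 for a 2MB
 * (PMD-level) mapping, 4 for a 4KB (PTE-level) mapping, following
 * the convention above. The helper name is hypothetical.
 */
static void __maybe_unused print_mapping_level(unsigned long address)
{
	int level;
	pte_t *pte = lookup_address(address, &level);

	if (!pte)
		printk(KERN_DEBUG "%lx: not mapped\n", address);
	else
		printk(KERN_DEBUG "%lx: mapped by a %s page\n", address,
		       level == 3 ? "2MB" : "4KB");
}
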
static struct page *
split_large_page(unsigned long address, pgprot_t prot, pgprot_t ref_prot)
{
	unsigned long addr;
	struct page *base;
	pte_t *pbase;
	int i;

	base = alloc_pages(GFP_KERNEL, 0);
	if (!base)
		return NULL;
	/*
	 * page_private is used to track the number of entries in
	 * the page table page that have non-standard attributes.
	 */
	SetPagePrivate(base);
	page_private(base) = 0;

	address = __pa(address);
	addr = address & LARGE_PAGE_MASK;
	pbase = (pte_t *)page_address(base);
	for (i = 0; i < PTRS_PER_PTE; i++, addr += PAGE_SIZE) {
		pbase[i] = pfn_pte(addr >> PAGE_SHIFT,
				   addr == address ? prot : ref_prot);
	}
	return base;
}

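/*
 * Sketch of the invariant established above (not in the original
 * file): after a split, each of the PTRS_PER_PTE new PTEs still maps
 * the same physical page the 2MB mapping covered; only the entry for
 * the target address carries `prot`. The checker below is purely
 * illustrative; `phys` is the physical address, i.e. __pa(address).
 */
static void __maybe_unused check_split(pte_t *pbase, unsigned long phys)
{
	unsigned long base_pfn = (phys & LARGE_PAGE_MASK) >> PAGE_SHIFT;
	int i;

	for (i = 0; i < PTRS_PER_PTE; i++)
		BUG_ON(pte_pfn(pbase[i]) != base_pfn + i);
}
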
void clflush_cache_range(void *addr, int size)
{
	int i;

	for (i = 0; i < size; i += boot_cpu_data.x86_clflush_size)
		clflush(addr + i);
}

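/*
 * Usage sketch (not part of the original file): write back the cache
 * lines covering one page, e.g. before its mapping turns uncached.
 * The size argument is in bytes; the loop above advances one CPU
 * cache line (boot_cpu_data.x86_clflush_size bytes) per iteration.
 * The helper name is hypothetical.
 */
static void __maybe_unused example_flush_one_page(struct page *pg)
{
	/* page_address() is always valid here: x86-64 has no highmem. */
	clflush_cache_range(page_address(pg), PAGE_SIZE);
}
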
static void flush_kernel_map(void *arg)
{
	struct list_head *l = (struct list_head *)arg;
	struct page *pg;

	/* When clflush is available, always use it because it is
	   much cheaper than WBINVD. */
	/* clflush is still broken. Disable for now. */
	if (1 || !cpu_has_clflush) {
		asm volatile("wbinvd" ::: "memory");
	} else {
		list_for_each_entry(pg, l, lru) {
			void *addr = page_address(pg);

			clflush_cache_range(addr, PAGE_SIZE);
		}
	}
	__flush_tlb_all();
}

static inline void flush_map(struct list_head *l)
{
	on_each_cpu(flush_kernel_map, l, 1, 1);
}

static LIST_HEAD(deferred_pages); /* protected by init_mm.mmap_sem */

static inline void save_page(struct page *fpage)
{
	if (!test_and_set_bit(PG_arch_1, &fpage->flags))
		list_add(&fpage->lru, &deferred_pages);
}

/*
 * No more special protections in this 2/4MB area - revert to a
 * large page again.
 */
static void revert_page(unsigned long address, pgprot_t ref_prot)
{
	unsigned long pfn;
	pgd_t *pgd;
	pud_t *pud;
	pmd_t *pmd;
	pte_t large_pte;

	pgd = pgd_offset_k(address);
	BUG_ON(pgd_none(*pgd));
	pud = pud_offset(pgd, address);
	BUG_ON(pud_none(*pud));
	pmd = pmd_offset(pud, address);
	BUG_ON(pmd_val(*pmd) & _PAGE_PSE);
	pfn = (__pa(address) & LARGE_PAGE_MASK) >> PAGE_SHIFT;
	large_pte = pfn_pte(pfn, ref_prot);
	large_pte = pte_mkhuge(large_pte);

	set_pte((pte_t *)pmd, large_pte);
}

static int
__change_page_attr(unsigned long address, unsigned long pfn, pgprot_t prot,
		   pgprot_t ref_prot)
{
	struct page *kpte_page;
	pgprot_t ref_prot2;
	pte_t *kpte;
	int level;

	kpte = lookup_address(address, &level);
	if (!kpte)
		return 0;

	kpte_page = virt_to_page(((unsigned long)kpte) & PAGE_MASK);
	BUG_ON(PageLRU(kpte_page));
	BUG_ON(PageCompound(kpte_page));
	if (pgprot_val(prot) != pgprot_val(ref_prot)) {
		if (!pte_huge(*kpte)) {
			set_pte(kpte, pfn_pte(pfn, prot));
		} else {
			/*
			 * split_large_page will take the reference for this
			 * change_page_attr on the split page.
			 */
			struct page *split;

			ref_prot2 = pte_pgprot(pte_clrhuge(*kpte));
			split = split_large_page(address, prot, ref_prot2);
			if (!split)
				return -ENOMEM;
			pgprot_val(ref_prot2) &= ~_PAGE_NX;
			set_pte(kpte, mk_pte(split, ref_prot2));
			kpte_page = split;
		}
		page_private(kpte_page)++;
	} else {
		if (!pte_huge(*kpte)) {
			set_pte(kpte, pfn_pte(pfn, ref_prot));
			BUG_ON(page_private(kpte_page) == 0);
			page_private(kpte_page)--;
		} else
			BUG();
	}

	/* on x86-64 the direct mapping set at boot is not using 4k pages */
	BUG_ON(PageReserved(kpte_page));

	save_page(kpte_page);
	if (page_private(kpte_page) == 0)
		revert_page(address, ref_prot);
	return 0;
}

/*
 * Change the page attributes of a page in the kernel linear mapping.
 *
 * This should be used when a page is mapped with a different caching policy
 * than write-back somewhere - some CPUs do not like it when mappings with
 * different caching policies exist. This changes the page attributes of the
 * in kernel linear mapping too.
 *
 * The caller needs to ensure that there are no conflicting mappings elsewhere.
 * This function only deals with the kernel linear map.
 *
 * Caller must call global_flush_tlb() after this.
 */
int change_page_attr_addr(unsigned long address, int numpages, pgprot_t prot)
{
	int err = 0, kernel_map = 0, i;

	if (address >= __START_KERNEL_map &&
	    address < __START_KERNEL_map + KERNEL_TEXT_SIZE) {
		address = (unsigned long)__va(__pa(address));
		kernel_map = 1;
	}

	down_write(&init_mm.mmap_sem);
	for (i = 0; i < numpages; i++, address += PAGE_SIZE) {
		unsigned long pfn = __pa(address) >> PAGE_SHIFT;

		if (!kernel_map || pte_present(pfn_pte(0, prot))) {
			err = __change_page_attr(address, pfn, prot,
						 PAGE_KERNEL);
			if (err)
				break;
		}
		/* Handle the kernel mapping too, which aliases part of
		   the lowmem */
		if (__pa(address) < KERNEL_TEXT_SIZE) {
			unsigned long addr2;
			pgprot_t prot2;

			addr2 = __START_KERNEL_map + __pa(address);
			/* Make sure the kernel mappings stay executable */
			prot2 = pte_pgprot(pte_mkexec(pfn_pte(0, prot)));
			err = __change_page_attr(addr2, pfn, prot2,
						 PAGE_KERNEL_EXEC);
		}
	}
	up_write(&init_mm.mmap_sem);

	return err;
}

/* Don't call this for MMIO areas that may not have a mem_map entry */
int change_page_attr(struct page *page, int numpages, pgprot_t prot)
{
	unsigned long addr = (unsigned long)page_address(page);

	return change_page_attr_addr(addr, numpages, prot);
}
EXPORT_SYMBOL(change_page_attr);

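/*
 * Usage sketch (not part of the original file): a typical caller,
 * e.g. a driver that wants a page write-protected in the kernel
 * linear map. The helper name is hypothetical; per the contract
 * documented above, the caller must follow up with global_flush_tlb().
 */
static int __maybe_unused example_set_page_ro(struct page *page)
{
	/* The change is queued on deferred_pages until the global flush. */
	return change_page_attr(page, 1, PAGE_KERNEL_RO);
}
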
void global_flush_tlb(void)
{
	struct page *pg, *next;
	struct list_head l;

	/*
	 * Write-protect the semaphore, to exclude two contexts
	 * doing a list_replace_init() call in parallel and to
	 * exclude new additions to the deferred_pages list:
	 */
	down_write(&init_mm.mmap_sem);
	list_replace_init(&deferred_pages, &l);
	up_write(&init_mm.mmap_sem);

	flush_map(&l);

	list_for_each_entry_safe(pg, next, &l, lru) {
		list_del(&pg->lru);
		clear_bit(PG_arch_1, &pg->flags);
		if (page_private(pg) != 0)
			continue;
		ClearPagePrivate(pg);
		__free_page(pg);
	}
}
EXPORT_SYMBOL(global_flush_tlb);

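/*
 * Usage sketch (not part of the original file): the deferred_pages
 * list exists so that many attribute changes can share one expensive
 * wbinvd + TLB flush. A caller changing several pages batches the
 * changes and flushes once at the end. Names are hypothetical.
 */
static int __maybe_unused example_map_pages_nocache(struct page **pages,
						    int count)
{
	int i, err = 0;

	for (i = 0; i < count; i++) {
		err = change_page_attr(pages[i], 1, PAGE_KERNEL_NOCACHE);
		if (err)
			break;
	}
	/* A single flush covers every change queued above. */
	global_flush_tlb();
	return err;
}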