return (pte_t *)__get_free_page(GFP_KERNEL|__GFP_REPEAT|__GFP_ZERO);
}
-struct page *pte_alloc_one(struct mm_struct *mm, unsigned long address)
+pgtable_t pte_alloc_one(struct mm_struct *mm, unsigned long address)
{
struct page *pte;
#ifdef CONFIG_HIGHPTE
	pte = alloc_pages(GFP_KERNEL|__GFP_HIGHMEM|__GFP_REPEAT|__GFP_ZERO, 0);
#else
pte = alloc_pages(GFP_KERNEL|__GFP_REPEAT|__GFP_ZERO, 0);
#endif
+ if (pte)
+ pgtable_page_ctor(pte);
return pte;
}
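
The pgtable_page_ctor() call is what hooks the freshly allocated pte page
into the split page-table-lock machinery. For context (this helper lives in
include/linux/mm.h and is not part of this hunk), the generic constructor at
the time looked roughly like:

	static inline void pgtable_page_ctor(struct page *page)
	{
		pte_lock_init(page);	/* per-page pte spinlock, when split ptlocks are in use */
		inc_zone_page_state(page, NR_PAGETABLE);
	}
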
/*
 * Mop up any pmd pages which may still be attached to the pgd.
 * Normally they will be freed by munmap/exit_mmap, but any pmd we
 * preallocate which never got a corresponding vma will need to be
* freed manually.
*/
-static void pgd_mop_up_pmds(pgd_t *pgdp)
+static void pgd_mop_up_pmds(struct mm_struct *mm, pgd_t *pgdp)
{
int i;
	for (i = 0; i < UNSHARED_PTRS_PER_PGD; i++) {
		pgd_t pgd = pgdp[i];

		if (pgd_val(pgd) != 0) {
			pmd_t *pmd = (pmd_t *)pgd_page_vaddr(pgd);

			pgdp[i] = native_make_pgd(0);
paravirt_release_pd(pgd_val(pgd) >> PAGE_SHIFT);
- pmd_free(pmd);
+ pmd_free(mm, pmd);
}
}
}
pmd_t *pmd = pmd_alloc_one(mm, addr);
if (!pmd) {
- pgd_mop_up_pmds(pgd);
+ pgd_mop_up_pmds(mm, pgd);
return 0;
}
return 1;
}
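
The error path above comes from pgd_prepopulate_pmd() (the surrounding
preallocation loop is elided in this hunk). Its caller pairs prepopulation
with the quicklist allocator; a sketch of the surrounding pgd_alloc(),
assuming the 2.6.24-era x86_32 layout:

	pgd_t *pgd_alloc(struct mm_struct *mm)
	{
		pgd_t *pgd = quicklist_alloc(0, GFP_KERNEL, pgd_ctor);

		mm->pgd = pgd;	/* so the paravirt alloc hooks can see it */

		if (pgd && !pgd_prepopulate_pmd(mm, pgd)) {
			/* prepopulation failed: mop-up already ran, drop the pgd */
			quicklist_free(0, pgd_dtor, pgd);
			pgd = NULL;
		}
		return pgd;
	}
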
-static void pgd_mop_up_pmds(pgd_t *pgd)
+static void pgd_mop_up_pmds(struct mm_struct *mm, pgd_t *pgdp)
{
}
#endif /* CONFIG_X86_PAE */
return pgd;
}
-void pgd_free(pgd_t *pgd)
+void pgd_free(struct mm_struct *mm, pgd_t *pgd)
{
- pgd_mop_up_pmds(pgd);
+ pgd_mop_up_pmds(mm, pgd);
quicklist_free(0, pgd_dtor, pgd);
}
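
Threading the mm through pgd_free() means every caller must now supply the
address space being torn down; the fork/exit path, for instance, does
roughly:

	/* kernel/fork.c (sketch): the mm now rides along with its pgd */
	static inline void mm_free_pgd(struct mm_struct *mm)
	{
		pgd_free(mm, mm->pgd);
	}
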
void __pte_free_tlb(struct mmu_gather *tlb, struct page *pte)
{
+ pgtable_page_dtor(pte);
paravirt_release_pt(page_to_pfn(pte));
tlb_remove_page(tlb, pte);
}
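
pgtable_page_dtor() is the counterpart of the constructor added in
pte_alloc_one() above, and must run before the page goes back to the
allocator; the generic helper (again from include/linux/mm.h, shown roughly
for context) tears the split-ptlock state back down:

	static inline void pgtable_page_dtor(struct page *page)
	{
		pte_lock_deinit(page);	/* undo pte_lock_init() before the page is freed */
		dec_zone_page_state(page, NR_PAGETABLE);
	}
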
void __pmd_free_tlb(struct mmu_gather *tlb, pmd_t *pmd)
{
- /* This is called just after the pmd has been detached from
- the pgd, which requires a full tlb flush to be recognized
- by the CPU. Rather than incurring multiple tlb flushes
- while the address space is being pulled down, make the tlb
- gathering machinery do a full flush when we're done. */
- tlb->fullmm = 1;
-
paravirt_release_pd(__pa(pmd) >> PAGE_SHIFT);
tlb_remove_page(tlb, virt_to_page(pmd));
}