clean_dcache_area(new_pgd, PTRS_PER_PGD * sizeof(pgd_t));
if (!vectors_high()) {
- /*
- * This lock is here just to satisfy pmd_alloc and pte_lock
- */
- spin_lock(&mm->page_table_lock);
-
/*
* On ARM, first page must always be allocated since it
* contains the machine vectors.
*/
set_pte(new_pte, *init_pte);
pte_unmap_nested(init_pte);
pte_unmap(new_pte);
-
- spin_unlock(&mm->page_table_lock);
}
return new_pgd;
no_pte:
- spin_unlock(&mm->page_table_lock);
pmd_free(new_pmd);
- free_pages((unsigned long)new_pgd, 2);
- return NULL;
-
no_pmd:
- spin_unlock(&mm->page_table_lock);
free_pages((unsigned long)new_pgd, 2);
- return NULL;
-
no_pgd:
return NULL;
}
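With the lock/unlock pairs gone, the failure labels in the ARM pgd allocator collapse into a plain fall-through chain. As a reading aid, this is how the error path looks once the hunk above is applied (reconstructed only from the context and removed lines shown here; the allocation body in between is elided in this excerpt):

no_pte:
	pmd_free(new_pmd);
no_pmd:
	free_pages((unsigned long)new_pgd, 2);
no_pgd:
	return NULL;
}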
if (!new_pgd)
goto no_pgd;
- /*
- * This lock is here just to satisfy pmd_alloc and pte_lock
- * FIXME: I bet we could avoid taking it pretty much altogether
- */
- spin_lock(&mm->page_table_lock);
-
/*
* On ARM, first page must always be allocated since it contains
* the machine vectors.
*/
memcpy(new_pgd + FIRST_KERNEL_PGD_NR, init_pgd + FIRST_KERNEL_PGD_NR,
(PTRS_PER_PGD - FIRST_KERNEL_PGD_NR) * sizeof(pgd_t));
- spin_unlock(&mm->page_table_lock);
-
/* update MEMC tables */
cpu_memc_update_all(new_pgd);
return new_pgd;
no_pte:
- spin_unlock(&mm->page_table_lock);
pmd_free(new_pmd);
- free_pgd_slow(new_pgd);
- return NULL;
-
no_pmd:
- spin_unlock(&mm->page_table_lock);
free_pgd_slow(new_pgd);
- return NULL;
-
no_pgd:
return NULL;
}
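The second allocator (the variant that releases the table with free_pgd_slow) gets the same treatment: with the unlocks and the early "return NULL" statements dropped, its labels also fall straight through, so each failure point frees only what had been allocated by that stage. Again reconstructed from the visible lines of this hunk:

no_pte:
	pmd_free(new_pmd);
no_pmd:
	free_pgd_slow(new_pgd);
no_pgd:
	return NULL;
}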
dir = pgd_offset(mm, from);
flush_cache_range(vma, beg, end);
- spin_lock(&mm->page_table_lock);
while (from < end) {
- pmd_t *pmd = pmd_alloc(current->mm, dir, from);
+ pmd_t *pmd = pmd_alloc(mm, dir, from);
error = -ENOMEM;
if (!pmd)
break;
from = (from + PGDIR_SIZE) & PGDIR_MASK;
dir++;
}
- spin_unlock(&mm->page_table_lock);
flush_tlb_range(vma, beg, end);
return error;
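Applied, the pmd-level walker reads as below. The two functional changes are that pmd_alloc() is now passed the same mm already used for pgd_offset() rather than current->mm, and that the walk no longer holds mm->page_table_lock around the allocations (the assumption being that pmd_alloc does whatever locking it needs itself). The pud-level walker in the next hunk gets the identical treatment. This is a sketch assembled from the hunk above, not the complete function:

	dir = pgd_offset(mm, from);
	flush_cache_range(vma, beg, end);
	while (from < end) {
		pmd_t *pmd = pmd_alloc(mm, dir, from);
		error = -ENOMEM;
		if (!pmd)
			break;
		from = (from + PGDIR_SIZE) & PGDIR_MASK;
		dir++;
	}
	flush_tlb_range(vma, beg, end);
	return error;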
dir = pgd_offset(mm, from);
flush_cache_range(vma, beg, end);
- spin_lock(&mm->page_table_lock);
while (from < end) {
- pud_t *pud = pud_alloc(current->mm, dir, from);
+ pud_t *pud = pud_alloc(mm, dir, from);
error = -ENOMEM;
if (!pud)
break;
from = (from + PGDIR_SIZE) & PGDIR_MASK;
dir++;
}
- flush_tlb_range(vma, beg, end);
- spin_unlock(&mm->page_table_lock);
+ flush_tlb_range(vma, beg, end);
return error;
}
pmd_t *pmd;
pte_t *pte;
- spin_lock(&mm->page_table_lock);
pgd = pgd_offset(mm, proc);
pud = pud_alloc(mm, pgd, proc);
if (!pud)
*pte = mk_pte(virt_to_page(kernel), __pgprot(_PAGE_PRESENT));
*pte = pte_mkexec(*pte);
*pte = pte_wrprotect(*pte);
- spin_unlock(&mm->page_table_lock);
return(0);
out_pmd:
out_pte:
pmd_free(pmd);
out:
- spin_unlock(&mm->page_table_lock);
return(-ENOMEM);
}