X-Git-Url: https://err.no/cgi-bin/gitweb.cgi?a=blobdiff_plain;f=include%2Fasm-generic%2Fpgtable.h;h=358e4d309ceb179776f2a7036dabce02cdfad1d2;hb=8c65b4a60450590e79a28e9717ceffa9e4debb3f;hp=f86c1e549466a0809c02fecc750cb5627ed08027;hpb=ef88b7dba2b47c70037a34a599d383462bb74bd3;p=linux-2.6

diff --git a/include/asm-generic/pgtable.h b/include/asm-generic/pgtable.h
index f86c1e5494..358e4d309c 100644
--- a/include/asm-generic/pgtable.h
+++ b/include/asm-generic/pgtable.h
@@ -8,7 +8,7 @@
  * - update the page tables
  * - inform the TLB about the new one
  *
- * We hold the mm semaphore for reading and vma->vm_mm->page_table_lock.
+ * We hold the mm semaphore for reading, and the pte lock.
  *
  * Note: the old pte is known to not be writable, so we don't need to
  * worry about dirty bits etc getting lost.
@@ -128,6 +128,7 @@ do { \
 #endif
 
 #ifndef __HAVE_ARCH_PTEP_SET_WRPROTECT
+struct mm_struct;
 static inline void ptep_set_wrprotect(struct mm_struct *mm, unsigned long address, pte_t *ptep)
 {
 	pte_t old_pte = *ptep;
@@ -158,6 +159,19 @@ static inline void ptep_set_wrprotect(struct mm_struct *mm, unsigned long addres
 #define lazy_mmu_prot_update(pte)	do { } while (0)
 #endif
 
+#ifndef __HAVE_ARCH_MULTIPLE_ZERO_PAGE
+#define move_pte(pte, prot, old_addr, new_addr)	(pte)
+#else
+#define move_pte(pte, prot, old_addr, new_addr)				\
+({									\
+	pte_t newpte = (pte);						\
+	if (pte_present(pte) && pfn_valid(pte_pfn(pte)) &&		\
+			pte_page(pte) == ZERO_PAGE(old_addr))		\
+		newpte = mk_pte(ZERO_PAGE(new_addr), (prot));		\
+	newpte;								\
+})
+#endif
+
 /*
  * When walking page tables, get the address of the next boundary,
  * or the end address of the range if that comes earlier.  Although no
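
The lone "struct mm_struct;" line added before ptep_set_wrprotect() is a forward declaration of the struct tag, presumably so the header no longer depends on struct mm_struct having been declared by an earlier include: in C, the first mention of an undeclared struct tag inside a parameter list scopes the tag to that one declaration and gcc warns "'struct mm_struct' declared inside parameter list". The stand-alone program below reproduces the same language rule in user space; demo_ctx and demo_wrprotect are hypothetical names, not kernel code.

/* Build with: gcc -Wall demo_fwd.c
 * Removing the forward declaration below makes gcc warn
 * "'struct demo_ctx' declared inside parameter list", because the tag
 * would then be scoped to the prototype/definition that first names it.
 */
#include <stdio.h>

struct demo_ctx;			/* forward declaration, same role as "struct mm_struct;" */

static inline void demo_wrprotect(struct demo_ctx *ctx, unsigned long address)
{
	(void)ctx;			/* body is irrelevant to the point */
	(void)address;
}

struct demo_ctx { int users; };		/* full definition may come later */

int main(void)
{
	struct demo_ctx ctx = { 1 };
	demo_wrprotect(&ctx, 0);
	printf("ok\n");
	return 0;
}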
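The move_pte() macro added at the end of the patch is written as a GNU C statement expression, ({ ... }), so it can branch internally and still yield a value: on architectures that define __HAVE_ARCH_MULTIPLE_ZERO_PAGE, ZERO_PAGE() depends on the virtual address, so a pte that maps the zero page has to be rebuilt for the destination address when a mapping is moved (as mremap() does); everywhere else the macro is the identity. The user-space sketch below only mirrors the shape of that macro; demo_pte, demo_zero_page_for and demo_move_pte are hypothetical names, and the address-colouring rule is invented for illustration.

/* Build with: gcc -Wall demo_move.c  (statement expressions are a GNU extension) */
#include <stdio.h>

typedef struct { unsigned long val; } demo_pte;

/* Pretend each address "colour" has its own zero page (made-up rule). */
static unsigned long demo_zero_page_for(unsigned long addr)
{
	return 0x1000 + (addr & 0x3000);
}

/* Same pattern as move_pte(): return the input unchanged unless it refers
 * to the zero page for old_addr, in which case rebuild it for new_addr. */
#define demo_move_pte(pte, old_addr, new_addr)				\
({									\
	demo_pte newpte = (pte);					\
	if (newpte.val == demo_zero_page_for(old_addr))			\
		newpte.val = demo_zero_page_for(new_addr);		\
	newpte;		/* value of the whole ({ ... }) expression */	\
})

int main(void)
{
	demo_pte pte = { demo_zero_page_for(0x5000) };
	demo_pte moved = demo_move_pte(pte, 0x5000, 0xa000);

	printf("old 0x%lx -> new 0x%lx\n", pte.val, moved.val);
	return 0;
}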