From: Zachary Amsden
Date: Sat, 3 Sep 2005 22:56:50 +0000 (-0700)
Subject: [PATCH] i386: encapsulate copying of pgd entries
X-Git-Tag: v2.6.14-rc1~844
X-Git-Url: https://err.no/cgi-bin/gitweb.cgi?a=commitdiff_plain;h=d7271b14b2e9e5905aba0fbf5c4dc4f8980c0cb2;p=linux-2.6

[PATCH] i386: encapsulate copying of pgd entries

Add a clone operation for pgd updates.

This helps complete the encapsulation of updates to page tables (or pages
about to become page tables) into accessor functions rather than using
memcpy() to duplicate them.  This is both generally good for consistency
and also necessary for running in a hypervisor which requires explicit
updates to page table entries.

The new function is:

clone_pgd_range(pgd_t *dst, pgd_t *src, int count);

   dst - pointer to pgd range anywhere on a pgd page
   src - ""
   count - the number of pgds to copy.

   dst and src can be on the same page, but the range must not overlap,
   and must not cross a page boundary.

Note that I omitted using this call to copy pgd entries into the software
suspend page root, since this is not technically a live paging structure;
rather, it is used on resume from suspend.  CC'ing Pavel in case he has
any feedback on this.

Thanks to Chris Wright for noticing that this could be more optimal in
PAE compiles by eliminating the memset.

Signed-off-by: Zachary Amsden
Signed-off-by: Andrew Morton
Signed-off-by: Linus Torvalds
---

diff --git a/arch/i386/kernel/smpboot.c b/arch/i386/kernel/smpboot.c
index 8ac8e9fd56..8e950cdff1 100644
--- a/arch/i386/kernel/smpboot.c
+++ b/arch/i386/kernel/smpboot.c
@@ -1017,8 +1017,8 @@ int __devinit smp_prepare_cpu(int cpu)
 	tsc_sync_disabled = 1;
 
 	/* init low mem mapping */
-	memcpy(swapper_pg_dir, swapper_pg_dir + USER_PGD_PTRS,
-			sizeof(swapper_pg_dir[0]) * KERNEL_PGD_PTRS);
+	clone_pgd_range(swapper_pg_dir, swapper_pg_dir + USER_PGD_PTRS,
+			KERNEL_PGD_PTRS);
 	flush_tlb_all();
 	schedule_work(&task);
 	wait_for_completion(&done);
diff --git a/arch/i386/mm/pgtable.c b/arch/i386/mm/pgtable.c
index bd2f7afc7a..dcdce2c6c5 100644
--- a/arch/i386/mm/pgtable.c
+++ b/arch/i386/mm/pgtable.c
@@ -207,19 +207,19 @@ void pgd_ctor(void *pgd, kmem_cache_t *cache, unsigned long unused)
 {
 	unsigned long flags;
 
-	if (PTRS_PER_PMD == 1)
+	if (PTRS_PER_PMD == 1) {
+		memset(pgd, 0, USER_PTRS_PER_PGD*sizeof(pgd_t));
 		spin_lock_irqsave(&pgd_lock, flags);
+	}
 
-	memcpy((pgd_t *)pgd + USER_PTRS_PER_PGD,
+	clone_pgd_range((pgd_t *)pgd + USER_PTRS_PER_PGD,
 			swapper_pg_dir + USER_PTRS_PER_PGD,
-			(PTRS_PER_PGD - USER_PTRS_PER_PGD) * sizeof(pgd_t));
-
+			KERNEL_PGD_PTRS);
 	if (PTRS_PER_PMD > 1)
 		return;
 
 	pgd_list_add(pgd);
 	spin_unlock_irqrestore(&pgd_lock, flags);
-	memset(pgd, 0, USER_PTRS_PER_PGD*sizeof(pgd_t));
 }
 
 /* never called when PTRS_PER_PMD > 1 */
diff --git a/include/asm-i386/pgtable.h b/include/asm-i386/pgtable.h
index d74185aee1..47bc1ffa3d 100644
--- a/include/asm-i386/pgtable.h
+++ b/include/asm-i386/pgtable.h
@@ -277,6 +277,21 @@ static inline void ptep_set_wrprotect(struct mm_struct *mm, unsigned long addr,
 	clear_bit(_PAGE_BIT_RW, &ptep->pte_low);
 }
 
+/*
+ * clone_pgd_range(pgd_t *dst, pgd_t *src, int count);
+ *
+ *  dst - pointer to pgd range anywhere on a pgd page
+ *  src - ""
+ *  count - the number of pgds to copy.
+ *
+ *  dst and src can be on the same page, but the range must not overlap,
+ *  and must not cross a page boundary.
+ */
+static inline void clone_pgd_range(pgd_t *dst, pgd_t *src, int count)
+{
+	memcpy(dst, src, count * sizeof(pgd_t));
+}
+
 /*
  * Macro to mark a page protection value as "uncacheable".  On processors which do not support
  * it, this is a no-op.
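
For illustration only, below is a minimal user-space sketch of the copy
semantics described above.  The pgd_t typedef and the 1024/768 split are
stand-ins assumed for the example (the real values depend on the kernel
configuration); this is not the kernel build, just the behaviour the new
accessor wraps.  The point of routing the copy through an accessor rather
than an open-coded memcpy() is that a hypervisor port can later substitute
a clone_pgd_range() that performs explicit page table update calls.

#include <assert.h>
#include <string.h>

/* Stand-ins for the kernel types/constants -- illustrative assumptions only. */
typedef unsigned long pgd_t;
#define PTRS_PER_PGD    1024
#define USER_PGD_PTRS   768                       /* assumes a 3G/1G split */
#define KERNEL_PGD_PTRS (PTRS_PER_PGD - USER_PGD_PTRS)

/* Same body as the helper added to include/asm-i386/pgtable.h: copy
 * 'count' pgd entries; dst and src may share a page but must not
 * overlap or cross a page boundary. */
static inline void clone_pgd_range(pgd_t *dst, pgd_t *src, int count)
{
	memcpy(dst, src, count * sizeof(pgd_t));
}

int main(void)
{
	static pgd_t pgdir[PTRS_PER_PGD];
	int i;

	/* Fill the kernel slots with dummy entries. */
	for (i = USER_PGD_PTRS; i < PTRS_PER_PGD; i++)
		pgdir[i] = (pgd_t)i;

	/* Analogue of the smpboot.c hunk: mirror the kernel pgds into the
	 * low (user) slots to set up the temporary low memory mapping. */
	clone_pgd_range(pgdir, pgdir + USER_PGD_PTRS, KERNEL_PGD_PTRS);

	assert(pgdir[0] == pgdir[USER_PGD_PTRS]);
	return 0;
}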