From: Richard Purdie
Date: Sat, 30 Dec 2006 15:08:50 +0000 (+0100)
Subject: [ARM] 4078/1: Fix ARM copypage cache coherency problems
X-Git-Tag: v2.6.20-rc4~94^2~3
X-Git-Url: https://err.no/cgi-bin/gitweb.cgi?a=commitdiff_plain;h=1c9d3df5e88ad7db23f5b22f4341c39722a904a4;p=linux-2.6

[ARM] 4078/1: Fix ARM copypage cache coherency problems

If PG_dcache_dirty is set for a page, we need to flush the source page
before performing any copypage operation using a different virtual address.

This fixes the copypage implementations for XScale, StrongARM and ARMv6.

This patch fixes segmentation faults seen in the dynamic linker under
the usage patterns in glibc 2.4/2.5.

Signed-off-by: Richard Purdie
Signed-off-by: Russell King
---

diff --git a/arch/arm/mm/copypage-v4mc.c b/arch/arm/mm/copypage-v4mc.c
index 408b05ae6b..ded0e96d06 100644
--- a/arch/arm/mm/copypage-v4mc.c
+++ b/arch/arm/mm/copypage-v4mc.c
@@ -19,6 +19,7 @@
 #include
 #include
 #include
+#include <asm/cacheflush.h>
 
 #include "mm.h"
 
@@ -69,6 +70,11 @@ mc_copy_user_page(void *from, void *to)
 void v4_mc_copy_user_page(void *kto, const void *kfrom, unsigned long vaddr)
 {
+	struct page *page = virt_to_page(kfrom);
+
+	if (test_and_clear_bit(PG_dcache_dirty, &page->flags))
+		__flush_dcache_page(page_mapping(page), page);
+
 	spin_lock(&minicache_lock);
 
 	set_pte_ext(TOP_PTE(0xffff8000), pfn_pte(__pa(kfrom) >> PAGE_SHIFT,
 		    minicache_pgprot), 0);
diff --git a/arch/arm/mm/copypage-v6.c b/arch/arm/mm/copypage-v6.c
index 865777dec1..3adb79257f 100644
--- a/arch/arm/mm/copypage-v6.c
+++ b/arch/arm/mm/copypage-v6.c
@@ -53,6 +53,10 @@ static void v6_copy_user_page_aliasing(void *kto, const void *kfrom, unsigned lo
 {
 	unsigned int offset = CACHE_COLOUR(vaddr);
 	unsigned long from, to;
+	struct page *page = virt_to_page(kfrom);
+
+	if (test_and_clear_bit(PG_dcache_dirty, &page->flags))
+		__flush_dcache_page(page_mapping(page), page);
 
 	/*
 	 * Discard data in the kernel mapping for the new page.
diff --git a/arch/arm/mm/copypage-xscale.c b/arch/arm/mm/copypage-xscale.c
index aea5da7235..2e455f82a4 100644
--- a/arch/arm/mm/copypage-xscale.c
+++ b/arch/arm/mm/copypage-xscale.c
@@ -19,6 +19,7 @@
 #include
 #include
 #include
+#include <asm/cacheflush.h>
 
 #include "mm.h"
 
@@ -91,6 +92,11 @@ mc_copy_user_page(void *from, void *to)
 void xscale_mc_copy_user_page(void *kto, const void *kfrom, unsigned long vaddr)
 {
+	struct page *page = virt_to_page(kfrom);
+
+	if (test_and_clear_bit(PG_dcache_dirty, &page->flags))
+		__flush_dcache_page(page_mapping(page), page);
+
 	spin_lock(&minicache_lock);
 
 	set_pte_ext(TOP_PTE(COPYPAGE_MINICACHE), pfn_pte(__pa(kfrom) >> PAGE_SHIFT,
 		    minicache_pgprot), 0);
diff --git a/include/asm-arm/cacheflush.h b/include/asm-arm/cacheflush.h
index 378a3a2ce8..d51049522c 100644
--- a/include/asm-arm/cacheflush.h
+++ b/include/asm-arm/cacheflush.h
@@ -355,6 +355,8 @@ extern void flush_ptrace_access(struct vm_area_struct *vma, struct page *page,
  */
 extern void flush_dcache_page(struct page *);
 
+extern void __flush_dcache_page(struct address_space *mapping, struct page *page);
+
 #define flush_dcache_mmap_lock(mapping) \
 	write_lock_irq(&(mapping)->tree_lock)
 #define flush_dcache_mmap_unlock(mapping) \
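
The three copy_user_page() variants touched above all gain the same guard: if a
deferred D-cache flush is still pending for the source page (PG_dcache_dirty
set), write the kernel-mapping alias back before the page is read through a
different virtual address. A minimal standalone sketch of that pattern follows;
it assumes the kernel-side definitions of struct page, virt_to_page(),
page_mapping(), test_and_clear_bit(), PG_dcache_dirty and the
__flush_dcache_page() declaration added by this patch, and the helper and
function names are illustrative only, not part of the patch.

	#include <linux/mm.h>		/* struct page, virt_to_page(), page_mapping() */
	#include <asm/cacheflush.h>	/* __flush_dcache_page(), PG_dcache_dirty */

	/* Illustrative helper: flush a pending deferred D-cache flush for the
	 * source page through its kernel mapping before it is aliased. */
	static inline void flush_src_page_if_dirty(struct page *page)
	{
		if (test_and_clear_bit(PG_dcache_dirty, &page->flags))
			__flush_dcache_page(page_mapping(page), page);
	}

	/* Hypothetical copy_user_page() implementation using the guard. */
	void example_copy_user_page(void *kto, const void *kfrom, unsigned long vaddr)
	{
		flush_src_page_if_dirty(virt_to_page(kfrom));

		/* ... set up the alternate mapping (minicache or coloured
		 * alias) for kfrom and copy the page into kto ... */
	}

Without the flush, the copy reads the source page through an alias that does
not hit the dirty lines belonging to the kernel mapping, so the destination can
end up with stale contents; this is how the dynamic-linker segmentation faults
mentioned in the commit message arise.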