err.no Git - linux-2.6/commitdiff
[PATCH] ARM: Fix delayed dcache flush for ARMv6 non-aliasing caches
author    Russell King <rmk@dyn-67.arm.linux.org.uk>
          Mon, 20 Jun 2005 08:51:03 +0000 (09:51 +0100)
committer Russell King <rmk@dyn-67.arm.linux.org.uk>
          Mon, 20 Jun 2005 08:51:03 +0000 (09:51 +0100)
flush_dcache_page() did nothing for these caches, but since they
suffer from I/D cache coherency issues, we need to ensure that data
is written back to RAM.

Signed-off-by: Russell King <rmk+kernel@arm.linux.org.uk>
arch/arm/mm/fault-armv.c
arch/arm/mm/flush.c
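
For context: ARMv6 caches are VIPT non-aliasing, so there is no cache
colouring to worry about, but the I-cache and D-cache are still not
coherent with each other.  Data written through the kernel mapping must
therefore be cleaned out of the D-cache before userspace maps and, say,
executes the page.  A minimal sketch of the kind of caller the old early
return broke (demo_fill_page() is hypothetical; memcpy(), page_address()
and flush_dcache_page() are the real interfaces):

#include <linux/highmem.h>
#include <linux/string.h>

/* Hypothetical helper: fill a lowmem page cache page with data that
 * userspace will later map and execute. */
static void demo_fill_page(struct page *page, const void *src, size_t len)
{
        /* Writes land in the D-cache via the kernel mapping. */
        memcpy(page_address(page), src, len);

        /*
         * Must push the data out towards RAM.  Before this patch,
         * flush_dcache_page() returned immediately on VIPT
         * non-aliasing caches, so the I-cache could later fetch
         * stale bytes.
         */
        flush_dcache_page(page);
}

The patch below makes flush_dcache_page() either defer the write-back
(via PG_dcache_dirty) or perform it, instead of skipping it outright.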

diff --git a/arch/arm/mm/fault-armv.c b/arch/arm/mm/fault-armv.c
index 01967ddeef53df2cb72bda5d600419e63c392f82..be4ab3d73c91965448f064448136f7c078d9fb30 100644
--- a/arch/arm/mm/fault-armv.c
+++ b/arch/arm/mm/fault-armv.c
@@ -77,9 +77,8 @@ no_pmd:
 }
 
 static void
-make_coherent(struct vm_area_struct *vma, unsigned long addr, struct page *page, int dirty)
+make_coherent(struct address_space *mapping, struct vm_area_struct *vma, unsigned long addr, unsigned long pfn)
 {
-       struct address_space *mapping = page_mapping(page);
        struct mm_struct *mm = vma->vm_mm;
        struct vm_area_struct *mpnt;
        struct prio_tree_iter iter;
@@ -87,9 +86,6 @@ make_coherent(struct vm_area_struct *vma, unsigned long addr, struct page *page,
        pgoff_t pgoff;
        int aliases = 0;
 
-       if (!mapping)
-               return;
-
        pgoff = vma->vm_pgoff + ((addr - vma->vm_start) >> PAGE_SHIFT);
 
        /*
@@ -115,9 +111,11 @@ make_coherent(struct vm_area_struct *vma, unsigned long addr, struct page *page,
        if (aliases)
                adjust_pte(vma, addr);
        else
-               flush_cache_page(vma, addr, page_to_pfn(page));
+               flush_cache_page(vma, addr, pfn);
 }
 
+void __flush_dcache_page(struct address_space *mapping, struct page *page);
+
 /*
  * Take care of architecture specific things when placing a new PTE into
  * a page table, or changing an existing PTE.  Basically, there are two
@@ -134,29 +132,22 @@ make_coherent(struct vm_area_struct *vma, unsigned long addr, struct page *page,
 void update_mmu_cache(struct vm_area_struct *vma, unsigned long addr, pte_t pte)
 {
        unsigned long pfn = pte_pfn(pte);
+       struct address_space *mapping;
        struct page *page;
 
        if (!pfn_valid(pfn))
                return;
+
        page = pfn_to_page(pfn);
-       if (page_mapping(page)) {
+       mapping = page_mapping(page);
+       if (mapping) {
                int dirty = test_and_clear_bit(PG_dcache_dirty, &page->flags);
 
-               if (dirty) {
-                       /*
-                        * This is our first userspace mapping of this page.
-                        * Ensure that the physical page is coherent with
-                        * the kernel mapping.
-                        *
-                        * FIXME: only need to do this on VIVT and aliasing
-                        *        VIPT cache architectures.  We can do that
-                        *        by choosing whether to set this bit...
-                        */
-                       __cpuc_flush_dcache_page(page_address(page));
-               }
+               if (dirty)
+                       __flush_dcache_page(mapping, page);
 
                if (cache_is_vivt())
-                       make_coherent(vma, addr, page, dirty);
+                       make_coherent(mapping, vma, addr, pfn);
        }
 }
 
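The fault-armv.c half above makes update_mmu_cache() the consumer of the
deferred flush: the PG_dcache_dirty bit that flush_dcache_page() sets
while a page is still unmapped is cleared here, on the first user
mapping, and the now-shared __flush_dcache_page() is called with the
mapping so the kernel alias (and, on aliasing VIPT, the user colour) is
written back.  A runnable userspace sketch of that defer/complete
handshake follows; all demo_* names are stand-ins, not kernel
interfaces:

#include <stdatomic.h>
#include <stdbool.h>
#include <stdio.h>

/* Stand-in for the per-page PG_dcache_dirty bit. */
static atomic_bool demo_dcache_dirty;

static void demo_writeback_kernel_alias(void)
{
        puts("D-cache cleaned for the kernel mapping");
}

/* Producer: the kernel wrote the page through its own mapping. */
static void demo_flush_dcache_page(bool page_has_user_mappings)
{
        if (!page_has_user_mappings)
                atomic_store(&demo_dcache_dirty, true); /* defer */
        else
                demo_writeback_kernel_alias();
}

/* Consumer: the first user PTE for the page is being installed. */
static void demo_update_mmu_cache(void)
{
        /* test_and_clear_bit() analogue: flush at most once. */
        if (atomic_exchange(&demo_dcache_dirty, false))
                demo_writeback_kernel_alias();
}

int main(void)
{
        demo_flush_dcache_page(false);  /* no user mappings yet: deferred */
        demo_update_mmu_cache();        /* first mapping: flush completes */
        return 0;
}

The flush.c hunks below supply the other half: the flush itself, split
into the kernel-alias part and the VIVT user-alias walk.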
diff --git a/arch/arm/mm/flush.c b/arch/arm/mm/flush.c
index 4085ed983e46e07c3ac62ba31f8b0e605ee2594c..191788fb18d13a4f3adad73afd92a649c0c7ff21 100644
--- a/arch/arm/mm/flush.c
+++ b/arch/arm/mm/flush.c
@@ -37,13 +37,8 @@ static void flush_pfn_alias(unsigned long pfn, unsigned long vaddr)
 #define flush_pfn_alias(pfn,vaddr)     do { } while (0)
 #endif
 
-static void __flush_dcache_page(struct address_space *mapping, struct page *page)
+void __flush_dcache_page(struct address_space *mapping, struct page *page)
 {
-       struct mm_struct *mm = current->active_mm;
-       struct vm_area_struct *mpnt;
-       struct prio_tree_iter iter;
-       pgoff_t pgoff;
-
        /*
         * Writeback any data associated with the kernel mapping of this
         * page.  This ensures that data in the physical page is mutually
@@ -52,24 +47,21 @@ static void __flush_dcache_page(struct address_space *mapping, struct page *page
        __cpuc_flush_dcache_page(page_address(page));
 
        /*
-        * If there's no mapping pointer here, then this page isn't
-        * visible to userspace yet, so there are no cache lines
-        * associated with any other aliases.
-        */
-       if (!mapping)
-               return;
-
-       /*
-        * This is a page cache page.  If we have a VIPT cache, we
-        * only need to do one flush - which would be at the relevant
+        * If this is a page cache page, and we have an aliasing VIPT cache,
+        * we only need to do one flush - which would be at the relevant
         * userspace colour, which is congruent with page->index.
         */
-       if (cache_is_vipt()) {
-               if (cache_is_vipt_aliasing())
-                       flush_pfn_alias(page_to_pfn(page),
-                                       page->index << PAGE_CACHE_SHIFT);
-               return;
-       }
+       if (mapping && cache_is_vipt_aliasing())
+               flush_pfn_alias(page_to_pfn(page),
+                               page->index << PAGE_CACHE_SHIFT);
+}
+
+static void __flush_dcache_aliases(struct address_space *mapping, struct page *page)
+{
+       struct mm_struct *mm = current->active_mm;
+       struct vm_area_struct *mpnt;
+       struct prio_tree_iter iter;
+       pgoff_t pgoff;
 
        /*
         * There are possible user space mappings of this page:
@@ -116,12 +108,12 @@ void flush_dcache_page(struct page *page)
 {
        struct address_space *mapping = page_mapping(page);
 
-       if (cache_is_vipt_nonaliasing())
-               return;
-
        if (mapping && !mapping_mapped(mapping))
                set_bit(PG_dcache_dirty, &page->flags);
-       else
+       else {
                __flush_dcache_page(mapping, page);
+               if (mapping && cache_is_vivt())
+                       __flush_dcache_aliases(mapping, page);
+       }
 }
 EXPORT_SYMBOL(flush_dcache_page);
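
For reference, the cache-type predicates this patch relies on.  This is
a simplified sketch only: the real macros (include/asm-arm/cacheflush.h
in kernels of this era) are derived from the CPU's cache architecture
rather than from a single variable as shown here.

/* Sketch: one bit per cache model, selected during CPU setup. */
#define DEMO_CACHE_VIVT                 (1 << 0)
#define DEMO_CACHE_VIPT_ALIASING        (1 << 1)
#define DEMO_CACHE_VIPT_NONALIASING     (1 << 2)

extern unsigned int demo_cacheid;       /* hypothetical; set at boot */

#define cache_is_vivt()           (demo_cacheid & DEMO_CACHE_VIVT)
#define cache_is_vipt_aliasing()  (demo_cacheid & DEMO_CACHE_VIPT_ALIASING)
#define cache_is_vipt_nonaliasing() \
                                  (demo_cacheid & DEMO_CACHE_VIPT_NONALIASING)
#define cache_is_vipt()           (cache_is_vipt_aliasing() || \
                                   cache_is_vipt_nonaliasing())

On ARMv6 the cache is VIPT non-aliasing, which is exactly the case the
removed early return in flush_dcache_page() used to mishandle.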