[PATCH] lock PTE before updating it in 440/BookE page fault handler
author Eugene Surovegin <ebs@ebshome.net>
Tue, 28 Mar 2006 18:13:12 +0000 (10:13 -0800)
committer Paul Mackerras <paulus@samba.org>
Wed, 29 Mar 2006 02:44:15 +0000 (13:44 +1100)
Fix the 44x and BookE page fault handler to correctly lock the PTE before
trying to pte_update() it; otherwise the PTE might be swapped out after
the pte_present() check but before the pte_update() call, resulting in a
corrupted PTE. This can happen with preemption enabled under low-memory
conditions.

Signed-off-by: Eugene Surovegin <ebs@ebshome.net>
Signed-off-by: Paul Mackerras <paulus@samba.org>
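
For reference, a minimal sketch (not part of the patch) of the locking pattern
the fix introduces: the PTE is re-checked and updated only while the page-table
lock returned by pte_lockptr() is held, so the page can no longer be swapped
out between the pte_present() check and the pte_update() call. The helper name
lazily_enable_exec() and the -EFAULT fallback are invented for illustration,
and the include list is approximate; get_pteptr(), pte_lockptr(), pte_update(),
_tlbie() and pte_unmap_unlock() are the helpers the patched handlers actually
use (the real handlers inline this sequence and also release mmap_sem before
returning on success).

#include <linux/mm.h>
#include <linux/errno.h>
#include <asm/pgtable.h>
#include <asm/cacheflush.h>
#include <asm/tlbflush.h>

/*
 * Sketch only: the locked sequence from the patched 4xx/Book-E exec-fault
 * path, pulled out into a standalone helper.
 */
static int lazily_enable_exec(struct mm_struct *mm, unsigned long address)
{
	pte_t *ptep = NULL;
	pmd_t *pmdp;

	if (get_pteptr(mm, address, &ptep, &pmdp)) {
		/* Take the page-table lock that guards this PTE ... */
		spinlock_t *ptl = pte_lockptr(mm, pmdp);

		spin_lock(ptl);
		/* ... and only then re-check that the page is still present. */
		if (pte_present(*ptep)) {
			struct page *page = pte_page(*ptep);

			/* Lazy dcache-to-icache flush, once per page. */
			if (!test_bit(PG_arch_1, &page->flags)) {
				flush_dcache_icache_page(page);
				set_bit(PG_arch_1, &page->flags);
			}
			/* Set the HW execute bit and drop the stale TLB entry. */
			pte_update(ptep, 0, _PAGE_HWEXEC);
			_tlbie(address);
			pte_unmap_unlock(ptep, ptl);
			return 0;
		}
		pte_unmap_unlock(ptep, ptl);
	}
	return -EFAULT;		/* no usable PTE; caller falls back */
}

Callers that only need the PTE pointer, such as iopa(), can pass NULL for the
new pmdp argument, as the pgtable hunks below show.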
arch/powerpc/mm/fault.c
arch/powerpc/mm/pgtable_32.c
arch/ppc/mm/fault.c
arch/ppc/mm/pgtable.c
include/asm-ppc/pgtable.h

diff --git a/arch/powerpc/mm/fault.c b/arch/powerpc/mm/fault.c
index ec4adcb4bc28c6192590b901aa2c3a04d0792bdf..5aea0909a5ec91d3fd7a2cca3c8692db3cb55490 100644
--- a/arch/powerpc/mm/fault.c
+++ b/arch/powerpc/mm/fault.c
@@ -267,25 +267,29 @@ good_area:
 #endif
 #if defined(CONFIG_4xx) || defined(CONFIG_BOOKE)
                pte_t *ptep;
+               pmd_t *pmdp;
 
                /* Since 4xx/Book-E supports per-page execute permission,
                 * we lazily flush dcache to icache. */
                ptep = NULL;
-               if (get_pteptr(mm, address, &ptep) && pte_present(*ptep)) {
-                       struct page *page = pte_page(*ptep);
-
-                       if (! test_bit(PG_arch_1, &page->flags)) {
-                               flush_dcache_icache_page(page);
-                               set_bit(PG_arch_1, &page->flags);
+               if (get_pteptr(mm, address, &ptep, &pmdp)) {
+                       spinlock_t *ptl = pte_lockptr(mm, pmdp);
+                       spin_lock(ptl);
+                       if (pte_present(*ptep)) {
+                               struct page *page = pte_page(*ptep);
+
+                               if (!test_bit(PG_arch_1, &page->flags)) {
+                                       flush_dcache_icache_page(page);
+                                       set_bit(PG_arch_1, &page->flags);
+                               }
+                               pte_update(ptep, 0, _PAGE_HWEXEC);
+                               _tlbie(address);
+                               pte_unmap_unlock(ptep, ptl);
+                               up_read(&mm->mmap_sem);
+                               return 0;
                        }
-                       pte_update(ptep, 0, _PAGE_HWEXEC);
-                       _tlbie(address);
-                       pte_unmap(ptep);
-                       up_read(&mm->mmap_sem);
-                       return 0;
+                       pte_unmap_unlock(ptep, ptl);
                }
-               if (ptep != NULL)
-                       pte_unmap(ptep);
 #endif
        /* a write */
        } else if (is_write) {
diff --git a/arch/powerpc/mm/pgtable_32.c b/arch/powerpc/mm/pgtable_32.c
index d296eb6b45450939335575c2b307d96d5cca6ec2..90628601fac73ad59a0b4c92b15b008b96a4ddd8 100644
--- a/arch/powerpc/mm/pgtable_32.c
+++ b/arch/powerpc/mm/pgtable_32.c
@@ -372,7 +372,7 @@ void __init io_block_mapping(unsigned long virt, phys_addr_t phys,
  * the PTE pointer is unmodified if PTE is not found.
  */
 int
-get_pteptr(struct mm_struct *mm, unsigned long addr, pte_t **ptep)
+get_pteptr(struct mm_struct *mm, unsigned long addr, pte_t **ptep, pmd_t **pmdp)
 {
         pgd_t  *pgd;
         pmd_t  *pmd;
@@ -387,6 +387,8 @@ get_pteptr(struct mm_struct *mm, unsigned long addr, pte_t **ptep)
                         if (pte) {
                                retval = 1;
                                *ptep = pte;
+                               if (pmdp)
+                                       *pmdp = pmd;
                                /* XXX caller needs to do pte_unmap, yuck */
                         }
                 }
@@ -424,7 +426,7 @@ unsigned long iopa(unsigned long addr)
                mm = &init_mm;
 
        pa = 0;
-       if (get_pteptr(mm, addr, &pte)) {
+       if (get_pteptr(mm, addr, &pte, NULL)) {
                pa = (pte_val(*pte) & PAGE_MASK) | (addr & ~PAGE_MASK);
                pte_unmap(pte);
        }
diff --git a/arch/ppc/mm/fault.c b/arch/ppc/mm/fault.c
index 0217188ef4656a44a37fedf98d06c38f34037dcf..8e08ca32531a878d4b450910ab437c4125cdf6dc 100644
--- a/arch/ppc/mm/fault.c
+++ b/arch/ppc/mm/fault.c
@@ -202,6 +202,7 @@ good_area:
        /* an exec  - 4xx/Book-E allows for per-page execute permission */
        } else if (TRAP(regs) == 0x400) {
                pte_t *ptep;
+               pmd_t *pmdp;
 
 #if 0
                /* It would be nice to actually enforce the VM execute
@@ -215,21 +216,24 @@ good_area:
                /* Since 4xx/Book-E supports per-page execute permission,
                 * we lazily flush dcache to icache. */
                ptep = NULL;
-               if (get_pteptr(mm, address, &ptep) && pte_present(*ptep)) {
-                       struct page *page = pte_page(*ptep);
-
-                       if (! test_bit(PG_arch_1, &page->flags)) {
-                               flush_dcache_icache_page(page);
-                               set_bit(PG_arch_1, &page->flags);
+               if (get_pteptr(mm, address, &ptep, &pmdp)) {
+                       spinlock_t *ptl = pte_lockptr(mm, pmdp);
+                       spin_lock(ptl);
+                       if (pte_present(*ptep)) {
+                               struct page *page = pte_page(*ptep);
+
+                               if (!test_bit(PG_arch_1, &page->flags)) {
+                                       flush_dcache_icache_page(page);
+                                       set_bit(PG_arch_1, &page->flags);
+                               }
+                               pte_update(ptep, 0, _PAGE_HWEXEC);
+                               _tlbie(address);
+                               pte_unmap_unlock(ptep, ptl);
+                               up_read(&mm->mmap_sem);
+                               return 0;
                        }
-                       pte_update(ptep, 0, _PAGE_HWEXEC);
-                       _tlbie(address);
-                       pte_unmap(ptep);
-                       up_read(&mm->mmap_sem);
-                       return 0;
+                       pte_unmap_unlock(ptep, ptl);
                }
-               if (ptep != NULL)
-                       pte_unmap(ptep);
 #endif
        /* a read */
        } else {
diff --git a/arch/ppc/mm/pgtable.c b/arch/ppc/mm/pgtable.c
index a1924876cad69edd0d2f057f8837ec3650815685..706bca8eb1448f826ff5ddab5874e8858966870e 100644
--- a/arch/ppc/mm/pgtable.c
+++ b/arch/ppc/mm/pgtable.c
@@ -368,7 +368,7 @@ void __init io_block_mapping(unsigned long virt, phys_addr_t phys,
  * the PTE pointer is unmodified if PTE is not found.
  */
 int
-get_pteptr(struct mm_struct *mm, unsigned long addr, pte_t **ptep)
+get_pteptr(struct mm_struct *mm, unsigned long addr, pte_t **ptep, pmd_t **pmdp)
 {
         pgd_t  *pgd;
         pmd_t  *pmd;
@@ -383,6 +383,8 @@ get_pteptr(struct mm_struct *mm, unsigned long addr, pte_t **ptep)
                         if (pte) {
                                retval = 1;
                                *ptep = pte;
+                               if (pmdp)
+                                       *pmdp = pmd;
                                /* XXX caller needs to do pte_unmap, yuck */
                         }
                 }
@@ -420,7 +422,7 @@ unsigned long iopa(unsigned long addr)
                mm = &init_mm;
 
        pa = 0;
-       if (get_pteptr(mm, addr, &pte)) {
+       if (get_pteptr(mm, addr, &pte, NULL)) {
                pa = (pte_val(*pte) & PAGE_MASK) | (addr & ~PAGE_MASK);
                pte_unmap(pte);
        }
diff --git a/include/asm-ppc/pgtable.h b/include/asm-ppc/pgtable.h
index e1c62da12e74a5e3cc58fca1b486599e1b5302b7..570b355162fae0bba5247e84c01407fe55e4ab88 100644
--- a/include/asm-ppc/pgtable.h
+++ b/include/asm-ppc/pgtable.h
@@ -837,7 +837,8 @@ static inline int io_remap_pfn_range(struct vm_area_struct *vma,
  */
 #define pgtable_cache_init()   do { } while (0)
 
-extern int get_pteptr(struct mm_struct *mm, unsigned long addr, pte_t **ptep);
+extern int get_pteptr(struct mm_struct *mm, unsigned long addr, pte_t **ptep,
+                     pmd_t **pmdp);
 
 #include <asm-generic/pgtable.h>