X-Git-Url: https://err.no/cgi-bin/gitweb.cgi?a=blobdiff_plain;f=mm%2Fmsync.c;h=0e040e9c39d835a7cf938468be6a9b7bccb95c2d;hb=664beed0190fae687ac51295694004902ddeb18e;hp=3b5f1c521d4b580896504d31778441c10d25531a;hpb=0c942a4539c09adf09097315cc174aefd0eeedf7;p=linux-2.6

diff --git a/mm/msync.c b/mm/msync.c
index 3b5f1c521d..0e040e9c39 100644
--- a/mm/msync.c
+++ b/mm/msync.c
@@ -17,27 +17,22 @@
 #include <asm/pgtable.h>
 #include <asm/tlbflush.h>
 
-/*
- * Called with mm->page_table_lock held to protect against other
- * threads/the swapper from ripping pte's out from under us.
- */
-
 static void msync_pte_range(struct vm_area_struct *vma, pmd_t *pmd,
 				unsigned long addr, unsigned long end)
 {
 	pte_t *pte;
+	spinlock_t *ptl;
 	int progress = 0;
 
 again:
-	pte = pte_offset_map(pmd, addr);
+	pte = pte_offset_map_lock(vma->vm_mm, pmd, addr, &ptl);
 	do {
 		unsigned long pfn;
 		struct page *page;
 
 		if (progress >= 64) {
 			progress = 0;
-			if (need_resched() ||
-			    need_lockbreak(&vma->vm_mm->page_table_lock))
+			if (need_resched() || need_lockbreak(ptl))
 				break;
 		}
 		progress++;
@@ -46,19 +41,19 @@ again:
 		if (!pte_maybe_dirty(*pte))
 			continue;
 		pfn = pte_pfn(*pte);
-		if (!pfn_valid(pfn))
+		if (unlikely(!pfn_valid(pfn))) {
+			print_bad_pte(vma, *pte, addr);
 			continue;
+		}
 		page = pfn_to_page(pfn);
-		if (PageReserved(page))
-			continue;
 
 		if (ptep_clear_flush_dirty(vma, addr, pte) ||
 		    page_test_and_clear_dirty(page))
 			set_page_dirty(page);
 		progress += 3;
 	} while (pte++, addr += PAGE_SIZE, addr != end);
-	pte_unmap(pte - 1);
-	cond_resched_lock(&vma->vm_mm->page_table_lock);
+	pte_unmap_unlock(pte - 1, ptl);
+	cond_resched();
 	if (addr != end)
 		goto again;
 }
@@ -96,27 +91,26 @@ static inline void msync_pud_range(struct vm_area_struct *vma, pgd_t *pgd,
 static void msync_page_range(struct vm_area_struct *vma,
 			unsigned long addr, unsigned long end)
 {
-	struct mm_struct *mm = vma->vm_mm;
 	pgd_t *pgd;
 	unsigned long next;
 
 	/* For hugepages we can't go walking the page table normally,
 	 * but that's ok, hugetlbfs is memory based, so we don't need
-	 * to do anything more on an msync() */
-	if (is_vm_hugetlb_page(vma))
+	 * to do anything more on an msync().
+	 * Can't do anything with VM_RESERVED regions either.
+	 */
+	if (vma->vm_flags & (VM_HUGETLB|VM_RESERVED))
 		return;
 
 	BUG_ON(addr >= end);
-	pgd = pgd_offset(mm, addr);
+	pgd = pgd_offset(vma->vm_mm, addr);
 	flush_cache_range(vma, addr, end);
-	spin_lock(&mm->page_table_lock);
 	do {
 		next = pgd_addr_end(addr, end);
 		if (pgd_none_or_clear_bad(pgd))
 			continue;
 		msync_pud_range(vma, pgd, addr, next);
 	} while (pgd++, addr = next, addr != end);
-	spin_unlock(&mm->page_table_lock);
 }
 
 /*
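
For context: the conversion above follows the split page table lock pattern,
where pte_offset_map_lock() maps the pte page and takes whichever spinlock
covers it, and pte_unmap_unlock() releases both in one call. A minimal sketch
of that walk shape against the 2.6.15-era API (example_pte_range() is a
hypothetical name for illustration, not code from this patch):

/*
 * Illustrative sketch only: the general shape of a pte-range walk
 * under the split ptlock, as adopted by msync_pte_range() above.
 */
static void example_pte_range(struct vm_area_struct *vma, pmd_t *pmd,
				unsigned long addr, unsigned long end)
{
	spinlock_t *ptl;
	pte_t *pte;

	/* Map the pte page and take the lock guarding it (the mm-wide
	 * lock or a per-page-table lock, depending on configuration). */
	pte = pte_offset_map_lock(vma->vm_mm, pmd, addr, &ptl);
	do {
		if (pte_none(*pte))
			continue;
		/* ... inspect or update one pte while ptl is held ... */
	} while (pte++, addr += PAGE_SIZE, addr != end);
	/* Drop the lock and unmap the pte page in one call ... */
	pte_unmap_unlock(pte - 1, ptl);
	/* ... so a plain cond_resched() suffices, with no lock held. */
	cond_resched();
}

Because the lock is released before rescheduling, plain cond_resched()
replaces cond_resched_lock(&mm->page_table_lock) in the patch, and the
lock-break check goes against the returned ptl rather than the mm-wide lock.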