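This blobdiff of mm/memory.c shows two related changes. First, free_pgd_range() loses its static inline qualifier and now takes struct mmu_gather **, so that free_pgtables() can route hugepage-only ranges to hugetlb_free_pgd_range() while still gathering ordinary nearby vmas into a single call down. Second, do_swap_page() gains a PageUptodate() check after revalidating the pte under page_table_lock, returning VM_FAULT_SIGBUS if the swapped-in page is not up to date, with both failure paths funneled through a new out_nomap label that drops the pte map, the page_table_lock, the page lock, and the page reference in one place.
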
diff --git a/mm/memory.c b/mm/memory.c
index 854bd90eeca1928b15dfb61088dd3d13360cb373..d209f745db7fbc3154e83cf04666770068986724 100644
--- a/mm/memory.c
+++ b/mm/memory.c
@@ -190,7 +190,7 @@ static inline void free_pud_range(struct mmu_gather *tlb, pgd_t *pgd,
  *
  * Must be called with pagetable lock held.
  */
-static inline void free_pgd_range(struct mmu_gather *tlb,
+void free_pgd_range(struct mmu_gather **tlb,
                        unsigned long addr, unsigned long end,
                        unsigned long floor, unsigned long ceiling)
 {
@@ -241,37 +241,47 @@ static inline void free_pgd_range(struct mmu_gather *tlb,
                return;
 
        start = addr;
-       pgd = pgd_offset(tlb->mm, addr);
+       pgd = pgd_offset((*tlb)->mm, addr);
        do {
                next = pgd_addr_end(addr, end);
                if (pgd_none_or_clear_bad(pgd))
                        continue;
-               free_pud_range(tlb, pgd, addr, next, floor, ceiling);
+               free_pud_range(*tlb, pgd, addr, next, floor, ceiling);
        } while (pgd++, addr = next, addr != end);
 
-       if (!tlb_is_full_mm(tlb))
-               flush_tlb_pgtables(tlb->mm, start, end);
+       if (!tlb_is_full_mm(*tlb))
+               flush_tlb_pgtables((*tlb)->mm, start, end);
 }
 
 void free_pgtables(struct mmu_gather **tlb, struct vm_area_struct *vma,
-                               unsigned long floor, unsigned long ceiling)
+               unsigned long floor, unsigned long ceiling)
 {
        while (vma) {
                struct vm_area_struct *next = vma->vm_next;
                unsigned long addr = vma->vm_start;
 
-               /* Optimization: gather nearby vmas into a single call down */
-               while (next && next->vm_start <= vma->vm_end + PMD_SIZE) {
-                       vma = next;
-                       next = vma->vm_next;
-               }
-               free_pgd_range(*tlb, addr, vma->vm_end,
+               if (is_hugepage_only_range(vma->vm_mm, addr, HPAGE_SIZE)) {
+                       hugetlb_free_pgd_range(tlb, addr, vma->vm_end,
                                floor, next? next->vm_start: ceiling);
+               } else {
+                       /*
+                        * Optimization: gather nearby vmas into one call down
+                        */
+                       while (next && next->vm_start <= vma->vm_end + PMD_SIZE
+                         && !is_hugepage_only_range(vma->vm_mm, next->vm_start,
+                                                       HPAGE_SIZE)) {
+                               vma = next;
+                               next = vma->vm_next;
+                       }
+                       free_pgd_range(tlb, addr, vma->vm_end,
+                               floor, next? next->vm_start: ceiling);
+               }
                vma = next;
        }
 }
 
-pte_t fastcall * pte_alloc_map(struct mm_struct *mm, pmd_t *pmd, unsigned long address)
+pte_t fastcall *pte_alloc_map(struct mm_struct *mm, pmd_t *pmd,
+                               unsigned long address)
 {
        if (!pmd_present(*pmd)) {
                struct page *new;
@@ -1691,12 +1701,13 @@ static int do_swap_page(struct mm_struct * mm,
        spin_lock(&mm->page_table_lock);
        page_table = pte_offset_map(pmd, address);
        if (unlikely(!pte_same(*page_table, orig_pte))) {
-               pte_unmap(page_table);
-               spin_unlock(&mm->page_table_lock);
-               unlock_page(page);
-               page_cache_release(page);
                ret = VM_FAULT_MINOR;
-               goto out;
+               goto out_nomap;
+       }
+
+       if (unlikely(!PageUptodate(page))) {
+               ret = VM_FAULT_SIGBUS;
+               goto out_nomap;
        }
 
        /* The page isn't present yet, go ahead with the fault. */
@@ -1731,6 +1742,12 @@ static int do_swap_page(struct mm_struct * mm,
        spin_unlock(&mm->page_table_lock);
 out:
        return ret;
+out_nomap:
+       pte_unmap(page_table);
+       spin_unlock(&mm->page_table_lock);
+       unlock_page(page);
+       page_cache_release(page);
+       goto out;
 }
 
 /*
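
The out_nomap hunk above is an instance of the kernel's usual centralized-unwind idiom: take the locks and references first, then make every later failure jump to one label that releases them in reverse order, so no error path can leak the pte map, the page lock, or the page reference. Below is a minimal user-space sketch of that idiom, not kernel code: toy_swap_fault(), page_cache_get(), the struct page fields, and the VM_FAULT_* values here are all hypothetical stand-ins chosen to mirror the shape of the patched do_swap_page().

#include <stdio.h>
#include <stdlib.h>

/* Toy stand-ins for the resources do_swap_page() juggles. */
struct page { int locked; int refs; int uptodate; };

static struct page *page_cache_get(int uptodate)
{
	struct page *p = malloc(sizeof(*p));
	if (!p)
		return NULL;
	p->locked = 1;		/* acquired locked, like lock_page() */
	p->refs = 1;
	p->uptodate = uptodate;
	return p;
}

static void unlock_page(struct page *p)        { p->locked = 0; }
static void page_cache_release(struct page *p) { if (--p->refs == 0) free(p); }

#define VM_FAULT_MINOR	0	/* illustrative values, not the kernel's */
#define VM_FAULT_SIGBUS	2

/*
 * Every failure after the page is locked and referenced funnels
 * through out_nomap, which unwinds in reverse order of acquisition.
 */
static int toy_swap_fault(int pte_still_valid, int uptodate)
{
	int ret = VM_FAULT_MINOR;
	struct page *page = page_cache_get(uptodate);

	if (!page)
		return VM_FAULT_SIGBUS;	/* nothing acquired, nothing to unwind */

	if (!pte_still_valid) {		/* raced: pte changed under us */
		ret = VM_FAULT_MINOR;
		goto out_nomap;
	}
	if (!page->uptodate) {		/* read from swap failed */
		ret = VM_FAULT_SIGBUS;
		goto out_nomap;
	}

	/* ... success path would install the pte here ... */
	unlock_page(page);
	page_cache_release(page);
out:
	return ret;
out_nomap:
	unlock_page(page);
	page_cache_release(page);
	goto out;
}

int main(void)
{
	printf("race: %d, bad page: %d, ok: %d\n",
	       toy_swap_fault(0, 1), toy_swap_fault(1, 0),
	       toy_swap_fault(1, 1));
	return 0;
}

The payoff of the label-based unwind is visible in the diff itself: the old code repeated the four release calls inline at each early return, and the patch replaces that repetition with one out_nomap block shared by both the pte-race and the !PageUptodate() paths.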