}
/*
- * This function is called to print an error when a pte in a
- * !VM_RESERVED region is found pointing to an invalid pfn (which
- * is an error.
+ * This function is called to print an error when a bad pte
+ * is found. For example, we might have a PFN-mapped pte in
+ * a region that doesn't allow it.
*
* The calling function must still handle the error.
*/
dump_stack();
}
+static inline int is_cow_mapping(unsigned int flags)
+{
+ return (flags & (VM_SHARED | VM_MAYWRITE)) == VM_MAYWRITE;
+}
+
+/*
+ * This function gets the "struct page" associated with a pte.
+ *
+ * NOTE! Some mappings do not have "struct pages". A raw PFN mapping
+ * will have each page table entry just pointing to a raw page frame
+ * number, and as far as the VM layer is concerned, those do not have
+ * pages associated with them - even if the PFN might point to memory
+ * that otherwise is perfectly fine and has a "struct page".
+ *
+ * The way we recognize those mappings is through the rules set up
+ * by "remap_pfn_range()": the vma will have the VM_PFNMAP bit set,
+ * and the vm_pgoff will point to the first PFN mapped: thus every
+ * page that is a raw mapping will always honor the rule
+ *
+ * pfn_of_page == vma->vm_pgoff + ((addr - vma->vm_start) >> PAGE_SHIFT)
+ *
+ * and if that isn't true, the page has been COW'ed (in which case it
+ * _does_ have a "struct page" associated with it even if it is in a
+ * VM_PFNMAP range).
+ */
+struct page *vm_normal_page(struct vm_area_struct *vma, unsigned long addr, pte_t pte)
+{
+ unsigned long pfn = pte_pfn(pte);
+
+ if (vma->vm_flags & VM_PFNMAP) {
+ unsigned long off = (addr - vma->vm_start) >> PAGE_SHIFT;
+ if (pfn == vma->vm_pgoff + off)
+ return NULL;
+ if (!is_cow_mapping(vma->vm_flags))
+ return NULL;
+ }
+
+ /*
+ * Add some anal sanity checks for now. Eventually,
+ * we should just do "return pfn_to_page(pfn)", but
+ * in the meantime we check that we get a valid pfn,
+ * and that the resulting page looks ok.
+ *
+ * Remove this test eventually!
+ */
+ if (unlikely(!pfn_valid(pfn))) {
+ print_bad_pte(vma, pte, addr);
+ return NULL;
+ }
+
+ /*
+ * NOTE! We still have PageReserved() pages in the page
+ * tables.
+ *
+ * The ZERO_PAGE() pages and various VDSO mappings can
+ * cause them to exist.
+ */
+ return pfn_to_page(pfn);
+}
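/*
 * Illustrative sketch (not part of the patch): the caller pattern the
 * conversions below follow.  Instead of checking pfn_valid() by hand,
 * a page-table walker asks vm_normal_page() and treats a NULL return
 * as "no struct page to touch" - either a raw PFN mapping or a bad pte
 * that has already been reported.  "example_walker" is a made-up name,
 * not a kernel function.
 */
static void example_walker(struct vm_area_struct *vma, unsigned long addr,
			   pte_t *ptep)
{
	pte_t pte = *ptep;
	struct page *page;

	if (!pte_present(pte))
		return;
	page = vm_normal_page(vma, addr, pte);
	if (!page)
		return;			/* nothing to ref, rmap or account */
	get_page(page);
	/* ... operate on the page, then drop the reference ... */
	put_page(page);
}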
+
/*
* copy one vm_area from one task to the other. Assumes the page tables
* already present in the new task to be cleared in the whole range
unsigned long vm_flags = vma->vm_flags;
pte_t pte = *src_pte;
struct page *page;
- unsigned long pfn;
/* pte contains position in swap or file, so copy. */
if (unlikely(!pte_present(pte))) {
goto out_set_pte;
}
- /* If the region is VM_RESERVED, the mapping is not
- * mapped via rmap - duplicate the pte as is.
- */
- if (vm_flags & VM_RESERVED)
- goto out_set_pte;
-
- pfn = pte_pfn(pte);
- /* If the pte points outside of valid memory but
- * the region is not VM_RESERVED, we have a problem.
- */
- if (unlikely(!pfn_valid(pfn))) {
- print_bad_pte(vma, pte, addr);
- goto out_set_pte; /* try to do something sane */
- }
-
- page = pfn_to_page(pfn);
-
/*
* If it's a COW mapping, write protect it both
* in the parent and the child
*/
- if ((vm_flags & (VM_SHARED | VM_MAYWRITE)) == VM_MAYWRITE) {
+ if (is_cow_mapping(vm_flags)) {
ptep_set_wrprotect(src_mm, addr, src_pte);
pte = *src_pte;
}
if (vm_flags & VM_SHARED)
pte = pte_mkclean(pte);
pte = pte_mkold(pte);
- get_page(page);
- page_dup_rmap(page);
- rss[!!PageAnon(page)]++;
+
+ page = vm_normal_page(vma, addr, pte);
+ if (page) {
+ get_page(page);
+ page_dup_rmap(page);
+ rss[!!PageAnon(page)]++;
+ }
out_set_pte:
set_pte_at(dst_mm, addr, dst_pte, pte);
* readonly mappings. The tradeoff is that copy_page_range is more
* efficient than faulting.
*/
- if (!(vma->vm_flags & (VM_HUGETLB|VM_NONLINEAR|VM_RESERVED))) {
+ if (!(vma->vm_flags & (VM_HUGETLB|VM_NONLINEAR|VM_PFNMAP|VM_INSERTPAGE))) {
if (!vma->anon_vma)
return 0;
}
continue;
}
if (pte_present(ptent)) {
- struct page *page = NULL;
+ struct page *page;
(*zap_work) -= PAGE_SIZE;
- if (!(vma->vm_flags & VM_RESERVED)) {
- unsigned long pfn = pte_pfn(ptent);
- if (unlikely(!pfn_valid(pfn)))
- print_bad_pte(vma, ptent, addr);
- else
- page = pfn_to_page(pfn);
- }
+ page = vm_normal_page(vma, addr, ptent);
if (unlikely(details) && page) {
/*
* unmap_shared_mapping_pages() wants to
/*
* Do a quick page-table lookup for a single page.
*/
-struct page *follow_page(struct mm_struct *mm, unsigned long address,
+struct page *follow_page(struct vm_area_struct *vma, unsigned long address,
unsigned int flags)
{
pgd_t *pgd;
pmd_t *pmd;
pte_t *ptep, pte;
spinlock_t *ptl;
- unsigned long pfn;
struct page *page;
+ struct mm_struct *mm = vma->vm_mm;
page = follow_huge_addr(mm, address, flags & FOLL_WRITE);
if (!IS_ERR(page)) {
goto unlock;
if ((flags & FOLL_WRITE) && !pte_write(pte))
goto unlock;
- pfn = pte_pfn(pte);
- if (!pfn_valid(pfn))
+ page = vm_normal_page(vma, address, pte);
+ if (unlikely(!page))
goto unlock;
- page = pfn_to_page(pfn);
if (flags & FOLL_GET)
get_page(page);
if (flags & FOLL_TOUCH) {
return i ? : -EFAULT;
}
if (pages) {
- pages[i] = pte_page(*pte);
- get_page(pages[i]);
+ struct page *page = vm_normal_page(gate_vma, start, *pte);
+ pages[i] = page;
+ if (page)
+ get_page(page);
}
pte_unmap(pte);
if (vmas)
continue;
}
- if (!vma || (vma->vm_flags & (VM_IO | VM_RESERVED))
+ if (!vma || (vma->vm_flags & (VM_IO | VM_PFNMAP))
|| !(vm_flags & vma->vm_flags))
return i ? : -EFAULT;
foll_flags |= FOLL_WRITE;
cond_resched();
- while (!(page = follow_page(mm, start, foll_flags))) {
+ while (!(page = follow_page(vma, start, foll_flags))) {
int ret;
ret = __handle_mm_fault(mm, vma, start,
foll_flags & FOLL_WRITE);
return err;
}
+pte_t * fastcall get_locked_pte(struct mm_struct *mm, unsigned long addr, spinlock_t **ptl)
+{
+ pgd_t * pgd = pgd_offset(mm, addr);
+ pud_t * pud = pud_alloc(mm, pgd, addr);
+ if (pud) {
+ pmd_t * pmd = pmd_alloc(mm, pud, addr);
+ if (pmd)
+ return pte_alloc_map_lock(mm, pmd, addr, ptl);
+ }
+ return NULL;
+}
+
+/*
+ * This is the old fallback for page remapping.
+ *
+ * For historical reasons, it only allows reserved pages. Only
+ * old drivers should use this, and they needed to mark their
+ * pages reserved for the old functions anyway.
+ */
+static int insert_page(struct mm_struct *mm, unsigned long addr, struct page *page, pgprot_t prot)
+{
+ int retval;
+ pte_t *pte;
+ spinlock_t *ptl;
+
+ retval = -EINVAL;
+ if (PageAnon(page))
+ goto out;
+ retval = -ENOMEM;
+ flush_dcache_page(page);
+ pte = get_locked_pte(mm, addr, &ptl);
+ if (!pte)
+ goto out;
+ retval = -EBUSY;
+ if (!pte_none(*pte))
+ goto out_unlock;
+
+ /* Ok, finally just insert the thing.. */
+ get_page(page);
+ inc_mm_counter(mm, file_rss);
+ page_add_file_rmap(page);
+ set_pte_at(mm, addr, pte, mk_pte(page, prot));
+
+ retval = 0;
+out_unlock:
+ pte_unmap_unlock(pte, ptl);
+out:
+ return retval;
+}
+
+/*
+ * This allows drivers to insert individual pages they've allocated
+ * into a user vma.
+ *
+ * The page has to be a nice clean _individual_ kernel allocation.
+ * If you allocate a compound page, you need to have marked it as
+ * such (__GFP_COMP), or manually just split the page up yourself
+ * (which is mainly an issue of doing "set_page_count(page, 1)" for
+ * each sub-page, and then freeing them one by one when you free
+ * them rather than freeing it as a compound page).
+ *
+ * NOTE! Traditionally this was done with "remap_pfn_range()" which
+ * took an arbitrary page protection parameter. This doesn't allow
+ * that. Your vma protection will have to be set up correctly, which
+ * means that if you want a shared writable mapping, you'd better
+ * ask for a shared writable mapping!
+ *
+ * The page does not need to be reserved.
+ */
+int vm_insert_page(struct vm_area_struct *vma, unsigned long addr, struct page *page)
+{
+ if (addr < vma->vm_start || addr >= vma->vm_end)
+ return -EFAULT;
+ if (!page_count(page))
+ return -EINVAL;
+ vma->vm_flags |= VM_INSERTPAGE;
+ return insert_page(vma->vm_mm, addr, page, vma->vm_page_prot);
+}
+EXPORT_SYMBOL(vm_insert_page);
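/*
 * Illustrative sketch (not part of the patch): a driver mapping a
 * buffer of individually allocated order-0 pages from its mmap method
 * with vm_insert_page().  "mydrv_pages" and "MYDRV_NPAGES" are
 * hypothetical names for the driver's page array and its size.
 */
static int mydrv_mmap(struct file *file, struct vm_area_struct *vma)
{
	unsigned long addr = vma->vm_start;
	unsigned long npages = (vma->vm_end - vma->vm_start) >> PAGE_SHIFT;
	unsigned long i;
	int err;

	if (npages > MYDRV_NPAGES)
		return -EINVAL;
	for (i = 0; i < npages; i++, addr += PAGE_SIZE) {
		/* each entry is an order-0 page from alloc_page() */
		err = vm_insert_page(vma, addr, mydrv_pages[i]);
		if (err)
			return err;
	}
	return 0;
}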
+
/*
* maps a range of physical memory into the requested pages. the old
* mappings are removed. any references to nonexistent pages results
* rest of the world about it:
* VM_IO tells people not to look at these pages
* (accesses can have side effects).
- * VM_RESERVED tells the core MM not to "manage" these pages
- * (e.g. refcount, mapcount, try to swap them out).
+ * VM_RESERVED is specified all over the place, because
+ * in 2.4 it kept swapout's vma scan off this vma; but
+ * in 2.6 the LRU scan won't even find its pages, so this
+ * flag means no more than count its pages in reserved_vm,
+ * and omit it from core dumps, even when VM_IO is turned off.
+ * VM_PFNMAP tells the core MM that the base pages are just
+ * raw PFN mappings, and do not have a "struct page" associated
+ * with them.
+ *
+ * There's a horrible special case to handle copy-on-write
+ * behaviour that some programs depend on. We mark the "original"
+ * un-COW'ed pages by matching them up with "vma->vm_pgoff".
*/
- vma->vm_flags |= VM_IO | VM_RESERVED;
+ if (is_cow_mapping(vma->vm_flags)) {
+ if (addr != vma->vm_start || end != vma->vm_end)
+ return -EINVAL;
+ vma->vm_pgoff = pfn;
+ }
+
+ vma->vm_flags |= VM_IO | VM_RESERVED | VM_PFNMAP;
BUG_ON(addr >= end);
pfn -= addr >> PAGE_SHIFT;
return pte;
}
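/*
 * Illustrative sketch (not part of the patch): the usual way a driver
 * hands device memory to userspace with remap_pfn_range() from its
 * mmap method, relying on the VM_IO/VM_RESERVED/VM_PFNMAP setup
 * documented above.  "mychar_phys_base" is a hypothetical physical
 * address of the device region.
 */
static int mychar_mmap(struct file *file, struct vm_area_struct *vma)
{
	unsigned long size = vma->vm_end - vma->vm_start;

	return remap_pfn_range(vma, vma->vm_start,
			       mychar_phys_base >> PAGE_SHIFT,
			       size, vma->vm_page_prot);
}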
+static inline void cow_user_page(struct page *dst, struct page *src, unsigned long va)
+{
+ /*
+ * If the source page was a PFN mapping, we don't have
+ * a "struct page" for it. We do a best-effort copy by
+ * just copying from the original user address. If that
+ * fails, we just zero-fill it. Live with it.
+ */
+ if (unlikely(!src)) {
+ void *kaddr = kmap_atomic(dst, KM_USER0);
+ void __user *uaddr = (void __user *)(va & PAGE_MASK);
+
+ /*
+ * This really shouldn't fail, because the page is there
+ * in the page tables. But it might just be unreadable,
+ * in which case we just give up and fill the result with
+ * zeroes.
+ */
+ if (__copy_from_user_inatomic(kaddr, uaddr, PAGE_SIZE))
+ memset(kaddr, 0, PAGE_SIZE);
+ kunmap_atomic(kaddr, KM_USER0);
+ return;
+	}
+ copy_user_highpage(dst, src, va);
+}
+
/*
* This routine handles present pages, when users try to write
* to a shared page. It is done by copying the page to a new address
spinlock_t *ptl, pte_t orig_pte)
{
struct page *old_page, *new_page;
- unsigned long pfn = pte_pfn(orig_pte);
pte_t entry;
int ret = VM_FAULT_MINOR;
- BUG_ON(vma->vm_flags & VM_RESERVED);
-
- if (unlikely(!pfn_valid(pfn))) {
- /*
- * Page table corrupted: show pte and kill process.
- */
- print_bad_pte(vma, orig_pte, address);
- ret = VM_FAULT_OOM;
- goto unlock;
- }
- old_page = pfn_to_page(pfn);
+ old_page = vm_normal_page(vma, address, orig_pte);
+ if (!old_page)
+ goto gotten;
if (PageAnon(old_page) && !TestSetPageLocked(old_page)) {
int reuse = can_share_swap_page(old_page);
unlock_page(old_page);
if (reuse) {
- flush_cache_page(vma, address, pfn);
+ flush_cache_page(vma, address, pte_pfn(orig_pte));
entry = pte_mkyoung(orig_pte);
entry = maybe_mkwrite(pte_mkdirty(entry), vma);
ptep_set_access_flags(vma, address, page_table, entry, 1);
* Ok, we need to copy. Oh, well..
*/
page_cache_get(old_page);
+gotten:
pte_unmap_unlock(page_table, ptl);
if (unlikely(anon_vma_prepare(vma)))
new_page = alloc_page_vma(GFP_HIGHUSER, vma, address);
if (!new_page)
goto oom;
- copy_user_highpage(new_page, old_page, address);
+ cow_user_page(new_page, old_page, address);
}
/*
*/
page_table = pte_offset_map_lock(mm, pmd, address, &ptl);
if (likely(pte_same(*page_table, orig_pte))) {
- page_remove_rmap(old_page);
- if (!PageAnon(old_page)) {
+ if (old_page) {
+ page_remove_rmap(old_page);
+ if (!PageAnon(old_page)) {
+ dec_mm_counter(mm, file_rss);
+ inc_mm_counter(mm, anon_rss);
+ }
+ } else
inc_mm_counter(mm, anon_rss);
- dec_mm_counter(mm, file_rss);
- }
- flush_cache_page(vma, address, pfn);
+ flush_cache_page(vma, address, pte_pfn(orig_pte));
entry = mk_pte(new_page, vma->vm_page_prot);
entry = maybe_mkwrite(pte_mkdirty(entry), vma);
ptep_establish(vma, address, page_table, entry);
new_page = old_page;
ret |= VM_FAULT_WRITE;
}
- page_cache_release(new_page);
- page_cache_release(old_page);
+ if (new_page)
+ page_cache_release(new_page);
+ if (old_page)
+ page_cache_release(old_page);
unlock:
pte_unmap_unlock(page_table, ptl);
return ret;
oom:
- page_cache_release(old_page);
+ if (old_page)
+ page_cache_release(old_page);
return VM_FAULT_OOM;
}
out_busy:
return -ETXTBSY;
}
-
EXPORT_SYMBOL(vmtruncate);
+int vmtruncate_range(struct inode *inode, loff_t offset, loff_t end)
+{
+ struct address_space *mapping = inode->i_mapping;
+
+ /*
+ * If the underlying filesystem is not going to provide
+ * a way to truncate a range of blocks (punch a hole) -
+ * we should return failure right now.
+ */
+ if (!inode->i_op || !inode->i_op->truncate_range)
+ return -ENOSYS;
+
+ down(&inode->i_sem);
+ down_write(&inode->i_alloc_sem);
+ unmap_mapping_range(mapping, offset, (end - offset), 1);
+ truncate_inode_pages_range(mapping, offset, end);
+ inode->i_op->truncate_range(inode, offset, end);
+ up_write(&inode->i_alloc_sem);
+ up(&inode->i_sem);
+
+ return 0;
+}
+EXPORT_SYMBOL(vmtruncate_range);
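/*
 * Illustrative sketch (not part of the patch): a filesystem that can
 * punch holes would provide ->truncate_range in its inode_operations
 * so the -ENOSYS check above passes.  The "myfs_*" names are
 * hypothetical; the argument types follow the call made above.
 */
static void myfs_truncate_range(struct inode *inode, loff_t start, loff_t end)
{
	/* release the blocks backing [start, end] and zero partial pages */
}

static struct inode_operations myfs_file_inode_operations = {
	.truncate_range	= myfs_truncate_range,
};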
+
/*
* Primitive swap readahead code. We simply read an aligned block of
* (1 << page_cluster) entries in the swap area. This method is chosen
int anon = 0;
pte_unmap(page_table);
+ BUG_ON(vma->vm_flags & VM_PFNMAP);
if (vma->vm_file) {
mapping = vma->vm_file->f_mapping;
inc_mm_counter(mm, anon_rss);
lru_cache_add_active(new_page);
page_add_anon_rmap(new_page, vma, address);
- } else if (!(vma->vm_flags & VM_RESERVED)) {
+ } else {
inc_mm_counter(mm, file_rss);
page_add_file_rmap(new_page);
}
spin_unlock(&mm->page_table_lock);
return 0;
}
+#else
+/* Workaround for gcc 2.96 */
+int __pud_alloc(struct mm_struct *mm, pgd_t *pgd, unsigned long address)
+{
+ return 0;
+}
#endif /* __PAGETABLE_PUD_FOLDED */
#ifndef __PAGETABLE_PMD_FOLDED
spin_unlock(&mm->page_table_lock);
return 0;
}
+#else
+/* Workaround for gcc 2.96 */
+int __pmd_alloc(struct mm_struct *mm, pud_t *pud, unsigned long address)
+{
+ return 0;
+}
#endif /* __PAGETABLE_PMD_FOLDED */
int make_pages_present(unsigned long addr, unsigned long end)
gate_vma.vm_start = FIXADDR_USER_START;
gate_vma.vm_end = FIXADDR_USER_END;
gate_vma.vm_page_prot = PAGE_READONLY;
- gate_vma.vm_flags = VM_RESERVED;
+ gate_vma.vm_flags = 0;
return 0;
}
__initcall(gate_vma_init);