#include <linux/module.h>
#include <linux/uio.h>
#include <linux/rmap.h>
+#include <linux/sched.h>
#include <asm/tlbflush.h>
-#include "filemap.h"
+
+/*
+ * We use our own zeroed page to avoid interfering with other users
+ * of ZERO_PAGE(), such as /dev/zero.
+ */
+static struct page *__xip_sparse_page;
+
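+/*
+ * Allocate the sparse page lazily on first use.  The spinlock only
+ * serializes the one-time assignment; a caller that loses the race
+ * simply frees its newly allocated page again.
+ */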
+static struct page *xip_sparse_page(void)
+{
+	if (!__xip_sparse_page) {
+		/*
+		 * get_zeroed_page() cannot hand out highmem pages, so
+		 * allocate the page directly and ask for it pre-zeroed.
+		 */
+		struct page *page = alloc_page(GFP_HIGHUSER | __GFP_ZERO);
+
+		if (page) {
+			static DEFINE_SPINLOCK(xip_alloc_lock);
+			spin_lock(&xip_alloc_lock);
+			if (!__xip_sparse_page)
+				__xip_sparse_page = page;
+			else
+				__free_page(page);
+			spin_unlock(&xip_alloc_lock);
+		}
+	}
+ return __xip_sparse_page;
+}
/*
* This is a file read routine for execute in place files, and uses
}
EXPORT_SYMBOL_GPL(xip_file_read);
-ssize_t
-xip_file_sendfile(struct file *in_file, loff_t *ppos,
- size_t count, read_actor_t actor, void *target)
-{
- read_descriptor_t desc;
-
- if (!count)
- return 0;
-
- desc.written = 0;
- desc.count = count;
- desc.arg.data = target;
- desc.error = 0;
-
- do_xip_mapping_read(in_file->f_mapping, &in_file->f_ra, in_file,
- ppos, &desc, actor);
- if (desc.written)
- return desc.written;
- return desc.error;
-}
-EXPORT_SYMBOL_GPL(xip_file_sendfile);
-
/*
* __xip_unmap is invoked from xip_unmap and
* xip_write
*
* This function walks all vmas of the address_space and unmaps the
- * ZERO_PAGE when found at pgoff. Should it go in rmap.c?
+ * __xip_sparse_page when found at pgoff.
*/
static void
__xip_unmap (struct address_space * mapping,
spinlock_t *ptl;
struct page *page;
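+	/*
+	 * If the sparse page was never allocated, no pte can map it,
+	 * so there is nothing to unmap.
+	 */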
+ page = __xip_sparse_page;
+ if (!page)
+ return;
+
spin_lock(&mapping->i_mmap_lock);
vma_prio_tree_foreach(vma, &iter, &mapping->i_mmap, pgoff, pgoff) {
mm = vma->vm_mm;
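+		/* translate pgoff back to a user address inside this vma */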
address = vma->vm_start +
((pgoff - vma->vm_pgoff) << PAGE_SHIFT);
BUG_ON(address < vma->vm_start || address >= vma->vm_end);
- page = ZERO_PAGE(0);
pte = page_check_address(page, mm, address, &ptl);
if (pte) {
/* Nuke the page table entry. */
}
/*
- * xip_nopage() is invoked via the vma operations vector for a
+ * xip_file_fault() is invoked via the vma operations vector for a
* mapped memory region to read in file data during a page fault.
*
- * This function is derived from filemap_nopage, but used for execute in place
+ * This function is derived from filemap_fault, but used for execute in place
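+ *
+ * Returns a VM_FAULT_ code on error; on success it returns 0 with the
+ * referenced page stored in vmf->page.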
*/
-static struct page *
-xip_file_nopage(struct vm_area_struct * area,
- unsigned long address,
- int *type)
+static int xip_file_fault(struct vm_area_struct *area, struct vm_fault *vmf)
{
struct file *file = area->vm_file;
struct address_space *mapping = file->f_mapping;
struct inode *inode = mapping->host;
struct page *page;
- unsigned long size, pgoff, endoff;
+ pgoff_t size;
- pgoff = ((address - area->vm_start) >> PAGE_CACHE_SHIFT)
- + area->vm_pgoff;
- endoff = ((area->vm_end - area->vm_start) >> PAGE_CACHE_SHIFT)
- + area->vm_pgoff;
+ /* XXX: are VM_FAULT_ codes OK? */
size = (i_size_read(inode) + PAGE_CACHE_SIZE - 1) >> PAGE_CACHE_SHIFT;
- if (pgoff >= size) {
- return NULL;
- }
+ if (vmf->pgoff >= size)
+ return VM_FAULT_SIGBUS;
- page = mapping->a_ops->get_xip_page(mapping, pgoff*(PAGE_SIZE/512), 0);
- if (!IS_ERR(page)) {
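+	/* get_xip_page() takes its block number in 512-byte sector units */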
+ page = mapping->a_ops->get_xip_page(mapping,
+ vmf->pgoff*(PAGE_SIZE/512), 0);
+ if (!IS_ERR(page))
goto out;
- }
if (PTR_ERR(page) != -ENODATA)
- return NULL;
+ return VM_FAULT_OOM;
/* sparse block */
if ((area->vm_flags & (VM_WRITE | VM_MAYWRITE)) &&
(area->vm_flags & (VM_SHARED| VM_MAYSHARE)) &&
(!(mapping->host->i_sb->s_flags & MS_RDONLY))) {
/* maybe shared writable, allocate new block */
- page = mapping->a_ops->get_xip_page (mapping,
- pgoff*(PAGE_SIZE/512), 1);
+ page = mapping->a_ops->get_xip_page(mapping,
+ vmf->pgoff*(PAGE_SIZE/512), 1);
if (IS_ERR(page))
- return NULL;
+ return VM_FAULT_SIGBUS;
/* unmap page at pgoff from all other vmas */
- __xip_unmap(mapping, pgoff);
+ __xip_unmap(mapping, vmf->pgoff);
} else {
- /* not shared and writable, use ZERO_PAGE() */
- page = ZERO_PAGE(0);
+		/* mapping is not shared-and-writable; use the common sparse page */
+ page = xip_sparse_page();
+ if (!page)
+ return VM_FAULT_OOM;
}
out:
page_cache_get(page);
- return page;
+ vmf->page = page;
+ return 0;
}
static struct vm_operations_struct xip_file_vm_ops = {
- .nopage = xip_file_nopage,
+ .fault = xip_file_fault,
};
int xip_file_mmap(struct file * file, struct vm_area_struct * vma)
file_accessed(file);
vma->vm_ops = &xip_file_vm_ops;
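+	/* ->fault uses vmf->pgoff, so nonlinear mappings work too */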
+ vma->vm_flags |= VM_CAN_NONLINEAR;
return 0;
}
EXPORT_SYMBOL_GPL(xip_file_mmap);
unsigned long index;
unsigned long offset;
size_t copied;
+ char *kaddr;
offset = (pos & (PAGE_CACHE_SIZE -1)); /* Within page */
index = pos >> PAGE_CACHE_SHIFT;
if (bytes > count)
bytes = count;
- /*
- * Bring in the user page that we will copy from _first_.
- * Otherwise there's a nasty deadlock on copying from the
- * same page as we're writing to, without it being marked
- * up-to-date.
- */
- fault_in_pages_readable(buf, bytes);
-
page = a_ops->get_xip_page(mapping,
index*(PAGE_SIZE/512), 0);
if (IS_ERR(page) && (PTR_ERR(page) == -ENODATA)) {
break;
}
- copied = filemap_copy_from_user(page, offset, buf, bytes);
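+		/*
+		 * Fault in the user page before taking the atomic kmap:
+		 * page faults cannot be serviced while the atomic mapping
+		 * is held, so touching the source first avoids short copies.
+		 */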
+		fault_in_pages_readable(buf, bytes);
+		kaddr = kmap_atomic(page, KM_USER0);
+		copied = bytes -
+			__copy_from_user_inatomic_nocache(kaddr + offset, buf, bytes);
+		kunmap_atomic(kaddr, KM_USER0);
flush_dcache_page(page);
+
if (likely(copied > 0)) {
status = copied;
unsigned blocksize;
unsigned length;
struct page *page;
- void *kaddr;
BUG_ON(!mapping->a_ops->get_xip_page);
else
return PTR_ERR(page);
}
- kaddr = kmap_atomic(page, KM_USER0);
- memset(kaddr + offset, 0, length);
- kunmap_atomic(kaddr, KM_USER0);
-
- flush_dcache_page(page);
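+	/* zero_user_page() kmaps, memsets and flushes the dcache for us */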
+ zero_user_page(page, offset, length, KM_USER0);
return 0;
}
EXPORT_SYMBOL_GPL(xip_truncate_page);