X-Git-Url: https://err.no/cgi-bin/gitweb.cgi?a=blobdiff_plain;f=mm%2Ffilemap_xip.c;h=8c199f537732088310c0e490cebe92c7414076bb;hb=242e54686257493f0b10ac557e730419d9af7d24;hp=7d63acd48817aed4446b161ad2a9e41d98f4b6b5;hpb=ceffc078528befc008c6f2c2c4decda79eabd534;p=linux-2.6
diff --git a/mm/filemap_xip.c b/mm/filemap_xip.c
index 7d63acd488..8c199f5377 100644
--- a/mm/filemap_xip.c
+++ b/mm/filemap_xip.c
@@ -68,13 +68,12 @@ do_xip_mapping_read(struct address_space *mapping,
 	if (unlikely(IS_ERR(page))) {
 		if (PTR_ERR(page) == -ENODATA) {
 			/* sparse */
-			page = virt_to_page(empty_zero_page);
+			page = ZERO_PAGE(0);
 		} else {
 			desc->error = PTR_ERR(page);
 			goto out;
 		}
-	} else
-		BUG_ON(!PageUptodate(page));
+	}
 
 	/* If users can be writing to this page using arbitrary
 	 * virtual addresses, take care about potential aliasing
@@ -84,8 +83,7 @@ do_xip_mapping_read(struct address_space *mapping,
 		flush_dcache_page(page);
 
 	/*
-	 * Ok, we have the page, and it's up-to-date, so
-	 * now we can copy it to user space...
+	 * Ok, we have the page, so now we can copy it to user space...
 	 *
 	 * The actor routine returns how many bytes were actually used..
 	 * NOTE! This may not be the same as how much of a user buffer
@@ -114,83 +112,28 @@ out:
 	file_accessed(filp);
 }
 
-/*
- * This is the "read()" routine for all filesystems
- * that uses the get_xip_page address space operation.
- */
-static ssize_t
-__xip_file_aio_read(struct kiocb *iocb, const struct iovec *iov,
-		    unsigned long nr_segs, loff_t *ppos)
-{
-	struct file *filp = iocb->ki_filp;
-	ssize_t retval;
-	unsigned long seg;
-	size_t count;
-
-	count = 0;
-	for (seg = 0; seg < nr_segs; seg++) {
-		const struct iovec *iv = &iov[seg];
-
-		/*
-		 * If any segment has a negative length, or the cumulative
-		 * length ever wraps negative then return -EINVAL.
-		 */
-		count += iv->iov_len;
-		if (unlikely((ssize_t)(count|iv->iov_len) < 0))
-			return -EINVAL;
-		if (access_ok(VERIFY_WRITE, iv->iov_base, iv->iov_len))
-			continue;
-		if (seg == 0)
-			return -EFAULT;
-		nr_segs = seg;
-		count -= iv->iov_len;	/* This segment is no good */
-		break;
-	}
-
-	retval = 0;
-	if (count) {
-		for (seg = 0; seg < nr_segs; seg++) {
-			read_descriptor_t desc;
-
-			desc.written = 0;
-			desc.arg.buf = iov[seg].iov_base;
-			desc.count = iov[seg].iov_len;
-			if (desc.count == 0)
-				continue;
-			desc.error = 0;
-			do_xip_mapping_read(filp->f_mapping, &filp->f_ra, filp,
-					    ppos, &desc, file_read_actor);
-			retval += desc.written;
-			if (!retval) {
-				retval = desc.error;
-				break;
-			}
-		}
-	}
-	return retval;
-}
-
 ssize_t
-xip_file_aio_read(struct kiocb *iocb, char __user *buf, size_t count,
-		  loff_t pos)
+xip_file_read(struct file *filp, char __user *buf, size_t len, loff_t *ppos)
 {
-	struct iovec local_iov = { .iov_base = buf, .iov_len = count };
+	read_descriptor_t desc;
 
-	BUG_ON(iocb->ki_pos != pos);
-	return __xip_file_aio_read(iocb, &local_iov, 1, &iocb->ki_pos);
-}
-EXPORT_SYMBOL_GPL(xip_file_aio_read);
+	if (!access_ok(VERIFY_WRITE, buf, len))
+		return -EFAULT;
 
-ssize_t
-xip_file_readv(struct file *filp, const struct iovec *iov,
-	       unsigned long nr_segs, loff_t *ppos)
-{
-	struct kiocb kiocb;
+	desc.written = 0;
+	desc.arg.buf = buf;
+	desc.count = len;
+	desc.error = 0;
 
-	init_sync_kiocb(&kiocb, filp);
-	return __xip_file_aio_read(&kiocb, iov, nr_segs, ppos);
+	do_xip_mapping_read(filp->f_mapping, &filp->f_ra, filp,
+			    ppos, &desc, file_read_actor);
+
+	if (desc.written)
+		return desc.written;
+	else
+		return desc.error;
 }
-EXPORT_SYMBOL_GPL(xip_file_readv);
+EXPORT_SYMBOL_GPL(xip_file_read);
 
 ssize_t
 xip_file_sendfile(struct file *in_file, loff_t *ppos,
@@ -219,7 +162,7 @@ EXPORT_SYMBOL_GPL(xip_file_sendfile);
  * xip_write
  *
  * This function walks all vmas of the address_space and unmaps the
- * empty_zero_page when found at pgoff. Should it go in rmap.c?
+ * ZERO_PAGE when found at pgoff. Should it go in rmap.c?
  */
 static void
 __xip_unmap (struct address_space * mapping,
@@ -242,11 +185,11 @@ __xip_unmap (struct address_space * mapping,
 		 * We need the page_table_lock to protect us from page faults,
 		 * munmap, fork, etc...
 		 */
-		pte = page_check_address(virt_to_page(empty_zero_page), mm,
+		pte = page_check_address(ZERO_PAGE(address), mm,
					 address);
 		if (!IS_ERR(pte)) {
 			/* Nuke the page table entry. */
-			flush_cache_page(vma, address, pte_pfn(pte));
+			flush_cache_page(vma, address, pte_pfn(*pte));
 			pteval = ptep_clear_flush(vma, address, pte);
 			BUG_ON(pte_dirty(pteval));
 			pte_unmap(pte);
@@ -285,7 +228,6 @@ xip_file_nopage(struct vm_area_struct * area,
 
 	page = mapping->a_ops->get_xip_page(mapping, pgoff*(PAGE_SIZE/512), 0);
 	if (!IS_ERR(page)) {
-		BUG_ON(!PageUptodate(page));
 		return page;
 	}
 	if (PTR_ERR(page) != -ENODATA)
@@ -300,12 +242,11 @@ xip_file_nopage(struct vm_area_struct * area,
 						pgoff*(PAGE_SIZE/512), 1);
 		if (IS_ERR(page))
 			return NULL;
-		BUG_ON(!PageUptodate(page));
 		/* unmap page at pgoff from all other vmas */
 		__xip_unmap(mapping, pgoff);
 	} else {
-		/* not shared and writable, use empty_zero_page */
-		page = virt_to_page(empty_zero_page);
+		/* not shared and writable, use ZERO_PAGE() */
+		page = ZERO_PAGE(address);
 	}
 
 	return page;
@@ -326,25 +267,19 @@ int xip_file_mmap(struct file * file, struct vm_area_struct * vma)
 EXPORT_SYMBOL_GPL(xip_file_mmap);
 
 static ssize_t
-do_xip_file_write(struct kiocb *iocb, const struct iovec *iov,
-		  unsigned long nr_segs, loff_t pos, loff_t *ppos,
-		  size_t count)
+__xip_file_write(struct file *filp, const char __user *buf,
+		  size_t count, loff_t pos, loff_t *ppos)
 {
-	struct file *file = iocb->ki_filp;
-	struct address_space * mapping = file->f_mapping;
+	struct address_space * mapping = filp->f_mapping;
 	struct address_space_operations *a_ops = mapping->a_ops;
 	struct inode *inode = mapping->host;
 	long status = 0;
 	struct page *page;
 	size_t bytes;
-	const struct iovec *cur_iov = iov;	/* current iovec */
-	size_t iov_base = 0;			/* offset in the current iovec */
-	char __user *buf;
 	ssize_t written = 0;
 
 	BUG_ON(!mapping->a_ops->get_xip_page);
-	buf = iov->iov_base;
 
 	do {
 		unsigned long index;
 		unsigned long offset;
@@ -365,15 +300,14 @@ do_xip_file_write(struct kiocb *iocb, const struct iovec *iov,
 		fault_in_pages_readable(buf, bytes);
 
 		page = a_ops->get_xip_page(mapping,
-					index*(PAGE_SIZE/512), 0);
+						index*(PAGE_SIZE/512), 0);
 		if (IS_ERR(page) && (PTR_ERR(page) == -ENODATA)) {
 			/* we allocate a new page unmap it */
 			page = a_ops->get_xip_page(mapping,
-						index*(PAGE_SIZE/512), 1);
+							index*(PAGE_SIZE/512), 1);
 			if (!IS_ERR(page))
-			/* unmap page at pgoff from all other vmas */
-			__xip_unmap(mapping, index);
-
+				/* unmap page at pgoff from all other vmas */
+				__xip_unmap(mapping, index);
 		}
 
 		if (IS_ERR(page)) {
@@ -381,14 +315,7 @@ do_xip_file_write(struct kiocb *iocb, const struct iovec *iov,
 			break;
 		}
 
-		BUG_ON(!PageUptodate(page));
-
-		if (likely(nr_segs == 1))
-			copied = filemap_copy_from_user(page, offset,
-							buf, bytes);
-		else
-			copied = filemap_copy_from_user_iovec(page, offset,
-						cur_iov, iov_base, bytes);
+		copied = filemap_copy_from_user(page, offset, buf, bytes);
 		flush_dcache_page(page);
 		if (likely(copied > 0)) {
 			status = copied;
@@ -398,9 +325,6 @@ do_xip_file_write(struct kiocb *iocb, const struct iovec *iov,
 				count -= status;
 				pos += status;
 				buf += status;
-				if (unlikely(nr_segs > 1))
-					filemap_set_next_iovec(&cur_iov,
-							&iov_base, status);
 			}
 		}
 		if (unlikely(copied != bytes))
@@ -422,110 +346,52 @@ do_xip_file_write(struct kiocb *iocb, const struct iovec *iov,
 	return written ? written : status;
 }
 
-static ssize_t
-xip_file_aio_write_nolock(struct kiocb *iocb, const struct iovec *iov,
-			  unsigned long nr_segs, loff_t *ppos)
+ssize_t
+xip_file_write(struct file *filp, const char __user *buf, size_t len,
+	       loff_t *ppos)
 {
-	struct file *file = iocb->ki_filp;
-	struct address_space * mapping = file->f_mapping;
-	size_t ocount;		/* original count */
-	size_t count;		/* after file limit checks */
-	struct inode *inode = mapping->host;
-	unsigned long seg;
-	loff_t pos;
-	ssize_t written;
-	ssize_t err;
+	struct address_space *mapping = filp->f_mapping;
+	struct inode *inode = mapping->host;
+	size_t count;
+	loff_t pos;
+	ssize_t ret;
 
-	ocount = 0;
-	for (seg = 0; seg < nr_segs; seg++) {
-		const struct iovec *iv = &iov[seg];
+	down(&inode->i_sem);
 
-		/*
-		 * If any segment has a negative length, or the cumulative
-		 * length ever wraps negative then return -EINVAL.
-		 */
-		ocount += iv->iov_len;
-		if (unlikely((ssize_t)(ocount|iv->iov_len) < 0))
-			return -EINVAL;
-		if (access_ok(VERIFY_READ, iv->iov_base, iv->iov_len))
-			continue;
-		if (seg == 0)
-			return -EFAULT;
-		nr_segs = seg;
-		ocount -= iv->iov_len;	/* This segment is no good */
-		break;
+	if (!access_ok(VERIFY_READ, buf, len)) {
+		ret=-EFAULT;
+		goto out_up;
 	}
 
-	count = ocount;
 	pos = *ppos;
+	count = len;
 
 	vfs_check_frozen(inode->i_sb, SB_FREEZE_WRITE);
 
-	written = 0;
-
-	err = generic_write_checks(file, &pos, &count, S_ISBLK(inode->i_mode));
-	if (err)
-		goto out;
+	/* We can write back this queue in page reclaim */
+	current->backing_dev_info = mapping->backing_dev_info;
 
+	ret = generic_write_checks(filp, &pos, &count, S_ISBLK(inode->i_mode));
+	if (ret)
+		goto out_backing;
 	if (count == 0)
-		goto out;
+		goto out_backing;
 
-	err = remove_suid(file->f_dentry);
-	if (err)
-		goto out;
+	ret = remove_suid(filp->f_dentry);
+	if (ret)
+		goto out_backing;
 
 	inode_update_time(inode, 1);
 
-	/* use execute in place to copy directly to disk */
-	written = do_xip_file_write (iocb, iov,
-				  nr_segs, pos, ppos, count);
- out:
-	return written ? written : err;
-}
-
-static ssize_t
-__xip_file_write_nolock(struct file *file, const struct iovec *iov,
-			unsigned long nr_segs, loff_t *ppos)
-{
-	struct kiocb kiocb;
-
-	init_sync_kiocb(&kiocb, file);
-	return xip_file_aio_write_nolock(&kiocb, iov, nr_segs, ppos);
-}
-
-ssize_t
-xip_file_aio_write(struct kiocb *iocb, const char __user *buf,
-		   size_t count, loff_t pos)
-{
-	struct file *file = iocb->ki_filp;
-	struct address_space *mapping = file->f_mapping;
-	struct inode *inode = mapping->host;
-	ssize_t ret;
-	struct iovec local_iov = { .iov_base = (void __user *)buf,
-				   .iov_len = count };
+	ret = __xip_file_write (filp, buf, count, pos, ppos);
 
-	BUG_ON(iocb->ki_pos != pos);
-
-	down(&inode->i_sem);
-	ret = xip_file_aio_write_nolock(iocb, &local_iov, 1, &iocb->ki_pos);
-	up(&inode->i_sem);
-	return ret;
-}
-EXPORT_SYMBOL_GPL(xip_file_aio_write);
-
-ssize_t xip_file_writev(struct file *file, const struct iovec *iov,
-			unsigned long nr_segs, loff_t *ppos)
-{
-	struct address_space *mapping = file->f_mapping;
-	struct inode *inode = mapping->host;
-	ssize_t ret;
-
-	down(&inode->i_sem);
-	ret = __xip_file_write_nolock(file, iov, nr_segs, ppos);
+ out_backing:
+	current->backing_dev_info = NULL;
+ out_up:
 	up(&inode->i_sem);
 	return ret;
 }
-EXPORT_SYMBOL_GPL(xip_file_writev);
+EXPORT_SYMBOL_GPL(xip_file_write);
 
 /*
  * truncate a page used for execute in place
@@ -541,7 +407,6 @@ xip_truncate_page(struct address_space *mapping, loff_t from)
 	unsigned length;
 	struct page *page;
 	void *kaddr;
-	int err;
 
 	BUG_ON(!mapping->a_ops->get_xip_page);
 
@@ -556,26 +421,20 @@ xip_truncate_page(struct address_space *mapping, loff_t from)
 
 	page = mapping->a_ops->get_xip_page(mapping,
 					    index*(PAGE_SIZE/512), 0);
-	err = -ENOMEM;
 	if (!page)
-		goto out;
+		return -ENOMEM;
 	if (unlikely(IS_ERR(page))) {
-		if (PTR_ERR(page) == -ENODATA) {
+		if (PTR_ERR(page) == -ENODATA)
 			/* Hole? No need to truncate */
 			return 0;
-		} else {
-			err = PTR_ERR(page);
-			goto out;
-		}
-	} else
-		BUG_ON(!PageUptodate(page));
+		else
+			return PTR_ERR(page);
+	}
 	kaddr = kmap_atomic(page, KM_USER0);
 	memset(kaddr + offset, 0, length);
 	kunmap_atomic(kaddr, KM_USER0);
 
 	flush_dcache_page(page);
-	err = 0;
-out:
-	return err;
+	return 0;
 }
 EXPORT_SYMBOL_GPL(xip_truncate_page);
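
For reference, the visible effect of this patch on callers is that the exported execute-in-place entry points change from the aio/vectored variants (xip_file_aio_read, xip_file_readv, xip_file_aio_write, xip_file_writev) to plain synchronous read/write helpers (xip_file_read, xip_file_write). As a hedged illustration only, and not part of the patch above, a filesystem offering XIP files in this kernel era could wire the new exports into its file_operations roughly as follows; the structure name and the generic helpers chosen here are assumptions for the example, not taken from this diff.

/* Illustrative sketch only: hook the simplified XIP helpers into a fops table. */
static struct file_operations example_xip_file_operations = {
	.llseek		= generic_file_llseek,
	.read		= xip_file_read,	/* simplified synchronous read export */
	.write		= xip_file_write,	/* simplified synchronous write export */
	.mmap		= xip_file_mmap,
	.sendfile	= xip_file_sendfile,
	.open		= generic_file_open,
};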