_enter("{%lu},%lu", dir->i_ino, index);
- page = read_cache_page(dir->i_mapping,index,
- (filler_t *) dir->i_mapping->a_ops->readpage,
- NULL);
+ page = read_mapping_page(dir->i_mapping, index, NULL);
if (!IS_ERR(page)) {
wait_on_page_locked(page);
kmap(page);
int afs_mntpt_check_symlink(struct afs_vnode *vnode)
{
struct page *page;
- filler_t *filler;
size_t size;
char *buf;
int ret;
_enter("{%u,%u}", vnode->fid.vnode, vnode->fid.unique);
/* read the contents of the symlink into the pagecache */
- filler = (filler_t *) AFS_VNODE_TO_I(vnode)->i_mapping->a_ops->readpage;
-
- page = read_cache_page(AFS_VNODE_TO_I(vnode)->i_mapping, 0,
- filler, NULL);
+ page = read_mapping_page(AFS_VNODE_TO_I(vnode)->i_mapping, 0, NULL);
if (IS_ERR(page)) {
ret = PTR_ERR(page);
goto out;
struct page *page = NULL;
size_t size;
char *buf, *devname = NULL, *options = NULL;
- filler_t *filler;
int ret;
kenter("{%s}", mntpt->d_name.name);
goto error;
/* read the contents of the AFS special symlink */
- filler = (filler_t *)mntpt->d_inode->i_mapping->a_ops->readpage;
-
- page = read_cache_page(mntpt->d_inode->i_mapping, 0, filler, NULL);
+ page = read_mapping_page(mntpt->d_inode->i_mapping, 0, NULL);
if (IS_ERR(page)) {
ret = PTR_ERR(page);
goto error;
struct page *page = NULL;
if (blocknr + i < devsize) {
- page = read_cache_page(mapping, blocknr + i,
- (filler_t *)mapping->a_ops->readpage,
- NULL);
+ page = read_mapping_page(mapping, blocknr + i, NULL);
/* synchronous error? */
if (IS_ERR(page))
page = NULL;
static struct page * ext2_get_page(struct inode *dir, unsigned long n)
{
struct address_space *mapping = dir->i_mapping;
- struct page *page = read_cache_page(mapping, n,
- (filler_t*)mapping->a_ops->readpage, NULL);
+ struct page *page = read_mapping_page(mapping, n, NULL);
if (!IS_ERR(page)) {
wait_on_page_locked(page);
kmap(page);
{
struct page * pp;
- pp = read_cache_page(mapping, n,
- (filler_t*)mapping->a_ops->readpage, NULL);
+ pp = read_mapping_page(mapping, n, NULL);
if (!IS_ERR(pp)) {
wait_on_page_locked(pp);
block = off >> PAGE_CACHE_SHIFT;
node->page_offset = off & ~PAGE_CACHE_MASK;
for (i = 0; i < tree->pages_per_bnode; i++) {
- page = read_cache_page(mapping, block++, (filler_t *)mapping->a_ops->readpage, NULL);
+ page = read_mapping_page(mapping, block++, NULL);
if (IS_ERR(page))
goto fail;
if (PageError(page)) {
unlock_new_inode(tree->inode);
mapping = tree->inode->i_mapping;
- page = read_cache_page(mapping, 0, (filler_t *)mapping->a_ops->readpage, NULL);
+ page = read_mapping_page(mapping, 0, NULL);
if (IS_ERR(page))
goto free_tree;
dprint(DBG_BITMAP, "block_allocate: %u,%u,%u\n", size, offset, len);
mutex_lock(&HFSPLUS_SB(sb).alloc_file->i_mutex);
mapping = HFSPLUS_SB(sb).alloc_file->i_mapping;
- page = read_cache_page(mapping, offset / PAGE_CACHE_BITS,
- (filler_t *)mapping->a_ops->readpage, NULL);
+ page = read_mapping_page(mapping, offset / PAGE_CACHE_BITS, NULL);
pptr = kmap(page);
curr = pptr + (offset & (PAGE_CACHE_BITS - 1)) / 32;
i = offset % 32;
offset += PAGE_CACHE_BITS;
if (offset >= size)
break;
- page = read_cache_page(mapping, offset / PAGE_CACHE_BITS,
- (filler_t *)mapping->a_ops->readpage, NULL);
+ page = read_mapping_page(mapping, offset / PAGE_CACHE_BITS,
+ NULL);
curr = pptr = kmap(page);
if ((size ^ offset) / PAGE_CACHE_BITS)
end = pptr + PAGE_CACHE_BITS / 32;
set_page_dirty(page);
kunmap(page);
offset += PAGE_CACHE_BITS;
- page = read_cache_page(mapping, offset / PAGE_CACHE_BITS,
- (filler_t *)mapping->a_ops->readpage, NULL);
+ page = read_mapping_page(mapping, offset / PAGE_CACHE_BITS,
+ NULL);
pptr = kmap(page);
curr = pptr;
end = pptr + PAGE_CACHE_BITS / 32;
mutex_lock(&HFSPLUS_SB(sb).alloc_file->i_mutex);
mapping = HFSPLUS_SB(sb).alloc_file->i_mapping;
pnr = offset / PAGE_CACHE_BITS;
- page = read_cache_page(mapping, pnr, (filler_t *)mapping->a_ops->readpage, NULL);
+ page = read_mapping_page(mapping, pnr, NULL);
pptr = kmap(page);
curr = pptr + (offset & (PAGE_CACHE_BITS - 1)) / 32;
end = pptr + PAGE_CACHE_BITS / 32;
break;
set_page_dirty(page);
kunmap(page);
- page = read_cache_page(mapping, ++pnr, (filler_t *)mapping->a_ops->readpage, NULL);
+ page = read_mapping_page(mapping, ++pnr, NULL);
pptr = kmap(page);
curr = pptr;
end = pptr + PAGE_CACHE_BITS / 32;
block = off >> PAGE_CACHE_SHIFT;
node->page_offset = off & ~PAGE_CACHE_MASK;
for (i = 0; i < tree->pages_per_bnode; block++, i++) {
- page = read_cache_page(mapping, block, (filler_t *)mapping->a_ops->readpage, NULL);
+ page = read_mapping_page(mapping, block, NULL);
if (IS_ERR(page))
goto fail;
if (PageError(page)) {
goto free_tree;
mapping = tree->inode->i_mapping;
- page = read_cache_page(mapping, 0, (filler_t *)mapping->a_ops->readpage, NULL);
+ page = read_mapping_page(mapping, 0, NULL);
if (IS_ERR(page))
goto free_tree;
}
SetPageUptodate(page);
} else {
- page = read_cache_page(mapping, page_index,
- (filler_t *)mapping->a_ops->readpage, NULL);
+ page = read_mapping_page(mapping, page_index, NULL);
if (IS_ERR(page) || !PageUptodate(page)) {
- jfs_err("read_cache_page failed!");
+ jfs_err("read_mapping_page failed!");
return NULL;
}
lock_page(page);
static struct page * dir_get_page(struct inode *dir, unsigned long n)
{
struct address_space *mapping = dir->i_mapping;
- struct page *page = read_cache_page(mapping, n,
- (filler_t*)mapping->a_ops->readpage, NULL);
+ struct page *page = read_mapping_page(mapping, n, NULL);
if (!IS_ERR(page)) {
wait_on_page_locked(page);
kmap(page);
{
struct page * page;
struct address_space *mapping = dentry->d_inode->i_mapping;
- page = read_cache_page(mapping, 0, (filler_t *)mapping->a_ops->readpage,
- NULL);
+ page = read_mapping_page(mapping, 0, NULL);
if (IS_ERR(page))
goto sync_fail;
wait_on_page_locked(page);
static inline struct page *ntfs_map_page(struct address_space *mapping,
unsigned long index)
{
- struct page *page = read_cache_page(mapping, index,
- (filler_t*)mapping->a_ops->readpage, NULL);
+ struct page *page = read_mapping_page(mapping, index, NULL);
if (!IS_ERR(page)) {
wait_on_page_locked(page);
end >>= PAGE_CACHE_SHIFT;
/* If there is a first partial page, need to do it the slow way. */
if (start_ofs) {
- page = read_cache_page(mapping, idx,
- (filler_t*)mapping->a_ops->readpage, NULL);
+ page = read_mapping_page(mapping, idx, NULL);
if (IS_ERR(page)) {
ntfs_error(vol->sb, "Failed to read first partial "
"page (sync error, index 0x%lx).", idx);
}
/* If there is a last partial page, need to do it the slow way. */
if (end_ofs) {
- page = read_cache_page(mapping, idx,
- (filler_t*)mapping->a_ops->readpage, NULL);
+ page = read_mapping_page(mapping, idx, NULL);
if (IS_ERR(page)) {
ntfs_error(vol->sb, "Failed to read last partial page "
"(sync error, index 0x%lx).", idx);
* Read the page. If the page is not present, this will zero
* the uninitialized regions for us.
*/
- page = read_cache_page(mapping, index,
- (filler_t*)mapping->a_ops->readpage, NULL);
+ page = read_mapping_page(mapping, index, NULL);
if (IS_ERR(page)) {
err = PTR_ERR(page);
goto init_err_out;
{
struct page * page;
struct address_space *mapping = dentry->d_inode->i_mapping;
- page = read_cache_page(mapping, 0,
- (filler_t *)mapping->a_ops->readpage, NULL);
+ page = read_mapping_page(mapping, 0, NULL);
if (IS_ERR(page))
goto sync_fail;
wait_on_page_locked(page);
struct address_space *mapping = bdev->bd_inode->i_mapping;
struct page *page;
- page = read_cache_page(mapping, (pgoff_t)(n >> (PAGE_CACHE_SHIFT-9)),
- (filler_t *)mapping->a_ops->readpage, NULL);
+ page = read_mapping_page(mapping, (pgoff_t)(n >> (PAGE_CACHE_SHIFT-9)),
+ NULL);
if (!IS_ERR(page)) {
wait_on_page_locked(page);
if (!PageUptodate(page))
/* We can deadlock if we try to free dentries,
   and an unlink/rmdir has just occurred - GFP_NOFS avoids this */
mapping_set_gfp_mask(mapping, GFP_NOFS);
- page = read_cache_page(mapping, n,
- (filler_t *) mapping->a_ops->readpage, NULL);
+ page = read_mapping_page(mapping, n, NULL);
if (!IS_ERR(page)) {
wait_on_page_locked(page);
kmap(page);
static struct page * dir_get_page(struct inode *dir, unsigned long n)
{
struct address_space *mapping = dir->i_mapping;
- struct page *page = read_cache_page(mapping, n,
- (filler_t*)mapping->a_ops->readpage, NULL);
+ struct page *page = read_mapping_page(mapping, n, NULL);
if (!IS_ERR(page)) {
wait_on_page_locked(page);
kmap(page);
extern int read_cache_pages(struct address_space *mapping,
struct list_head *pages, filler_t *filler, void *data);
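+/*
+ * Read into the page cache using @mapping's default ->readpage()
+ * implementation as the filler.  @data is passed through to
+ * ->readpage() as the struct file pointer, so most callers pass NULL.
+ * Callers that need a custom filler (or filler data with different
+ * semantics) should keep using read_cache_page() directly.
+ */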
+static inline struct page *read_mapping_page(struct address_space *mapping,
+ unsigned long index, void *data)
+{
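+ /* ->readpage() takes a struct file *; filler_t expects a void *, hence the cast */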
+ filler_t *filler = (filler_t *)mapping->a_ops->readpage;
+ return read_cache_page(mapping, index, filler, data);
+}
+
int add_to_page_cache(struct page *page, struct address_space *mapping,
unsigned long index, gfp_t gfp_mask);
int add_to_page_cache_lru(struct page *page, struct address_space *mapping,
error = -EINVAL;
goto bad_swap;
}
- page = read_cache_page(mapping, 0,
- (filler_t *)mapping->a_ops->readpage, swap_file);
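+ /* swap_file is passed through to ->readpage() as the file argument */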
+ page = read_mapping_page(mapping, 0, swap_file);
if (IS_ERR(page)) {
error = PTR_ERR(page);
goto bad_swap;