nfs_release_request(req);
}
+/**
+ * nfs_set_page_writeback_locked - Lock a request for writeback
+ * @req: request to lock and tag for writeback
+ */
+int nfs_set_page_writeback_locked(struct nfs_page *req)
+{
+ struct nfs_inode *nfsi = NFS_I(req->wb_context->dentry->d_inode);
+
+ if (!nfs_lock_request(req))
+ return 0;
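+ /* Tag the request in the inode's radix tree so that locked writeback
+  * requests can be found with radix_tree_gang_lookup_tag() below. */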
+ radix_tree_tag_set(&nfsi->nfs_page_tree, req->wb_index, NFS_PAGE_TAG_WRITEBACK);
+ return 1;
+}
+
+/**
+ * nfs_clear_page_writeback - Unlock request and wake up sleepers
+ * @req: request to unlock
+ */
+void nfs_clear_page_writeback(struct nfs_page *req)
+{
+ struct nfs_inode *nfsi = NFS_I(req->wb_context->dentry->d_inode);
+
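+ /* Clear the radix tree tag under req_lock so tagged lookups never
+  * see a request that is about to be unlocked. */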
+ spin_lock(&nfsi->req_lock);
+ radix_tree_tag_clear(&nfsi->nfs_page_tree, req->wb_index, NFS_PAGE_TAG_WRITEBACK);
+ spin_unlock(&nfsi->req_lock);
+ nfs_unlock_request(req);
+}
+
/**
* nfs_clear_request - Free up all resources allocated to the request
* @req:
if (req->wb_index > idx_end)
break;
- if (!nfs_lock_request(req))
+ if (!nfs_set_page_writeback_locked(req))
continue;
nfs_list_remove_request(req);
nfs_list_add_request(req, dst);
if (len < PAGE_CACHE_SIZE)
memclear_highpage_flush(page, len, PAGE_CACHE_SIZE - len);
- nfs_lock_request(new);
nfs_list_add_request(new, &one_request);
nfs_pagein_one(&one_request, inode);
return 0;
nfs_clear_request(req);
nfs_release_request(req);
- nfs_unlock_request(req);
dprintk("NFS: read done (%s/%Ld %d@%Ld)\n",
req->wb_context->dentry->d_inode->i_sb->s_id,
}
if (len < PAGE_CACHE_SIZE)
memclear_highpage_flush(page, len, PAGE_CACHE_SIZE - len);
- nfs_lock_request(new);
nfs_list_add_request(new, desc->head);
return 0;
}
spin_lock(&nfsi->req_lock);
next = idx_start;
- while (radix_tree_gang_lookup(&nfsi->nfs_page_tree, (void **)&req, next, 1)) {
+ while (radix_tree_gang_lookup_tag(&nfsi->nfs_page_tree, (void **)&req, next, 1, NFS_PAGE_TAG_WRITEBACK)) {
if (req->wb_index > idx_end)
break;
next = req->wb_index + 1;
- if (!NFS_WBACK_BUSY(req))
- continue;
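+ /* Requests found via the WRITEBACK tag must already be locked */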
+ BUG_ON(!NFS_WBACK_BUSY(req));
atomic_inc(&req->wb_count);
spin_unlock(&nfsi->req_lock);
#else
nfs_inode_remove_request(req);
#endif
- nfs_unlock_request(req);
+ nfs_clear_page_writeback(req);
}
static inline int flush_task_priority(int how)
nfs_writedata_free(data);
}
nfs_mark_request_dirty(req);
- nfs_unlock_request(req);
+ nfs_clear_page_writeback(req);
return -ENOMEM;
}
struct nfs_page *req = nfs_list_entry(head->next);
nfs_list_remove_request(req);
nfs_mark_request_dirty(req);
- nfs_unlock_request(req);
+ nfs_clear_page_writeback(req);
}
return -ENOMEM;
}
req = nfs_list_entry(head->next);
nfs_list_remove_request(req);
nfs_mark_request_dirty(req);
- nfs_unlock_request(req);
+ nfs_clear_page_writeback(req);
}
return error;
}
nfs_inode_remove_request(req);
#endif
next:
- nfs_unlock_request(req);
+ nfs_clear_page_writeback(req);
}
}
req = nfs_list_entry(head->next);
nfs_list_remove_request(req);
nfs_mark_request_commit(req);
- nfs_unlock_request(req);
+ nfs_clear_page_writeback(req);
}
return -ENOMEM;
}
dprintk(" mismatch\n");
nfs_mark_request_dirty(req);
next:
- nfs_unlock_request(req);
+ nfs_clear_page_writeback(req);
res++;
}
sub_page_state(nr_unstable,res);
#include <asm/atomic.h>
+/*
+ * Valid tags for the radix tree
+ */
+#define NFS_PAGE_TAG_WRITEBACK 1
+
/*
* Valid flags for a dirty buffer
*/
unsigned int);
extern int nfs_wait_on_request(struct nfs_page *);
extern void nfs_unlock_request(struct nfs_page *req);
+extern int nfs_set_page_writeback_locked(struct nfs_page *req);
+extern void nfs_clear_page_writeback(struct nfs_page *req);
+
/*
* Lock the page of an asynchronous request without incrementing the wb_count
{
if (list_empty(&req->wb_list))
return;
- if (!NFS_WBACK_BUSY(req)) {
- printk(KERN_ERR "NFS: unlocked request attempted removed from list!\n");
- BUG();
- }
list_del_init(&req->wb_list);
req->wb_list_head = NULL;
}