#include <linux/slab.h>
#include <linux/file.h>
+#include <linux/sched.h>
#include <linux/sunrpc/clnt.h>
#include <linux/nfs3.h>
#include <linux/nfs4.h>
#include <linux/nfs_fs.h>
#include <linux/nfs_mount.h>
-#define NFS_PARANOIA 1
+#include "internal.h"
static struct kmem_cache *nfs_page_cachep;
* @count: number of bytes to read/write
*
* The page must be locked by the caller. This makes sure we never
- * create two different requests for the same page, and avoids
- * a possible deadlock when we reach the hard limit on the number
- * of dirty pages.
+ * create two different requests for the same page.
- * User should ensure it is safe to sleep in this function.
+ * The caller must ensure it is safe to sleep in this function.
*/
struct nfs_page *
nfs_create_request(struct nfs_open_context *ctx, struct inode *inode,
struct page *page,
unsigned int offset, unsigned int count)
{
- struct nfs_server *server = NFS_SERVER(inode);
struct nfs_page *req;
- /* Deal with hard limits. */
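+ /* Loop until the allocation succeeds: yield() lets other tasks
+ * run and free memory, and a fatal signal aborts the wait. */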
for (;;) {
/* try to allocate the request struct */
req = nfs_page_alloc();
if (req != NULL)
break;
- /* Try to free up at least one request in order to stay
- * below the hard limit
- */
- if (signalled() && (server->flags & NFS_MOUNT_INTR))
+ if (fatal_signal_pending(current))
return ERR_PTR(-ERESTARTSYS);
yield();
}
req->wb_offset = offset;
req->wb_pgbase = offset;
req->wb_bytes = count;
- atomic_set(&req->wb_count, 1);
req->wb_context = get_nfs_open_context(ctx);
-
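+ /* wb_kref replaces the old wb_count; the final reference is
+ * dropped through kref_put() in nfs_release_request(). */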
+ kref_init(&req->wb_kref);
return req;
}
}
/**
- * nfs_set_page_writeback_locked - Lock a request for writeback
+ * nfs_set_page_tag_locked - Tag a request as locked
- * @req:
+ * @req: request to tag as locked
*/
-int nfs_set_page_writeback_locked(struct nfs_page *req)
+int nfs_set_page_tag_locked(struct nfs_page *req)
{
- struct nfs_inode *nfsi = NFS_I(req->wb_context->dentry->d_inode);
+ struct nfs_inode *nfsi = NFS_I(req->wb_context->path.dentry->d_inode);
- if (!nfs_lock_request(req))
+ if (!nfs_lock_request_dontget(req))
return 0;
- radix_tree_tag_set(&nfsi->nfs_page_tree, req->wb_index, NFS_PAGE_TAG_WRITEBACK);
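+ /* Only requests with a page are indexed in the radix tree, so
+ * only those can carry the LOCKED tag. */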
+ if (req->wb_page != NULL)
+ radix_tree_tag_set(&nfsi->nfs_page_tree, req->wb_index, NFS_PAGE_TAG_LOCKED);
return 1;
}
/**
- * nfs_clear_page_writeback - Unlock request and wake up sleepers
+ * nfs_clear_page_tag_locked - Clear request tag and wake up sleepers
*/
-void nfs_clear_page_writeback(struct nfs_page *req)
+void nfs_clear_page_tag_locked(struct nfs_page *req)
{
- struct nfs_inode *nfsi = NFS_I(req->wb_context->dentry->d_inode);
+ struct inode *inode = req->wb_context->path.dentry->d_inode;
+ struct nfs_inode *nfsi = NFS_I(inode);
if (req->wb_page != NULL) {
- spin_lock(&nfsi->req_lock);
- radix_tree_tag_clear(&nfsi->nfs_page_tree, req->wb_index, NFS_PAGE_TAG_WRITEBACK);
- spin_unlock(&nfsi->req_lock);
- }
- nfs_unlock_request(req);
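+ /* Clear the tag and the lock bit together under i_lock, so the
+ * two states never disagree for anyone holding that lock. */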
+ spin_lock(&inode->i_lock);
+ radix_tree_tag_clear(&nfsi->nfs_page_tree, req->wb_index, NFS_PAGE_TAG_LOCKED);
+ nfs_unlock_request(req);
+ spin_unlock(&inode->i_lock);
+ } else
+ nfs_unlock_request(req);
}
/**
* nfs_release_request - Release the count on an NFS read/write request
* @req: request to release
*
* Note: Should never be called with the spinlock held!
*/
-void
-nfs_release_request(struct nfs_page *req)
+static void nfs_free_request(struct kref *kref)
{
- if (!atomic_dec_and_test(&req->wb_count))
- return;
-
-#ifdef NFS_PARANOIA
- BUG_ON (!list_empty(&req->wb_list));
- BUG_ON (NFS_WBACK_BUSY(req));
-#endif
+ struct nfs_page *req = container_of(kref, struct nfs_page, wb_kref);
/* Release struct file or cached credential */
nfs_clear_request(req);
nfs_page_free(req);
}
-static int nfs_wait_bit_interruptible(void *word)
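+/* Drop a reference; nfs_free_request() runs when the last one is gone. */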
+void nfs_release_request(struct nfs_page *req)
+{
+ kref_put(&req->wb_kref, nfs_free_request);
+}
+
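+/* Bit-wait callback: abort on a fatal signal, otherwise just sleep. */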
+static int nfs_wait_bit_killable(void *word)
{
int ret = 0;
- if (signal_pending(current))
+ if (fatal_signal_pending(current))
ret = -ERESTARTSYS;
else
schedule();
* nfs_wait_on_request - Wait for a request to complete.
* @req: request to wait upon.
*
- * Interruptible by signals only if mounted with intr flag.
+ * Interruptible by fatal signals only.
* The user is responsible for holding a count on the request.
*/
int
nfs_wait_on_request(struct nfs_page *req)
{
- struct rpc_clnt *clnt = NFS_CLIENT(req->wb_context->dentry->d_inode);
- sigset_t oldmask;
int ret = 0;
if (!test_bit(PG_BUSY, &req->wb_flags))
goto out;
- /*
- * Note: the call to rpc_clnt_sigmask() suffices to ensure that we
- * are not interrupted if intr flag is not set
- */
- rpc_clnt_sigmask(clnt, &oldmask);
ret = out_of_line_wait_on_bit(&req->wb_flags, PG_BUSY,
- nfs_wait_bit_interruptible, TASK_INTERRUPTIBLE);
- rpc_clnt_sigunmask(clnt, &oldmask);
+ nfs_wait_bit_killable, TASK_KILLABLE);
out:
return ret;
}
*/
void nfs_pageio_init(struct nfs_pageio_descriptor *desc,
struct inode *inode,
- int (*doio)(struct inode *, struct list_head *, size_t, int),
- unsigned int bsize,
+ int (*doio)(struct inode *, struct list_head *, unsigned int, size_t, int),
+ size_t bsize,
int io_flags)
{
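+ /* pg_doio receives the number of pages spanned by the queued
+ * requests (see nfs_page_array_len()) as well as the byte count. */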
INIT_LIST_HEAD(&desc->pg_list);
* since nfs_flush_multi and nfs_pagein_multi assume you
* can have only one struct nfs_page.
*/
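+ /* A block size below one page forces each request to be split
+ * into several RPCs, so it can never be coalesced. */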
+ if (desc->pg_bsize < PAGE_SIZE)
+ return 0;
newlen += desc->pg_count;
- if (desc->pg_base + newlen > desc->pg_bsize)
+ if (newlen > desc->pg_bsize)
return 0;
prev = nfs_list_entry(desc->pg_list.prev);
if (!nfs_can_coalesce_requests(prev, req))
if (!list_empty(&desc->pg_list)) {
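+ /* nfs_page_array_len() converts (page base offset, byte count)
+ * into the number of pages this I/O spans. */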
int error = desc->pg_doio(desc->pg_inode,
&desc->pg_list,
+ nfs_page_array_len(desc->pg_base,
+ desc->pg_count),
desc->pg_count,
desc->pg_ioflags);
if (error < 0)
nfs_pageio_doio(desc);
}
+/**
+ * nfs_pageio_cond_complete - Conditional I/O completion
+ * @desc: pointer to io descriptor
+ * @index: page index
+ *
+ * It is important to ensure that processes don't try to take locks
+ * on non-contiguous ranges of pages as that might deadlock. This
+ * function should be called before attempting to wait on a locked
+ * nfs_page. It will complete the I/O if the page index 'index'
+ * is not contiguous with the existing list of pages in 'desc'.
+ */
+void nfs_pageio_cond_complete(struct nfs_pageio_descriptor *desc, pgoff_t index)
+{
+ if (!list_empty(&desc->pg_list)) {
+ struct nfs_page *prev = nfs_list_entry(desc->pg_list.prev);
+ if (index != prev->wb_index + 1)
+ nfs_pageio_doio(desc);
+ }
+}
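+/*
+ * Usage sketch (based on the per-page flush path in fs/nfs/write.c):
+ * before sleeping on a locked request, the caller does
+ *
+ *	nfs_pageio_cond_complete(pgio, page->index);
+ *
+ * so that I/O already queued for a non-contiguous range is submitted
+ * rather than deadlocked against.
+ */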
+
#define NFS_SCAN_MAXENTRIES 16
/**
* nfs_scan_list - Scan a list for matching requests
* @nfsi: NFS inode
- * @head: One of the NFS inode request lists
* @dst: Destination list
* @idx_start: lower bound of page->index to scan
* @npages: idx_start + npages sets the upper bound to scan.
+ * @tag: tag to scan for
*
* Moves elements from one of the inode request lists.
* If the number of requests is set to 0, the entire address_space
- * starting at index idx_start, is scanned.
+ * starting at index idx_start is scanned.
* The requests are *not* checked to ensure that they form a contiguous set.
- * You must be holding the inode's req_lock when calling this function
+ * You must be holding the inode's i_lock when calling this function
*/
-int nfs_scan_list(struct nfs_inode *nfsi, struct list_head *head,
- struct list_head *dst, unsigned long idx_start,
- unsigned int npages)
+int nfs_scan_list(struct nfs_inode *nfsi,
+ struct list_head *dst, pgoff_t idx_start,
+ unsigned int npages, int tag)
{
struct nfs_page *pgvec[NFS_SCAN_MAXENTRIES];
struct nfs_page *req;
- unsigned long idx_end;
+ pgoff_t idx_end;
int found, i;
int res;
idx_end = idx_start + npages - 1;
for (;;) {
- found = radix_tree_gang_lookup(&nfsi->nfs_page_tree,
+ found = radix_tree_gang_lookup_tag(&nfsi->nfs_page_tree,
(void **)&pgvec[0], idx_start,
- NFS_SCAN_MAXENTRIES);
+ NFS_SCAN_MAXENTRIES, tag);
if (found <= 0)
break;
for (i = 0; i < found; i++) {
req = pgvec[i];
if (req->wb_index > idx_end)
goto out;
idx_start = req->wb_index + 1;
- if (req->wb_list_head != head)
- continue;
- if (nfs_set_page_writeback_locked(req)) {
+ if (nfs_set_page_tag_locked(req)) {
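+ /* Hold a reference on behalf of the destination list */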
+ kref_get(&req->wb_kref);
nfs_list_remove_request(req);
+ radix_tree_tag_clear(&nfsi->nfs_page_tree,
+ req->wb_index, tag);
nfs_list_add_request(req, dst);
res++;
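+ /* res is a signed int: stop before it overflows */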
+ if (res == INT_MAX)
+ goto out;
}
}
-
+ /* Drop i_lock and reschedule if needed, to keep latency down
+ * while scanning a large file */
+ cond_resched_lock(&nfsi->vfs_inode.i_lock);
}
out:
return res;
nfs_page_cachep = kmem_cache_create("nfs_page",
sizeof(struct nfs_page),
0, SLAB_HWCACHE_ALIGN,
- NULL, NULL);
+ NULL);
if (nfs_page_cachep == NULL)
return -ENOMEM;