#include <linux/pagemap.h>
#include <linux/file.h>
#include <linux/writeback.h>
+#include <linux/swap.h>
#include <linux/sunrpc/clnt.h>
#include <linux/nfs_fs.h>
static struct nfs_page * nfs_update_request(struct nfs_open_context*,
struct page *,
unsigned int, unsigned int);
-static void nfs_mark_request_dirty(struct nfs_page *req);
-static int nfs_wait_on_write_congestion(struct address_space *, int);
-static long nfs_flush_mapping(struct address_space *mapping, struct writeback_control *wbc, int how);
+static void nfs_pageio_init_write(struct nfs_pageio_descriptor *desc,
+ struct inode *inode, int ioflags);
static const struct rpc_call_ops nfs_write_partial_ops;
static const struct rpc_call_ops nfs_write_full_ops;
static const struct rpc_call_ops nfs_commit_ops;
static mempool_t *nfs_wdata_mempool;
static mempool_t *nfs_commit_mempool;
-static DECLARE_WAIT_QUEUE_HEAD(nfs_write_congestion);
-
struct nfs_write_data *nfs_commit_alloc(void)
{
struct nfs_write_data *p = mempool_alloc(nfs_commit_mempool, GFP_NOFS);
call_rcu_bh(&wdata->task.u.tk_rcu, nfs_commit_rcu_free);
}
-struct nfs_write_data *nfs_writedata_alloc(size_t len)
+struct nfs_write_data *nfs_writedata_alloc(unsigned int pagecount)
{
- unsigned int pagecount = (len + PAGE_SIZE - 1) >> PAGE_SHIFT;
struct nfs_write_data *p = mempool_alloc(nfs_wdata_mempool, GFP_NOFS);
if (p) {
{
struct inode *inode = page->mapping->host;
loff_t end, i_size = i_size_read(inode);
- unsigned long end_index = (i_size - 1) >> PAGE_CACHE_SHIFT;
+ pgoff_t end_index = (i_size - 1) >> PAGE_CACHE_SHIFT;
if (i_size > 0 && page->index < end_index)
return;
i_size_write(inode, end);
}
+/* A writeback failed: mark the page as bad, and invalidate the page cache */
+static void nfs_set_pageerror(struct page *page)
+{
+ SetPageError(page);
+ nfs_zap_mapping(page->mapping->host, page->mapping);
+}
+
/* We can set the PG_uptodate flag if we see that a write request
* covers the full page.
*/
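/*
 * A minimal sketch of the test described above; the helper body is
 * elided from this hunk and the parameter names base/count are assumed,
 * not taken from the patch:
 *
 *	if (base == 0 && count == PAGE_CACHE_SIZE)
 *		SetPageUptodate(page);
 */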
static int wb_priority(struct writeback_control *wbc)
{
if (wbc->for_reclaim)
- return FLUSH_HIGHPRI;
+ return FLUSH_HIGHPRI | FLUSH_STABLE;
if (wbc->for_kupdate)
return FLUSH_LOWPRI;
return 0;
}
+/*
+ * NFS congestion control
+ */
+
+int nfs_congestion_kb;
+
+#define NFS_CONGESTION_ON_THRESH (nfs_congestion_kb >> (PAGE_SHIFT-10))
+#define NFS_CONGESTION_OFF_THRESH \
+ (NFS_CONGESTION_ON_THRESH - (NFS_CONGESTION_ON_THRESH >> 2))
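+/*
+ * Example, assuming 4k pages (PAGE_SHIFT = 12) and the 256MB row of the
+ * sizing table added at the bottom of this patch (nfs_congestion_kb =
+ * 16384): ON = 16384 >> 2 = 4096 pages in flight, and
+ * OFF = 4096 - 1024 = 3072 pages.
+ */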
+
+static int nfs_set_page_writeback(struct page *page)
+{
+ int ret = test_set_page_writeback(page);
+
+ if (!ret) {
+ struct inode *inode = page->mapping->host;
+ struct nfs_server *nfss = NFS_SERVER(inode);
+
+ if (atomic_inc_return(&nfss->writeback) >
+ NFS_CONGESTION_ON_THRESH)
+ set_bdi_congested(&nfss->backing_dev_info, WRITE);
+ }
+ return ret;
+}
+
+static void nfs_end_page_writeback(struct page *page)
+{
+ struct inode *inode = page->mapping->host;
+ struct nfs_server *nfss = NFS_SERVER(inode);
+
+ end_page_writeback(page);
+ if (atomic_dec_return(&nfss->writeback) < NFS_CONGESTION_OFF_THRESH) {
+ clear_bdi_congested(&nfss->backing_dev_info, WRITE);
+ congestion_end(WRITE);
+ }
+}
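+/*
+ * Note the hysteresis: the congested bit set at NFS_CONGESTION_ON_THRESH
+ * is only cleared once the in-flight count has dropped 25% below it, so
+ * the bdi state does not flap as individual page writebacks complete.
+ */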
+
/*
* Find an associated nfs write request, and prepare to flush it out
* Returns 1 if there was no write request, or if the request was
* not tagged.
* May also return an error if the user signalled nfs_wait_on_request().
*/
-static int nfs_page_mark_flush(struct page *page)
+static int nfs_page_async_flush(struct nfs_pageio_descriptor *pgio,
+ struct page *page)
{
struct nfs_page *req;
- spinlock_t *req_lock = &NFS_I(page->mapping->host)->req_lock;
+ struct nfs_inode *nfsi = NFS_I(page->mapping->host);
+ spinlock_t *req_lock = &nfsi->req_lock;
int ret;
spin_lock(req_lock);
* request as dirty (in which case we don't care).
*/
spin_unlock(req_lock);
+ /* Prevent deadlock: the request we are about to wait on
+ * may already be queued in this descriptor, so flush any
+ * pending writes before sleeping */
+ nfs_pageio_complete(pgio);
ret = nfs_wait_on_request(req);
nfs_release_request(req);
if (ret != 0)
return ret;
spin_lock(req_lock);
}
- spin_unlock(req_lock);
- if (test_and_set_bit(PG_FLUSHING, &req->wb_flags) == 0) {
- nfs_mark_request_dirty(req);
- set_page_writeback(page);
+ if (test_bit(PG_NEED_COMMIT, &req->wb_flags)) {
+ /* This request is marked for commit */
+ spin_unlock(req_lock);
+ nfs_unlock_request(req);
+ nfs_pageio_complete(pgio);
+ return 1;
+ }
+ if (nfs_set_page_writeback(page) != 0) {
+ spin_unlock(req_lock);
+ BUG();
}
+ radix_tree_tag_set(&nfsi->nfs_page_tree, req->wb_index,
+ NFS_PAGE_TAG_WRITEBACK);
ret = test_bit(PG_NEED_FLUSH, &req->wb_flags);
- nfs_unlock_request(req);
+ spin_unlock(req_lock);
+ nfs_pageio_add_request(pgio, req);
return ret;
}
*/
static int nfs_writepage_locked(struct page *page, struct writeback_control *wbc)
{
+ struct nfs_pageio_descriptor mypgio, *pgio;
struct nfs_open_context *ctx;
struct inode *inode = page->mapping->host;
unsigned offset;
nfs_inc_stats(inode, NFSIOS_VFSWRITEPAGE);
nfs_add_stats(inode, NFSIOS_WRITEPAGES, 1);
- err = nfs_page_mark_flush(page);
+ if (wbc->for_writepages)
+ pgio = wbc->fs_private;
+ else {
+ nfs_pageio_init_write(&mypgio, inode, wb_priority(wbc));
+ pgio = &mypgio;
+ }
+
+ err = nfs_page_async_flush(pgio, page);
if (err <= 0)
goto out;
err = 0;
put_nfs_open_context(ctx);
if (err != 0)
goto out;
- err = nfs_page_mark_flush(page);
+ err = nfs_page_async_flush(pgio, page);
if (err > 0)
err = 0;
out:
if (!wbc->for_writepages)
- nfs_flush_mapping(page->mapping, wbc, FLUSH_STABLE|wb_priority(wbc));
+ nfs_pageio_complete(pgio);
return err;
}
return err;
}
-/*
- * Note: causes nfs_update_request() to block on the assumption
- * that the writeback is generated due to memory pressure.
- */
int nfs_writepages(struct address_space *mapping, struct writeback_control *wbc)
{
- struct backing_dev_info *bdi = mapping->backing_dev_info;
struct inode *inode = mapping->host;
+ struct nfs_pageio_descriptor pgio;
int err;
nfs_inc_stats(inode, NFSIOS_VFSWRITEPAGES);
+ nfs_pageio_init_write(&pgio, inode, wb_priority(wbc));
+ wbc->fs_private = &pgio;
err = generic_writepages(mapping, wbc);
+ nfs_pageio_complete(&pgio);
if (err)
return err;
- while (test_and_set_bit(BDI_write_congested, &bdi->state) != 0) {
- if (wbc->nonblocking)
- return 0;
- nfs_wait_on_write_congestion(mapping, 0);
- }
- err = nfs_flush_mapping(mapping, wbc, wb_priority(wbc));
- if (err < 0)
- goto out;
- nfs_add_stats(inode, NFSIOS_WRITEPAGES, err);
- err = 0;
-out:
- clear_bit(BDI_write_congested, &bdi->state);
- wake_up_all(&nfs_write_congestion);
- congestion_end(WRITE);
- return err;
+ if (pgio.pg_error)
+ return pgio.pg_error;
+ return 0;
}
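/*
 * generic_writepages() calls ->writepage() for each dirty page; parking
 * the descriptor in wbc->fs_private lets nfs_writepage_locked() feed
 * every request into this one nfs_pageio_descriptor, so writes from
 * different pages can coalesce into larger RPCs before the final
 * nfs_pageio_complete() flushes whatever remains.
 */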
/*
}
SetPagePrivate(req->wb_page);
set_page_private(req->wb_page, (unsigned long)req);
+ if (PageDirty(req->wb_page))
+ set_bit(PG_NEED_FLUSH, &req->wb_flags);
nfsi->npages++;
atomic_inc(&req->wb_count);
return 0;
}
/*
- * Insert a write request into an inode
+ * Remove a write request from an inode
*/
static void nfs_inode_remove_request(struct nfs_page *req)
{
set_page_private(req->wb_page, 0);
ClearPagePrivate(req->wb_page);
radix_tree_delete(&nfsi->nfs_page_tree, req->wb_index);
+ if (test_and_clear_bit(PG_NEED_FLUSH, &req->wb_flags))
+ __set_page_dirty_nobuffers(req->wb_page);
nfsi->npages--;
if (!nfsi->npages) {
spin_unlock(&nfsi->req_lock);
nfs_release_request(req);
}
-/*
- * Add a request to the inode's dirty list.
- */
-static void
-nfs_mark_request_dirty(struct nfs_page *req)
-{
- struct inode *inode = req->wb_context->dentry->d_inode;
- struct nfs_inode *nfsi = NFS_I(inode);
-
- spin_lock(&nfsi->req_lock);
- radix_tree_tag_set(&nfsi->nfs_page_tree,
- req->wb_index, NFS_PAGE_TAG_DIRTY);
- nfs_list_add_request(req, &nfsi->dirty);
- nfsi->ndirty++;
- spin_unlock(&nfsi->req_lock);
- __mark_inode_dirty(inode, I_DIRTY_PAGES);
-}
-
static void
nfs_redirty_request(struct nfs_page *req)
{
- clear_bit(PG_FLUSHING, &req->wb_flags);
__set_page_dirty_nobuffers(req->wb_page);
}
static inline int
nfs_dirty_request(struct nfs_page *req)
{
- return test_bit(PG_FLUSHING, &req->wb_flags) == 0;
+ struct page *page = req->wb_page;
+
+ if (page == NULL || test_bit(PG_NEED_COMMIT, &req->wb_flags))
+ return 0;
+ return !PageWriteback(req->wb_page);
}
#if defined(CONFIG_NFS_V3) || defined(CONFIG_NFS_V4)
spin_lock(&nfsi->req_lock);
nfs_list_add_request(req, &nfsi->commit);
nfsi->ncommit++;
+ set_bit(PG_NEED_COMMIT, &(req)->wb_flags);
spin_unlock(&nfsi->req_lock);
inc_zone_page_state(req->wb_page, NR_UNSTABLE_NFS);
__mark_inode_dirty(inode, I_DIRTY_DATASYNC);
}
+
+static inline
+int nfs_write_need_commit(struct nfs_write_data *data)
+{
+ return data->verf.committed != NFS_FILE_SYNC;
+}
+
+static inline
+int nfs_reschedule_unstable_write(struct nfs_page *req)
+{
+ if (test_bit(PG_NEED_COMMIT, &req->wb_flags)) {
+ nfs_mark_request_commit(req);
+ return 1;
+ }
+ if (test_and_clear_bit(PG_NEED_RESCHED, &req->wb_flags)) {
+ nfs_redirty_request(req);
+ return 1;
+ }
+ return 0;
+}
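+/*
+ * Two flags drive the unstable-write handling above: PG_NEED_COMMIT marks
+ * a request whose data the server only holds unstably and which must be
+ * queued for a COMMIT, while PG_NEED_RESCHED marks one whose verifier
+ * changed mid-stream and which must be redirtied and rewritten.
+ */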
+#else
+static inline void
+nfs_mark_request_commit(struct nfs_page *req)
+{
+}
+
+static inline
+int nfs_write_need_commit(struct nfs_write_data *data)
+{
+ return 0;
+}
+
+static inline
+int nfs_reschedule_unstable_write(struct nfs_page *req)
+{
+ return 0;
+}
#endif
/*
*
* Interruptible by signals only if mounted with intr flag.
*/
-static int nfs_wait_on_requests_locked(struct inode *inode, unsigned long idx_start, unsigned int npages)
+static int nfs_wait_on_requests_locked(struct inode *inode, pgoff_t idx_start, unsigned int npages)
{
struct nfs_inode *nfsi = NFS_I(inode);
struct nfs_page *req;
- unsigned long idx_end, next;
+ pgoff_t idx_end, next;
unsigned int res = 0;
int error;
return res;
}
-static void nfs_cancel_dirty_list(struct list_head *head)
-{
- struct nfs_page *req;
- while(!list_empty(head)) {
- req = nfs_list_entry(head->next);
- nfs_list_remove_request(req);
- nfs_inode_remove_request(req);
- nfs_clear_page_writeback(req);
- }
-}
-
static void nfs_cancel_commit_list(struct list_head *head)
{
struct nfs_page *req;
req = nfs_list_entry(head->next);
dec_zone_page_state(req->wb_page, NR_UNSTABLE_NFS);
nfs_list_remove_request(req);
+ clear_bit(PG_NEED_COMMIT, &(req)->wb_flags);
nfs_inode_remove_request(req);
nfs_unlock_request(req);
}
* The requests are *not* checked to ensure that they form a contiguous set.
*/
static int
-nfs_scan_commit(struct inode *inode, struct list_head *dst, unsigned long idx_start, unsigned int npages)
+nfs_scan_commit(struct inode *inode, struct list_head *dst, pgoff_t idx_start, unsigned int npages)
{
struct nfs_inode *nfsi = NFS_I(inode);
int res = 0;
return res;
}
#else
-static inline int nfs_scan_commit(struct inode *inode, struct list_head *dst, unsigned long idx_start, unsigned int npages)
+static inline int nfs_scan_commit(struct inode *inode, struct list_head *dst, pgoff_t idx_start, unsigned int npages)
{
return 0;
}
#endif
-static int nfs_wait_on_write_congestion(struct address_space *mapping, int intr)
-{
- struct backing_dev_info *bdi = mapping->backing_dev_info;
- DEFINE_WAIT(wait);
- int ret = 0;
-
- might_sleep();
-
- if (!bdi_write_congested(bdi))
- return 0;
-
- nfs_inc_stats(mapping->host, NFSIOS_CONGESTIONWAIT);
-
- if (intr) {
- struct rpc_clnt *clnt = NFS_CLIENT(mapping->host);
- sigset_t oldset;
-
- rpc_clnt_sigmask(clnt, &oldset);
- prepare_to_wait(&nfs_write_congestion, &wait, TASK_INTERRUPTIBLE);
- if (bdi_write_congested(bdi)) {
- if (signalled())
- ret = -ERESTARTSYS;
- else
- schedule();
- }
- rpc_clnt_sigunmask(clnt, &oldset);
- } else {
- prepare_to_wait(&nfs_write_congestion, &wait, TASK_UNINTERRUPTIBLE);
- if (bdi_write_congested(bdi))
- schedule();
- }
- finish_wait(&nfs_write_congestion, &wait);
- return ret;
-}
-
-
/*
* Try to update any existing write request, or create one if there is none.
* In order to match, the request's credentials must match those of
static struct nfs_page * nfs_update_request(struct nfs_open_context* ctx,
struct page *page, unsigned int offset, unsigned int bytes)
{
- struct inode *inode = page->mapping->host;
+ struct address_space *mapping = page->mapping;
+ struct inode *inode = mapping->host;
struct nfs_inode *nfsi = NFS_I(inode);
struct nfs_page *req, *new = NULL;
- unsigned long rqend, end;
+ pgoff_t rqend, end;
end = offset + bytes;
- if (nfs_wait_on_write_congestion(page->mapping, NFS_SERVER(inode)->flags & NFS_MOUNT_INTR))
- return ERR_PTR(-ERESTARTSYS);
for (;;) {
/* Loop over all inode entries and see if we find
* a request for the page we wish to update
dprintk("NFS: nfs_updatepage returns %d (isize %Ld)\n",
status, (long long)i_size_read(inode));
if (status < 0)
- ClearPageUptodate(page);
+ nfs_set_pageerror(page);
return status;
}
static void nfs_writepage_release(struct nfs_page *req)
{
- end_page_writeback(req->wb_page);
-#if defined(CONFIG_NFS_V3) || defined(CONFIG_NFS_V4)
- if (!PageError(req->wb_page)) {
- if (NFS_NEED_RESCHED(req)) {
- nfs_redirty_request(req);
- goto out;
- } else if (NFS_NEED_COMMIT(req)) {
- nfs_mark_request_commit(req);
- goto out;
- }
- }
- nfs_inode_remove_request(req);
-
-out:
- nfs_clear_commit(req);
- nfs_clear_reschedule(req);
-#else
- nfs_inode_remove_request(req);
-#endif
+ if (PageError(req->wb_page) || !nfs_reschedule_unstable_write(req)) {
+ nfs_end_page_writeback(req->wb_page);
+ nfs_inode_remove_request(req);
+ } else
+ nfs_end_page_writeback(req->wb_page);
nfs_clear_page_writeback(req);
}
* Generate multiple small requests to write out a single
* contiguous dirty area on one page.
*/
-static int nfs_flush_multi(struct inode *inode, struct list_head *head, int how)
+static int nfs_flush_multi(struct inode *inode, struct list_head *head, unsigned int npages, size_t count, int how)
{
struct nfs_page *req = nfs_list_entry(head->next);
struct page *page = req->wb_page;
nfs_list_remove_request(req);
- nbytes = req->wb_bytes;
+ nbytes = count;
do {
size_t len = min(nbytes, wsize);
- data = nfs_writedata_alloc(len);
+ data = nfs_writedata_alloc(1);
if (!data)
goto out_bad;
list_add(&data->pages, &list);
ClearPageError(page);
offset = 0;
- nbytes = req->wb_bytes;
+ nbytes = count;
do {
data = list_entry(list.next, struct nfs_write_data, pages);
list_del_init(&data->pages);
data->pagevec[0] = page;
- if (nbytes > wsize) {
- nfs_write_rpcsetup(req, data, &nfs_write_partial_ops,
- wsize, offset, how);
- offset += wsize;
- nbytes -= wsize;
- } else {
- nfs_write_rpcsetup(req, data, &nfs_write_partial_ops,
- nbytes, offset, how);
- nbytes = 0;
- }
+ if (nbytes < wsize)
+ wsize = nbytes;
+ nfs_write_rpcsetup(req, data, &nfs_write_partial_ops,
+ wsize, offset, how);
+ offset += wsize;
+ nbytes -= wsize;
nfs_execute_write(data);
} while (nbytes != 0);
nfs_writedata_release(data);
}
nfs_redirty_request(req);
+ nfs_end_page_writeback(req->wb_page);
nfs_clear_page_writeback(req);
return -ENOMEM;
}
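/*
 * Example: with wsize = 1024 and count = 4096, the loop above issues
 * four sub-page WRITEs at offsets 0, 1024, 2048 and 3072, each using
 * its own single-page nfs_write_data from nfs_writedata_alloc(1).
 */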
* This is the case if nfs_updatepage detects a conflicting request
* that has been written but not committed.
*/
-static int nfs_flush_one(struct inode *inode, struct list_head *head, int how)
+static int nfs_flush_one(struct inode *inode, struct list_head *head, unsigned int npages, size_t count, int how)
{
struct nfs_page *req;
struct page **pages;
struct nfs_write_data *data;
- unsigned int count;
- data = nfs_writedata_alloc(NFS_SERVER(inode)->wsize);
+ data = nfs_writedata_alloc(npages);
if (!data)
goto out_bad;
pages = data->pagevec;
- count = 0;
while (!list_empty(head)) {
req = nfs_list_entry(head->next);
nfs_list_remove_request(req);
nfs_list_add_request(req, &data->pages);
ClearPageError(req->wb_page);
*pages++ = req->wb_page;
- count += req->wb_bytes;
}
req = nfs_list_entry(data->pages.next);
struct nfs_page *req = nfs_list_entry(head->next);
nfs_list_remove_request(req);
nfs_redirty_request(req);
+ nfs_end_page_writeback(req->wb_page);
nfs_clear_page_writeback(req);
}
return -ENOMEM;
}
-static int nfs_flush_list(struct inode *inode, struct list_head *head, int npages, int how)
+static void nfs_pageio_init_write(struct nfs_pageio_descriptor *pgio,
+ struct inode *inode, int ioflags)
{
- LIST_HEAD(one_request);
- int (*flush_one)(struct inode *, struct list_head *, int);
- struct nfs_page *req;
- int wpages = NFS_SERVER(inode)->wpages;
int wsize = NFS_SERVER(inode)->wsize;
- int error;
- flush_one = nfs_flush_one;
if (wsize < PAGE_CACHE_SIZE)
- flush_one = nfs_flush_multi;
- /* For single writes, FLUSH_STABLE is more efficient */
- if (npages <= wpages && npages == NFS_I(inode)->npages
- && nfs_list_entry(head->next)->wb_bytes <= wsize)
- how |= FLUSH_STABLE;
-
- do {
- nfs_coalesce_requests(head, &one_request, wpages);
- req = nfs_list_entry(one_request.next);
- error = flush_one(inode, &one_request, how);
- if (error < 0)
- goto out_err;
- } while (!list_empty(head));
- return 0;
-out_err:
- while (!list_empty(head)) {
- req = nfs_list_entry(head->next);
- nfs_list_remove_request(req);
- nfs_redirty_request(req);
- nfs_clear_page_writeback(req);
- }
- return error;
+ nfs_pageio_init(pgio, inode, nfs_flush_multi, wsize, ioflags);
+ else
+ nfs_pageio_init(pgio, inode, nfs_flush_one, wsize, ioflags);
}
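/*
 * This preserves the old nfs_flush_list() split: when the server's wsize
 * cannot cover a whole page, nfs_flush_multi() breaks each page into
 * several sub-page RPCs; otherwise nfs_flush_one() coalesces whole pages
 * into a single larger write.
 */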
/*
return;
if (task->tk_status < 0) {
- ClearPageUptodate(page);
- SetPageError(page);
+ nfs_set_pageerror(page);
req->wb_context->error = task->tk_status;
dprintk(", error = %d\n", task->tk_status);
- } else {
-#if defined(CONFIG_NFS_V3) || defined(CONFIG_NFS_V4)
- if (data->verf.committed < NFS_FILE_SYNC) {
- if (!NFS_NEED_COMMIT(req)) {
- nfs_defer_commit(req);
- memcpy(&req->wb_verf, &data->verf, sizeof(req->wb_verf));
- dprintk(" defer commit\n");
- } else if (memcmp(&req->wb_verf, &data->verf, sizeof(req->wb_verf))) {
- nfs_defer_reschedule(req);
- dprintk(" server reboot detected\n");
- }
- } else
-#endif
- dprintk(" OK\n");
+ goto out;
}
+ if (nfs_write_need_commit(data)) {
+ spinlock_t *req_lock = &NFS_I(page->mapping->host)->req_lock;
+
+ spin_lock(req_lock);
+ if (test_bit(PG_NEED_RESCHED, &req->wb_flags)) {
+ /* Do nothing; we need to resend the writes */
+ } else if (!test_and_set_bit(PG_NEED_COMMIT, &req->wb_flags)) {
+ memcpy(&req->wb_verf, &data->verf, sizeof(req->wb_verf));
+ dprintk(" defer commit\n");
+ } else if (memcmp(&req->wb_verf, &data->verf, sizeof(req->wb_verf))) {
+ set_bit(PG_NEED_RESCHED, &req->wb_flags);
+ clear_bit(PG_NEED_COMMIT, &req->wb_flags);
+ dprintk(" server reboot detected\n");
+ }
+ spin_unlock(req_lock);
+ } else
+ dprintk(" OK\n");
+
+out:
if (atomic_dec_and_test(&req->wb_complete))
nfs_writepage_release(req);
}
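/*
 * Verifier handling above: the first unstable reply records the write
 * verifier in the request; a later reply carrying a different verifier
 * means the server rebooted and lost its cache, so the request flips
 * from "needs commit" to "needs reschedule" and the data is resent.
 */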
(long long)req_offset(req));
if (task->tk_status < 0) {
- ClearPageUptodate(page);
- SetPageError(page);
+ nfs_set_pageerror(page);
req->wb_context->error = task->tk_status;
- end_page_writeback(page);
- nfs_inode_remove_request(req);
dprintk(", error = %d\n", task->tk_status);
- goto next;
+ goto remove_request;
}
- end_page_writeback(page);
-#if defined(CONFIG_NFS_V3) || defined(CONFIG_NFS_V4)
- if (data->args.stable != NFS_UNSTABLE || data->verf.committed == NFS_FILE_SYNC) {
- nfs_inode_remove_request(req);
- dprintk(" OK\n");
+ if (nfs_write_need_commit(data)) {
+ memcpy(&req->wb_verf, &data->verf, sizeof(req->wb_verf));
+ nfs_mark_request_commit(req);
+ nfs_end_page_writeback(page);
+ dprintk(" marked for commit\n");
goto next;
}
- memcpy(&req->wb_verf, &data->verf, sizeof(req->wb_verf));
- nfs_mark_request_commit(req);
- dprintk(" marked for commit\n");
-#else
+ dprintk(" OK\n");
+remove_request:
+ nfs_end_page_writeback(page);
nfs_inode_remove_request(req);
-#endif
next:
nfs_clear_page_writeback(req);
}
while (!list_empty(&data->pages)) {
req = nfs_list_entry(data->pages.next);
nfs_list_remove_request(req);
+ clear_bit(PG_NEED_COMMIT, &(req)->wb_flags);
dec_zone_page_state(req->wb_page, NR_UNSTABLE_NFS);
dprintk("NFS: commit (%s/%Ld %d@%Ld)",
.rpc_call_done = nfs_commit_done,
.rpc_release = nfs_commit_release,
};
-#else
-static inline int nfs_commit_list(struct inode *inode, struct list_head *head, int how)
-{
- return 0;
-}
-#endif
-
-static long nfs_flush_mapping(struct address_space *mapping, struct writeback_control *wbc, int how)
-{
- struct nfs_inode *nfsi = NFS_I(mapping->host);
- LIST_HEAD(head);
- long res;
- spin_lock(&nfsi->req_lock);
- res = nfs_scan_dirty(mapping, wbc, &head);
- spin_unlock(&nfsi->req_lock);
- if (res) {
- int error = nfs_flush_list(mapping->host, &head, res, how);
- if (error < 0)
- return error;
- }
- return res;
-}
-
-#if defined(CONFIG_NFS_V3) || defined(CONFIG_NFS_V4)
int nfs_commit_inode(struct inode *inode, int how)
{
struct nfs_inode *nfsi = NFS_I(inode);
}
return res;
}
+#else
+static inline int nfs_commit_list(struct inode *inode, struct list_head *head, int how)
+{
+ return 0;
+}
#endif
long nfs_sync_mapping_wait(struct address_space *mapping, struct writeback_control *wbc, int how)
{
struct inode *inode = mapping->host;
struct nfs_inode *nfsi = NFS_I(inode);
- unsigned long idx_start, idx_end;
+ pgoff_t idx_start, idx_end;
unsigned int npages = 0;
LIST_HEAD(head);
int nocommit = how & FLUSH_NOCOMMIT;
idx_start = wbc->range_start >> PAGE_CACHE_SHIFT;
idx_end = wbc->range_end >> PAGE_CACHE_SHIFT;
if (idx_end > idx_start) {
- unsigned long l_npages = 1 + idx_end - idx_start;
+ pgoff_t l_npages = 1 + idx_end - idx_start;
npages = l_npages;
if (sizeof(npages) != sizeof(l_npages) &&
- (unsigned long)npages != l_npages)
+ (pgoff_t)npages != l_npages)
npages = 0;
}
}
how &= ~FLUSH_NOCOMMIT;
spin_lock(&nfsi->req_lock);
do {
- wbc->pages_skipped = 0;
ret = nfs_wait_on_requests_locked(inode, idx_start, npages);
if (ret != 0)
continue;
- pages = nfs_scan_dirty(mapping, wbc, &head);
- if (pages != 0) {
- spin_unlock(&nfsi->req_lock);
- if (how & FLUSH_INVALIDATE) {
- nfs_cancel_dirty_list(&head);
- ret = pages;
- } else
- ret = nfs_flush_list(inode, &head, pages, how);
- spin_lock(&nfsi->req_lock);
- continue;
- }
- if (wbc->pages_skipped != 0)
- continue;
if (nocommit)
break;
pages = nfs_scan_commit(inode, &head, idx_start, npages);
- if (pages == 0) {
- if (wbc->pages_skipped != 0)
- continue;
+ if (pages == 0)
break;
- }
if (how & FLUSH_INVALIDATE) {
spin_unlock(&nfsi->req_lock);
nfs_cancel_commit_list(&head);
};
int ret;
- ret = generic_writepages(mapping, &wbc);
+ ret = nfs_writepages(mapping, &wbc);
if (ret < 0)
goto out;
ret = nfs_sync_mapping_wait(mapping, &wbc, 0);
};
int ret;
- if (!(how & FLUSH_NOWRITEPAGE)) {
- ret = generic_writepages(mapping, &wbc);
- if (ret < 0)
- goto out;
- }
+ ret = nfs_writepages(mapping, &wbc);
+ if (ret < 0)
+ goto out;
ret = nfs_sync_mapping_wait(mapping, &wbc, how);
if (ret >= 0)
return 0;
int ret;
BUG_ON(!PageLocked(page));
- if (!(how & FLUSH_NOWRITEPAGE) && clear_page_dirty_for_io(page)) {
+ if (clear_page_dirty_for_io(page)) {
ret = nfs_writepage_locked(page, &wbc);
if (ret < 0)
goto out;
int nfs_set_page_dirty(struct page *page)
{
+ struct address_space *mapping = page->mapping;
+ struct inode *inode;
+ spinlock_t *req_lock;
struct nfs_page *req;
+ int ret;
- req = nfs_page_find_request(page);
+ if (!mapping)
+ goto out_raced;
+ inode = mapping->host;
+ if (!inode)
+ goto out_raced;
+ req_lock = &NFS_I(inode)->req_lock;
+ spin_lock(req_lock);
+ req = nfs_page_find_request_locked(page);
if (req != NULL) {
/* Mark any existing write requests for flushing */
- set_bit(PG_NEED_FLUSH, &req->wb_flags);
+ ret = !test_and_set_bit(PG_NEED_FLUSH, &req->wb_flags);
+ spin_unlock(req_lock);
nfs_release_request(req);
+ return ret;
}
- return __set_page_dirty_nobuffers(page);
+ ret = __set_page_dirty_nobuffers(page);
+ spin_unlock(req_lock);
+ return ret;
+out_raced:
+ return !TestSetPageDirty(page);
}
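/*
 * Taking req_lock closes the race with nfs_inode_remove_request():
 * either we find the request and tag it PG_NEED_FLUSH before it is torn
 * down, or removal wins and moves the dirty state back onto the page
 * itself. out_raced handles pages already detached from their mapping,
 * falling back to a bare TestSetPageDirty().
 */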
if (nfs_commit_mempool == NULL)
return -ENOMEM;
+ /*
+ * NFS congestion size, scale with available memory.
+ *
+ * 64MB: 8192k
+ * 128MB: 11585k
+ * 256MB: 16384k
+ * 512MB: 23170k
+ * 1GB: 32768k
+ * 2GB: 46340k
+ * 4GB: 65536k
+ * 8GB: 92681k
+ * 16GB: 131072k
+ *
+ * This allows larger machines to have larger/more transfers.
+ * Limit the default to 256M
+ */
+ nfs_congestion_kb = (16*int_sqrt(totalram_pages)) << (PAGE_SHIFT-10);
+ if (nfs_congestion_kb > 256*1024)
+ nfs_congestion_kb = 256*1024;
+
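+/*
+ * Worked example for the 1GB row, assuming 4k pages: totalram_pages =
+ * 262144, int_sqrt(262144) = 512, and (16 * 512) << (12 - 10) =
+ * 8192 << 2 = 32768k.
+ */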
return 0;
}