From: OGAWA Hirofumi
Date: Fri, 23 Jun 2006 09:03:26 +0000 (-0700)
Subject: [PATCH] writeback: fix range handling
X-Git-Tag: v2.6.18-rc1~1081^2~186
X-Git-Url: https://err.no/cgi-bin/gitweb.cgi?a=commitdiff_plain;h=111ebb6e6f7bd7de6d722c5848e95621f43700d9;p=linux-2.6

[PATCH] writeback: fix range handling

When a writeback_control's `start' and `end' fields are used to
indicate a one-byte range starting at file offset zero, the required
values of .start=0, .end=0 mean that the ->writepages() implementation
has no way of telling that it is being asked to perform a range
request, because (start == 0 && end == 0) is currently overloaded to
mean "this is not a write-a-range request".

To make this sane, the patch changes how writeback_control describes
the range.  The caller of ->writepages() now always sets a range:
either range_start/range_end or range_cyclic.  If range_cyclic is
true, ->writepages() treats the range as cyclic (resuming from
mapping->writeback_index); otherwise it uses range_start and
range_end.

This patch:

- Adds LLONG_MAX, LLONG_MIN and ULLONG_MAX to include/linux/kernel.h.

  -1 is usually fine as an "unbounded" range_end (the type is long
  long), but code such as

	range_end += val;		/* range_end becomes "val - 1" */
	u64val = range_end >> bits;	/* u64val becomes ~0ULL or similar */

  would then misbehave.  LLONG_MAX is added to avoid such surprises
  and is used for range_end instead.

- Makes all callers of ->writepages() set range_start/range_end or
  range_cyclic.

- Fixes the updates of ->writeback_index, which were already a bit
  odd: if the scan starts at index 0 and is terminated by the
  nr_to_write check, recording that last index can reduce the chance
  of ever reaching the end of the file.  ->writeback_index is now
  updated only if range_cyclic is true or the whole file was scanned.

Signed-off-by: OGAWA Hirofumi
Cc: Nathan Scott
Cc: Anton Altaparmakov
Cc: Steven French
Cc: "Vladimir V. Saveliev"
Signed-off-by: Andrew Morton
Signed-off-by: Linus Torvalds
---
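For reference, here is a minimal sketch of how a ->writepages()
implementation is expected to read the new fields.  It mirrors the
cifs_writepages() and mpage_writepages() hunks below and is
illustrative only, not part of the patch; the function name
example_writepages is made up and the loop that actually writes the
pages is elided:

	static int example_writepages(struct address_space *mapping,
				      struct writeback_control *wbc)
	{
		pgoff_t index, end;	/* inclusive range of page indices */
		int range_whole = 0;

		if (wbc->range_cyclic) {
			/* cyclic: resume where the previous pass stopped */
			index = mapping->writeback_index;
			end = -1;
		} else {
			/* explicit byte range, converted to page indices */
			index = wbc->range_start >> PAGE_CACHE_SHIFT;
			end = wbc->range_end >> PAGE_CACHE_SHIFT;
			if (wbc->range_start == 0 && wbc->range_end == LLONG_MAX)
				range_whole = 1;
		}

		/*
		 * ... write back dirty pages from `index', stopping once
		 * !wbc->range_cyclic && page->index > end, or once
		 * wbc->nr_to_write is exhausted ...
		 */

		/* record the resume point only for cyclic or whole-file scans */
		if (wbc->range_cyclic || (range_whole && wbc->nr_to_write > 0))
			mapping->writeback_index = index;
		return 0;
	}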
diff --git a/fs/cifs/file.c b/fs/cifs/file.c
index e2b4ce1dad..487ea8b3ba 100644
--- a/fs/cifs/file.c
+++ b/fs/cifs/file.c
@@ -1079,9 +1079,9 @@ static int cifs_writepages(struct address_space *mapping,
 	unsigned int bytes_written;
 	struct cifs_sb_info *cifs_sb;
 	int done = 0;
-	pgoff_t end = -1;
+	pgoff_t end;
 	pgoff_t index;
-	int is_range = 0;
+	int range_whole = 0;
 	struct kvec iov[32];
 	int len;
 	int n_iov = 0;
@@ -1122,16 +1122,14 @@ static int cifs_writepages(struct address_space *mapping,
 	xid = GetXid();

 	pagevec_init(&pvec, 0);
-	if (wbc->sync_mode == WB_SYNC_NONE)
+	if (wbc->range_cyclic) {
 		index = mapping->writeback_index; /* Start from prev offset */
-	else {
-		index = 0;
-		scanned = 1;
-	}
-	if (wbc->start || wbc->end) {
-		index = wbc->start >> PAGE_CACHE_SHIFT;
-		end = wbc->end >> PAGE_CACHE_SHIFT;
-		is_range = 1;
+		end = -1;
+	} else {
+		index = wbc->range_start >> PAGE_CACHE_SHIFT;
+		end = wbc->range_end >> PAGE_CACHE_SHIFT;
+		if (wbc->range_start == 0 && wbc->range_end == LLONG_MAX)
+			range_whole = 1;
 		scanned = 1;
 	}
 retry:
@@ -1167,7 +1165,7 @@ retry:
 			break;
 		}

-		if (unlikely(is_range) && (page->index > end)) {
+		if (!wbc->range_cyclic && page->index > end) {
 			done = 1;
 			unlock_page(page);
 			break;
@@ -1271,7 +1269,7 @@ retry:
 		index = 0;
 		goto retry;
 	}
-	if (!is_range)
+	if (wbc->range_cyclic || (range_whole && wbc->nr_to_write > 0))
 		mapping->writeback_index = index;

 	FreeXid(xid);
diff --git a/fs/fs-writeback.c b/fs/fs-writeback.c
index f3fbe2d030..6db95cf3aa 100644
--- a/fs/fs-writeback.c
+++ b/fs/fs-writeback.c
@@ -461,6 +461,8 @@ void sync_inodes_sb(struct super_block *sb, int wait)
 {
 	struct writeback_control wbc = {
 		.sync_mode	= wait ? WB_SYNC_ALL : WB_SYNC_HOLD,
+		.range_start	= 0,
+		.range_end	= LLONG_MAX,
 	};
 	unsigned long nr_dirty = read_page_state(nr_dirty);
 	unsigned long nr_unstable = read_page_state(nr_unstable);
@@ -559,6 +561,8 @@ int write_inode_now(struct inode *inode, int sync)
 	struct writeback_control wbc = {
 		.nr_to_write = LONG_MAX,
 		.sync_mode = WB_SYNC_ALL,
+		.range_start = 0,
+		.range_end = LLONG_MAX,
 	};

 	if (!mapping_cap_writeback_dirty(inode->i_mapping))
diff --git a/fs/mpage.c b/fs/mpage.c
index 9bf2eb30e6..1e4598247d 100644
--- a/fs/mpage.c
+++ b/fs/mpage.c
@@ -707,9 +707,9 @@ mpage_writepages(struct address_space *mapping,
 	struct pagevec pvec;
 	int nr_pages;
 	pgoff_t index;
-	pgoff_t end = -1;		/* Inclusive */
+	pgoff_t end;			/* Inclusive */
 	int scanned = 0;
-	int is_range = 0;
+	int range_whole = 0;

 	if (wbc->nonblocking && bdi_write_congested(bdi)) {
 		wbc->encountered_congestion = 1;
@@ -721,16 +721,14 @@ mpage_writepages(struct address_space *mapping,
 		writepage = mapping->a_ops->writepage;

 	pagevec_init(&pvec, 0);
-	if (wbc->sync_mode == WB_SYNC_NONE) {
+	if (wbc->range_cyclic) {
 		index = mapping->writeback_index; /* Start from prev offset */
+		end = -1;
 	} else {
-		index = 0;			/* whole-file sweep */
-		scanned = 1;
-	}
-	if (wbc->start || wbc->end) {
-		index = wbc->start >> PAGE_CACHE_SHIFT;
-		end = wbc->end >> PAGE_CACHE_SHIFT;
-		is_range = 1;
+		index = wbc->range_start >> PAGE_CACHE_SHIFT;
+		end = wbc->range_end >> PAGE_CACHE_SHIFT;
+		if (wbc->range_start == 0 && wbc->range_end == LLONG_MAX)
+			range_whole = 1;
 		scanned = 1;
 	}
 retry:
@@ -759,7 +757,7 @@ retry:
 			continue;
 		}

-		if (unlikely(is_range) && page->index > end) {
+		if (!wbc->range_cyclic && page->index > end) {
 			done = 1;
 			unlock_page(page);
 			continue;
@@ -810,7 +808,7 @@ retry:
 		index = 0;
 		goto retry;
 	}
-	if (!is_range)
+	if (wbc->range_cyclic || (range_whole && wbc->nr_to_write > 0))
 		mapping->writeback_index = index;
 	if (bio)
 		mpage_bio_submit(WRITE, bio);
diff --git a/fs/sync.c b/fs/sync.c
index aab5ffe77e..955aef04da 100644
--- a/fs/sync.c
+++ b/fs/sync.c
@@ -100,7 +100,7 @@ asmlinkage long sys_sync_file_range(int fd, loff_t offset, loff_t nbytes,
 	}

 	if (nbytes == 0)
-		endbyte = -1;
+		endbyte = LLONG_MAX;
 	else
 		endbyte--;		/* inclusive */
diff --git a/include/linux/kernel.h b/include/linux/kernel.h
index f4fc576ed4..25fccd859f 100644
--- a/include/linux/kernel.h
+++ b/include/linux/kernel.h
@@ -24,6 +24,9 @@ extern const char linux_banner[];
 #define LONG_MAX	((long)(~0UL>>1))
 #define LONG_MIN	(-LONG_MAX - 1)
 #define ULONG_MAX	(~0UL)
+#define LLONG_MAX	((long long)(~0ULL>>1))
+#define LLONG_MIN	(-LLONG_MAX - 1)
+#define ULLONG_MAX	(~0ULL)

 #define STACK_MAGIC	0xdeadbeef
diff --git a/include/linux/writeback.h b/include/linux/writeback.h
index 56f92fcbe9..9e38b566d0 100644
--- a/include/linux/writeback.h
+++ b/include/linux/writeback.h
@@ -50,14 +50,15 @@ struct writeback_control {
 	 * a hint that the filesystem need only write out the pages inside that
 	 * byterange.  The byte at `end' is included in the writeout request.
 	 */
-	loff_t start;
-	loff_t end;
+	loff_t range_start;
+	loff_t range_end;

 	unsigned nonblocking:1;		/* Don't get stuck on request queues */
 	unsigned encountered_congestion:1; /* An output: a queue is full */
 	unsigned for_kupdate:1;		/* A kupdate writeback */
 	unsigned for_reclaim:1;		/* Invoked from the page allocator */
 	unsigned for_writepages:1;	/* This is a writepages() call */
+	unsigned range_cyclic:1;	/* range_start is cyclic */
 };

 /*
diff --git a/mm/filemap.c b/mm/filemap.c
index fd57442186..3342067ca4 100644
--- a/mm/filemap.c
+++ b/mm/filemap.c
@@ -190,8 +190,8 @@ int __filemap_fdatawrite_range(struct address_space *mapping, loff_t start,
 	struct writeback_control wbc = {
 		.sync_mode = sync_mode,
 		.nr_to_write = mapping->nrpages * 2,
-		.start = start,
-		.end = end,
+		.range_start = start,
+		.range_end = end,
 	};

 	if (!mapping_cap_writeback_dirty(mapping))
@@ -204,7 +204,7 @@ int __filemap_fdatawrite_range(struct address_space *mapping, loff_t start,
 static inline int __filemap_fdatawrite(struct address_space *mapping,
 	int sync_mode)
 {
-	return __filemap_fdatawrite_range(mapping, 0, 0, sync_mode);
+	return __filemap_fdatawrite_range(mapping, 0, LLONG_MAX, sync_mode);
 }

 int filemap_fdatawrite(struct address_space *mapping)
diff --git a/mm/page-writeback.c b/mm/page-writeback.c
index 75d7f48b79..8ccf6f1b14 100644
--- a/mm/page-writeback.c
+++ b/mm/page-writeback.c
@@ -204,6 +204,7 @@ static void balance_dirty_pages(struct address_space *mapping)
 			.sync_mode	= WB_SYNC_NONE,
 			.older_than_this = NULL,
 			.nr_to_write	= write_chunk,
+			.range_cyclic	= 1,
 		};

 		get_dirty_limits(&wbs, &background_thresh,
@@ -331,6 +332,7 @@ static void background_writeout(unsigned long _min_pages)
 		.older_than_this = NULL,
 		.nr_to_write	= 0,
 		.nonblocking	= 1,
+		.range_cyclic	= 1,
 	};

 	for ( ; ; ) {
@@ -407,6 +409,7 @@ static void wb_kupdate(unsigned long arg)
 		.nr_to_write	= 0,
 		.nonblocking	= 1,
 		.for_kupdate	= 1,
+		.range_cyclic	= 1,
 	};

 	sync_supers();
diff --git a/mm/vmscan.c b/mm/vmscan.c
index 46be8a0228..bc5d4f4303 100644
--- a/mm/vmscan.c
+++ b/mm/vmscan.c
@@ -339,6 +339,8 @@ pageout_t pageout(struct page *page, struct address_space *mapping)
 		struct writeback_control wbc = {
 			.sync_mode = WB_SYNC_NONE,
 			.nr_to_write = SWAP_CLUSTER_MAX,
+			.range_start = 0,
+			.range_end = LLONG_MAX,
 			.nonblocking = 1,
 			.for_reclaim = 1,
 		};
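The caller side, again as an illustrative sketch rather than code from
this patch: the two ways a struct writeback_control is now expected to
be filled in before calling ->writepages(), following the
fs-writeback.c and page-writeback.c hunks above (the variable names
and the cyclic nr_to_write value are arbitrary):

	/* explicit range covering the whole file */
	struct writeback_control wbc_range = {
		.sync_mode	= WB_SYNC_ALL,
		.nr_to_write	= LONG_MAX,
		.range_start	= 0,
		.range_end	= LLONG_MAX,	/* not -1; see the LLONG_MAX note above */
	};

	/* cyclic writeback, resuming from mapping->writeback_index */
	struct writeback_control wbc_cyclic = {
		.sync_mode	= WB_SYNC_NONE,
		.nr_to_write	= 1024,		/* arbitrary write chunk */
		.range_cyclic	= 1,
	};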