/*
 *      linux/mm/filemap.c
 *
 * Copyright (C) 1994-1999  Linus Torvalds
 */

/*
 * This file handles the generic file mmap semantics used by
 * most "normal" filesystems (but you don't /have/ to use this:
 * the NFS filesystem used to do this differently, for example)
 */
#include <linux/config.h>
#include <linux/module.h>
#include <linux/slab.h>
#include <linux/compiler.h>
#include <linux/fs.h>
#include <linux/aio.h>
#include <linux/capability.h>
#include <linux/kernel_stat.h>
#include <linux/mm.h>
#include <linux/swap.h>
#include <linux/mman.h>
#include <linux/pagemap.h>
#include <linux/file.h>
#include <linux/uio.h>
#include <linux/hash.h>
#include <linux/writeback.h>
#include <linux/pagevec.h>
#include <linux/blkdev.h>
#include <linux/security.h>
#include <linux/syscalls.h>
#include <linux/cpuset.h>
#include "filemap.h"
#include "internal.h"

/*
 * FIXME: remove all knowledge of the buffer layer from the core VM
 */
#include <linux/buffer_head.h> /* for generic_osync_inode */

#include <asm/uaccess.h>
#include <asm/mman.h>

static ssize_t
generic_file_direct_IO(int rw, struct kiocb *iocb, const struct iovec *iov,
        loff_t offset, unsigned long nr_segs);

/*
 * Shared mappings implemented 30.11.1994. It's not fully working yet,
 * though.
 *
 * Shared mappings now work. 15.8.1995  Bruno.
 *
 * finished 'unifying' the page and buffer cache and SMP-threaded the
 * page-cache, 21.05.1999, Ingo Molnar <mingo@redhat.com>
 *
 * SMP-threaded pagemap-LRU 1999, Andrea Arcangeli <andrea@suse.de>
 */

/*
 * Lock ordering:
 *
 *  ->i_mmap_lock               (vmtruncate)
 *    ->private_lock            (__free_pte->__set_page_dirty_buffers)
 *      ->swap_lock             (exclusive_swap_page, others)
 *        ->mapping->tree_lock
 *
 *  ->i_mutex
 *    ->i_mmap_lock             (truncate->unmap_mapping_range)
 *
 *  ->mmap_sem
 *    ->i_mmap_lock
 *      ->page_table_lock or pte_lock   (various, mainly in memory.c)
 *        ->mapping->tree_lock  (arch-dependent flush_dcache_mmap_lock)
 *
 *  ->mmap_sem
 *    ->lock_page               (access_process_vm)
 *
 *  ->mmap_sem
 *    ->i_mutex                 (msync)
 *
 *  ->i_mutex
 *    ->i_alloc_sem             (various)
 *
 *  ->inode_lock
 *    ->sb_lock                 (fs/fs-writeback.c)
 *    ->mapping->tree_lock      (__sync_single_inode)
 *
 *  ->i_mmap_lock
 *    ->anon_vma.lock           (vma_adjust)
 *
 *  ->anon_vma.lock
 *    ->page_table_lock or pte_lock     (anon_vma_prepare and various)
 *
 *  ->page_table_lock or pte_lock
 *    ->swap_lock               (try_to_unmap_one)
 *    ->private_lock            (try_to_unmap_one)
 *    ->tree_lock               (try_to_unmap_one)
 *    ->zone.lru_lock           (follow_page->mark_page_accessed)
 *    ->zone.lru_lock           (check_pte_range->isolate_lru_page)
 *    ->private_lock            (page_remove_rmap->set_page_dirty)
 *    ->tree_lock               (page_remove_rmap->set_page_dirty)
 *    ->inode_lock              (page_remove_rmap->set_page_dirty)
 *    ->inode_lock              (zap_pte_range->set_page_dirty)
 *    ->private_lock            (zap_pte_range->__set_page_dirty_buffers)
 *
 *  ->task->proc_lock
 *    ->dcache_lock             (proc_pid_lookup)
 */

/*
 * Remove a page from the page cache and free it. Caller has to make
 * sure the page is locked and that nobody else uses it - or that usage
 * is safe.  The caller must hold a write_lock on the mapping's tree_lock.
 */
void __remove_from_page_cache(struct page *page)
{
        struct address_space *mapping = page->mapping;

        radix_tree_delete(&mapping->page_tree, page->index);
        page->mapping = NULL;
        mapping->nrpages--;
        pagecache_acct(-1);
}

void remove_from_page_cache(struct page *page)
{
        struct address_space *mapping = page->mapping;

        BUG_ON(!PageLocked(page));

        write_lock_irq(&mapping->tree_lock);
        __remove_from_page_cache(page);
        write_unlock_irq(&mapping->tree_lock);
}

static int sync_page(void *word)
{
        struct address_space *mapping;
        struct page *page;

        page = container_of((unsigned long *)word, struct page, flags);

        /*
         * page_mapping() is being called without PG_locked held.
         * Some knowledge of the state and use of the page is used to
         * reduce the requirements down to a memory barrier.
         * The danger here is of a stale page_mapping() return value
         * indicating a struct address_space different from the one it's
         * associated with when it is associated with one.
         * After smp_mb(), it's either the correct page_mapping() for
         * the page, or an old page_mapping() and the page's own
         * page_mapping() has gone NULL.
         * The ->sync_page() address_space operation must tolerate
         * page_mapping() going NULL. By an amazing coincidence,
         * this comes about because none of the users of the page
         * in the ->sync_page() methods make essential use of the
         * page_mapping(), merely passing the page down to the backing
         * device's unplug functions when it's non-NULL, which in turn
         * ignore it for all cases but swap, where only page_private(page) is
         * of interest. When page_mapping() does go NULL, the entire
         * call stack gracefully ignores the page and returns.
         * -- wli
         */
        smp_mb();
        mapping = page_mapping(page);
        if (mapping && mapping->a_ops && mapping->a_ops->sync_page)
                mapping->a_ops->sync_page(page);
        io_schedule();
        return 0;
}

/**
 * filemap_fdatawrite_range - start writeback against all of a mapping's
 * dirty pages that lie within the byte offsets <start, end>
 * @mapping:    address space structure to write
 * @start:      offset in bytes where the range starts
 * @end:        offset in bytes where the range ends
 * @sync_mode:  enable synchronous operation
 *
 * If sync_mode is WB_SYNC_ALL then this is a "data integrity" operation, as
 * opposed to a regular memory-cleansing writeback.  The difference between
 * these two operations is that if a dirty page/buffer is encountered, it must
 * be waited upon, and not just skipped over.
 */
static int __filemap_fdatawrite_range(struct address_space *mapping,
        loff_t start, loff_t end, int sync_mode)
{
        int ret;
        struct writeback_control wbc = {
                .sync_mode = sync_mode,
                .nr_to_write = mapping->nrpages * 2,
                .start = start,
                .end = end,
        };

        if (!mapping_cap_writeback_dirty(mapping))
                return 0;

        ret = do_writepages(mapping, &wbc);
        return ret;
}

static inline int __filemap_fdatawrite(struct address_space *mapping,
        int sync_mode)
{
        return __filemap_fdatawrite_range(mapping, 0, 0, sync_mode);
}

int filemap_fdatawrite(struct address_space *mapping)
{
        return __filemap_fdatawrite(mapping, WB_SYNC_ALL);
}
EXPORT_SYMBOL(filemap_fdatawrite);

static int filemap_fdatawrite_range(struct address_space *mapping,
        loff_t start, loff_t end)
{
        return __filemap_fdatawrite_range(mapping, start, end, WB_SYNC_ALL);
}

/*
 * This is a mostly non-blocking flush.  Not suitable for data-integrity
 * purposes - I/O may not be started against all dirty pages.
 */
int filemap_flush(struct address_space *mapping)
{
        return __filemap_fdatawrite(mapping, WB_SYNC_NONE);
}
EXPORT_SYMBOL(filemap_flush);
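
/*
 * Illustration (editor's sketch, not part of the original file): the
 * practical difference between the two writeback modes above, as a
 * hypothetical caller might exercise it.  The helper name is invented.
 */
#if 0
static int example_start_writeout(struct address_space *mapping,
                                  int data_integrity)
{
        if (data_integrity)
                /* WB_SYNC_ALL: every dirty page must be written out */
                return filemap_fdatawrite(mapping);
        /* WB_SYNC_NONE: best effort, busy pages may be skipped */
        return filemap_flush(mapping);
}
#endif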

/*
 * Wait for writeback to complete against pages indexed by start->end
 * inclusive
 */
static int wait_on_page_writeback_range(struct address_space *mapping,
                                pgoff_t start, pgoff_t end)
{
        struct pagevec pvec;
        int nr_pages;
        int ret = 0;
        pgoff_t index;

        if (end < start)
                return 0;

        pagevec_init(&pvec, 0);
        index = start;
        while ((index <= end) &&
                        (nr_pages = pagevec_lookup_tag(&pvec, mapping, &index,
                        PAGECACHE_TAG_WRITEBACK,
                        min(end - index, (pgoff_t)PAGEVEC_SIZE-1) + 1)) != 0) {
                unsigned i;

                for (i = 0; i < nr_pages; i++) {
                        struct page *page = pvec.pages[i];

                        /* until radix tree lookup accepts end_index */
                        if (page->index > end)
                                continue;

                        wait_on_page_writeback(page);
                        if (PageError(page))
                                ret = -EIO;
                }
                pagevec_release(&pvec);
                cond_resched();
        }

        /* Check for outstanding write errors */
        if (test_and_clear_bit(AS_ENOSPC, &mapping->flags))
                ret = -ENOSPC;
        if (test_and_clear_bit(AS_EIO, &mapping->flags))
                ret = -EIO;

        return ret;
}

/*
 * Write and wait upon all the pages in the passed range.  This is a "data
 * integrity" operation.  It waits upon in-flight writeout before starting and
 * waiting upon new writeout.  If there was an IO error, return it.
 *
 * We need to re-take i_mutex during the generic_osync_inode list walk because
 * it is otherwise livelockable.
 */
int sync_page_range(struct inode *inode, struct address_space *mapping,
                        loff_t pos, loff_t count)
{
        pgoff_t start = pos >> PAGE_CACHE_SHIFT;
        pgoff_t end = (pos + count - 1) >> PAGE_CACHE_SHIFT;
        int ret;

        if (!mapping_cap_writeback_dirty(mapping) || !count)
                return 0;
        ret = filemap_fdatawrite_range(mapping, pos, pos + count - 1);
        if (ret == 0) {
                mutex_lock(&inode->i_mutex);
                ret = generic_osync_inode(inode, mapping, OSYNC_METADATA);
                mutex_unlock(&inode->i_mutex);
        }
        if (ret == 0)
                ret = wait_on_page_writeback_range(mapping, start, end);
        return ret;
}
EXPORT_SYMBOL(sync_page_range);
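
/*
 * Illustration (editor's sketch, not part of the original file): how a
 * hypothetical O_SYNC write path might use sync_page_range() after
 * copying data into the page cache.  The function and its locals are
 * invented; only the sync_page_range() call reflects the code above.
 */
#if 0
static ssize_t example_osync_write(struct file *file, loff_t pos,
                                   ssize_t written)
{
        struct address_space *mapping = file->f_mapping;
        struct inode *inode = mapping->host;
        int err;

        if (written > 0 && (file->f_flags & O_SYNC)) {
                /* write back and wait upon exactly the bytes just written */
                err = sync_page_range(inode, mapping, pos, written);
                if (err < 0)
                        written = err;
        }
        return written;
}
#endif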

/*
 * Note: Holding i_mutex across sync_page_range_nolock is not a good idea
 * as it forces O_SYNC writers to different parts of the same file
 * to be serialised right until io completion.
 */
int sync_page_range_nolock(struct inode *inode, struct address_space *mapping,
                           loff_t pos, loff_t count)
{
        pgoff_t start = pos >> PAGE_CACHE_SHIFT;
        pgoff_t end = (pos + count - 1) >> PAGE_CACHE_SHIFT;
        int ret;

        if (!mapping_cap_writeback_dirty(mapping) || !count)
                return 0;
        ret = filemap_fdatawrite_range(mapping, pos, pos + count - 1);
        if (ret == 0)
                ret = generic_osync_inode(inode, mapping, OSYNC_METADATA);
        if (ret == 0)
                ret = wait_on_page_writeback_range(mapping, start, end);
        return ret;
}
EXPORT_SYMBOL(sync_page_range_nolock);

/**
 * filemap_fdatawait - walk the list of under-writeback pages of the given
 *     address space and wait for all of them.
 *
 * @mapping: address space structure to wait for
 */
int filemap_fdatawait(struct address_space *mapping)
{
        loff_t i_size = i_size_read(mapping->host);

        if (i_size == 0)
                return 0;

        return wait_on_page_writeback_range(mapping, 0,
                                (i_size - 1) >> PAGE_CACHE_SHIFT);
}
EXPORT_SYMBOL(filemap_fdatawait);

int filemap_write_and_wait(struct address_space *mapping)
{
        int err = 0;

        if (mapping->nrpages) {
                err = filemap_fdatawrite(mapping);
                /*
                 * Even if the above returned an error, the pages may have
                 * been partially written (e.g. -ENOSPC), so we wait for
                 * writeback anyway.  But -EIO is a special case: it may
                 * indicate the worst (e.g. a bug) has happened, so we
                 * avoid waiting for it.
                 */
                if (err != -EIO) {
                        int err2 = filemap_fdatawait(mapping);
                        if (!err)
                                err = err2;
                }
        }
        return err;
}
EXPORT_SYMBOL(filemap_write_and_wait);

int filemap_write_and_wait_range(struct address_space *mapping,
                                 loff_t lstart, loff_t lend)
{
        int err = 0;

        if (mapping->nrpages) {
                err = __filemap_fdatawrite_range(mapping, lstart, lend,
                                                 WB_SYNC_ALL);
                /* See comment of filemap_write_and_wait() */
                if (err != -EIO) {
                        int err2 = wait_on_page_writeback_range(mapping,
                                                lstart >> PAGE_CACHE_SHIFT,
                                                lend >> PAGE_CACHE_SHIFT);
                        if (!err)
                                err = err2;
                }
        }
        return err;
}

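/*
 * Illustration (editor's sketch, not part of the original file): minimal
 * fsync-style helpers built on the two functions above.  The range
 * variant restricts the work to the byte span a caller cares about;
 * both helper names are invented.
 */
#if 0
static int example_sync_whole_file(struct address_space *mapping)
{
        return filemap_write_and_wait(mapping);
}

static int example_sync_one_page(struct address_space *mapping, loff_t pos)
{
        /* lstart/lend are inclusive byte offsets */
        return filemap_write_and_wait_range(mapping, pos,
                                        pos + PAGE_CACHE_SIZE - 1);
}
#endif
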
/*
 * This function is used to add newly allocated pagecache pages:
 * the page is new, so we can just run SetPageLocked() against it.
 * The other page state flags were set by rmqueue().
 *
 * This function does not add the page to the LRU.  The caller must do that.
 */
int add_to_page_cache(struct page *page, struct address_space *mapping,
                pgoff_t offset, gfp_t gfp_mask)
{
        int error = radix_tree_preload(gfp_mask & ~__GFP_HIGHMEM);

        if (error == 0) {
                write_lock_irq(&mapping->tree_lock);
                error = radix_tree_insert(&mapping->page_tree, offset, page);
                if (!error) {
                        page_cache_get(page);
                        SetPageLocked(page);
                        page->mapping = mapping;
                        page->index = offset;
                        mapping->nrpages++;
                        pagecache_acct(1);
                }
                write_unlock_irq(&mapping->tree_lock);
                radix_tree_preload_end();
        }
        return error;
}

EXPORT_SYMBOL(add_to_page_cache);

int add_to_page_cache_lru(struct page *page, struct address_space *mapping,
                                pgoff_t offset, gfp_t gfp_mask)
{
        int ret = add_to_page_cache(page, mapping, offset, gfp_mask);
        if (ret == 0)
                lru_cache_add(page);
        return ret;
}
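
/*
 * Illustration (editor's sketch, not part of the original file): the
 * allocate-then-insert pattern the two functions above support.  An
 * -EEXIST return means another thread instantiated the page first,
 * which callers normally treat as "retry the lookup".  The helper name
 * is invented.
 */
#if 0
static struct page *example_new_cache_page(struct address_space *mapping,
                                           pgoff_t index)
{
        struct page *page = page_cache_alloc(mapping);

        if (!page)
                return NULL;
        if (add_to_page_cache_lru(page, mapping, index, GFP_KERNEL)) {
                page_cache_release(page);       /* raced or OOM */
                return NULL;
        }
        return page;    /* locked, referenced and on the LRU */
}
#endif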

#ifdef CONFIG_NUMA
struct page *page_cache_alloc(struct address_space *x)
{
        if (cpuset_do_page_mem_spread()) {
                int n = cpuset_mem_spread_node();
                return alloc_pages_node(n, mapping_gfp_mask(x), 0);
        }
        return alloc_pages(mapping_gfp_mask(x), 0);
}
EXPORT_SYMBOL(page_cache_alloc);

struct page *page_cache_alloc_cold(struct address_space *x)
{
        if (cpuset_do_page_mem_spread()) {
                int n = cpuset_mem_spread_node();
                return alloc_pages_node(n, mapping_gfp_mask(x)|__GFP_COLD, 0);
        }
        return alloc_pages(mapping_gfp_mask(x)|__GFP_COLD, 0);
}
EXPORT_SYMBOL(page_cache_alloc_cold);
#endif

/*
 * In order to wait for pages to become available there must be
 * waitqueues associated with pages. By using a hash table of
 * waitqueues where the bucket discipline is to maintain all
 * waiters on the same queue and wake all when any of the pages
 * become available, and for the woken contexts to check to be
 * sure the appropriate page became available, this saves space
 * at a cost of "thundering herd" phenomena during rare hash
 * collisions.
 */
static wait_queue_head_t *page_waitqueue(struct page *page)
{
        const struct zone *zone = page_zone(page);

        return &zone->wait_table[hash_ptr(page, zone->wait_table_bits)];
}

static inline void wake_up_page(struct page *page, int bit)
{
        __wake_up_bit(page_waitqueue(page), &page->flags, bit);
}

void fastcall wait_on_page_bit(struct page *page, int bit_nr)
{
        DEFINE_WAIT_BIT(wait, &page->flags, bit_nr);

        if (test_bit(bit_nr, &page->flags))
                __wait_on_bit(page_waitqueue(page), &wait, sync_page,
                                                        TASK_UNINTERRUPTIBLE);
}
EXPORT_SYMBOL(wait_on_page_bit);

/**
 * unlock_page() - unlock a locked page
 *
 * @page: the page
 *
 * Unlocks the page and wakes up sleepers in ___wait_on_page_locked().
 * Also wakes sleepers in wait_on_page_writeback() because the wakeup
 * mechanism between PageLocked pages and PageWriteback pages is shared.
 * But that's OK - sleepers in wait_on_page_writeback() just go back to sleep.
 *
 * The first mb is necessary to safely close the critical section opened by the
 * TestSetPageLocked(), the second mb is necessary to enforce ordering between
 * the clear_bit and the read of the waitqueue (to avoid SMP races with a
 * parallel wait_on_page_locked()).
 */
void fastcall unlock_page(struct page *page)
{
        smp_mb__before_clear_bit();
        if (!TestClearPageLocked(page))
                BUG();
        smp_mb__after_clear_bit();
        wake_up_page(page, PG_locked);
}
EXPORT_SYMBOL(unlock_page);
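
/*
 * Illustration (editor's sketch, not part of the original file): the
 * usual critical section that the lock/unlock protocol above protects.
 * lock_page() may sleep in __lock_page(); unlock_page() wakes any
 * waiters queued on the hashed waitqueue.  The helper name is invented.
 */
#if 0
static void example_dirty_page(struct page *page)
{
        lock_page(page);
        if (page->mapping)              /* still in the page cache? */
                set_page_dirty(page);
        unlock_page(page);
}
#endif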

/*
 * End writeback against a page.
 */
void end_page_writeback(struct page *page)
{
        if (!TestClearPageReclaim(page) || rotate_reclaimable_page(page)) {
                if (!test_clear_page_writeback(page))
                        BUG();
        }
        smp_mb__after_clear_bit();
        wake_up_page(page, PG_writeback);
}
EXPORT_SYMBOL(end_page_writeback);

/*
 * Get a lock on the page, assuming we need to sleep to get it.
 *
 * Ugly: running sync_page() in state TASK_UNINTERRUPTIBLE is scary.  If some
 * random driver's requestfn sets TASK_RUNNING, we could busywait.  However
 * chances are that on the second loop, the block layer's plug list is empty,
 * so sync_page() will then return in state TASK_UNINTERRUPTIBLE.
 */
void fastcall __lock_page(struct page *page)
{
        DEFINE_WAIT_BIT(wait, &page->flags, PG_locked);

        __wait_on_bit_lock(page_waitqueue(page), &wait, sync_page,
                                                        TASK_UNINTERRUPTIBLE);
}
EXPORT_SYMBOL(__lock_page);

/*
 * a rather lightweight function, finding and getting a reference to a
 * hashed page atomically.
 */
struct page * find_get_page(struct address_space *mapping, unsigned long offset)
{
        struct page *page;

        read_lock_irq(&mapping->tree_lock);
        page = radix_tree_lookup(&mapping->page_tree, offset);
        if (page)
                page_cache_get(page);
        read_unlock_irq(&mapping->tree_lock);
        return page;
}

EXPORT_SYMBOL(find_get_page);

/*
 * Same as above, but trylock it instead of incrementing the count.
 */
struct page *find_trylock_page(struct address_space *mapping, unsigned long offset)
{
        struct page *page;

        read_lock_irq(&mapping->tree_lock);
        page = radix_tree_lookup(&mapping->page_tree, offset);
        if (page && TestSetPageLocked(page))
                page = NULL;
        read_unlock_irq(&mapping->tree_lock);
        return page;
}

EXPORT_SYMBOL(find_trylock_page);

/**
 * find_lock_page - locate, pin and lock a pagecache page
 *
 * @mapping: the address_space to search
 * @offset: the page index
 *
 * Locates the desired pagecache page, locks it, increments its reference
 * count and returns its address.
 *
 * Returns NULL if the page was not present. find_lock_page() may sleep.
 */
struct page *find_lock_page(struct address_space *mapping,
                                unsigned long offset)
{
        struct page *page;

        read_lock_irq(&mapping->tree_lock);
repeat:
        page = radix_tree_lookup(&mapping->page_tree, offset);
        if (page) {
                page_cache_get(page);
                if (TestSetPageLocked(page)) {
                        read_unlock_irq(&mapping->tree_lock);
                        __lock_page(page);
                        read_lock_irq(&mapping->tree_lock);

                        /* Has the page been truncated while we slept? */
                        if (unlikely(page->mapping != mapping ||
                                     page->index != offset)) {
                                unlock_page(page);
                                page_cache_release(page);
                                goto repeat;
                        }
                }
        }
        read_unlock_irq(&mapping->tree_lock);
        return page;
}

EXPORT_SYMBOL(find_lock_page);

/**
 * find_or_create_page - locate or add a pagecache page
 *
 * @mapping: the page's address_space
 * @index: the page's index into the mapping
 * @gfp_mask: page allocation mode
 *
 * Locates a page in the pagecache.  If the page is not present, a new page
 * is allocated using @gfp_mask and is added to the pagecache and to the VM's
 * LRU list.  The returned page is locked and has its reference count
 * incremented.
 *
 * find_or_create_page() may sleep, even if @gfp_mask specifies an atomic
 * allocation!
 *
 * find_or_create_page() returns the desired page's address, or NULL on
 * memory exhaustion.
 */
struct page *find_or_create_page(struct address_space *mapping,
                unsigned long index, gfp_t gfp_mask)
{
        struct page *page, *cached_page = NULL;
        int err;
repeat:
        page = find_lock_page(mapping, index);
        if (!page) {
                if (!cached_page) {
                        cached_page = alloc_page(gfp_mask);
                        if (!cached_page)
                                return NULL;
                }
                err = add_to_page_cache_lru(cached_page, mapping,
                                        index, gfp_mask);
                if (!err) {
                        page = cached_page;
                        cached_page = NULL;
                } else if (err == -EEXIST)
                        goto repeat;
        }
        if (cached_page)
                page_cache_release(cached_page);
        return page;
}

EXPORT_SYMBOL(find_or_create_page);
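
/*
 * Illustration (editor's sketch, not part of the original file): a
 * grab_cache_page()-style caller of the function above.  The page comes
 * back locked with an elevated refcount, so the caller owns both the
 * lock and the reference.  clear_highpage() is assumed to come from
 * <linux/highmem.h>; the helper name is invented.
 */
#if 0
static int example_zero_page(struct address_space *mapping, pgoff_t index)
{
        struct page *page = find_or_create_page(mapping, index,
                                        mapping_gfp_mask(mapping));

        if (!page)
                return -ENOMEM;
        clear_highpage(page);           /* page is locked here */
        unlock_page(page);
        page_cache_release(page);
        return 0;
}
#endif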

/**
 * find_get_pages - gang pagecache lookup
 * @mapping:    The address_space to search
 * @start:      The starting page index
 * @nr_pages:   The maximum number of pages
 * @pages:      Where the resulting pages are placed
 *
 * find_get_pages() will search for and return a group of up to
 * @nr_pages pages in the mapping.  The pages are placed at @pages.
 * find_get_pages() takes a reference against the returned pages.
 *
 * The search returns a group of mapping-contiguous pages with ascending
 * indexes.  There may be holes in the indices due to not-present pages.
 *
 * find_get_pages() returns the number of pages which were found.
 */
unsigned find_get_pages(struct address_space *mapping, pgoff_t start,
                            unsigned int nr_pages, struct page **pages)
{
        unsigned int i;
        unsigned int ret;

        read_lock_irq(&mapping->tree_lock);
        ret = radix_tree_gang_lookup(&mapping->page_tree,
                                (void **)pages, start, nr_pages);
        for (i = 0; i < ret; i++)
                page_cache_get(pages[i]);
        read_unlock_irq(&mapping->tree_lock);
        return ret;
}

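/*
 * Illustration (editor's sketch, not part of the original file): a
 * typical gang-lookup loop over find_get_pages().  Each returned page
 * carries a reference that the caller must drop; the next index is
 * taken from the last page because the run may contain holes.  The
 * helper name is invented.
 */
#if 0
static void example_walk_mapping(struct address_space *mapping)
{
        struct page *pages[16];
        pgoff_t index = 0;
        unsigned int i, nr;

        while ((nr = find_get_pages(mapping, index, 16, pages)) != 0) {
                index = pages[nr - 1]->index + 1;
                for (i = 0; i < nr; i++)
                        page_cache_release(pages[i]);
        }
}
#endif
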
/*
 * Like find_get_pages, except we only return pages which are tagged with
 * `tag'.   We update *index to index the next page for the traversal.
 */
unsigned find_get_pages_tag(struct address_space *mapping, pgoff_t *index,
                        int tag, unsigned int nr_pages, struct page **pages)
{
        unsigned int i;
        unsigned int ret;

        read_lock_irq(&mapping->tree_lock);
        ret = radix_tree_gang_lookup_tag(&mapping->page_tree,
                                (void **)pages, *index, nr_pages, tag);
        for (i = 0; i < ret; i++)
                page_cache_get(pages[i]);
        if (ret)
                *index = pages[ret - 1]->index + 1;
        read_unlock_irq(&mapping->tree_lock);
        return ret;
}

/*
 * Same as grab_cache_page, but do not wait if the page is unavailable.
 * This is intended for speculative data generators, where the data can
 * be regenerated if the page couldn't be grabbed.  This routine should
 * be safe to call while holding the lock for another page.
 *
 * Clear __GFP_FS when allocating the page to avoid recursion into the fs
 * and deadlock against the caller's locked page.
 */
struct page *
grab_cache_page_nowait(struct address_space *mapping, unsigned long index)
{
        struct page *page = find_get_page(mapping, index);
        gfp_t gfp_mask;

        if (page) {
                if (!TestSetPageLocked(page))
                        return page;
                page_cache_release(page);
                return NULL;
        }
        gfp_mask = mapping_gfp_mask(mapping) & ~__GFP_FS;
        page = alloc_pages(gfp_mask, 0);
        if (page && add_to_page_cache_lru(page, mapping, index, gfp_mask)) {
                page_cache_release(page);
                page = NULL;
        }
        return page;
}

EXPORT_SYMBOL(grab_cache_page_nowait);
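
/*
 * Illustration (editor's sketch, not part of the original file): the
 * "speculative data generator" pattern described in the comment above.
 * If the page cannot be grabbed without sleeping, the caller just
 * regenerates the data later instead of blocking.  The helper name is
 * invented.
 */
#if 0
static void example_opportunistic_fill(struct address_space *mapping,
                                       unsigned long index)
{
        struct page *page = grab_cache_page_nowait(mapping, index);

        if (!page)
                return;                 /* busy or OOM: try again later */
        /* ... fill the locked page with regenerable data here ... */
        SetPageUptodate(page);
        unlock_page(page);
        page_cache_release(page);
}
#endif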

/*
 * This is a generic file read routine, and uses the
 * mapping->a_ops->readpage() function for the actual low-level
 * stuff.
 *
 * This is really ugly. But the goto's actually try to clarify some
 * of the logic when it comes to error handling etc.
 *
 * Note the struct file* is only passed for the use of readpage.  It may be
 * NULL.
 */
void do_generic_mapping_read(struct address_space *mapping,
                             struct file_ra_state *_ra,
                             struct file *filp,
                             loff_t *ppos,
                             read_descriptor_t *desc,
                             read_actor_t actor)
{
        struct inode *inode = mapping->host;
        unsigned long index;
        unsigned long end_index;
        unsigned long offset;
        unsigned long last_index;
        unsigned long next_index;
        unsigned long prev_index;
        loff_t isize;
        struct page *cached_page;
        int error;
        struct file_ra_state ra = *_ra;

        cached_page = NULL;
        index = *ppos >> PAGE_CACHE_SHIFT;
        next_index = index;
        prev_index = ra.prev_page;
        last_index = (*ppos + desc->count + PAGE_CACHE_SIZE-1) >> PAGE_CACHE_SHIFT;
        offset = *ppos & ~PAGE_CACHE_MASK;

        isize = i_size_read(inode);
        if (!isize)
                goto out;

        end_index = (isize - 1) >> PAGE_CACHE_SHIFT;
        for (;;) {
                struct page *page;
                unsigned long nr, ret;

                /* nr is the maximum number of bytes to copy from this page */
                nr = PAGE_CACHE_SIZE;
                if (index >= end_index) {
                        if (index > end_index)
                                goto out;
                        nr = ((isize - 1) & ~PAGE_CACHE_MASK) + 1;
                        if (nr <= offset) {
                                goto out;
                        }
                }
                nr = nr - offset;

                cond_resched();
                if (index == next_index)
                        next_index = page_cache_readahead(mapping, &ra, filp,
                                        index, last_index - index);

find_page:
                page = find_get_page(mapping, index);
                if (unlikely(page == NULL)) {
                        handle_ra_miss(mapping, &ra, index);
                        goto no_cached_page;
                }
                if (!PageUptodate(page))
                        goto page_not_up_to_date;
page_ok:

                /* If users can be writing to this page using arbitrary
                 * virtual addresses, take care about potential aliasing
                 * before reading the page on the kernel side.
                 */
                if (mapping_writably_mapped(mapping))
                        flush_dcache_page(page);

                /*
                 * When (part of) the same page is read multiple times
                 * in succession, only mark it as accessed the first time.
                 */
                if (prev_index != index)
                        mark_page_accessed(page);
                prev_index = index;

                /*
                 * Ok, we have the page, and it's up-to-date, so
                 * now we can copy it to user space...
                 *
                 * The actor routine returns how many bytes were actually used..
                 * NOTE! This may not be the same as how much of a user buffer
                 * we filled up (we may be padding etc), so we can only update
                 * "pos" here (the actor routine has to update the user buffer
                 * pointers and the remaining count).
                 */
                ret = actor(desc, page, offset, nr);
                offset += ret;
                index += offset >> PAGE_CACHE_SHIFT;
                offset &= ~PAGE_CACHE_MASK;

                page_cache_release(page);
                if (ret == nr && desc->count)
                        continue;
                goto out;

page_not_up_to_date:
                /* Get exclusive access to the page ... */
                lock_page(page);

                /* Did it get unhashed before we got the lock? */
                if (!page->mapping) {
                        unlock_page(page);
                        page_cache_release(page);
                        continue;
                }

                /* Did somebody else fill it already? */
                if (PageUptodate(page)) {
                        unlock_page(page);
                        goto page_ok;
                }

readpage:
                /* Start the actual read. The read will unlock the page. */
                error = mapping->a_ops->readpage(filp, page);

                if (unlikely(error)) {
                        if (error == AOP_TRUNCATED_PAGE) {
                                page_cache_release(page);
                                goto find_page;
                        }
                        goto readpage_error;
                }

                if (!PageUptodate(page)) {
                        lock_page(page);
                        if (!PageUptodate(page)) {
                                if (page->mapping == NULL) {
                                        /*
                                         * invalidate_inode_pages got it
                                         */
                                        unlock_page(page);
                                        page_cache_release(page);
                                        goto find_page;
                                }
                                unlock_page(page);
                                error = -EIO;
                                goto readpage_error;
                        }
                        unlock_page(page);
                }

                /*
                 * i_size must be checked after we have done ->readpage.
                 *
                 * Checking i_size after the readpage allows us to calculate
                 * the correct value for "nr", which means the zero-filled
                 * part of the page is not copied back to userspace (unless
                 * another truncate extends the file - this is desired though).
                 */
                isize = i_size_read(inode);
                end_index = (isize - 1) >> PAGE_CACHE_SHIFT;
                if (unlikely(!isize || index > end_index)) {
                        page_cache_release(page);
                        goto out;
                }

                /* nr is the maximum number of bytes to copy from this page */
                nr = PAGE_CACHE_SIZE;
                if (index == end_index) {
                        nr = ((isize - 1) & ~PAGE_CACHE_MASK) + 1;
                        if (nr <= offset) {
                                page_cache_release(page);
                                goto out;
                        }
                }
                nr = nr - offset;
                goto page_ok;

readpage_error:
                /* UHHUH! A synchronous read error occurred. Report it */
                desc->error = error;
                page_cache_release(page);
                goto out;

no_cached_page:
                /*
                 * Ok, it wasn't cached, so we need to create a new
                 * page..
                 */
                if (!cached_page) {
                        cached_page = page_cache_alloc_cold(mapping);
                        if (!cached_page) {
                                desc->error = -ENOMEM;
                                goto out;
                        }
                }
                error = add_to_page_cache_lru(cached_page, mapping,
                                                index, GFP_KERNEL);
                if (error) {
                        if (error == -EEXIST)
                                goto find_page;
                        desc->error = error;
                        goto out;
                }
                page = cached_page;
                cached_page = NULL;
                goto readpage;
        }

out:
        *_ra = ra;

        *ppos = ((loff_t) index << PAGE_CACHE_SHIFT) + offset;
        if (cached_page)
                page_cache_release(cached_page);
        if (filp)
                file_accessed(filp);
}

EXPORT_SYMBOL(do_generic_mapping_read);

int file_read_actor(read_descriptor_t *desc, struct page *page,
                        unsigned long offset, unsigned long size)
{
        char *kaddr;
        unsigned long left, count = desc->count;

        if (size > count)
                size = count;

        /*
         * Faults on the destination of a read are common, so do it before
         * taking the kmap.
         */
        if (!fault_in_pages_writeable(desc->arg.buf, size)) {
                kaddr = kmap_atomic(page, KM_USER0);
                left = __copy_to_user_inatomic(desc->arg.buf,
                                                kaddr + offset, size);
                kunmap_atomic(kaddr, KM_USER0);
                if (left == 0)
                        goto success;
        }

        /* Do it the slow way */
        kaddr = kmap(page);
        left = __copy_to_user(desc->arg.buf, kaddr + offset, size);
        kunmap(page);

        if (left) {
                size -= left;
                desc->error = -EFAULT;
        }
success:
        desc->count = count - size;
        desc->written += size;
        desc->arg.buf += size;
        return size;
}

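/*
 * Illustration (editor's sketch, not part of the original file): the
 * read_descriptor_t protocol that file_read_actor() implements.  An
 * actor consumes up to "size" bytes at "offset" within "page", updates
 * desc->count and desc->written itself, and returns the bytes it used;
 * returning less than "size" stops do_generic_mapping_read().  This
 * actor is invented and merely counts bytes instead of copying them.
 */
#if 0
static int example_count_actor(read_descriptor_t *desc, struct page *page,
                               unsigned long offset, unsigned long size)
{
        if (size > desc->count)
                size = desc->count;
        /* a real actor would copy from kmap(page) + offset here */
        desc->count -= size;
        desc->written += size;
        return size;
}
#endif
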
/*
 * This is the "read()" routine for all filesystems
 * that can use the page cache directly.
 */
ssize_t
__generic_file_aio_read(struct kiocb *iocb, const struct iovec *iov,
                unsigned long nr_segs, loff_t *ppos)
{
        struct file *filp = iocb->ki_filp;
        ssize_t retval;
        unsigned long seg;
        size_t count;

        count = 0;
        for (seg = 0; seg < nr_segs; seg++) {
                const struct iovec *iv = &iov[seg];

                /*
                 * If any segment has a negative length, or the cumulative
                 * length ever wraps negative then return -EINVAL.
                 */
                count += iv->iov_len;
                if (unlikely((ssize_t)(count|iv->iov_len) < 0))
                        return -EINVAL;
                if (access_ok(VERIFY_WRITE, iv->iov_base, iv->iov_len))
                        continue;
                if (seg == 0)
                        return -EFAULT;
                nr_segs = seg;
                count -= iv->iov_len;   /* This segment is no good */
                break;
        }

        /* coalesce the iovecs and go direct-to-BIO for O_DIRECT */
        if (filp->f_flags & O_DIRECT) {
                loff_t pos = *ppos, size;
                struct address_space *mapping;
                struct inode *inode;

                mapping = filp->f_mapping;
                inode = mapping->host;
                retval = 0;
                if (!count)
                        goto out; /* skip atime */
                size = i_size_read(inode);
                if (pos < size) {
                        retval = generic_file_direct_IO(READ, iocb,
                                                iov, pos, nr_segs);
                        if (retval > 0 && !is_sync_kiocb(iocb))
                                retval = -EIOCBQUEUED;
                        if (retval > 0)
                                *ppos = pos + retval;
                }
                file_accessed(filp);
                goto out;
        }

        retval = 0;
        if (count) {
                for (seg = 0; seg < nr_segs; seg++) {
                        read_descriptor_t desc;

                        desc.written = 0;
                        desc.arg.buf = iov[seg].iov_base;
                        desc.count = iov[seg].iov_len;
                        if (desc.count == 0)
                                continue;
                        desc.error = 0;
                        do_generic_file_read(filp,ppos,&desc,file_read_actor);
                        retval += desc.written;
                        if (desc.error) {
                                retval = retval ?: desc.error;
                                break;
                        }
                }
        }
out:
        return retval;
}

EXPORT_SYMBOL(__generic_file_aio_read);

ssize_t
generic_file_aio_read(struct kiocb *iocb, char __user *buf, size_t count, loff_t pos)
{
        struct iovec local_iov = { .iov_base = buf, .iov_len = count };

        BUG_ON(iocb->ki_pos != pos);
        return __generic_file_aio_read(iocb, &local_iov, 1, &iocb->ki_pos);
}

EXPORT_SYMBOL(generic_file_aio_read);

ssize_t
generic_file_read(struct file *filp, char __user *buf, size_t count, loff_t *ppos)
{
        struct iovec local_iov = { .iov_base = buf, .iov_len = count };
        struct kiocb kiocb;
        ssize_t ret;

        init_sync_kiocb(&kiocb, filp);
        ret = __generic_file_aio_read(&kiocb, &local_iov, 1, ppos);
        if (-EIOCBQUEUED == ret)
                ret = wait_on_sync_kiocb(&kiocb);
        return ret;
}

EXPORT_SYMBOL(generic_file_read);

int file_send_actor(read_descriptor_t * desc, struct page *page, unsigned long offset, unsigned long size)
{
        ssize_t written;
        unsigned long count = desc->count;
        struct file *file = desc->arg.data;

        if (size > count)
                size = count;

        written = file->f_op->sendpage(file, page, offset,
                                       size, &file->f_pos, size<count);
        if (written < 0) {
                desc->error = written;
                written = 0;
        }
        desc->count = count - written;
        desc->written += written;
        return written;
}

ssize_t generic_file_sendfile(struct file *in_file, loff_t *ppos,
                         size_t count, read_actor_t actor, void *target)
{
        read_descriptor_t desc;

        if (!count)
                return 0;

        desc.written = 0;
        desc.count = count;
        desc.arg.data = target;
        desc.error = 0;

        do_generic_file_read(in_file, ppos, &desc, actor);
        if (desc.written)
                return desc.written;
        return desc.error;
}

EXPORT_SYMBOL(generic_file_sendfile);

static ssize_t
do_readahead(struct address_space *mapping, struct file *filp,
             unsigned long index, unsigned long nr)
{
        if (!mapping || !mapping->a_ops || !mapping->a_ops->readpage)
                return -EINVAL;

        force_page_cache_readahead(mapping, filp, index,
                                        max_sane_readahead(nr));
        return 0;
}

asmlinkage ssize_t sys_readahead(int fd, loff_t offset, size_t count)
{
        ssize_t ret;
        struct file *file;

        ret = -EBADF;
        file = fget(fd);
        if (file) {
                if (file->f_mode & FMODE_READ) {
                        struct address_space *mapping = file->f_mapping;
                        unsigned long start = offset >> PAGE_CACHE_SHIFT;
                        unsigned long end = (offset + count - 1) >> PAGE_CACHE_SHIFT;
                        unsigned long len = end - start + 1;
                        ret = do_readahead(mapping, file, start, len);
                }
                fput(file);
        }
        return ret;
}
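
/*
 * Illustration (editor's sketch, not part of the original file): the
 * syscall above as seen from userspace.  The byte range is rounded
 * outward to whole pages by the start/end arithmetic in
 * sys_readahead(); the test function is invented.
 */
#if 0
/* userspace, not kernel code; readahead(2) needs _GNU_SOURCE */
#define _GNU_SOURCE
#include <fcntl.h>

ssize_t example_prefetch(int fd)
{
        /* ask the kernel to pull the first megabyte into the page cache */
        return readahead(fd, 0, 1024 * 1024);
}
#endif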
1191
1192 #ifdef CONFIG_MMU
1193 /*
1194  * This adds the requested page to the page cache if it isn't already there,
1195  * and schedules an I/O to read in its contents from disk.
1196  */
1197 static int FASTCALL(page_cache_read(struct file * file, unsigned long offset));
1198 static int fastcall page_cache_read(struct file * file, unsigned long offset)
1199 {
1200         struct address_space *mapping = file->f_mapping;
1201         struct page *page; 
1202         int ret;
1203
1204         do {
1205                 page = page_cache_alloc_cold(mapping);
1206                 if (!page)
1207                         return -ENOMEM;
1208
1209                 ret = add_to_page_cache_lru(page, mapping, offset, GFP_KERNEL);
1210                 if (ret == 0)
1211                         ret = mapping->a_ops->readpage(file, page);
1212                 else if (ret == -EEXIST)
1213                         ret = 0; /* losing race to add is OK */
1214
1215                 page_cache_release(page);
1216
1217         } while (ret == AOP_TRUNCATED_PAGE);
1218                 
1219         return ret;
1220 }
1221
1222 #define MMAP_LOTSAMISS  (100)
1223
1224 /*
1225  * filemap_nopage() is invoked via the vma operations vector for a
1226  * mapped memory region to read in file data during a page fault.
1227  *
1228  * The goto's are kind of ugly, but this streamlines the normal case of having
1229  * it in the page cache, and handles the special cases reasonably without
1230  * having a lot of duplicated code.
1231  */
1232 struct page *filemap_nopage(struct vm_area_struct *area,
1233                                 unsigned long address, int *type)
1234 {
1235         int error;
1236         struct file *file = area->vm_file;
1237         struct address_space *mapping = file->f_mapping;
1238         struct file_ra_state *ra = &file->f_ra;
1239         struct inode *inode = mapping->host;
1240         struct page *page;
1241         unsigned long size, pgoff;
1242         int did_readaround = 0, majmin = VM_FAULT_MINOR;
1243
1244         pgoff = ((address-area->vm_start) >> PAGE_CACHE_SHIFT) + area->vm_pgoff;
1245
1246 retry_all:
1247         size = (i_size_read(inode) + PAGE_CACHE_SIZE - 1) >> PAGE_CACHE_SHIFT;
1248         if (pgoff >= size)
1249                 goto outside_data_content;
1250
1251         /* If we don't want any read-ahead, don't bother */
1252         if (VM_RandomReadHint(area))
1253                 goto no_cached_page;
1254
1255         /*
1256          * The readahead code wants to be told about each and every page
1257          * so it can build and shrink its windows appropriately
1258          *
1259          * For sequential accesses, we use the generic readahead logic.
1260          */
1261         if (VM_SequentialReadHint(area))
1262                 page_cache_readahead(mapping, ra, file, pgoff, 1);
1263
1264         /*
1265          * Do we have something in the page cache already?
1266          */
1267 retry_find:
1268         page = find_get_page(mapping, pgoff);
1269         if (!page) {
1270                 unsigned long ra_pages;
1271
1272                 if (VM_SequentialReadHint(area)) {
1273                         handle_ra_miss(mapping, ra, pgoff);
1274                         goto no_cached_page;
1275                 }
1276                 ra->mmap_miss++;
1277
1278                 /*
1279                  * Do we miss much more than hit in this file? If so,
1280                  * stop bothering with read-ahead. It will only hurt.
1281                  */
1282                 if (ra->mmap_miss > ra->mmap_hit + MMAP_LOTSAMISS)
1283                         goto no_cached_page;
1284
1285                 /*
1286                  * To keep the pgmajfault counter straight, we need to
1287                  * check did_readaround, as this is an inner loop.
1288                  */
1289                 if (!did_readaround) {
1290                         majmin = VM_FAULT_MAJOR;
1291                         inc_page_state(pgmajfault);
1292                 }
1293                 did_readaround = 1;
1294                 ra_pages = max_sane_readahead(file->f_ra.ra_pages);
1295                 if (ra_pages) {
1296                         pgoff_t start = 0;
1297
1298                         if (pgoff > ra_pages / 2)
1299                                 start = pgoff - ra_pages / 2;
1300                         do_page_cache_readahead(mapping, file, start, ra_pages);
1301                 }
1302                 page = find_get_page(mapping, pgoff);
1303                 if (!page)
1304                         goto no_cached_page;
1305         }
1306
1307         if (!did_readaround)
1308                 ra->mmap_hit++;
1309
1310         /*
1311          * Ok, found a page in the page cache, now we need to check
1312          * that it's up-to-date.
1313          */
1314         if (!PageUptodate(page))
1315                 goto page_not_uptodate;
1316
1317 success:
1318         /*
1319          * Found the page and have a reference on it.
1320          */
1321         mark_page_accessed(page);
1322         if (type)
1323                 *type = majmin;
1324         return page;
1325
1326 outside_data_content:
1327         /*
1328          * An external ptracer can access pages that normally aren't
1329          * accessible..
1330          */
1331         if (area->vm_mm == current->mm)
1332                 return NULL;
1333         /* Fall through to the non-read-ahead case */
1334 no_cached_page:
1335         /*
1336          * We're only likely to ever get here if MADV_RANDOM is in
1337          * effect.
1338          */
1339         error = page_cache_read(file, pgoff);
1340         grab_swap_token();
1341
1342         /*
1343          * The page we want has now been added to the page cache.
1344          * In the unlikely event that someone removed it in the
1345          * meantime, we'll just come back here and read it again.
1346          */
1347         if (error >= 0)
1348                 goto retry_find;
1349
1350         /*
1351          * An error return from page_cache_read can result if the
1352          * system is low on memory, or a problem occurs while trying
1353          * to schedule I/O.
1354          */
1355         if (error == -ENOMEM)
1356                 return NOPAGE_OOM;
1357         return NULL;
1358
1359 page_not_uptodate:
1360         if (!did_readaround) {
1361                 majmin = VM_FAULT_MAJOR;
1362                 inc_page_state(pgmajfault);
1363         }
1364         lock_page(page);
1365
1366         /* Did it get unhashed while we waited for it? */
1367         if (!page->mapping) {
1368                 unlock_page(page);
1369                 page_cache_release(page);
1370                 goto retry_all;
1371         }
1372
1373         /* Did somebody else get it up-to-date? */
1374         if (PageUptodate(page)) {
1375                 unlock_page(page);
1376                 goto success;
1377         }
1378
1379         error = mapping->a_ops->readpage(file, page);
1380         if (!error) {
1381                 wait_on_page_locked(page);
1382                 if (PageUptodate(page))
1383                         goto success;
1384         } else if (error == AOP_TRUNCATED_PAGE) {
1385                 page_cache_release(page);
1386                 goto retry_find;
1387         }
1388
1389         /*
1390          * Umm, take care of errors if the page isn't up-to-date.
1391          * Try to re-read it _once_. We do this synchronously,
1392          * because there really aren't any performance issues here
1393          * and we need to check for errors.
1394          */
1395         lock_page(page);
1396
1397         /* Somebody truncated the page on us? */
1398         if (!page->mapping) {
1399                 unlock_page(page);
1400                 page_cache_release(page);
1401                 goto retry_all;
1402         }
1403
1404         /* Somebody else successfully read it in? */
1405         if (PageUptodate(page)) {
1406                 unlock_page(page);
1407                 goto success;
1408         }
1409         ClearPageError(page);
1410         error = mapping->a_ops->readpage(file, page);
1411         if (!error) {
1412                 wait_on_page_locked(page);
1413                 if (PageUptodate(page))
1414                         goto success;
1415         } else if (error == AOP_TRUNCATED_PAGE) {
1416                 page_cache_release(page);
1417                 goto retry_find;
1418         }
1419
1420         /*
1421          * Things didn't work out. Return zero to tell the
1422          * mm layer so, possibly freeing the page cache page first.
1423          */
1424         page_cache_release(page);
1425         return NULL;
1426 }
1427
1428 EXPORT_SYMBOL(filemap_nopage);
1429
1430 static struct page * filemap_getpage(struct file *file, unsigned long pgoff,
1431                                         int nonblock)
1432 {
1433         struct address_space *mapping = file->f_mapping;
1434         struct page *page;
1435         int error;
1436
1437         /*
1438          * Do we have something in the page cache already?
1439          */
1440 retry_find:
1441         page = find_get_page(mapping, pgoff);
1442         if (!page) {
1443                 if (nonblock)
1444                         return NULL;
1445                 goto no_cached_page;
1446         }
1447
1448         /*
1449          * Ok, found a page in the page cache, now we need to check
1450          * that it's up-to-date.
1451          */
1452         if (!PageUptodate(page)) {
1453                 if (nonblock) {
1454                         page_cache_release(page);
1455                         return NULL;
1456                 }
1457                 goto page_not_uptodate;
1458         }
1459
1460 success:
1461         /*
1462          * Found the page and have a reference on it.
1463          */
1464         mark_page_accessed(page);
1465         return page;
1466
1467 no_cached_page:
1468         error = page_cache_read(file, pgoff);
1469
1470         /*
1471          * The page we want has now been added to the page cache.
1472          * In the unlikely event that someone removed it in the
1473          * meantime, we'll just come back here and read it again.
1474          */
1475         if (error >= 0)
1476                 goto retry_find;
1477
1478         /*
1479          * An error return from page_cache_read can result if the
1480          * system is low on memory, or a problem occurs while trying
1481          * to schedule I/O.
1482          */
1483         return NULL;
1484
1485 page_not_uptodate:
1486         lock_page(page);
1487
1488         /* Did it get unhashed while we waited for it? */
1489         if (!page->mapping) {
1490                 unlock_page(page);
1491                 goto err;
1492         }
1493
1494         /* Did somebody else get it up-to-date? */
1495         if (PageUptodate(page)) {
1496                 unlock_page(page);
1497                 goto success;
1498         }
1499
1500         error = mapping->a_ops->readpage(file, page);
1501         if (!error) {
1502                 wait_on_page_locked(page);
1503                 if (PageUptodate(page))
1504                         goto success;
1505         } else if (error == AOP_TRUNCATED_PAGE) {
1506                 page_cache_release(page);
1507                 goto retry_find;
1508         }
1509
1510         /*
1511          * Umm, take care of errors if the page isn't up-to-date.
1512          * Try to re-read it _once_. We do this synchronously,
1513          * because there really aren't any performance issues here
1514          * and we need to check for errors.
1515          */
1516         lock_page(page);
1517
1518         /* Somebody truncated the page on us? */
1519         if (!page->mapping) {
1520                 unlock_page(page);
1521                 goto err;
1522         }
1523         /* Somebody else successfully read it in? */
1524         if (PageUptodate(page)) {
1525                 unlock_page(page);
1526                 goto success;
1527         }
1528
1529         ClearPageError(page);
1530         error = mapping->a_ops->readpage(file, page);
1531         if (!error) {
1532                 wait_on_page_locked(page);
1533                 if (PageUptodate(page))
1534                         goto success;
1535         } else if (error == AOP_TRUNCATED_PAGE) {
1536                 page_cache_release(page);
1537                 goto retry_find;
1538         }
1539
1540         /*
1541          * Things didn't work out. Return NULL to tell the
1542          * mm layer so, possibly freeing the page cache page first.
1543          */
1544 err:
1545         page_cache_release(page);
1546
1547         return NULL;
1548 }
1549
1550 int filemap_populate(struct vm_area_struct *vma, unsigned long addr,
1551                 unsigned long len, pgprot_t prot, unsigned long pgoff,
1552                 int nonblock)
1553 {
1554         struct file *file = vma->vm_file;
1555         struct address_space *mapping = file->f_mapping;
1556         struct inode *inode = mapping->host;
1557         unsigned long size;
1558         struct mm_struct *mm = vma->vm_mm;
1559         struct page *page;
1560         int err;
1561
1562         if (!nonblock)
1563                 force_page_cache_readahead(mapping, vma->vm_file,
1564                                         pgoff, len >> PAGE_CACHE_SHIFT);
1565
1566 repeat:
1567         size = (i_size_read(inode) + PAGE_CACHE_SIZE - 1) >> PAGE_CACHE_SHIFT;
1568         if (pgoff + (len >> PAGE_CACHE_SHIFT) > size)
1569                 return -EINVAL;
1570
1571         page = filemap_getpage(file, pgoff, nonblock);
1572
1573         /* XXX: This is wrong: a filesystem I/O error may have happened. Fix this
1574          * as is done in shmem_populate, which calls shmem_getpage. */
1575         if (!page && !nonblock)
1576                 return -ENOMEM;
1577
1578         if (page) {
1579                 err = install_page(mm, vma, addr, page, prot);
1580                 if (err) {
1581                         page_cache_release(page);
1582                         return err;
1583                 }
1584         } else if (vma->vm_flags & VM_NONLINEAR) {
1585                 /* No page was found just because we can't read it in now (being
1586                  * here implies nonblock != 0), but the page may exist, so set
1587                  * the PTE to fault it in later. */
1588                 err = install_file_pte(mm, vma, addr, pgoff, prot);
1589                 if (err)
1590                         return err;
1591         }
1592
1593         len -= PAGE_SIZE;
1594         addr += PAGE_SIZE;
1595         pgoff++;
1596         if (len)
1597                 goto repeat;
1598
1599         return 0;
1600 }
1601 EXPORT_SYMBOL(filemap_populate);
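
The ->populate method above is reached from sys_remap_file_pages() on a shared mapping that has been made nonlinear. A minimal userspace sketch of that path, assuming a pre-existing file "datafile" of at least two pages (the file name and sizes are illustrative only):

#define _GNU_SOURCE
#include <stdio.h>
#include <fcntl.h>
#include <unistd.h>
#include <sys/mman.h>

int main(void)
{
        long psz = sysconf(_SC_PAGESIZE);
        int fd = open("datafile", O_RDONLY);    /* assumed test file */
        char *p;

        if (fd < 0)
                return 1;
        p = mmap(NULL, 2 * psz, PROT_READ, MAP_SHARED, fd, 0);
        if (p == MAP_FAILED)
                return 1;
        /* Rebind the first page of the window to file page 1; the vma
         * becomes VM_NONLINEAR and the kernel uses ->populate to
         * install pages, or file PTEs in the nonblocking case. */
        if (remap_file_pages(p, psz, 0, 1, 0))
                return 1;
        printf("p[0] now backed by file offset %ld\n", psz);
        return 0;
}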
1602
1603 struct vm_operations_struct generic_file_vm_ops = {
1604         .nopage         = filemap_nopage,
1605         .populate       = filemap_populate,
1606 };
1607
1608 /* This is used for a general mmap of a disk file */
1609
1610 int generic_file_mmap(struct file * file, struct vm_area_struct * vma)
1611 {
1612         struct address_space *mapping = file->f_mapping;
1613
1614         if (!mapping->a_ops->readpage)
1615                 return -ENOEXEC;
1616         file_accessed(file);
1617         vma->vm_ops = &generic_file_vm_ops;
1618         return 0;
1619 }
1620
1621 /*
1622  * This is for filesystems which do not implement ->writepage.
1623  */
1624 int generic_file_readonly_mmap(struct file *file, struct vm_area_struct *vma)
1625 {
1626         if ((vma->vm_flags & VM_SHARED) && (vma->vm_flags & VM_MAYWRITE))
1627                 return -EINVAL;
1628         return generic_file_mmap(file, vma);
1629 }
1630 #else
1631 int generic_file_mmap(struct file * file, struct vm_area_struct * vma)
1632 {
1633         return -ENOSYS;
1634 }
1635 int generic_file_readonly_mmap(struct file * file, struct vm_area_struct * vma)
1636 {
1637         return -ENOSYS;
1638 }
1639 #endif /* CONFIG_MMU */
1640
1641 EXPORT_SYMBOL(generic_file_mmap);
1642 EXPORT_SYMBOL(generic_file_readonly_mmap);
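
For reference, how these entry points are exercised from userspace: a plain MAP_SHARED mapping, whose faults land in filemap_nopage and whose mmap() would fail with ENOEXEC on a filesystem lacking ->readpage. A small sketch, assuming a scratch file "datafile" at least one page long:

#include <string.h>
#include <fcntl.h>
#include <unistd.h>
#include <sys/mman.h>

int main(void)
{
        int fd = open("datafile", O_RDWR);      /* assumed scratch file */
        char *p;

        if (fd < 0)
                return 1;
        p = mmap(NULL, 4096, PROT_READ | PROT_WRITE, MAP_SHARED, fd, 0);
        if (p == MAP_FAILED)
                return 1;
        memcpy(p, "hello", 5);  /* faults in the page, then dirties it */
        munmap(p, 4096);
        close(fd);
        return 0;
}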
1643
1644 static inline struct page *__read_cache_page(struct address_space *mapping,
1645                                 unsigned long index,
1646                                 int (*filler)(void *,struct page*),
1647                                 void *data)
1648 {
1649         struct page *page, *cached_page = NULL;
1650         int err;
1651 repeat:
1652         page = find_get_page(mapping, index);
1653         if (!page) {
1654                 if (!cached_page) {
1655                         cached_page = page_cache_alloc_cold(mapping);
1656                         if (!cached_page)
1657                                 return ERR_PTR(-ENOMEM);
1658                 }
1659                 err = add_to_page_cache_lru(cached_page, mapping,
1660                                         index, GFP_KERNEL);
1661                 if (err == -EEXIST)
1662                         goto repeat;
1663                 if (err < 0) {
1664                         /* Presumably ENOMEM for radix tree node */
1665                         page_cache_release(cached_page);
1666                         return ERR_PTR(err);
1667                 }
1668                 page = cached_page;
1669                 cached_page = NULL;
1670                 err = filler(data, page);
1671                 if (err < 0) {
1672                         page_cache_release(page);
1673                         page = ERR_PTR(err);
1674                 }
1675         }
1676         if (cached_page)
1677                 page_cache_release(cached_page);
1678         return page;
1679 }
1680
1681 /*
1682  * Read into the page cache. If a page already exists,
1683  * and PageUptodate() is not set, try to fill the page.
1684  */
1685 struct page *read_cache_page(struct address_space *mapping,
1686                                 unsigned long index,
1687                                 int (*filler)(void *,struct page*),
1688                                 void *data)
1689 {
1690         struct page *page;
1691         int err;
1692
1693 retry:
1694         page = __read_cache_page(mapping, index, filler, data);
1695         if (IS_ERR(page))
1696                 goto out;
1697         mark_page_accessed(page);
1698         if (PageUptodate(page))
1699                 goto out;
1700
1701         lock_page(page);
1702         if (!page->mapping) {
1703                 unlock_page(page);
1704                 page_cache_release(page);
1705                 goto retry;
1706         }
1707         if (PageUptodate(page)) {
1708                 unlock_page(page);
1709                 goto out;
1710         }
1711         err = filler(data, page);
1712         if (err < 0) {
1713                 page_cache_release(page);
1714                 page = ERR_PTR(err);
1715         }
1716  out:
1717         return page;
1718 }
1719
1720 EXPORT_SYMBOL(read_cache_page);
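
A hedged in-kernel sketch of a read_cache_page() caller. A common idiom of this era is to pass the mapping's own ->readpage as the filler with the struct file as the opaque data; my_read_meta is a hypothetical helper, not an existing kernel function:

static struct page *my_read_meta(struct file *file, unsigned long index)
{
        struct address_space *mapping = file->f_mapping;
        struct page *page;

        page = read_cache_page(mapping, index,
                                (filler_t *)mapping->a_ops->readpage, file);
        if (IS_ERR(page))
                return page;
        /* The filler may complete asynchronously: wait for the read,
         * then verify it actually succeeded before using the page. */
        wait_on_page_locked(page);
        if (!PageUptodate(page)) {
                page_cache_release(page);
                return ERR_PTR(-EIO);
        }
        return page;
}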
1721
1722 /*
1723  * If the page was newly created, increment its refcount and add it to the
1724  * caller's lru-buffering pagevec.  This function is specifically for
1725  * generic_file_write().
1726  */
1727 static inline struct page *
1728 __grab_cache_page(struct address_space *mapping, unsigned long index,
1729                         struct page **cached_page, struct pagevec *lru_pvec)
1730 {
1731         int err;
1732         struct page *page;
1733 repeat:
1734         page = find_lock_page(mapping, index);
1735         if (!page) {
1736                 if (!*cached_page) {
1737                         *cached_page = page_cache_alloc(mapping);
1738                         if (!*cached_page)
1739                                 return NULL;
1740                 }
1741                 err = add_to_page_cache(*cached_page, mapping,
1742                                         index, GFP_KERNEL);
1743                 if (err == -EEXIST)
1744                         goto repeat;
1745                 if (err == 0) {
1746                         page = *cached_page;
1747                         page_cache_get(page);
1748                         if (!pagevec_add(lru_pvec, page))
1749                                 __pagevec_lru_add(lru_pvec);
1750                         *cached_page = NULL;
1751                 }
1752         }
1753         return page;
1754 }
1755
1756 /*
1757  * The logic we want is
1758  *
1759  *      if suid or (sgid and xgrp)
1760  *              remove privs
1761  */
1762 int remove_suid(struct dentry *dentry)
1763 {
1764         mode_t mode = dentry->d_inode->i_mode;
1765         int kill = 0;
1766         int result = 0;
1767
1768         /* suid always must be killed */
1769         if (unlikely(mode & S_ISUID))
1770                 kill = ATTR_KILL_SUID;
1771
1772         /*
1773          * sgid without any exec bits is just a mandatory locking mark; leave
1774          * it alone.  If some exec bits are set, it's a real sgid; kill it.
1775          */
1776         if (unlikely((mode & S_ISGID) && (mode & S_IXGRP)))
1777                 kill |= ATTR_KILL_SGID;
1778
1779         if (unlikely(kill && !capable(CAP_FSETID))) {
1780                 struct iattr newattrs;
1781
1782                 newattrs.ia_valid = ATTR_FORCE | kill;
1783                 result = notify_change(dentry, &newattrs);
1784         }
1785         return result;
1786 }
1787 EXPORT_SYMBOL(remove_suid);
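
The effect of remove_suid() is visible from userspace: a write by a task without CAP_FSETID strips the setuid bit (run unprivileged; with CAP_FSETID the kill is skipped, as above). A sketch assuming a scratch file "suidfile":

#include <stdio.h>
#include <fcntl.h>
#include <unistd.h>
#include <sys/stat.h>

int main(void)
{
        struct stat st;
        int fd = open("suidfile", O_WRONLY | O_CREAT, 0755);

        if (fd < 0)
                return 1;
        fchmod(fd, 04755);              /* set S_ISUID */
        write(fd, "x", 1);              /* kernel calls remove_suid() */
        fstat(fd, &st);
        printf("suid bit after write: %s\n",
               (st.st_mode & S_ISUID) ? "still set" : "cleared");
        close(fd);
        return 0;
}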
1788
1789 size_t
1790 __filemap_copy_from_user_iovec(char *vaddr, 
1791                         const struct iovec *iov, size_t base, size_t bytes)
1792 {
1793         size_t copied = 0, left = 0;
1794
1795         while (bytes) {
1796                 char __user *buf = iov->iov_base + base;
1797                 int copy = min(bytes, iov->iov_len - base);
1798
1799                 base = 0;
1800                 left = __copy_from_user_inatomic(vaddr, buf, copy);
1801                 copied += copy;
1802                 bytes -= copy;
1803                 vaddr += copy;
1804                 iov++;
1805
1806                 if (unlikely(left)) {
1807                         /* zero the rest of the target like __copy_from_user */
1808                         if (bytes)
1809                                 memset(vaddr, 0, bytes);
1810                         break;
1811                 }
1812         }
1813         return copied - left;
1814 }
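
A userspace analogue of the loop above, with memcpy standing in for __copy_from_user_inatomic, to illustrate how the walk resumes at 'base' within the first segment and why the kernel version returns copied - left on a faulting copy (memcpy cannot fault, so this analogue always returns the full count; as in the kernel, the caller must guarantee bytes does not exceed the total iovec length):

#include <string.h>
#include <sys/uio.h>

static size_t copy_from_iovec(char *vaddr, const struct iovec *iov,
                              size_t base, size_t bytes)
{
        size_t copied = 0;

        while (bytes) {
                const char *buf = (const char *)iov->iov_base + base;
                size_t copy = bytes < iov->iov_len - base ?
                              bytes : iov->iov_len - base;

                base = 0;               /* later segments start at 0 */
                memcpy(vaddr, buf, copy);
                copied += copy;
                bytes -= copy;
                vaddr += copy;
                iov++;
        }
        return copied;
}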
1815
1816 /*
1817  * Performs necessary checks before doing a write.
1818  *
1819  * Can adjust the writing position or the number of bytes to write.
1820  * Returns the appropriate error code that the caller should return,
1821  * or zero if the write should be allowed.
1822  */
1823 inline int generic_write_checks(struct file *file, loff_t *pos, size_t *count, int isblk)
1824 {
1825         struct inode *inode = file->f_mapping->host;
1826         unsigned long limit = current->signal->rlim[RLIMIT_FSIZE].rlim_cur;
1827
1828         if (unlikely(*pos < 0))
1829                 return -EINVAL;
1830
1831         if (!isblk) {
1832                 /* FIXME: this is for backwards compatibility with 2.4 */
1833                 if (file->f_flags & O_APPEND)
1834                         *pos = i_size_read(inode);
1835
1836                 if (limit != RLIM_INFINITY) {
1837                         if (*pos >= limit) {
1838                                 send_sig(SIGXFSZ, current, 0);
1839                                 return -EFBIG;
1840                         }
1841                         if (*count > limit - (typeof(limit))*pos) {
1842                                 *count = limit - (typeof(limit))*pos;
1843                         }
1844                 }
1845         }
1846
1847         /*
1848          * LFS rule
1849          */
1850         if (unlikely(*pos + *count > MAX_NON_LFS &&
1851                                 !(file->f_flags & O_LARGEFILE))) {
1852                 if (*pos >= MAX_NON_LFS) {
1853                         send_sig(SIGXFSZ, current, 0);
1854                         return -EFBIG;
1855                 }
1856                 if (*count > MAX_NON_LFS - (unsigned long)*pos) {
1857                         *count = MAX_NON_LFS - (unsigned long)*pos;
1858                 }
1859         }
1860
1861         /*
1862          * Are we about to exceed the fs block limit?
1863          *
1864          * If we have written data it becomes a short write.  If we have
1865          * exceeded without writing data we send a signal and return EFBIG.
1866          * Linus's frestrict idea will clean these up nicely.
1867          */
1868         if (likely(!isblk)) {
1869                 if (unlikely(*pos >= inode->i_sb->s_maxbytes)) {
1870                         if (*count || *pos > inode->i_sb->s_maxbytes) {
1871                                 send_sig(SIGXFSZ, current, 0);
1872                                 return -EFBIG;
1873                         }
1874                         /* zero-length writes at ->s_maxbytes are OK */
1875                 }
1876
1877                 if (unlikely(*pos + *count > inode->i_sb->s_maxbytes))
1878                         *count = inode->i_sb->s_maxbytes - *pos;
1879         } else {
1880                 loff_t isize;
1881                 if (bdev_read_only(I_BDEV(inode)))
1882                         return -EPERM;
1883                 isize = i_size_read(inode);
1884                 if (*pos >= isize) {
1885                         if (*count || *pos > isize)
1886                                 return -ENOSPC;
1887                 }
1888
1889                 if (*pos + *count > isize)
1890                         *count = isize - *pos;
1891         }
1892         return 0;
1893 }
1894 EXPORT_SYMBOL(generic_write_checks);
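
The RLIMIT_FSIZE clamp above can be observed directly: with a 4 KiB limit, a write crossing the limit is shortened, and a write starting at the limit fails with EFBIG after SIGXFSZ. A sketch assuming a scratch file "rlimfile":

#include <stdio.h>
#include <errno.h>
#include <string.h>
#include <signal.h>
#include <fcntl.h>
#include <unistd.h>
#include <sys/resource.h>

int main(void)
{
        static char buf[8192];
        struct rlimit rl = { 4096, 4096 };
        int fd;
        ssize_t n;

        signal(SIGXFSZ, SIG_IGN);       /* ignore the send_sig() above */
        setrlimit(RLIMIT_FSIZE, &rl);
        memset(buf, 'x', sizeof(buf));

        fd = open("rlimfile", O_WRONLY | O_CREAT | O_TRUNC, 0644);
        if (fd < 0)
                return 1;
        n = write(fd, buf, sizeof(buf));        /* count gets clamped */
        printf("wrote %zd of %zu\n", n, sizeof(buf));   /* 4096 */
        n = write(fd, buf, 1);                  /* pos >= limit */
        if (n < 0 && errno == EFBIG)
                printf("second write: EFBIG, as expected\n");
        close(fd);
        return 0;
}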
1895
1896 ssize_t
1897 generic_file_direct_write(struct kiocb *iocb, const struct iovec *iov,
1898                 unsigned long *nr_segs, loff_t pos, loff_t *ppos,
1899                 size_t count, size_t ocount)
1900 {
1901         struct file     *file = iocb->ki_filp;
1902         struct address_space *mapping = file->f_mapping;
1903         struct inode    *inode = mapping->host;
1904         ssize_t         written;
1905
1906         if (count != ocount)
1907                 *nr_segs = iov_shorten((struct iovec *)iov, *nr_segs, count);
1908
1909         written = generic_file_direct_IO(WRITE, iocb, iov, pos, *nr_segs);
1910         if (written > 0) {
1911                 loff_t end = pos + written;
1912                 if (end > i_size_read(inode) && !S_ISBLK(inode->i_mode)) {
1913                         i_size_write(inode, end);
1914                         mark_inode_dirty(inode);
1915                 }
1916                 *ppos = end;
1917         }
1918
1919         /*
1920          * Sync the fs metadata but not the minor inode changes, and
1921          * of course not the data, as we did direct DMA for the I/O.
1922          * i_mutex is held, which protects generic_osync_inode() from
1923          * livelocking.
1924          */
1925         if (written >= 0 && ((file->f_flags & O_SYNC) || IS_SYNC(inode))) {
1926                 int err = generic_osync_inode(inode, mapping, OSYNC_METADATA);
1927                 if (err < 0)
1928                         written = err;
1929         }
1930         if (written == count && !is_sync_kiocb(iocb))
1931                 written = -EIOCBQUEUED;
1932         return written;
1933 }
1934 EXPORT_SYMBOL(generic_file_direct_write);
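
A minimal userspace sketch of the path into generic_file_direct_write(). O_DIRECT alignment requirements are device and filesystem dependent; 512-byte alignment of buffer, length and offset is assumed here:

#define _GNU_SOURCE
#include <stdio.h>
#include <stdlib.h>
#include <string.h>
#include <fcntl.h>
#include <unistd.h>

int main(void)
{
        void *buf;
        int fd;

        if (posix_memalign(&buf, 512, 4096))
                return 1;
        memset(buf, 'd', 4096);
        fd = open("directfile", O_WRONLY | O_CREAT | O_DIRECT, 0644);
        if (fd < 0)
                return 1;
        if (write(fd, buf, 4096) != 4096)
                perror("write");        /* EINVAL on misalignment */
        close(fd);
        free(buf);
        return 0;
}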
1935
1936 ssize_t
1937 generic_file_buffered_write(struct kiocb *iocb, const struct iovec *iov,
1938                 unsigned long nr_segs, loff_t pos, loff_t *ppos,
1939                 size_t count, ssize_t written)
1940 {
1941         struct file *file = iocb->ki_filp;
1942         struct address_space * mapping = file->f_mapping;
1943         struct address_space_operations *a_ops = mapping->a_ops;
1944         struct inode    *inode = mapping->host;
1945         long            status = 0;
1946         struct page     *page;
1947         struct page     *cached_page = NULL;
1948         size_t          bytes;
1949         struct pagevec  lru_pvec;
1950         const struct iovec *cur_iov = iov; /* current iovec */
1951         size_t          iov_base = 0;      /* offset in the current iovec */
1952         char __user     *buf;
1953
1954         pagevec_init(&lru_pvec, 0);
1955
1956         /*
1957          * Handle a partial DIO write.  Adjust cur_iov if needed.
1958          */
1959         if (likely(nr_segs == 1))
1960                 buf = iov->iov_base + written;
1961         else {
1962                 filemap_set_next_iovec(&cur_iov, &iov_base, written);
1963                 buf = cur_iov->iov_base + iov_base;
1964         }
1965
1966         do {
1967                 unsigned long index;
1968                 unsigned long offset;
1969                 unsigned long maxlen;
1970                 size_t copied;
1971
1972                 offset = (pos & (PAGE_CACHE_SIZE - 1)); /* Within page */
1973                 index = pos >> PAGE_CACHE_SHIFT;
1974                 bytes = PAGE_CACHE_SIZE - offset;
1975                 if (bytes > count)
1976                         bytes = count;
1977
1978                 /*
1979                  * Bring in the user page that we will copy from _first_.
1980                  * Otherwise there's a nasty deadlock on copying from the
1981                  * same page as we're writing to, without it being marked
1982                  * up-to-date.
1983                  */
1984                 maxlen = cur_iov->iov_len - iov_base;
1985                 if (maxlen > bytes)
1986                         maxlen = bytes;
1987                 fault_in_pages_readable(buf, maxlen);
1988
1989                 page = __grab_cache_page(mapping, index, &cached_page, &lru_pvec);
1990                 if (!page) {
1991                         status = -ENOMEM;
1992                         break;
1993                 }
1994
1995                 status = a_ops->prepare_write(file, page, offset, offset+bytes);
1996                 if (unlikely(status)) {
1997                         loff_t isize = i_size_read(inode);
1998
1999                         if (status != AOP_TRUNCATED_PAGE)
2000                                 unlock_page(page);
2001                         page_cache_release(page);
2002                         if (status == AOP_TRUNCATED_PAGE)
2003                                 continue;
2004                         /*
2005                          * prepare_write() may have instantiated a few blocks
2006                          * outside i_size.  Trim these off again.
2007                          */
2008                         if (pos + bytes > isize)
2009                                 vmtruncate(inode, isize);
2010                         break;
2011                 }
2012                 if (likely(nr_segs == 1))
2013                         copied = filemap_copy_from_user(page, offset,
2014                                                         buf, bytes);
2015                 else
2016                         copied = filemap_copy_from_user_iovec(page, offset,
2017                                                 cur_iov, iov_base, bytes);
2018                 flush_dcache_page(page);
2019                 status = a_ops->commit_write(file, page, offset, offset+bytes);
2020                 if (status == AOP_TRUNCATED_PAGE) {
2021                         page_cache_release(page);
2022                         continue;
2023                 }
2024                 if (likely(copied > 0)) {
2025                         if (!status)
2026                                 status = copied;
2027
2028                         if (status >= 0) {
2029                                 written += status;
2030                                 count -= status;
2031                                 pos += status;
2032                                 buf += status;
2033                                 if (unlikely(nr_segs > 1)) {
2034                                         filemap_set_next_iovec(&cur_iov,
2035                                                         &iov_base, status);
2036                                         if (count)
2037                                                 buf = cur_iov->iov_base +
2038                                                         iov_base;
2039                                 } else {
2040                                         iov_base += status;
2041                                 }
2042                         }
2043                 }
2044                 if (unlikely(copied != bytes))
2045                         if (status >= 0)
2046                                 status = -EFAULT;
2047                 unlock_page(page);
2048                 mark_page_accessed(page);
2049                 page_cache_release(page);
2050                 if (status < 0)
2051                         break;
2052                 balance_dirty_pages_ratelimited(mapping);
2053                 cond_resched();
2054         } while (count);
2055         *ppos = pos;
2056
2057         if (cached_page)
2058                 page_cache_release(cached_page);
2059
2060         /*
2061          * For now, when the user asks for O_SYNC, we'll actually give O_DSYNC
2062          */
2063         if (likely(status >= 0)) {
2064                 if (unlikely((file->f_flags & O_SYNC) || IS_SYNC(inode))) {
2065                         if (!a_ops->writepage || !is_sync_kiocb(iocb))
2066                                 status = generic_osync_inode(inode, mapping,
2067                                                 OSYNC_METADATA|OSYNC_DATA);
2068                 }
2069         }
2070
2071         /*
2072          * If we get here for O_DIRECT writes then we must have fallen through
2073          * to buffered writes (block instantiation inside i_size).  So we sync
2074          * the file data here, to try to honour O_DIRECT expectations.
2075          */
2076         if (unlikely(file->f_flags & O_DIRECT) && written)
2077                 status = filemap_write_and_wait(mapping);
2078
2079         pagevec_lru_add(&lru_pvec);
2080         return written ? written : status;
2081 }
2082 EXPORT_SYMBOL(generic_file_buffered_write);
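
The O_SYNC handling at the tail of this function is what gives synchronous writers their durability guarantee via generic_osync_inode(). From userspace the whole machinery reduces to:

#include <fcntl.h>
#include <unistd.h>

int main(void)
{
        /* With O_SYNC, generic_file_buffered_write() syncs data and
         * metadata before returning, so write() implies the flush. */
        int fd = open("syncfile", O_WRONLY | O_CREAT | O_SYNC, 0644);

        if (fd < 0)
                return 1;
        write(fd, "durable\n", 8);
        close(fd);
        return 0;
}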
2083
2084 static ssize_t
2085 __generic_file_aio_write_nolock(struct kiocb *iocb, const struct iovec *iov,
2086                                 unsigned long nr_segs, loff_t *ppos)
2087 {
2088         struct file *file = iocb->ki_filp;
2089         struct address_space * mapping = file->f_mapping;
2090         size_t ocount;          /* original count */
2091         size_t count;           /* after file limit checks */
2092         struct inode    *inode = mapping->host;
2093         unsigned long   seg;
2094         loff_t          pos;
2095         ssize_t         written;
2096         ssize_t         err;
2097
2098         ocount = 0;
2099         for (seg = 0; seg < nr_segs; seg++) {
2100                 const struct iovec *iv = &iov[seg];
2101
2102                 /*
2103                  * If any segment has a negative length, or the cumulative
2104                  * length ever wraps negative then return -EINVAL.
2105                  */
2106                 ocount += iv->iov_len;
2107                 if (unlikely((ssize_t)(ocount|iv->iov_len) < 0))
2108                         return -EINVAL;
2109                 if (access_ok(VERIFY_READ, iv->iov_base, iv->iov_len))
2110                         continue;
2111                 if (seg == 0)
2112                         return -EFAULT;
2113                 nr_segs = seg;
2114                 ocount -= iv->iov_len;  /* This segment is no good */
2115                 break;
2116         }
2117
2118         count = ocount;
2119         pos = *ppos;
2120
2121         vfs_check_frozen(inode->i_sb, SB_FREEZE_WRITE);
2122
2123         /* We can write back this queue in page reclaim */
2124         current->backing_dev_info = mapping->backing_dev_info;
2125         written = 0;
2126
2127         err = generic_write_checks(file, &pos, &count, S_ISBLK(inode->i_mode));
2128         if (err)
2129                 goto out;
2130
2131         if (count == 0)
2132                 goto out;
2133
2134         err = remove_suid(file->f_dentry);
2135         if (err)
2136                 goto out;
2137
2138         file_update_time(file);
2139
2140         /* coalesce the iovecs and go direct-to-BIO for O_DIRECT */
2141         if (unlikely(file->f_flags & O_DIRECT)) {
2142                 written = generic_file_direct_write(iocb, iov,
2143                                 &nr_segs, pos, ppos, count, ocount);
2144                 if (written < 0 || written == count)
2145                         goto out;
2146                 /*
2147                  * direct-io write to a hole: fall through to buffered I/O
2148                  * for completing the rest of the request.
2149                  */
2150                 pos += written;
2151                 count -= written;
2152         }
2153
2154         written = generic_file_buffered_write(iocb, iov, nr_segs,
2155                         pos, ppos, count, written);
2156 out:
2157         current->backing_dev_info = NULL;
2158         return written ? written : err;
2159 }
2160 EXPORT_SYMBOL(generic_file_aio_write_nolock);
2161
2162 ssize_t
2163 generic_file_aio_write_nolock(struct kiocb *iocb, const struct iovec *iov,
2164                                 unsigned long nr_segs, loff_t *ppos)
2165 {
2166         struct file *file = iocb->ki_filp;
2167         struct address_space *mapping = file->f_mapping;
2168         struct inode *inode = mapping->host;
2169         ssize_t ret;
2170         loff_t pos = *ppos;
2171
2172         ret = __generic_file_aio_write_nolock(iocb, iov, nr_segs, ppos);
2173
2174         if (ret > 0 && ((file->f_flags & O_SYNC) || IS_SYNC(inode))) {
2175                 int err;
2176
2177                 err = sync_page_range_nolock(inode, mapping, pos, ret);
2178                 if (err < 0)
2179                         ret = err;
2180         }
2181         return ret;
2182 }
2183
2184 static ssize_t
2185 __generic_file_write_nolock(struct file *file, const struct iovec *iov,
2186                                 unsigned long nr_segs, loff_t *ppos)
2187 {
2188         struct kiocb kiocb;
2189         ssize_t ret;
2190
2191         init_sync_kiocb(&kiocb, file);
2192         ret = __generic_file_aio_write_nolock(&kiocb, iov, nr_segs, ppos);
2193         if (ret == -EIOCBQUEUED)
2194                 ret = wait_on_sync_kiocb(&kiocb);
2195         return ret;
2196 }
2197
2198 ssize_t
2199 generic_file_write_nolock(struct file *file, const struct iovec *iov,
2200                                 unsigned long nr_segs, loff_t *ppos)
2201 {
2202         struct kiocb kiocb;
2203         ssize_t ret;
2204
2205         init_sync_kiocb(&kiocb, file);
2206         ret = generic_file_aio_write_nolock(&kiocb, iov, nr_segs, ppos);
2207         if (-EIOCBQUEUED == ret)
2208                 ret = wait_on_sync_kiocb(&kiocb);
2209         return ret;
2210 }
2211 EXPORT_SYMBOL(generic_file_write_nolock);
2212
2213 ssize_t generic_file_aio_write(struct kiocb *iocb, const char __user *buf,
2214                                size_t count, loff_t pos)
2215 {
2216         struct file *file = iocb->ki_filp;
2217         struct address_space *mapping = file->f_mapping;
2218         struct inode *inode = mapping->host;
2219         ssize_t ret;
2220         struct iovec local_iov = { .iov_base = (void __user *)buf,
2221                                         .iov_len = count };
2222
2223         BUG_ON(iocb->ki_pos != pos);
2224
2225         mutex_lock(&inode->i_mutex);
2226         ret = __generic_file_aio_write_nolock(iocb, &local_iov, 1,
2227                                                 &iocb->ki_pos);
2228         mutex_unlock(&inode->i_mutex);
2229
2230         if (ret > 0 && ((file->f_flags & O_SYNC) || IS_SYNC(inode))) {
2231                 ssize_t err;
2232
2233                 err = sync_page_range(inode, mapping, pos, ret);
2234                 if (err < 0)
2235                         ret = err;
2236         }
2237         return ret;
2238 }
2239 EXPORT_SYMBOL(generic_file_aio_write);
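
For orientation, a hedged sketch of how a simple filesystem of this era wires the exported helpers into its file_operations, modeled loosely on ext2 (myfs_file_ops is hypothetical):

static struct file_operations myfs_file_ops = {
        .llseek         = generic_file_llseek,
        .read           = generic_file_read,
        .write          = generic_file_write,
        .aio_read       = generic_file_aio_read,
        .aio_write      = generic_file_aio_write,
        .readv          = generic_file_readv,
        .writev         = generic_file_writev,
        .mmap           = generic_file_mmap,
        .open           = generic_file_open,
};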
2240
2241 ssize_t generic_file_write(struct file *file, const char __user *buf,
2242                            size_t count, loff_t *ppos)
2243 {
2244         struct address_space *mapping = file->f_mapping;
2245         struct inode *inode = mapping->host;
2246         ssize_t ret;
2247         struct iovec local_iov = { .iov_base = (void __user *)buf,
2248                                         .iov_len = count };
2249
2250         mutex_lock(&inode->i_mutex);
2251         ret = __generic_file_write_nolock(file, &local_iov, 1, ppos);
2252         mutex_unlock(&inode->i_mutex);
2253
2254         if (ret > 0 && ((file->f_flags & O_SYNC) || IS_SYNC(inode))) {
2255                 ssize_t err;
2256
2257                 err = sync_page_range(inode, mapping, *ppos - ret, ret);
2258                 if (err < 0)
2259                         ret = err;
2260         }
2261         return ret;
2262 }
2263 EXPORT_SYMBOL(generic_file_write);
2264
2265 ssize_t generic_file_readv(struct file *filp, const struct iovec *iov,
2266                         unsigned long nr_segs, loff_t *ppos)
2267 {
2268         struct kiocb kiocb;
2269         ssize_t ret;
2270
2271         init_sync_kiocb(&kiocb, filp);
2272         ret = __generic_file_aio_read(&kiocb, iov, nr_segs, ppos);
2273         if (-EIOCBQUEUED == ret)
2274                 ret = wait_on_sync_kiocb(&kiocb);
2275         return ret;
2276 }
2277 EXPORT_SYMBOL(generic_file_readv);
2278
2279 ssize_t generic_file_writev(struct file *file, const struct iovec *iov,
2280                         unsigned long nr_segs, loff_t *ppos)
2281 {
2282         struct address_space *mapping = file->f_mapping;
2283         struct inode *inode = mapping->host;
2284         ssize_t ret;
2285
2286         mutex_lock(&inode->i_mutex);
2287         ret = __generic_file_write_nolock(file, iov, nr_segs, ppos);
2288         mutex_unlock(&inode->i_mutex);
2289
2290         if (ret > 0 && ((file->f_flags & O_SYNC) || IS_SYNC(inode))) {
2291                 int err;
2292
2293                 err = sync_page_range(inode, mapping, *ppos - ret, ret);
2294                 if (err < 0)
2295                         ret = err;
2296         }
2297         return ret;
2298 }
2299 EXPORT_SYMBOL(generic_file_writev);
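
Userspace view of this path: the iovec array arrives here as 'iov' and the whole gather is written under a single i_mutex hold. A sketch assuming a scratch file "vecfile":

#include <stdio.h>
#include <fcntl.h>
#include <unistd.h>
#include <sys/uio.h>

int main(void)
{
        struct iovec iov[2] = {
                { .iov_base = "hello ", .iov_len = 6 },
                { .iov_base = "world\n", .iov_len = 6 },
        };
        int fd = open("vecfile", O_WRONLY | O_CREAT | O_TRUNC, 0644);

        if (fd < 0)
                return 1;
        printf("writev wrote %zd bytes\n", writev(fd, iov, 2)); /* 12 */
        close(fd);
        return 0;
}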
2300
2301 /*
2302  * Called under i_mutex for writes to S_ISREG files.  Returns -EIO if something
2303  * went wrong during pagecache shootdown.
2304  */
2305 static ssize_t
2306 generic_file_direct_IO(int rw, struct kiocb *iocb, const struct iovec *iov,
2307         loff_t offset, unsigned long nr_segs)
2308 {
2309         struct file *file = iocb->ki_filp;
2310         struct address_space *mapping = file->f_mapping;
2311         ssize_t retval;
2312         size_t write_len = 0;
2313
2314         /*
2315          * If it's a write, unmap all mmappings of the file up-front.  This
2316          * will cause any pte dirty bits to be propagated into the pageframes
2317          * for the subsequent filemap_write_and_wait().
2318          */
2319         if (rw == WRITE) {
2320                 write_len = iov_length(iov, nr_segs);
2321                 if (mapping_mapped(mapping))
2322                         unmap_mapping_range(mapping, offset, write_len, 0);
2323         }
2324
2325         retval = filemap_write_and_wait(mapping);
2326         if (retval == 0) {
2327                 retval = mapping->a_ops->direct_IO(rw, iocb, iov,
2328                                                 offset, nr_segs);
2329                 if (rw == WRITE && mapping->nrpages) {
2330                         pgoff_t end = (offset + write_len - 1)
2331                                                 >> PAGE_CACHE_SHIFT;
2332                         int err = invalidate_inode_pages2_range(mapping,
2333                                         offset >> PAGE_CACHE_SHIFT, end);
2334                         if (err)
2335                                 retval = err;
2336                 }
2337         }
2338         return retval;
2339 }