 * Copyright (c) 2000-2006 Silicon Graphics, Inc.
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of the GNU General Public License as
 * published by the Free Software Foundation.
 * This program is distributed in the hope that it would be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 * You should have received a copy of the GNU General Public License
 * along with this program; if not, write the Free Software Foundation,
 * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA

#include <linux/stddef.h>
#include <linux/errno.h>
#include <linux/slab.h>
#include <linux/pagemap.h>
#include <linux/init.h>
#include <linux/vmalloc.h>
#include <linux/bio.h>
#include <linux/sysctl.h>
#include <linux/proc_fs.h>
#include <linux/workqueue.h>
#include <linux/percpu.h>
#include <linux/blkdev.h>
#include <linux/hash.h>
#include <linux/kthread.h>
#include <linux/migrate.h>
#include "xfs_linux.h"

STATIC kmem_zone_t *xfs_buf_zone;
STATIC kmem_shaker_t xfs_buf_shake;
STATIC int xfsbufd(void *);
STATIC int xfsbufd_wakeup(int, gfp_t);
STATIC void xfs_buf_delwri_queue(xfs_buf_t *, int);

STATIC struct workqueue_struct *xfslogd_workqueue;
struct workqueue_struct *xfsdatad_workqueue;
	ktrace_enter(xfs_buf_trace_buf,
		(void *)(unsigned long)bp->b_flags,
		(void *)(unsigned long)bp->b_hold.counter,
		(void *)(unsigned long)bp->b_sema.count.counter,
		(void *)(unsigned long)((bp->b_file_offset>>32) & 0xffffffff),
		(void *)(unsigned long)(bp->b_file_offset & 0xffffffff),
		(void *)(unsigned long)bp->b_buffer_length,
		NULL, NULL, NULL, NULL, NULL);

ktrace_t *xfs_buf_trace_buf;

#define XFS_BUF_TRACE_SIZE	4096
#define XB_TRACE(bp, id, data)	\
	xfs_buf_trace(bp, id, (void *)data, (void *)__builtin_return_address(0))
#define XB_TRACE(bp, id, data)	do { } while (0)

#ifdef XFS_BUF_LOCK_TRACKING
# define XB_SET_OWNER(bp)	((bp)->b_last_holder = current->pid)
# define XB_CLEAR_OWNER(bp)	((bp)->b_last_holder = -1)
# define XB_GET_OWNER(bp)	((bp)->b_last_holder)
# define XB_SET_OWNER(bp)	do { } while (0)
# define XB_CLEAR_OWNER(bp)	do { } while (0)
# define XB_GET_OWNER(bp)	do { } while (0)
#define xb_to_gfp(flags) \
	((((flags) & XBF_READ_AHEAD) ? __GFP_NORETRY : \
	  ((flags) & XBF_DONT_BLOCK) ? GFP_NOFS : GFP_KERNEL) | __GFP_NOWARN)

#define xb_to_km(flags) \
	(((flags) & XBF_DONT_BLOCK) ? KM_NOFS : KM_SLEEP)

#define xfs_buf_allocate(flags) \
	kmem_zone_alloc(xfs_buf_zone, xb_to_km(flags))
#define xfs_buf_deallocate(bp) \
	kmem_zone_free(xfs_buf_zone, (bp));
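
/*
 * Illustrative note (not part of the original source): for an allocation
 * done on behalf of a buffer with XBF_DONT_BLOCK set, the macros above
 * expand roughly as
 *
 *	xb_to_gfp(XBF_DONT_BLOCK) == (GFP_NOFS | __GFP_NOWARN)
 *	xb_to_km(XBF_DONT_BLOCK)  == KM_NOFS
 *
 * while a readahead request (XBF_READ_AHEAD set) picks
 * (__GFP_NORETRY | __GFP_NOWARN) for its page allocations, i.e. fail fast
 * rather than wait for memory reclaim.
 */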
 * Page Region interfaces.
 *
 * For pages in filesystems where the blocksize is smaller than the
 * pagesize, we use the page->private field (long) to hold a bitmap
 * of uptodate regions within the page.
 *
 * Each such region is "bytes per page / bits per long" bytes long.
 *
 * NBPPR == number-of-bytes-per-page-region
 * BTOPR == bytes-to-page-region (rounded up)
 * BTOPRT == bytes-to-page-region-truncated (rounded down)

#if (BITS_PER_LONG == 32)
#define PRSHIFT		(PAGE_CACHE_SHIFT - 5)	/* (32 == 1<<5) */
#elif (BITS_PER_LONG == 64)
#define PRSHIFT		(PAGE_CACHE_SHIFT - 6)	/* (64 == 1<<6) */
#error BITS_PER_LONG must be 32 or 64

#define NBPPR		(PAGE_CACHE_SIZE/BITS_PER_LONG)
#define BTOPR(b)	(((unsigned int)(b) + (NBPPR - 1)) >> PRSHIFT)
#define BTOPRT(b)	(((unsigned int)(b) >> PRSHIFT))
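
/*
 * Worked example (illustrative, not part of the original source), assuming
 * PAGE_CACHE_SIZE == 4096 and BITS_PER_LONG == 64:
 *
 *	NBPPR	  = 4096 / 64 = 64	bytes covered by each bitmap bit
 *	PRSHIFT	  = 12 - 6    = 6
 *	BTOPR(b)  = (b + 63) >> 6	byte count rounded up to regions
 *	BTOPRT(b) = b >> 6		byte offset rounded down to a region
 *
 * A 512-byte block at byte offset 1024 therefore spans regions
 * BTOPRT(1024) = 16 through BTOPRT(1535) = 23; set_page_region() records
 * those regions in page->private, and the page only becomes PageUptodate
 * once every region bit in the long is set.
 */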
	first = BTOPR(offset);
	final = BTOPRT(offset + length - 1);
	first = min(first, final);

	mask <<= BITS_PER_LONG - (final - first);
	mask >>= BITS_PER_LONG - (final);

	ASSERT(offset + length <= PAGE_CACHE_SIZE);
	ASSERT((final - first) < BITS_PER_LONG && (final - first) >= 0);

	set_page_private(page,
		page_private(page) | page_region_mask(offset, length));
	if (page_private(page) == ~0UL)
		SetPageUptodate(page);

	unsigned long	mask = page_region_mask(offset, length);

	return (mask && (page_private(page) & mask) == mask);

 * Mapping of multi-page buffers into contiguous virtual space

typedef struct a_list {
STATIC a_list_t		*as_free_head;
STATIC int		as_list_len;
STATIC DEFINE_SPINLOCK(as_lock);

 * Try to batch vunmaps because they are costly.
	aentry = kmalloc(sizeof(a_list_t), GFP_NOWAIT);
	if (likely(aentry)) {
		aentry->next = as_free_head;
		aentry->vm_addr = addr;
		as_free_head = aentry;
	spin_unlock(&as_lock);

purge_addresses(void)
	a_list_t	*aentry, *old;

	if (as_free_head == NULL)
	aentry = as_free_head;
	spin_unlock(&as_lock);

	while ((old = aentry) != NULL) {
		vunmap(aentry->vm_addr);
		aentry = aentry->next;
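
/*
 * Illustrative note (not part of the original source): the a_list above
 * batches unmapped addresses so that the costly vunmap()/TLB-flush work can
 * be deferred; the free list is drained later via purge_addresses(), e.g.
 * when the list grows long (see the as_list_len checks in
 * _xfs_buf_map_pages() and the xfsbufd thread below).
 */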
 * Internal xfs_buf_t object manipulation

	xfs_buftarg_t		*target,
	xfs_off_t		range_base,
	xfs_buf_flags_t		flags)
	 * We don't want certain flags to appear in b_flags.
	flags &= ~(XBF_LOCK|XBF_MAPPED|XBF_DONT_BLOCK|XBF_READ_AHEAD);

	memset(bp, 0, sizeof(xfs_buf_t));
	atomic_set(&bp->b_hold, 1);
	init_MUTEX_LOCKED(&bp->b_iodonesema);
	INIT_LIST_HEAD(&bp->b_list);
	INIT_LIST_HEAD(&bp->b_hash_list);
	init_MUTEX_LOCKED(&bp->b_sema); /* held, no waiters */
	bp->b_target = target;
	bp->b_file_offset = range_base;
	 * Set buffer_length and count_desired to the same value initially.
	 * I/O routines should use count_desired, which will be the same in
	 * most cases but may be reset (e.g. XFS recovery).
	bp->b_buffer_length = bp->b_count_desired = range_length;
	bp->b_bn = XFS_BUF_DADDR_NULL;
	atomic_set(&bp->b_pin_count, 0);
	init_waitqueue_head(&bp->b_waiters);

	XFS_STATS_INC(xb_create);
	XB_TRACE(bp, "initialize", target);

 * Allocate a page array capable of holding a specified number
 * of pages, and point the page buf at it.

	xfs_buf_flags_t		flags)
	/* Make sure that we have a page list */
	if (bp->b_pages == NULL) {
		bp->b_offset = xfs_buf_poff(bp->b_file_offset);
		bp->b_page_count = page_count;
		if (page_count <= XB_PAGES) {
			bp->b_pages = bp->b_page_array;
			bp->b_pages = kmem_alloc(sizeof(struct page *) *
					page_count, xb_to_km(flags));
			if (bp->b_pages == NULL)
		memset(bp->b_pages, 0, sizeof(struct page *) * page_count);

 * Frees b_pages if it was allocated.
	if (bp->b_pages != bp->b_page_array) {
		kmem_free(bp->b_pages,
			  bp->b_page_count * sizeof(struct page *));
 * Releases the specified buffer.
 *
 * The modification state of any associated pages is left unchanged.
 * The buffer must not be on any hash - use xfs_buf_rele instead for
 * hashed and refcounted buffers.

	XB_TRACE(bp, "free", 0);

	ASSERT(list_empty(&bp->b_hash_list));

	if (bp->b_flags & _XBF_PAGE_CACHE) {
		if ((bp->b_flags & XBF_MAPPED) && (bp->b_page_count > 1))
			free_address(bp->b_addr - bp->b_offset);

		for (i = 0; i < bp->b_page_count; i++) {
			struct page	*page = bp->b_pages[i];

			ASSERT(!PagePrivate(page));
			page_cache_release(page);
		_xfs_buf_free_pages(bp);
	} else if (bp->b_flags & _XBF_KMEM_ALLOC) {
		 * XXX(hch): bp->b_count_desired might be incorrect (see
		 * xfs_buf_associate_memory for details), but fortunately
		 * the Linux version of kmem_free ignores the len argument..
		kmem_free(bp->b_addr, bp->b_count_desired);
		_xfs_buf_free_pages(bp);

	xfs_buf_deallocate(bp);
 * Finds all pages for the buffer in question and builds its page list.

_xfs_buf_lookup_pages(
	struct address_space	*mapping = bp->b_target->bt_mapping;
	size_t			blocksize = bp->b_target->bt_bsize;
	size_t			size = bp->b_count_desired;
	size_t			nbytes, offset;
	gfp_t			gfp_mask = xb_to_gfp(flags);
	unsigned short		page_count, i;

	end = bp->b_file_offset + bp->b_buffer_length;
	page_count = xfs_buf_btoc(end) - xfs_buf_btoct(bp->b_file_offset);

	error = _xfs_buf_get_pages(bp, page_count, flags);
	bp->b_flags |= _XBF_PAGE_CACHE;

	offset = bp->b_offset;
	first = bp->b_file_offset >> PAGE_CACHE_SHIFT;

	for (i = 0; i < bp->b_page_count; i++) {
		page = find_or_create_page(mapping, first + i, gfp_mask);
		if (unlikely(page == NULL)) {
			if (flags & XBF_READ_AHEAD) {
				bp->b_page_count = i;
				for (i = 0; i < bp->b_page_count; i++)
					unlock_page(bp->b_pages[i]);
			 * This could deadlock.
			 * But until all the XFS low-level code is revamped to
			 * handle buffer allocation failures we can't do much.
			if (!(++retries % 100))
				printk(KERN_ERR
					"XFS: possible memory allocation "
					"deadlock in %s (mode:0x%x)\n",
					__FUNCTION__, gfp_mask);

			XFS_STATS_INC(xb_page_retries);
			xfsbufd_wakeup(0, gfp_mask);
			blk_congestion_wait(WRITE, HZ/50);

		XFS_STATS_INC(xb_page_found);

		nbytes = min_t(size_t, size, PAGE_CACHE_SIZE - offset);

		ASSERT(!PagePrivate(page));
		if (!PageUptodate(page)) {
			if (blocksize >= PAGE_CACHE_SIZE) {
				if (flags & XBF_READ)
			} else if (!PagePrivate(page)) {
				if (test_page_region(page, offset, nbytes))

		bp->b_pages[i] = page;

	for (i = 0; i < bp->b_page_count; i++)
		unlock_page(bp->b_pages[i]);

	if (page_count == bp->b_page_count)
		bp->b_flags |= XBF_DONE;

	XB_TRACE(bp, "lookup_pages", (long)page_count);
 * Map buffer into kernel address-space if necessary.

	/* A single page buffer is always mappable */
	if (bp->b_page_count == 1) {
		bp->b_addr = page_address(bp->b_pages[0]) + bp->b_offset;
		bp->b_flags |= XBF_MAPPED;
	} else if (flags & XBF_MAPPED) {
		if (as_list_len > 64)
		bp->b_addr = vmap(bp->b_pages, bp->b_page_count,
					VM_MAP, PAGE_KERNEL);
		if (unlikely(bp->b_addr == NULL))
		bp->b_addr += bp->b_offset;
		bp->b_flags |= XBF_MAPPED;
 * Finding and Reading Buffers

 * Looks up, and creates if absent, a lockable buffer for
 * a given range of an inode.  The buffer is returned
 * locked.  If other overlapping buffers exist, they are
 * released before the new buffer is created and locked,
 * which may imply that this call will block until those buffers
 * are unlocked.  No I/O is implied by this call.

	xfs_buftarg_t		*btp,	/* block device target */
	xfs_off_t		ioff,	/* starting offset of range */
	size_t			isize,	/* length of range */
	xfs_buf_flags_t		flags,
	xfs_off_t		range_base;

	range_base = (ioff << BBSHIFT);
	range_length = (isize << BBSHIFT);

	/* Check for IOs smaller than the sector size / not sector aligned */
	ASSERT(!(range_length < (1 << btp->bt_sshift)));
	ASSERT(!(range_base & (xfs_off_t)btp->bt_smask));

	hash = &btp->bt_hash[hash_long((unsigned long)ioff, btp->bt_hashshift)];

	spin_lock(&hash->bh_lock);

	list_for_each_entry_safe(bp, n, &hash->bh_list, b_hash_list) {
		ASSERT(btp == bp->b_target);
		if (bp->b_file_offset == range_base &&
		    bp->b_buffer_length == range_length) {
			 * If we look at something, bring it to the
			 * front of the list for next time.
			atomic_inc(&bp->b_hold);
			list_move(&bp->b_hash_list, &hash->bh_list);

	_xfs_buf_initialize(new_bp, btp, range_base,
			range_length, flags);
	new_bp->b_hash = hash;
	list_add(&new_bp->b_hash_list, &hash->bh_list);

	XFS_STATS_INC(xb_miss_locked);

	spin_unlock(&hash->bh_lock);
	spin_unlock(&hash->bh_lock);

	/* Attempt to get the semaphore without sleeping;
	 * if this does not work then we need to drop the
	 * spinlock and do a hard attempt on the semaphore.
	if (down_trylock(&bp->b_sema)) {
		if (!(flags & XBF_TRYLOCK)) {
			/* wait for buffer ownership */
			XB_TRACE(bp, "get_lock", 0);
			XFS_STATS_INC(xb_get_locked_waited);
			/* We asked for a trylock and failed; no need
			 * to look at file offset and length here, we
			 * know that this buffer at least overlaps our
			 * buffer and is locked, therefore our buffer
			 * either does not exist, or is this buffer.
			XFS_STATS_INC(xb_busy_locked);

	if (bp->b_flags & XBF_STALE) {
		ASSERT((bp->b_flags & _XBF_DELWRI_Q) == 0);
		bp->b_flags &= XBF_MAPPED;

	XB_TRACE(bp, "got_lock", 0);
	XFS_STATS_INC(xb_get_locked);
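
/*
 * Illustrative usage sketch (not part of the original source): callers such
 * as xfs_buf_get_flags() below pre-allocate a buffer and let _xfs_buf_find()
 * either return an existing, locked buffer for the range or insert and
 * return the new one, along the lines of
 *
 *	new_bp = xfs_buf_allocate(flags);
 *	bp = _xfs_buf_find(target, ioff, isize, flags, new_bp);
 *	if (bp != new_bp)
 *		xfs_buf_deallocate(new_bp);	-- found an existing buffer
 */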
 * Assembles a buffer covering the specified range.
 * Storage in memory for all portions of the buffer will be allocated,
 * although backing storage may not be.

	xfs_buftarg_t		*target,	/* target for buffer */
	xfs_off_t		ioff,		/* starting offset of range */
	size_t			isize,		/* length of range */
	xfs_buf_flags_t		flags)
	xfs_buf_t		*bp, *new_bp;

	new_bp = xfs_buf_allocate(flags);
	if (unlikely(!new_bp))

	bp = _xfs_buf_find(target, ioff, isize, flags, new_bp);
		error = _xfs_buf_lookup_pages(bp, flags);
		xfs_buf_deallocate(new_bp);
		if (unlikely(bp == NULL))

	for (i = 0; i < bp->b_page_count; i++)
		mark_page_accessed(bp->b_pages[i]);

	if (!(bp->b_flags & XBF_MAPPED)) {
		error = _xfs_buf_map_pages(bp, flags);
		if (unlikely(error)) {
			printk(KERN_WARNING "%s: failed to map pages\n",

	XFS_STATS_INC(xb_get);

	 * Always fill in the block number now, the mapped cases can do
	 * their own overlay of this later.
	bp->b_count_desired = bp->b_buffer_length;

	XB_TRACE(bp, "get", (unsigned long)flags);

	if (flags & (XBF_LOCK | XBF_TRYLOCK))

	xfs_buftarg_t		*target,
	xfs_buf_flags_t		flags)

	bp = xfs_buf_get_flags(target, ioff, isize, flags);

	if (!XFS_BUF_ISDONE(bp)) {
		XB_TRACE(bp, "read", (unsigned long)flags);
		XFS_STATS_INC(xb_get_read);
		xfs_buf_iostart(bp, flags);
	} else if (flags & XBF_ASYNC) {
		XB_TRACE(bp, "read_async", (unsigned long)flags);
		 * Read ahead call which is already satisfied,
		XB_TRACE(bp, "read_done", (unsigned long)flags);
		/* We do not want read in the flags */
		bp->b_flags &= ~XBF_READ;

	if (flags & (XBF_LOCK | XBF_TRYLOCK))
 * If we are not low on memory then do the readahead in a deadlock-safe
 * manner.

	xfs_buftarg_t		*target,
	xfs_buf_flags_t		flags)
	struct backing_dev_info *bdi;

	bdi = target->bt_mapping->backing_dev_info;
	if (bdi_read_congested(bdi))

	flags |= (XBF_TRYLOCK|XBF_ASYNC|XBF_READ_AHEAD);
	xfs_buf_read_flags(target, ioff, isize, flags);
	xfs_buftarg_t		*target)

	bp = xfs_buf_allocate(0);
	_xfs_buf_initialize(bp, target, 0, len, 0);

static inline struct page *
	if (((unsigned long)addr < VMALLOC_START) ||
	    ((unsigned long)addr >= VMALLOC_END)) {
		return virt_to_page(addr);
	return vmalloc_to_page(addr);

xfs_buf_associate_memory(
	page_count = PAGE_CACHE_ALIGN(len) >> PAGE_CACHE_SHIFT;
	offset = (off_t) mem - ((off_t)mem & PAGE_CACHE_MASK);
	if (offset && (len > PAGE_CACHE_SIZE))

	/* Free any previous set of page pointers */
	_xfs_buf_free_pages(bp);

	rval = _xfs_buf_get_pages(bp, page_count, 0);

	bp->b_offset = offset;
	ptr = (size_t) mem & PAGE_CACHE_MASK;
	end = PAGE_CACHE_ALIGN((size_t) mem + len);

	/* set up first page */
	bp->b_pages[0] = mem_to_page(mem);

	ptr += PAGE_CACHE_SIZE;
	bp->b_page_count = ++i;
		bp->b_pages[i] = mem_to_page((void *)ptr);
		bp->b_page_count = ++i;
		ptr += PAGE_CACHE_SIZE;

	bp->b_count_desired = bp->b_buffer_length = len;
	bp->b_flags |= XBF_MAPPED;
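
/*
 * Worked example (illustrative, not part of the original source), assuming
 * PAGE_CACHE_SIZE == 4096: for a caller-supplied region of len == 8192
 * starting on a page boundary,
 *
 *	offset     = mem - (mem & PAGE_CACHE_MASK)            = 0
 *	page_count = PAGE_CACHE_ALIGN(8192) >> PAGE_CACHE_SHIFT = 2
 *
 * so two struct page pointers are looked up with mem_to_page(), and the
 * buffer can be marked XBF_MAPPED directly because the supplied memory is
 * already virtually contiguous.
 */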
	xfs_buftarg_t		*target)
	size_t			malloc_len = len;

	bp = xfs_buf_allocate(0);
	if (unlikely(bp == NULL))
	_xfs_buf_initialize(bp, target, 0, len, 0);

	data = kmem_alloc(malloc_len, KM_SLEEP | KM_MAYFAIL | KM_LARGE);
	if (unlikely(data == NULL))

	/* check whether alignment matches.. */
	if ((__psunsigned_t)data !=
	    ((__psunsigned_t)data & ~target->bt_smask)) {
		/* .. else double the size and try again */
		kmem_free(data, malloc_len);

	error = xfs_buf_associate_memory(bp, data, len);
	bp->b_flags |= _XBF_KMEM_ALLOC;

	XB_TRACE(bp, "no_daddr", data);

	kmem_free(data, malloc_len);
 * Increment reference count on buffer, to hold the buffer concurrently
 * with another thread which may release (free) the buffer asynchronously.
 * Must hold the buffer already to call this function.

	atomic_inc(&bp->b_hold);
	XB_TRACE(bp, "hold", 0);

 * Releases a hold on the specified buffer.  If the
 * hold count is 1, calls xfs_buf_free.

	xfs_bufhash_t		*hash = bp->b_hash;

	XB_TRACE(bp, "rele", bp->b_relse);

	if (unlikely(!hash)) {
		ASSERT(!bp->b_relse);
		if (atomic_dec_and_test(&bp->b_hold))

	if (atomic_dec_and_lock(&bp->b_hold, &hash->bh_lock)) {
			atomic_inc(&bp->b_hold);
			spin_unlock(&hash->bh_lock);
			(*(bp->b_relse)) (bp);
		} else if (bp->b_flags & XBF_FS_MANAGED) {
			spin_unlock(&hash->bh_lock);
			ASSERT(!(bp->b_flags & (XBF_DELWRI|_XBF_DELWRI_Q)));
			list_del_init(&bp->b_hash_list);
			spin_unlock(&hash->bh_lock);

	 * Catch reference count leaks
	ASSERT(atomic_read(&bp->b_hold) >= 0);
 * Mutual exclusion on buffers.  Locking model:
 *
 * Buffers associated with inodes for which buffer locking
 * is not enabled are not protected by semaphores, and are
 * assumed to be exclusively owned by the caller.  There is a
 * spinlock in the buffer, used by the caller when concurrent
 * access is possible.
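
/*
 * Illustrative usage sketch (not part of the original source; "can_sleep"
 * is a hypothetical condition): a caller that cannot block first tries the
 * non-blocking variant and only falls back to the blocking lock when it is
 * allowed to wait, e.g.
 *
 *	if (xfs_buf_cond_lock(bp) == 0) {
 *		... use bp ...
 *		xfs_buf_unlock(bp);
 *	} else if (can_sleep) {
 *		xfs_buf_lock(bp);
 *		... use bp ...
 *		xfs_buf_unlock(bp);
 *	}
 *
 * xfs_buf_cond_lock() returns 0 on success and -EBUSY if the buffer is
 * already locked (see below).
 */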
 * Locks a buffer object, if it is not already locked.
 * Note that this in no way locks the underlying pages, so it is only
 * useful for synchronizing concurrent use of buffer objects, not for
 * synchronizing independent access to the underlying pages.

	locked = down_trylock(&bp->b_sema) == 0;
	XB_TRACE(bp, "cond_lock", (long)locked);
	return locked ? 0 : -EBUSY;

#if defined(DEBUG) || defined(XFS_BLI_TRACE)
	return atomic_read(&bp->b_sema.count);

 * Locks a buffer object.
 * Note that this in no way locks the underlying pages, so it is only
 * useful for synchronizing concurrent use of buffer objects, not for
 * synchronizing independent access to the underlying pages.

	XB_TRACE(bp, "lock", 0);
	if (atomic_read(&bp->b_io_remaining))
		blk_run_address_space(bp->b_target->bt_mapping);
	XB_TRACE(bp, "locked", 0);
 * Releases the lock on the buffer object.
 * If the buffer is marked delwri but is not queued, do so before we
 * unlock the buffer as we need to set flags correctly.  We also need to
 * take a reference for the delwri queue because the unlocker is going to
 * drop theirs and they don't know we just queued it.

	if ((bp->b_flags & (XBF_DELWRI|_XBF_DELWRI_Q)) == XBF_DELWRI) {
		atomic_inc(&bp->b_hold);
		bp->b_flags |= XBF_ASYNC;
		xfs_buf_delwri_queue(bp, 0);

	XB_TRACE(bp, "unlock", 0);
 * Pinning Buffer Storage in Memory
 * Ensure that no attempt to force a buffer to disk will succeed.

	atomic_inc(&bp->b_pin_count);
	XB_TRACE(bp, "pin", (long)bp->b_pin_count.counter);

	if (atomic_dec_and_test(&bp->b_pin_count))
		wake_up_all(&bp->b_waiters);
	XB_TRACE(bp, "unpin", (long)bp->b_pin_count.counter);

	return atomic_read(&bp->b_pin_count);

	DECLARE_WAITQUEUE	(wait, current);

	if (atomic_read(&bp->b_pin_count) == 0)

	add_wait_queue(&bp->b_waiters, &wait);
		set_current_state(TASK_UNINTERRUPTIBLE);
		if (atomic_read(&bp->b_pin_count) == 0)
		if (atomic_read(&bp->b_io_remaining))
			blk_run_address_space(bp->b_target->bt_mapping);
	remove_wait_queue(&bp->b_waiters, &wait);
	set_current_state(TASK_RUNNING);
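
/*
 * Illustrative note (not part of the original source): pinning is a simple
 * counter protocol -- xfs_buf_pin()/xfs_buf_unpin() bump and drop
 * b_pin_count, and a writer that must not race with a pinned buffer calls
 * xfs_buf_wait_unpin() (as xfs_buf_iorequest() does for XBF_WRITE below)
 * to sleep on b_waiters until the pin count reaches zero.
 */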
 * Buffer Utility Routines

	xfs_buf_t		*bp = (xfs_buf_t *)v;

		(*(bp->b_iodone))(bp);
	else if (bp->b_flags & XBF_ASYNC)

	bp->b_flags &= ~(XBF_READ | XBF_WRITE);
	if (bp->b_error == 0)
		bp->b_flags |= XBF_DONE;

	XB_TRACE(bp, "iodone", bp->b_iodone);

	if ((bp->b_iodone) || (bp->b_flags & XBF_ASYNC)) {
			INIT_WORK(&bp->b_iodone_work, xfs_buf_iodone_work, bp);
			queue_work(xfslogd_workqueue, &bp->b_iodone_work);
			xfs_buf_iodone_work(bp);
		up(&bp->b_iodonesema);

	ASSERT(error >= 0 && error <= 0xffff);
	bp->b_error = (unsigned short)error;
	XB_TRACE(bp, "ioerror", (unsigned long)error);

 * Initiate I/O on a buffer, based on the flags supplied.
 * The b_iodone routine in the buffer supplied will only be called
 * when all of the subsidiary I/O requests, if any, have been completed.

	xfs_buf_flags_t		flags)

	XB_TRACE(bp, "iostart", (unsigned long)flags);

	if (flags & XBF_DELWRI) {
		bp->b_flags &= ~(XBF_READ | XBF_WRITE | XBF_ASYNC);
		bp->b_flags |= flags & (XBF_DELWRI | XBF_ASYNC);
		xfs_buf_delwri_queue(bp, 1);

	bp->b_flags &= ~(XBF_READ | XBF_WRITE | XBF_ASYNC | XBF_DELWRI | \
			XBF_READ_AHEAD | _XBF_RUN_QUEUES);
	bp->b_flags |= flags & (XBF_READ | XBF_WRITE | XBF_ASYNC | \
			XBF_READ_AHEAD | _XBF_RUN_QUEUES);

	BUG_ON(bp->b_bn == XFS_BUF_DADDR_NULL);

	/* For writes allow an alternate strategy routine to precede
	 * the actual I/O request (which may not be issued at all in
	 * a shutdown situation, for example).
	status = (flags & XBF_WRITE) ?
		xfs_buf_iostrategy(bp) : xfs_buf_iorequest(bp);

	/* Wait for I/O if we are not an async request.
	 * Note: async I/O request completion will release the buffer,
	 * and that can already be done by this point.  So using the
	 * buffer pointer from here on, after async I/O, is invalid.
	if (!status && !(flags & XBF_ASYNC))
		status = xfs_buf_iowait(bp);
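
/*
 * Illustrative usage sketch (not part of the original source): the flag
 * combinations map onto three behaviours of xfs_buf_iostart() --
 *
 *	xfs_buf_iostart(bp, XBF_DELWRI | XBF_ASYNC);	queue for delayed write
 *	xfs_buf_iostart(bp, XBF_READ);			synchronous read, waits
 *	xfs_buf_iostart(bp, XBF_WRITE | XBF_ASYNC);	async write via the
 *							buffer's strategy routine
 *
 * Only the non-async callers may touch bp after the call returns, for the
 * reason noted in the comment above.
 */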
STATIC __inline__ int
	ASSERT(bp->b_flags & (XBF_READ | XBF_WRITE));
	if (bp->b_flags & XBF_READ)
		return bp->b_locked;

STATIC __inline__ void
	if (atomic_dec_and_test(&bp->b_io_remaining) == 1) {
		xfs_buf_ioend(bp, schedule);

	unsigned int		bytes_done,
	xfs_buf_t		*bp = (xfs_buf_t *)bio->bi_private;
	unsigned int		blocksize = bp->b_target->bt_bsize;
	struct bio_vec		*bvec = bio->bi_io_vec + bio->bi_vcnt - 1;

	if (!test_bit(BIO_UPTODATE, &bio->bi_flags))

		struct page	*page = bvec->bv_page;

		ASSERT(!PagePrivate(page));
		if (unlikely(bp->b_error)) {
			if (bp->b_flags & XBF_READ)
				ClearPageUptodate(page);
		} else if (blocksize >= PAGE_CACHE_SIZE) {
			SetPageUptodate(page);
		} else if (!PagePrivate(page) &&
				(bp->b_flags & _XBF_PAGE_CACHE)) {
			set_page_region(page, bvec->bv_offset, bvec->bv_len);

		if (--bvec >= bio->bi_io_vec)
			prefetchw(&bvec->bv_page->flags);

		if (_xfs_buf_iolocked(bp)) {
	} while (bvec >= bio->bi_io_vec);

	_xfs_buf_ioend(bp, 1);

	int			i, rw, map_i, total_nr_pages, nr_pages;
	int			offset = bp->b_offset;
	int			size = bp->b_count_desired;
	sector_t		sector = bp->b_bn;
	unsigned int		blocksize = bp->b_target->bt_bsize;
	int			locking = _xfs_buf_iolocked(bp);

	total_nr_pages = bp->b_page_count;

	if (bp->b_flags & XBF_ORDERED) {
		ASSERT(!(bp->b_flags & XBF_READ));
	} else if (bp->b_flags & _XBF_RUN_QUEUES) {
		ASSERT(!(bp->b_flags & XBF_READ_AHEAD));
		bp->b_flags &= ~_XBF_RUN_QUEUES;
		rw = (bp->b_flags & XBF_WRITE) ? WRITE_SYNC : READ_SYNC;
		rw = (bp->b_flags & XBF_WRITE) ? WRITE :
		     (bp->b_flags & XBF_READ_AHEAD) ? READA : READ;
	/* Special code path for reading a sub-page-size buffer -- we
	 * populate the whole page, and hence the other metadata in the
	 * same page.  This optimization is only valid when the filesystem
	 * block size is not smaller than the page size.
	if ((bp->b_buffer_length < PAGE_CACHE_SIZE) &&
	    (bp->b_flags & XBF_READ) && locking &&
	    (blocksize >= PAGE_CACHE_SIZE)) {
		bio = bio_alloc(GFP_NOIO, 1);

		bio->bi_bdev = bp->b_target->bt_bdev;
		bio->bi_sector = sector - (offset >> BBSHIFT);
		bio->bi_end_io = xfs_buf_bio_end_io;
		bio->bi_private = bp;

		bio_add_page(bio, bp->b_pages[0], PAGE_CACHE_SIZE, 0);
		atomic_inc(&bp->b_io_remaining);

	/* Lock down the pages which we need to for the request */
	if (locking && (bp->b_flags & XBF_WRITE) && (bp->b_locked == 0)) {
		for (i = 0; size; i++) {
			int		nbytes = PAGE_CACHE_SIZE - offset;
			struct page	*page = bp->b_pages[i];

		offset = bp->b_offset;
		size = bp->b_count_desired;

	atomic_inc(&bp->b_io_remaining);
	nr_pages = BIO_MAX_SECTORS >> (PAGE_SHIFT - BBSHIFT);
	if (nr_pages > total_nr_pages)
		nr_pages = total_nr_pages;

	bio = bio_alloc(GFP_NOIO, nr_pages);
	bio->bi_bdev = bp->b_target->bt_bdev;
	bio->bi_sector = sector;
	bio->bi_end_io = xfs_buf_bio_end_io;
	bio->bi_private = bp;

	for (; size && nr_pages; nr_pages--, map_i++) {
		int	rbytes, nbytes = PAGE_CACHE_SIZE - offset;

		rbytes = bio_add_page(bio, bp->b_pages[map_i], nbytes, offset);
		if (rbytes < nbytes)

		sector += nbytes >> BBSHIFT;

	if (likely(bio->bi_size)) {
		submit_bio(rw, bio);
		xfs_buf_ioerror(bp, EIO);
1260 XB_TRACE(bp, "iorequest", 0);
1262 if (bp->b_flags & XBF_DELWRI) {
1263 xfs_buf_delwri_queue(bp, 1);
1267 if (bp->b_flags & XBF_WRITE) {
1268 xfs_buf_wait_unpin(bp);
1273 /* Set the count to 1 initially, this will stop an I/O
1274 * completion callout which happens before we have started
1275 * all the I/O from calling xfs_buf_ioend too early.
1277 atomic_set(&bp->b_io_remaining, 1);
1278 _xfs_buf_ioapply(bp);
1279 _xfs_buf_ioend(bp, 0);
 * Waits for I/O to complete on the buffer supplied.
 * It returns immediately if no I/O is pending.
 * It returns the I/O error code, if any, or 0 if there was no error.

	XB_TRACE(bp, "iowait", 0);
	if (atomic_read(&bp->b_io_remaining))
		blk_run_address_space(bp->b_target->bt_mapping);
	down(&bp->b_iodonesema);
	XB_TRACE(bp, "iowaited", (long)bp->b_error);

	if (bp->b_flags & XBF_MAPPED)
		return XFS_BUF_PTR(bp) + offset;

	offset += bp->b_offset;
	page = bp->b_pages[offset >> PAGE_CACHE_SHIFT];
	return (xfs_caddr_t)page_address(page) + (offset & (PAGE_CACHE_SIZE-1));

 * Move data into or out of a buffer.

	xfs_buf_t		*bp,	/* buffer to process */
	size_t			boff,	/* starting buffer offset */
	size_t			bsize,	/* length to copy */
	caddr_t			data,	/* data address */
	xfs_buf_rw_t		mode)	/* read/write/zero flag */
	size_t			bend, cpoff, csize;

	bend = boff + bsize;
	while (boff < bend) {
		page = bp->b_pages[xfs_buf_btoct(boff + bp->b_offset)];
		cpoff = xfs_buf_poff(boff + bp->b_offset);
		csize = min_t(size_t,
			      PAGE_CACHE_SIZE-cpoff, bp->b_count_desired-boff);

		ASSERT(((csize + cpoff) <= PAGE_CACHE_SIZE));

			memset(page_address(page) + cpoff, 0, csize);
			memcpy(data, page_address(page) + cpoff, csize);
			memcpy(page_address(page) + cpoff, data, csize);

 * Handling of buffer targets (buftargs).

 * Wait for any bufs with callbacks that have been submitted but
 * have not yet returned... walk the hash list for the target.

	xfs_bufhash_t	*hash;

	for (i = 0; i < (1 << btp->bt_hashshift); i++) {
		hash = &btp->bt_hash[i];

		spin_lock(&hash->bh_lock);
		list_for_each_entry_safe(bp, n, &hash->bh_list, b_hash_list) {
			ASSERT(btp == bp->b_target);
			if (!(bp->b_flags & XBF_FS_MANAGED)) {
				spin_unlock(&hash->bh_lock);
				 * Catch superblock reference count leaks
				BUG_ON(bp->b_bn == 0);
		spin_unlock(&hash->bh_lock);
 * Allocate buffer hash table for a given target.
 * For devices containing metadata (i.e. not the log/realtime devices)
 * we need to allocate a much larger hash table.

	btp->bt_hashshift = external ? 3 : 8;	/* 8 or 256 buckets */
	btp->bt_hashmask = (1 << btp->bt_hashshift) - 1;
	btp->bt_hash = kmem_zalloc((1 << btp->bt_hashshift) *
					sizeof(xfs_bufhash_t), KM_SLEEP);
	for (i = 0; i < (1 << btp->bt_hashshift); i++) {
		spin_lock_init(&btp->bt_hash[i].bh_lock);
		INIT_LIST_HEAD(&btp->bt_hash[i].bh_list);

	kmem_free(btp->bt_hash, (1<<btp->bt_hashshift) * sizeof(xfs_bufhash_t));
	btp->bt_hash = NULL;

 * buftarg list for delwrite queue processing
STATIC LIST_HEAD(xfs_buftarg_list);
STATIC DEFINE_SPINLOCK(xfs_buftarg_lock);

xfs_register_buftarg(
	spin_lock(&xfs_buftarg_lock);
	list_add(&btp->bt_list, &xfs_buftarg_list);
	spin_unlock(&xfs_buftarg_lock);

xfs_unregister_buftarg(
	spin_lock(&xfs_buftarg_lock);
	list_del(&btp->bt_list);
	spin_unlock(&xfs_buftarg_lock);

	xfs_flush_buftarg(btp, 1);
		xfs_blkdev_put(btp->bt_bdev);
	xfs_free_bufhash(btp);
	iput(btp->bt_mapping->host);

	/* Unregister the buftarg first so that we don't get a
	 * wakeup finding a non-existent task
	xfs_unregister_buftarg(btp);
	kthread_stop(btp->bt_task);

	kmem_free(btp, sizeof(*btp));

xfs_setsize_buftarg_flags(
	unsigned int		blocksize,
	unsigned int		sectorsize,
	btp->bt_bsize = blocksize;
	btp->bt_sshift = ffs(sectorsize) - 1;
	btp->bt_smask = sectorsize - 1;

	if (set_blocksize(btp->bt_bdev, sectorsize)) {
			"XFS: Cannot set_blocksize to %u on device %s\n",
			sectorsize, XFS_BUFTARG_NAME(btp));

	    (PAGE_CACHE_SIZE / BITS_PER_LONG) > sectorsize) {
			"XFS: %u byte sectors in use on device %s.  "
			"This is suboptimal; %u or greater is ideal.\n",
			sectorsize, XFS_BUFTARG_NAME(btp),
			(unsigned int)PAGE_CACHE_SIZE / BITS_PER_LONG);
 * When allocating the initial buffer target we have not yet
 * read in the superblock, so we don't know what size sectors
 * are being used at this early stage.  Play safe.

xfs_setsize_buftarg_early(
	struct block_device	*bdev)
	return xfs_setsize_buftarg_flags(btp,
			PAGE_CACHE_SIZE, bdev_hardsect_size(bdev), 0);

xfs_setsize_buftarg(
	unsigned int		blocksize,
	unsigned int		sectorsize)
	return xfs_setsize_buftarg_flags(btp, blocksize, sectorsize, 1);
xfs_mapping_buftarg(
	struct block_device	*bdev)
	struct backing_dev_info	*bdi;
	struct inode		*inode;
	struct address_space	*mapping;
	static const struct address_space_operations mapping_aops = {
		.sync_page = block_sync_page,
		.migratepage = fail_migrate_page,

	inode = new_inode(bdev->bd_inode->i_sb);
			"XFS: Cannot allocate mapping inode for device %s\n",
			XFS_BUFTARG_NAME(btp));
	inode->i_mode = S_IFBLK;
	inode->i_bdev = bdev;
	inode->i_rdev = bdev->bd_dev;
	bdi = blk_get_backing_dev_info(bdev);
		bdi = &default_backing_dev_info;
	mapping = &inode->i_data;
	mapping->a_ops = &mapping_aops;
	mapping->backing_dev_info = bdi;
	mapping_set_gfp_mask(mapping, GFP_NOFS);
	btp->bt_mapping = mapping;

xfs_alloc_delwrite_queue(
	INIT_LIST_HEAD(&btp->bt_list);
	INIT_LIST_HEAD(&btp->bt_delwrite_queue);
	spinlock_init(&btp->bt_delwrite_lock, "delwri_lock");

	btp->bt_task = kthread_run(xfsbufd, btp, "xfsbufd");
	if (IS_ERR(btp->bt_task)) {
		error = PTR_ERR(btp->bt_task);
	xfs_register_buftarg(btp);

	struct block_device	*bdev,
	btp = kmem_zalloc(sizeof(*btp), KM_SLEEP);

	btp->bt_dev = bdev->bd_dev;
	btp->bt_bdev = bdev;
	if (xfs_setsize_buftarg_early(btp, bdev))
	if (xfs_mapping_buftarg(btp, bdev))
	if (xfs_alloc_delwrite_queue(btp))
	xfs_alloc_bufhash(btp, external);

	kmem_free(btp, sizeof(*btp));

 * Delayed write buffer handling

xfs_buf_delwri_queue(
	struct list_head	*dwq = &bp->b_target->bt_delwrite_queue;
	spinlock_t		*dwlk = &bp->b_target->bt_delwrite_lock;

	XB_TRACE(bp, "delwri_q", (long)unlock);
	ASSERT((bp->b_flags&(XBF_DELWRI|XBF_ASYNC)) == (XBF_DELWRI|XBF_ASYNC));

	/* If already in the queue, dequeue and place at tail */
	if (!list_empty(&bp->b_list)) {
		ASSERT(bp->b_flags & _XBF_DELWRI_Q);
			atomic_dec(&bp->b_hold);
		list_del(&bp->b_list);

	bp->b_flags |= _XBF_DELWRI_Q;
	list_add_tail(&bp->b_list, dwq);
	bp->b_queuetime = jiffies;

xfs_buf_delwri_dequeue(
	spinlock_t		*dwlk = &bp->b_target->bt_delwrite_lock;

	if ((bp->b_flags & XBF_DELWRI) && !list_empty(&bp->b_list)) {
		ASSERT(bp->b_flags & _XBF_DELWRI_Q);
		list_del_init(&bp->b_list);
	bp->b_flags &= ~(XBF_DELWRI|_XBF_DELWRI_Q);

	XB_TRACE(bp, "delwri_dq", (long)dequeued);
xfs_buf_runall_queues(
	struct workqueue_struct	*queue)
	flush_workqueue(queue);

	spin_lock(&xfs_buftarg_lock);
	list_for_each_entry(btp, &xfs_buftarg_list, bt_list) {
		if (test_bit(XBT_FORCE_SLEEP, &btp->bt_flags))
		set_bit(XBT_FORCE_FLUSH, &btp->bt_flags);
		wake_up_process(btp->bt_task);
	spin_unlock(&xfs_buftarg_lock);

	struct list_head	tmp;
	xfs_buftarg_t		*target = (xfs_buftarg_t *)data;
	struct list_head	*dwq = &target->bt_delwrite_queue;
	spinlock_t		*dwlk = &target->bt_delwrite_lock;

	current->flags |= PF_MEMALLOC;

	INIT_LIST_HEAD(&tmp);
		if (unlikely(freezing(current))) {
			set_bit(XBT_FORCE_SLEEP, &target->bt_flags);
			clear_bit(XBT_FORCE_SLEEP, &target->bt_flags);

		schedule_timeout_interruptible(
			xfs_buf_timer_centisecs * msecs_to_jiffies(10));

		age = xfs_buf_age_centisecs * msecs_to_jiffies(10);
		list_for_each_entry_safe(bp, n, dwq, b_list) {
			XB_TRACE(bp, "walkq1", (long)xfs_buf_ispin(bp));
			ASSERT(bp->b_flags & XBF_DELWRI);

			if (!xfs_buf_ispin(bp) && !xfs_buf_cond_lock(bp)) {
				if (!test_bit(XBT_FORCE_FLUSH,
						&target->bt_flags) &&
						time_before(jiffies,
							bp->b_queuetime + age)) {

				bp->b_flags &= ~(XBF_DELWRI|_XBF_DELWRI_Q|
				bp->b_flags |= XBF_WRITE;
				list_move_tail(&bp->b_list, &tmp);

		while (!list_empty(&tmp)) {
			bp = list_entry(tmp.next, xfs_buf_t, b_list);
			ASSERT(target == bp->b_target);

			list_del_init(&bp->b_list);
			xfs_buf_iostrategy(bp);

		if (as_list_len > 0)
		blk_run_address_space(target->bt_mapping);

		clear_bit(XBT_FORCE_FLUSH, &target->bt_flags);
	} while (!kthread_should_stop());
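
/*
 * Illustrative note (not part of the original source): the loop above wakes
 * every xfs_buf_timer_centisecs (converted to jiffies) and writes back
 * delwri buffers that have been queued for longer than
 * xfs_buf_age_centisecs, unless XBT_FORCE_FLUSH is set, in which case the
 * age check is skipped and everything on the queue is pushed.
 */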
 * Go through all incore buffers, and release buffers if they belong to
 * the given device.  This is used in filesystem error handling to
 * preserve the consistency of its metadata.

	xfs_buftarg_t		*target,
	struct list_head	tmp;
	struct list_head	*dwq = &target->bt_delwrite_queue;
	spinlock_t		*dwlk = &target->bt_delwrite_lock;

	xfs_buf_runall_queues(xfsdatad_workqueue);
	xfs_buf_runall_queues(xfslogd_workqueue);

	INIT_LIST_HEAD(&tmp);
	list_for_each_entry_safe(bp, n, dwq, b_list) {
		ASSERT(bp->b_target == target);
		ASSERT(bp->b_flags & (XBF_DELWRI | _XBF_DELWRI_Q));
		XB_TRACE(bp, "walkq2", (long)xfs_buf_ispin(bp));
		if (xfs_buf_ispin(bp)) {

		list_move_tail(&bp->b_list, &tmp);

	 * Dropped the delayed write list lock, now walk the temporary list
	list_for_each_entry_safe(bp, n, &tmp, b_list) {
		bp->b_flags &= ~(XBF_DELWRI|_XBF_DELWRI_Q|_XBF_RUN_QUEUES);
		bp->b_flags |= XBF_WRITE;
			bp->b_flags &= ~XBF_ASYNC;
			list_del_init(&bp->b_list);
		xfs_buf_iostrategy(bp);

	blk_run_address_space(target->bt_mapping);

	 * Remaining list items must be flushed before returning
	while (!list_empty(&tmp)) {
		bp = list_entry(tmp.next, xfs_buf_t, b_list);

		list_del_init(&bp->b_list);

#ifdef XFS_BUF_TRACE
	xfs_buf_trace_buf = ktrace_alloc(XFS_BUF_TRACE_SIZE, KM_SLEEP);

	xfs_buf_zone = kmem_zone_init_flags(sizeof(xfs_buf_t), "xfs_buf",
						KM_ZONE_HWALIGN, NULL);
		goto out_free_trace_buf;

	xfslogd_workqueue = create_workqueue("xfslogd");
	if (!xfslogd_workqueue)
		goto out_free_buf_zone;

	xfsdatad_workqueue = create_workqueue("xfsdatad");
	if (!xfsdatad_workqueue)
		goto out_destroy_xfslogd_workqueue;

	xfs_buf_shake = kmem_shake_register(xfsbufd_wakeup);
		goto out_destroy_xfsdatad_workqueue;

 out_destroy_xfsdatad_workqueue:
	destroy_workqueue(xfsdatad_workqueue);
 out_destroy_xfslogd_workqueue:
	destroy_workqueue(xfslogd_workqueue);
	kmem_zone_destroy(xfs_buf_zone);
#ifdef XFS_BUF_TRACE
	ktrace_free(xfs_buf_trace_buf);

xfs_buf_terminate(void)
	kmem_shake_deregister(xfs_buf_shake);
	destroy_workqueue(xfsdatad_workqueue);
	destroy_workqueue(xfslogd_workqueue);
	kmem_zone_destroy(xfs_buf_zone);
#ifdef XFS_BUF_TRACE
	ktrace_free(xfs_buf_trace_buf);