/* -*- mode: c; c-basic-offset: 8; -*-
 * vim: noexpandtab sw=8 ts=8 sts=0:
 *
 * Copyright (C) 2002, 2004 Oracle.  All rights reserved.
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of the GNU General Public
 * License as published by the Free Software Foundation; either
 * version 2 of the License, or (at your option) any later version.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
 * General Public License for more details.
 *
 * You should have received a copy of the GNU General Public
 * License along with this program; if not, write to the
 * Free Software Foundation, Inc., 59 Temple Place - Suite 330,
 * Boston, MA 02111-1307, USA.
 */

#include <linux/fs.h>
#include <linux/slab.h>
#include <linux/highmem.h>
#include <linux/pagemap.h>
#include <asm/byteorder.h>

#define MLOG_MASK_PREFIX ML_FILE_IO
#include <cluster/masklog.h>

#include "ocfs2.h"

#include "alloc.h"
#include "aops.h"
#include "dlmglue.h"
#include "extent_map.h"
#include "file.h"
#include "inode.h"
#include "journal.h"
#include "super.h"
#include "symlink.h"

#include "buffer_head_io.h"
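
/*
 * ->get_block() for symlink inodes whose target lives in an allocated
 * extent. Fast symlinks store the target in the inode itself and must
 * never get here (see the BUG_ON below). Because symlink data is
 * written through the buffer cache rather than the page cache, this may
 * also need to copy not-yet-checkpointed data into bh_result's page.
 */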
static int ocfs2_symlink_get_block(struct inode *inode, sector_t iblock,
				   struct buffer_head *bh_result, int create)
{
	int err = -EIO;
	int status;
	struct ocfs2_dinode *fe = NULL;
	struct buffer_head *bh = NULL;
	struct buffer_head *buffer_cache_bh = NULL;
	struct ocfs2_super *osb = OCFS2_SB(inode->i_sb);
	void *kaddr;

	mlog_entry("(0x%p, %llu, 0x%p, %d)\n", inode,
		   (unsigned long long)iblock, bh_result, create);

	BUG_ON(ocfs2_inode_is_fast_symlink(inode));

	if ((iblock << inode->i_sb->s_blocksize_bits) > PATH_MAX + 1) {
		mlog(ML_ERROR, "block offset > PATH_MAX: %llu",
		     (unsigned long long)iblock);
		goto bail;
	}

	status = ocfs2_read_block(OCFS2_SB(inode->i_sb),
				  OCFS2_I(inode)->ip_blkno,
				  &bh, OCFS2_BH_CACHED, inode);
	if (status < 0) {
		mlog_errno(status);
		err = status;
		goto bail;
	}
	fe = (struct ocfs2_dinode *) bh->b_data;

	if (!OCFS2_IS_VALID_DINODE(fe)) {
		mlog(ML_ERROR, "Invalid dinode #%llu: signature = %.*s\n",
		     (unsigned long long)fe->i_blkno, 7, fe->i_signature);
		goto bail;
	}

	if ((u64)iblock >= ocfs2_clusters_to_blocks(inode->i_sb,
						    le32_to_cpu(fe->i_clusters))) {
		mlog(ML_ERROR, "block offset is outside the allocated size: "
		     "%llu\n", (unsigned long long)iblock);
		goto bail;
	}

	/* We don't use the page cache to create symlink data, so if
	 * need be, copy it over from the buffer cache. */
	if (!buffer_uptodate(bh_result) && ocfs2_inode_is_new(inode)) {
		u64 blkno = le64_to_cpu(fe->id2.i_list.l_recs[0].e_blkno) +
			    iblock;
		buffer_cache_bh = sb_getblk(osb->sb, blkno);
		if (!buffer_cache_bh) {
			mlog(ML_ERROR, "couldn't getblock for symlink!\n");
			goto bail;
		}

		/* we haven't locked out transactions, so a commit
		 * could've happened. Since we've got a reference on
		 * the bh, even if it commits while we're doing the
		 * copy, the data is still good. */
		if (buffer_jbd(buffer_cache_bh)
		    && ocfs2_inode_is_new(inode)) {
			kaddr = kmap_atomic(bh_result->b_page, KM_USER0);
			if (!kaddr) {
				mlog(ML_ERROR, "couldn't kmap!\n");
				goto bail;
			}
			memcpy(kaddr + (bh_result->b_size * iblock),
			       buffer_cache_bh->b_data,
			       bh_result->b_size);
			kunmap_atomic(kaddr, KM_USER0);
			set_buffer_uptodate(bh_result);
		}
		brelse(buffer_cache_bh);
	}

	map_bh(bh_result, inode->i_sb,
	       le64_to_cpu(fe->id2.i_list.l_recs[0].e_blkno) + iblock);

	err = 0;

bail:
	if (bh)
		brelse(bh);

	mlog_exit(err);
	return err;
}
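
/*
 * Generic ->get_block() for ocfs2: translates a logical file block into
 * a physical block via the extent map. No allocation happens here; a
 * block beyond the current allocation is reported as -EIO, and a
 * create past i_size only marks the buffer new.
 */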
static int ocfs2_get_block(struct inode *inode, sector_t iblock,
			   struct buffer_head *bh_result, int create)
{
	int err = 0;
	u64 p_blkno, past_eof;

	mlog_entry("(0x%p, %llu, 0x%p, %d)\n", inode,
		   (unsigned long long)iblock, bh_result, create);

	if (OCFS2_I(inode)->ip_flags & OCFS2_INODE_SYSTEM_FILE)
		mlog(ML_NOTICE, "get_block on system inode 0x%p (%lu)\n",
		     inode, inode->i_ino);

	if (S_ISLNK(inode->i_mode)) {
		/* this always does I/O for some reason. */
		err = ocfs2_symlink_get_block(inode, iblock, bh_result,
					      create);
		goto bail;
	}

	/* this can happen if another node truncates after our extend! */
	spin_lock(&OCFS2_I(inode)->ip_lock);
	if (iblock >= ocfs2_clusters_to_blocks(inode->i_sb,
					       OCFS2_I(inode)->ip_clusters))
		err = -EIO;
	spin_unlock(&OCFS2_I(inode)->ip_lock);
	if (err)
		goto bail;

	err = ocfs2_extent_map_get_blocks(inode, iblock, &p_blkno, NULL);
	if (err) {
		mlog(ML_ERROR, "Error %d from get_blocks(0x%p, %llu, 1, "
		     "%llu, NULL)\n", err, inode, (unsigned long long)iblock,
		     (unsigned long long)p_blkno);
		goto bail;
	}

	map_bh(bh_result, inode->i_sb, p_blkno);

	if (bh_result->b_blocknr == 0) {
		err = -EIO;
		mlog(ML_ERROR, "iblock = %llu p_blkno = %llu blkno=(%llu)\n",
		     (unsigned long long)iblock,
		     (unsigned long long)p_blkno,
		     (unsigned long long)OCFS2_I(inode)->ip_blkno);
	}

	past_eof = ocfs2_blocks_for_bytes(inode->i_sb, i_size_read(inode));
	mlog(0, "Inode %lu, past_eof = %llu\n", inode->i_ino,
	     (unsigned long long)past_eof);

	if (create && (iblock >= past_eof))
		set_buffer_new(bh_result);

bail:
	if (err < 0)
		err = -EIO;

	mlog_exit(err);
	return err;
}
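
/*
 * ->readpage() entry point: takes the cluster locks (meta, then data)
 * around block_read_full_page() and short-circuits reads past i_size,
 * which can be observed after a truncate on another node.
 */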
static int ocfs2_readpage(struct file *file, struct page *page)
{
	struct inode *inode = page->mapping->host;
	loff_t start = (loff_t)page->index << PAGE_CACHE_SHIFT;
	int ret, unlock = 1;

	mlog_entry("(0x%p, %lu)\n", file, (page ? page->index : 0));

	ret = ocfs2_meta_lock_with_page(inode, NULL, 0, page);
	if (ret != 0) {
		if (ret == AOP_TRUNCATED_PAGE)
			unlock = 0;
		mlog_errno(ret);
		goto out;
	}

	down_read(&OCFS2_I(inode)->ip_alloc_sem);

	/*
	 * i_size might have just been updated as we grabbed the meta lock. We
	 * might now be discovering a truncate that hit on another node.
	 * block_read_full_page->get_block freaks out if it is asked to read
	 * beyond the end of a file, so we check here. Callers
	 * (generic_file_read, fault->nopage) are clever enough to check i_size
	 * and notice that the page they just read isn't needed.
	 *
	 * XXX sys_readahead() seems to get that wrong?
	 */
	if (start >= i_size_read(inode)) {
		char *addr = kmap(page);
		memset(addr, 0, PAGE_SIZE);
		flush_dcache_page(page);
		kunmap(page);
		SetPageUptodate(page);
		ret = 0;
		goto out_alloc;
	}

	ret = ocfs2_data_lock_with_page(inode, 0, page);
	if (ret != 0) {
		if (ret == AOP_TRUNCATED_PAGE)
			unlock = 0;
		mlog_errno(ret);
		goto out_alloc;
	}

	ret = block_read_full_page(page, ocfs2_get_block);
	unlock = 0;

	ocfs2_data_unlock(inode, 0);
out_alloc:
	up_read(&OCFS2_I(inode)->ip_alloc_sem);
	ocfs2_meta_unlock(inode, 0);
out:
	if (unlock)
		unlock_page(page);
	mlog_exit(ret);
	return ret;
}

/* Note: Because we don't support holes, our allocation has
 * already happened (allocation writes zeros to the file data)
 * so we don't have to worry about ordered writes in
 * ocfs2_writepage.
 *
 * ->writepage is called during the process of invalidating the page cache
 * during blocked lock processing.  It can't block on any cluster locks
 * during block mapping.  It's relying on the fact that the block
 * mapping can't have disappeared under the dirty pages that it is
 * being asked to write back.
 */
static int ocfs2_writepage(struct page *page, struct writeback_control *wbc)
{
	int ret;

	mlog_entry("(0x%p)\n", page);

	ret = block_write_full_page(page, ocfs2_get_block, wbc);

	mlog_exit(ret);

	return ret;
}

/* This can also be called from ocfs2_write_zero_page() which has done
 * its own cluster locking. */
int ocfs2_prepare_write_nolock(struct inode *inode, struct page *page,
			       unsigned from, unsigned to)
{
	int ret;

	down_read(&OCFS2_I(inode)->ip_alloc_sem);

	ret = block_prepare_write(page, from, to, ocfs2_get_block);

	up_read(&OCFS2_I(inode)->ip_alloc_sem);

	return ret;
}

/*
 * ocfs2_prepare_write() can be an outer-most ocfs2 call when it is called
 * from loopback. It must be able to perform its own locking around
 * ocfs2_get_block().
 */
static int ocfs2_prepare_write(struct file *file, struct page *page,
			       unsigned from, unsigned to)
{
	struct inode *inode = page->mapping->host;
	int ret;

	mlog_entry("(0x%p, 0x%p, %u, %u)\n", file, page, from, to);

	ret = ocfs2_meta_lock_with_page(inode, NULL, 0, page);
	if (ret != 0) {
		mlog_errno(ret);
		goto out;
	}

	ret = ocfs2_prepare_write_nolock(inode, page, from, to);

	ocfs2_meta_unlock(inode, 0);
out:
	mlog_exit(ret);
	return ret;
}

/* Taken from ext3. We don't necessarily need the full blown
 * functionality yet, but IMHO it's better to cut and paste the whole
 * thing so we can avoid introducing our own bugs (and easily pick up
 * their fixes when they happen) --Mark */
static int walk_page_buffers(handle_t *handle,
			     struct buffer_head *head,
			     unsigned from,
			     unsigned to,
			     int *partial,
			     int (*fn)(handle_t *handle,
				       struct buffer_head *bh))
{
	struct buffer_head *bh;
	unsigned block_start, block_end;
	unsigned blocksize = head->b_size;
	int err, ret = 0;
	struct buffer_head *next;

	for (bh = head, block_start = 0;
	     ret == 0 && (bh != head || !block_start);
	     block_start = block_end, bh = next) {
		next = bh->b_this_page;
		block_end = block_start + blocksize;
		if (block_end <= from || block_start >= to) {
			if (partial && !buffer_uptodate(bh))
				*partial = 1;
			continue;
		}
		err = (*fn)(handle, bh);
		if (!ret)
			ret = err;
	}
	return ret;
}
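
/*
 * Start a transaction for a page write. In ordered-data mode the page's
 * buffers are walked and attached to the transaction so that file data
 * reaches disk before the transaction commits. Returns an ERR_PTR()
 * value on failure.
 */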
handle_t *ocfs2_start_walk_page_trans(struct inode *inode,
				      struct page *page,
				      unsigned from,
				      unsigned to)
{
	struct ocfs2_super *osb = OCFS2_SB(inode->i_sb);
	handle_t *handle = NULL;
	int ret = 0;

	handle = ocfs2_start_trans(osb, OCFS2_INODE_UPDATE_CREDITS);
	if (!handle) {
		ret = -ENOMEM;
		mlog_errno(ret);
		goto out;
	}

	if (ocfs2_should_order_data(inode)) {
		ret = walk_page_buffers(handle,
					page_buffers(page),
					from, to, NULL,
					ocfs2_journal_dirty_data);
		if (ret < 0)
			mlog_errno(ret);
	}
out:
	if (ret) {
		if (handle)
			ocfs2_commit_trans(osb, handle);
		handle = ERR_PTR(ret);
	}
	return handle;
}

static int ocfs2_commit_write(struct file *file, struct page *page,
			      unsigned from, unsigned to)
{
	int ret;
	struct buffer_head *di_bh = NULL;
	struct inode *inode = page->mapping->host;
	handle_t *handle = NULL;
	struct ocfs2_dinode *di;

	mlog_entry("(0x%p, 0x%p, %u, %u)\n", file, page, from, to);

	/* NOTE: ocfs2_file_aio_write has ensured that it's safe for
	 * us to continue here without rechecking the I/O against
	 * changed inode values.
	 *
	 * 1) We're currently holding the inode alloc lock, so no
	 *    nodes can change it underneath us.
	 *
	 * 2) We've had to take the metadata lock at least once
	 *    already to check for extending writes, suid removal, etc.
	 *    The meta data update code then ensures that we don't get a
	 *    stale inode allocation image (i_size, i_clusters, etc).
	 */

	ret = ocfs2_meta_lock_with_page(inode, &di_bh, 1, page);
	if (ret != 0) {
		mlog_errno(ret);
		goto out;
	}

	ret = ocfs2_data_lock_with_page(inode, 1, page);
	if (ret != 0) {
		mlog_errno(ret);
		goto out_unlock_meta;
	}

	handle = ocfs2_start_walk_page_trans(inode, page, from, to);
	if (IS_ERR(handle)) {
		ret = PTR_ERR(handle);
		goto out_unlock_data;
	}

	/* Mark our buffer early. We'd rather catch this error up here
	 * as opposed to after a successful commit_write which would
	 * require us to set back inode->i_size. */
	ret = ocfs2_journal_access(handle, inode, di_bh,
				   OCFS2_JOURNAL_ACCESS_WRITE);
	if (ret < 0) {
		mlog_errno(ret);
		goto out_commit;
	}

	/* might update i_size */
	ret = generic_commit_write(file, page, from, to);
	if (ret < 0) {
		mlog_errno(ret);
		goto out_commit;
	}

	di = (struct ocfs2_dinode *)di_bh->b_data;

	/* ocfs2_mark_inode_dirty() is too heavy to use here. */
	inode->i_mtime = inode->i_ctime = CURRENT_TIME;
	di->i_mtime = di->i_ctime = cpu_to_le64(inode->i_mtime.tv_sec);
	di->i_mtime_nsec = di->i_ctime_nsec = cpu_to_le32(inode->i_mtime.tv_nsec);

	inode->i_blocks = ocfs2_align_bytes_to_sectors((u64)(i_size_read(inode)));
	di->i_size = cpu_to_le64((u64)i_size_read(inode));

	ret = ocfs2_journal_dirty(handle, di_bh);
	if (ret < 0)
		mlog_errno(ret);

out_commit:
	ocfs2_commit_trans(OCFS2_SB(inode->i_sb), handle);
out_unlock_data:
	ocfs2_data_unlock(inode, 1);
out_unlock_meta:
	ocfs2_meta_unlock(inode, 1);
out:
	if (di_bh)
		brelse(di_bh);

	mlog_exit(ret);
	return ret;
}
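
/*
 * ->bmap(): resolve a logical block to its physical block number, or 0
 * if it cannot be resolved. Journal system files are queried without
 * cluster locks since they are never accessed from multiple nodes.
 */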
static sector_t ocfs2_bmap(struct address_space *mapping, sector_t block)
{
	sector_t status;
	u64 p_blkno = 0;
	int err = 0;
	struct inode *inode = mapping->host;

	mlog_entry("(block = %llu)\n", (unsigned long long)block);

	/* We don't need to lock journal system files, since they aren't
	 * accessed concurrently from multiple nodes.
	 */
	if (!INODE_JOURNAL(inode)) {
		err = ocfs2_meta_lock(inode, NULL, 0);
		if (err) {
			if (err != -ENOENT)
				mlog_errno(err);
			goto bail;
		}
		down_read(&OCFS2_I(inode)->ip_alloc_sem);
	}

	err = ocfs2_extent_map_get_blocks(inode, block, &p_blkno, NULL);

	if (!INODE_JOURNAL(inode)) {
		up_read(&OCFS2_I(inode)->ip_alloc_sem);
		ocfs2_meta_unlock(inode, 0);
	}

	if (err) {
		mlog(ML_ERROR, "get_blocks() failed, block = %llu\n",
		     (unsigned long long)block);
		mlog_errno(err);
		goto bail;
	}

bail:
	status = err ? 0 : p_blkno;

	mlog_exit((int)status);

	return status;
}

/*
 * TODO: Make this into a generic get_blocks function.
 *
 * From do_direct_io in direct-io.c:
 *  "So what we do is to permit the ->get_blocks function to populate
 *   bh.b_size with the size of IO which is permitted at this offset and
 *   this i_blkbits."
 *
 * This function is called directly from get_more_blocks in direct-io.c.
 *
 * called like this: dio->get_blocks(dio->inode, fs_startblk,
 * 					fs_count, map_bh, dio->rw == WRITE);
 */
static int ocfs2_direct_IO_get_blocks(struct inode *inode, sector_t iblock,
				      struct buffer_head *bh_result, int create)
{
	int ret;
	u64 p_blkno, inode_blocks;
	int contig_blocks;
	unsigned char blocksize_bits = inode->i_sb->s_blocksize_bits;
	unsigned long max_blocks = bh_result->b_size >> inode->i_blkbits;

	/* This function won't even be called if the request isn't all
	 * nicely aligned and of the right size, so there's no need
	 * for us to check any of that. */

	spin_lock(&OCFS2_I(inode)->ip_lock);
	inode_blocks = ocfs2_clusters_to_blocks(inode->i_sb,
						OCFS2_I(inode)->ip_clusters);

	/*
	 * For a read which begins past the end of file, we return a hole.
	 */
	if (!create && (iblock >= inode_blocks)) {
		spin_unlock(&OCFS2_I(inode)->ip_lock);
		ret = 0;
		goto bail;
	}

	/*
	 * Any write past EOF is not allowed because we'd be extending.
	 */
	if (create && (iblock + max_blocks) > inode_blocks) {
		spin_unlock(&OCFS2_I(inode)->ip_lock);
		ret = -EIO;
		goto bail;
	}
	spin_unlock(&OCFS2_I(inode)->ip_lock);

	/* This figures out the size of the next contiguous block, and
	 * our logical offset */
	ret = ocfs2_extent_map_get_blocks(inode, iblock, &p_blkno,
					  &contig_blocks);
	if (ret) {
		mlog(ML_ERROR, "get_blocks() failed iblock=%llu\n",
		     (unsigned long long)iblock);
		ret = -EIO;
		goto bail;
	}

	map_bh(bh_result, inode->i_sb, p_blkno);

	/* make sure we don't map more than max_blocks blocks here as
	   that's all the kernel will handle at this point. */
	if (max_blocks < contig_blocks)
		contig_blocks = max_blocks;
	bh_result->b_size = contig_blocks << blocksize_bits;

bail:
	return ret;
}

/*
 * ocfs2_dio_end_io is called by the dio core when a dio is finished.  We're
 * particularly interested in the aio/dio case.  Like the core uses
 * i_alloc_sem, we use the rw_lock DLM lock to protect io on one node from
 * truncation on another.
 */
static void ocfs2_dio_end_io(struct kiocb *iocb,
			     loff_t offset,
			     ssize_t bytes,
			     void *private)
{
	struct inode *inode = iocb->ki_filp->f_path.dentry->d_inode;

	/* this io's submitter should not have unlocked this before we could */
	BUG_ON(!ocfs2_iocb_is_rw_locked(iocb));
	ocfs2_iocb_clear_rw_locked(iocb);
	up_read(&inode->i_alloc_sem);
	ocfs2_rw_unlock(inode, 0);
}

/*
 * ocfs2_invalidatepage() and ocfs2_releasepage() are shamelessly stolen
 * from ext3.  PageChecked() bits have been removed as OCFS2 does not
 * do journalled data.
 */
static void ocfs2_invalidatepage(struct page *page, unsigned long offset)
{
	journal_t *journal = OCFS2_SB(page->mapping->host->i_sb)->journal->j_journal;

	journal_invalidatepage(journal, page, offset);
}

static int ocfs2_releasepage(struct page *page, gfp_t wait)
{
	journal_t *journal = OCFS2_SB(page->mapping->host->i_sb)->journal->j_journal;

	if (!page_has_buffers(page))
		return 0;
	return journal_try_to_free_buffers(journal, page, wait);
}
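
/*
 * ->direct_IO() entry point. Note the unusual locking: the rw_lock and
 * i_alloc_sem are taken by the submitter of the io and are released
 * from ocfs2_dio_end_io() once the io completes.
 */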
static ssize_t ocfs2_direct_IO(int rw,
			       struct kiocb *iocb,
			       const struct iovec *iov,
			       loff_t offset,
			       unsigned long nr_segs)
{
	struct file *file = iocb->ki_filp;
	struct inode *inode = file->f_path.dentry->d_inode->i_mapping->host;
	int ret;

	mlog_entry_void();

	/*
	 * We get PR data locks even for O_DIRECT.  This allows
	 * concurrent O_DIRECT I/O but doesn't let O_DIRECT with
	 * extending and buffered zeroing writes race.  If they did
	 * race then the buffered zeroing could be written back after
	 * the O_DIRECT I/O.  It's one thing to tell people not to mix
	 * buffered and O_DIRECT writes, but expecting them to
	 * understand that file extension is also an implicit buffered
	 * write is too much.  By getting the PR we force writeback of
	 * the buffered zeroing before proceeding.
	 */
	ret = ocfs2_data_lock(inode, 0);
	if (ret < 0) {
		mlog_errno(ret);
		goto out;
	}
	ocfs2_data_unlock(inode, 0);

	ret = blockdev_direct_IO_no_locking(rw, iocb, inode,
					    inode->i_sb->s_bdev, iov, offset,
					    nr_segs,
					    ocfs2_direct_IO_get_blocks,
					    ocfs2_dio_end_io);
out:
	mlog_exit(ret);
	return ret;
}

const struct address_space_operations ocfs2_aops = {
	.readpage	= ocfs2_readpage,
	.writepage	= ocfs2_writepage,
	.prepare_write	= ocfs2_prepare_write,
	.commit_write	= ocfs2_commit_write,
	.bmap		= ocfs2_bmap,
	.sync_page	= block_sync_page,
	.direct_IO	= ocfs2_direct_IO,
	.invalidatepage	= ocfs2_invalidatepage,
	.releasepage	= ocfs2_releasepage,
	.migratepage	= buffer_migrate_page,
};
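
/*
 * These operations are attached to regular-file and symlink mappings
 * during inode initialization; a minimal sketch (the exact call site
 * lives elsewhere in ocfs2 and is not shown in this file):
 *
 *	inode->i_mapping->a_ops = &ocfs2_aops;
 */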