2 * Copyright (c) 2003-2006, Cluster File Systems, Inc, info@clusterfs.com
3 * Written by Alex Tomas <alex@clusterfs.com>
5 * Architecture independence:
6 * Copyright (c) 2005, Bull S.A.
7 * Written by Pierre Peiffer <pierre.peiffer@bull.net>
9 * This program is free software; you can redistribute it and/or modify
10 * it under the terms of the GNU General Public License version 2 as
11 * published by the Free Software Foundation.
13 * This program is distributed in the hope that it will be useful,
14 * but WITHOUT ANY WARRANTY; without even the implied warranty of
15 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
16 * GNU General Public License for more details.
18 * You should have received a copy of the GNU General Public License
19 * along with this program; if not, write to the Free Software
20 * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
24 * Extents support for EXT4
27 * - ext4*_error() should be used in some situations
28 * - analyze all BUG()/BUG_ON(), use -EIO where appropriate
29 * - smart tree reduction
32 #include <linux/module.h>
34 #include <linux/time.h>
35 #include <linux/ext4_jbd2.h>
36 #include <linux/jbd.h>
37 #include <linux/highuid.h>
38 #include <linux/pagemap.h>
39 #include <linux/quotaops.h>
40 #include <linux/string.h>
41 #include <linux/slab.h>
42 #include <linux/falloc.h>
43 #include <linux/ext4_fs_extents.h>
44 #include <asm/uaccess.h>
49 * combine low and high parts of physical block number into ext4_fsblk_t
51 static ext4_fsblk_t ext_pblock(struct ext4_extent *ex)
55 block = le32_to_cpu(ex->ee_start);
56 block |= ((ext4_fsblk_t) le16_to_cpu(ex->ee_start_hi) << 31) << 1;
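/*
 * Editor's note: the two-step shift (<< 31, then << 1) composes a 32-bit
 * shift in two well-defined halves, presumably so the expression stays
 * defined even on a configuration where the block type is only 32 bits
 * wide; a direct << 32 would be undefined behaviour there.
 */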
62 * combine low and high parts of a leaf physical block number into ext4_fsblk_t
64 static ext4_fsblk_t idx_pblock(struct ext4_extent_idx *ix)
68 block = le32_to_cpu(ix->ei_leaf);
69 block |= ((ext4_fsblk_t) le16_to_cpu(ix->ei_leaf_hi) << 31) << 1;
74 * ext4_ext_store_pblock:
75 * stores a large physical block number into an extent struct,
76 * breaking it into parts
78 static void ext4_ext_store_pblock(struct ext4_extent *ex, ext4_fsblk_t pb)
80 ex->ee_start = cpu_to_le32((unsigned long) (pb & 0xffffffff));
81 ex->ee_start_hi = cpu_to_le16((unsigned long) ((pb >> 31) >> 1) & 0xffff);
85 * ext4_idx_store_pblock:
86 * stores a large physical block number into an index struct,
87 * breaking it into parts
89 static void ext4_idx_store_pblock(struct ext4_extent_idx *ix, ext4_fsblk_t pb)
91 ix->ei_leaf = cpu_to_le32((unsigned long) (pb & 0xffffffff));
92 ix->ei_leaf_hi = cpu_to_le16((unsigned long) ((pb >> 31) >> 1) & 0xffff);
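/*
 * Editor's sketch (illustrative only, never called): the two store/load
 * pairs above must round-trip any 48-bit physical block number through
 * the split 32-bit low and 16-bit high on-disk fields. Assuming the
 * kernel's __u32/__u16 types:
 */
static inline int ext4_ext_pblock_round_trips(ext4_fsblk_t pb)
{
	__u32 lo = (__u32)(pb & 0xffffffff);
	__u16 hi = (__u16)((pb >> 31) >> 1);

	/* join exactly as ext_pblock()/idx_pblock() do */
	return (lo | (((ext4_fsblk_t)hi << 31) << 1)) == pb; /* true for pb < 2^48 */
}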
95 static int ext4_ext_check_header(const char *function, struct inode *inode,
96 struct ext4_extent_header *eh)
98 const char *error_msg = NULL;
100 if (unlikely(eh->eh_magic != EXT4_EXT_MAGIC)) {
101 error_msg = "invalid magic";
104 if (unlikely(eh->eh_max == 0)) {
105 error_msg = "invalid eh_max";
108 if (unlikely(le16_to_cpu(eh->eh_entries) > le16_to_cpu(eh->eh_max))) {
109 error_msg = "invalid eh_entries";
115 ext4_error(inode->i_sb, function,
116 "bad header in inode #%lu: %s - magic %x, "
117 "entries %u, max %u, depth %u",
118 inode->i_ino, error_msg, le16_to_cpu(eh->eh_magic),
119 le16_to_cpu(eh->eh_entries), le16_to_cpu(eh->eh_max),
120 le16_to_cpu(eh->eh_depth));
125 static handle_t *ext4_ext_journal_restart(handle_t *handle, int needed)
129 if (handle->h_buffer_credits > needed)
131 if (!ext4_journal_extend(handle, needed))
133 err = ext4_journal_restart(handle, needed);
143 static int ext4_ext_get_access(handle_t *handle, struct inode *inode,
144 struct ext4_ext_path *path)
147 /* path points to block */
148 return ext4_journal_get_write_access(handle, path->p_bh);
150 /* path points to leaf/index in inode body */
151 /* we use in-core data, no need to protect them */
161 static int ext4_ext_dirty(handle_t *handle, struct inode *inode,
162 struct ext4_ext_path *path)
166 /* path points to block */
167 err = ext4_journal_dirty_metadata(handle, path->p_bh);
169 /* path points to leaf/index in inode body */
170 err = ext4_mark_inode_dirty(handle, inode);
175 static ext4_fsblk_t ext4_ext_find_goal(struct inode *inode,
176 struct ext4_ext_path *path,
179 struct ext4_inode_info *ei = EXT4_I(inode);
180 ext4_fsblk_t bg_start;
181 ext4_grpblk_t colour;
185 struct ext4_extent *ex;
186 depth = path->p_depth;
188 /* try to predict block placement */
189 ex = path[depth].p_ext;
191 return ext_pblock(ex)+(block-le32_to_cpu(ex->ee_block));
193 /* it looks like index is empty;
194 * try to find starting block from index itself */
195 if (path[depth].p_bh)
196 return path[depth].p_bh->b_blocknr;
199 /* OK. use inode's group */
200 bg_start = (ei->i_block_group * EXT4_BLOCKS_PER_GROUP(inode->i_sb)) +
201 le32_to_cpu(EXT4_SB(inode->i_sb)->s_es->s_first_data_block);
202 colour = (current->pid % 16) *
203 (EXT4_BLOCKS_PER_GROUP(inode->i_sb) / 16);
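/*
 * Example (editor): with 32768 blocks per group, a task whose pid % 16
 * is 4 gets colour 4 * (32768 / 16) = 8192, spreading concurrent
 * allocators across the group.
 */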
204 return bg_start + colour + block;
208 ext4_ext_new_block(handle_t *handle, struct inode *inode,
209 struct ext4_ext_path *path,
210 struct ext4_extent *ex, int *err)
212 ext4_fsblk_t goal, newblock;
214 goal = ext4_ext_find_goal(inode, path, le32_to_cpu(ex->ee_block));
215 newblock = ext4_new_block(handle, inode, goal, err);
219 static int ext4_ext_space_block(struct inode *inode)
223 size = (inode->i_sb->s_blocksize - sizeof(struct ext4_extent_header))
224 / sizeof(struct ext4_extent);
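/*
 * Worked example (editor): ext4_extent, ext4_extent_idx and the header
 * are all 12 bytes, so a 4096-byte block holds (4096 - 12) / 12 = 340
 * entries, while the 60-byte in-inode i_data root below holds only
 * (60 - 12) / 12 = 4.
 */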
225 #ifdef AGGRESSIVE_TEST
232 static int ext4_ext_space_block_idx(struct inode *inode)
236 size = (inode->i_sb->s_blocksize - sizeof(struct ext4_extent_header))
237 / sizeof(struct ext4_extent_idx);
238 #ifdef AGGRESSIVE_TEST
245 static int ext4_ext_space_root(struct inode *inode)
249 size = sizeof(EXT4_I(inode)->i_data);
250 size -= sizeof(struct ext4_extent_header);
251 size /= sizeof(struct ext4_extent);
252 #ifdef AGGRESSIVE_TEST
259 static int ext4_ext_space_root_idx(struct inode *inode)
263 size = sizeof(EXT4_I(inode)->i_data);
264 size -= sizeof(struct ext4_extent_header);
265 size /= sizeof(struct ext4_extent_idx);
266 #ifdef AGGRESSIVE_TEST
274 static void ext4_ext_show_path(struct inode *inode, struct ext4_ext_path *path)
276 int k, l = path->p_depth;
279 for (k = 0; k <= l; k++, path++) {
281 ext_debug(" %d->%llu", le32_to_cpu(path->p_idx->ei_block),
282 idx_pblock(path->p_idx));
283 } else if (path->p_ext) {
284 ext_debug(" %d:%d:%llu ",
285 le32_to_cpu(path->p_ext->ee_block),
286 ext4_ext_get_actual_len(path->p_ext),
287 ext_pblock(path->p_ext));
294 static void ext4_ext_show_leaf(struct inode *inode, struct ext4_ext_path *path)
296 int depth = ext_depth(inode);
297 struct ext4_extent_header *eh;
298 struct ext4_extent *ex;
304 eh = path[depth].p_hdr;
305 ex = EXT_FIRST_EXTENT(eh);
307 for (i = 0; i < le16_to_cpu(eh->eh_entries); i++, ex++) {
308 ext_debug("%d:%d:%llu ", le32_to_cpu(ex->ee_block),
309 ext4_ext_get_actual_len(ex), ext_pblock(ex));
314 #define ext4_ext_show_path(inode,path)
315 #define ext4_ext_show_leaf(inode,path)
318 static void ext4_ext_drop_refs(struct ext4_ext_path *path)
320 int depth = path->p_depth;
323 for (i = 0; i <= depth; i++, path++)
331 * ext4_ext_binsearch_idx:
332 * binary search for the closest index of the given block
335 ext4_ext_binsearch_idx(struct inode *inode, struct ext4_ext_path *path, int block)
337 struct ext4_extent_header *eh = path->p_hdr;
338 struct ext4_extent_idx *r, *l, *m;
340 BUG_ON(eh->eh_magic != EXT4_EXT_MAGIC);
341 BUG_ON(le16_to_cpu(eh->eh_entries) > le16_to_cpu(eh->eh_max));
342 BUG_ON(le16_to_cpu(eh->eh_entries) <= 0);
344 ext_debug("binsearch for %d(idx): ", block);
346 l = EXT_FIRST_INDEX(eh) + 1;
347 r = EXT_FIRST_INDEX(eh) + le16_to_cpu(eh->eh_entries) - 1;
350 if (block < le32_to_cpu(m->ei_block))
354 ext_debug("%p(%u):%p(%u):%p(%u) ", l, l->ei_block,
355 m, m->ei_block, r, r->ei_block);
359 ext_debug(" -> %d->%lld ", le32_to_cpu(path->p_idx->ei_block),
360 idx_pblock(path->p_idx));
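/*
 * Editor's note: this is a right-biased binary search; l starts one past
 * the first index (the first entry always covers the block), and the
 * result is l - 1, the rightmost index with ei_block <= block. The same
 * invariant over a plain sorted array (sketch):
 *
 *	while (l <= r) {
 *		m = l + (r - l) / 2;
 *		if (block < key(m))
 *			r = m - 1;
 *		else
 *			l = m + 1;
 *	}
 *	chosen = l - 1;	last entry with key <= block
 */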
362 #ifdef CHECK_BINSEARCH
364 struct ext4_extent_idx *chix, *ix;
367 chix = ix = EXT_FIRST_INDEX(eh);
368 for (k = 0; k < le16_to_cpu(eh->eh_entries); k++, ix++) {
370 le32_to_cpu(ix->ei_block) <= le32_to_cpu(ix[-1].ei_block)) {
371 printk("k=%d, ix=0x%p, first=0x%p\n", k,
372 ix, EXT_FIRST_INDEX(eh));
374 le32_to_cpu(ix->ei_block),
375 le32_to_cpu(ix[-1].ei_block));
377 BUG_ON(k && le32_to_cpu(ix->ei_block)
378 <= le32_to_cpu(ix[-1].ei_block));
379 if (block < le32_to_cpu(ix->ei_block))
383 BUG_ON(chix != path->p_idx);
390 * ext4_ext_binsearch:
391 * binary search for closest extent of the given block
394 ext4_ext_binsearch(struct inode *inode, struct ext4_ext_path *path, int block)
396 struct ext4_extent_header *eh = path->p_hdr;
397 struct ext4_extent *r, *l, *m;
399 BUG_ON(eh->eh_magic != EXT4_EXT_MAGIC);
400 BUG_ON(le16_to_cpu(eh->eh_entries) > le16_to_cpu(eh->eh_max));
402 if (eh->eh_entries == 0) {
404 * this leaf is empty:
405 * we get such a leaf in split/add case
410 ext_debug("binsearch for %d: ", block);
412 l = EXT_FIRST_EXTENT(eh) + 1;
413 r = EXT_FIRST_EXTENT(eh) + le16_to_cpu(eh->eh_entries) - 1;
417 if (block < le32_to_cpu(m->ee_block))
421 ext_debug("%p(%u):%p(%u):%p(%u) ", l, l->ee_block,
422 m, m->ee_block, r, r->ee_block);
426 ext_debug(" -> %d:%llu:%d ",
427 le32_to_cpu(path->p_ext->ee_block),
428 ext_pblock(path->p_ext),
429 ext4_ext_get_actual_len(path->p_ext));
431 #ifdef CHECK_BINSEARCH
433 struct ext4_extent *chex, *ex;
436 chex = ex = EXT_FIRST_EXTENT(eh);
437 for (k = 0; k < le16_to_cpu(eh->eh_entries); k++, ex++) {
438 BUG_ON(k && le32_to_cpu(ex->ee_block)
439 <= le32_to_cpu(ex[-1].ee_block));
440 if (block < le32_to_cpu(ex->ee_block))
444 BUG_ON(chex != path->p_ext);
450 int ext4_ext_tree_init(handle_t *handle, struct inode *inode)
452 struct ext4_extent_header *eh;
454 eh = ext_inode_hdr(inode);
457 eh->eh_magic = EXT4_EXT_MAGIC;
458 eh->eh_max = cpu_to_le16(ext4_ext_space_root(inode));
459 ext4_mark_inode_dirty(handle, inode);
460 ext4_ext_invalidate_cache(inode);
464 struct ext4_ext_path *
465 ext4_ext_find_extent(struct inode *inode, int block, struct ext4_ext_path *path)
467 struct ext4_extent_header *eh;
468 struct buffer_head *bh;
469 short int depth, i, ppos = 0, alloc = 0;
471 eh = ext_inode_hdr(inode);
473 if (ext4_ext_check_header(__FUNCTION__, inode, eh))
474 return ERR_PTR(-EIO);
476 i = depth = ext_depth(inode);
478 /* account possible depth increase */
480 path = kzalloc(sizeof(struct ext4_ext_path) * (depth + 2),
483 return ERR_PTR(-ENOMEM);
488 /* walk through the tree */
490 ext_debug("depth %d: num %d, max %d\n",
491 ppos, le16_to_cpu(eh->eh_entries), le16_to_cpu(eh->eh_max));
492 ext4_ext_binsearch_idx(inode, path + ppos, block);
493 path[ppos].p_block = idx_pblock(path[ppos].p_idx);
494 path[ppos].p_depth = i;
495 path[ppos].p_ext = NULL;
497 bh = sb_bread(inode->i_sb, path[ppos].p_block);
501 eh = ext_block_hdr(bh);
503 BUG_ON(ppos > depth);
504 path[ppos].p_bh = bh;
505 path[ppos].p_hdr = eh;
508 if (ext4_ext_check_header(__FUNCTION__, inode, eh))
512 path[ppos].p_depth = i;
513 path[ppos].p_hdr = eh;
514 path[ppos].p_ext = NULL;
515 path[ppos].p_idx = NULL;
517 if (ext4_ext_check_header(__FUNCTION__, inode, eh))
521 ext4_ext_binsearch(inode, path + ppos, block);
523 ext4_ext_show_path(inode, path);
528 ext4_ext_drop_refs(path);
531 return ERR_PTR(-EIO);
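/*
 * Editor's note: on success ext4_ext_find_extent() returns an array of
 * depth + 1 ext4_ext_path elements: path[0] describes the root in the
 * inode body, path[depth] the leaf, and each element caches the header,
 * buffer_head and the index/extent picked by the binary searches above.
 */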
535 * ext4_ext_insert_index:
536 * insert new index [@logical;@ptr] into the block at @curp;
537 * check where to insert: before @curp or after @curp
539 static int ext4_ext_insert_index(handle_t *handle, struct inode *inode,
540 struct ext4_ext_path *curp,
541 int logical, ext4_fsblk_t ptr)
543 struct ext4_extent_idx *ix;
546 err = ext4_ext_get_access(handle, inode, curp);
550 BUG_ON(logical == le32_to_cpu(curp->p_idx->ei_block));
551 len = EXT_MAX_INDEX(curp->p_hdr) - curp->p_idx;
552 if (logical > le32_to_cpu(curp->p_idx->ei_block)) {
554 if (curp->p_idx != EXT_LAST_INDEX(curp->p_hdr)) {
555 len = (len - 1) * sizeof(struct ext4_extent_idx);
556 len = len < 0 ? 0 : len;
557 ext_debug("insert new index %d after: %d. "
558 "move %d from 0x%p to 0x%p\n",
560 (curp->p_idx + 1), (curp->p_idx + 2));
561 memmove(curp->p_idx + 2, curp->p_idx + 1, len);
563 ix = curp->p_idx + 1;
566 len = len * sizeof(struct ext4_extent_idx);
567 len = len < 0 ? 0 : len;
568 ext_debug("insert new index %d before: %d. "
569 "move %d from 0x%p to 0x%p\n",
571 curp->p_idx, (curp->p_idx + 1));
572 memmove(curp->p_idx + 1, curp->p_idx, len);
576 ix->ei_block = cpu_to_le32(logical);
577 ext4_idx_store_pblock(ix, ptr);
578 curp->p_hdr->eh_entries = cpu_to_le16(le16_to_cpu(curp->p_hdr->eh_entries)+1);
580 BUG_ON(le16_to_cpu(curp->p_hdr->eh_entries)
581 > le16_to_cpu(curp->p_hdr->eh_max));
582 BUG_ON(ix > EXT_LAST_INDEX(curp->p_hdr));
584 err = ext4_ext_dirty(handle, inode, curp);
585 ext4_std_error(inode->i_sb, err);
592 * inserts new subtree into the path, using free index entry
594 * - allocates all needed blocks (new leaf and all intermediate index blocks)
595 * - makes decision where to split
596 * - moves remaining extents and index entries (right to the split point)
597 * into the newly allocated blocks
598 * - initializes subtree
600 static int ext4_ext_split(handle_t *handle, struct inode *inode,
601 struct ext4_ext_path *path,
602 struct ext4_extent *newext, int at)
604 struct buffer_head *bh = NULL;
605 int depth = ext_depth(inode);
606 struct ext4_extent_header *neh;
607 struct ext4_extent_idx *fidx;
608 struct ext4_extent *ex;
610 ext4_fsblk_t newblock, oldblock;
612 ext4_fsblk_t *ablocks = NULL; /* array of allocated blocks */
615 /* make decision: where to split? */
616 /* FIXME: now decision is simplest: at current extent */
618 /* if current leaf will be split, then we should use
619 * border from split point */
620 BUG_ON(path[depth].p_ext > EXT_MAX_EXTENT(path[depth].p_hdr));
621 if (path[depth].p_ext != EXT_MAX_EXTENT(path[depth].p_hdr)) {
622 border = path[depth].p_ext[1].ee_block;
623 ext_debug("leaf will be split."
624 " next leaf starts at %d\n",
625 le32_to_cpu(border));
627 border = newext->ee_block;
628 ext_debug("leaf will be added."
629 " next leaf starts at %d\n",
630 le32_to_cpu(border));
634 * If an error occurs, we break processing
635 * and mark the filesystem read-only. The index won't
636 * be inserted and the tree will remain in a consistent
637 * state. The next mount will repair the buffers too.
641 * Get array to track all allocated blocks.
642 * We need this to handle errors and free blocks
645 ablocks = kzalloc(sizeof(ext4_fsblk_t) * depth, GFP_NOFS);
649 /* allocate all needed blocks */
650 ext_debug("allocate %d blocks for indexes/leaf\n", depth - at);
651 for (a = 0; a < depth - at; a++) {
652 newblock = ext4_ext_new_block(handle, inode, path, newext, &err);
655 ablocks[a] = newblock;
658 /* initialize new leaf */
659 newblock = ablocks[--a];
660 BUG_ON(newblock == 0);
661 bh = sb_getblk(inode->i_sb, newblock);
668 err = ext4_journal_get_create_access(handle, bh);
672 neh = ext_block_hdr(bh);
674 neh->eh_max = cpu_to_le16(ext4_ext_space_block(inode));
675 neh->eh_magic = EXT4_EXT_MAGIC;
677 ex = EXT_FIRST_EXTENT(neh);
679 /* move remainder of path[depth] to the new leaf */
680 BUG_ON(path[depth].p_hdr->eh_entries != path[depth].p_hdr->eh_max);
681 /* start copy from next extent */
682 /* TODO: we could do it by single memmove */
685 while (path[depth].p_ext <=
686 EXT_MAX_EXTENT(path[depth].p_hdr)) {
687 ext_debug("move %d:%llu:%d in new leaf %llu\n",
688 le32_to_cpu(path[depth].p_ext->ee_block),
689 ext_pblock(path[depth].p_ext),
690 ext4_ext_get_actual_len(path[depth].p_ext),
692 /*memmove(ex++, path[depth].p_ext++,
693 sizeof(struct ext4_extent));
699 memmove(ex, path[depth].p_ext-m, sizeof(struct ext4_extent)*m);
700 neh->eh_entries = cpu_to_le16(le16_to_cpu(neh->eh_entries)+m);
703 set_buffer_uptodate(bh);
706 err = ext4_journal_dirty_metadata(handle, bh);
712 /* correct old leaf */
714 err = ext4_ext_get_access(handle, inode, path + depth);
717 path[depth].p_hdr->eh_entries =
718 cpu_to_le16(le16_to_cpu(path[depth].p_hdr->eh_entries)-m);
719 err = ext4_ext_dirty(handle, inode, path + depth);
725 /* create intermediate indexes */
729 ext_debug("create %d intermediate indices\n", k);
730 /* insert new index into current index block */
731 /* current depth stored in i var */
735 newblock = ablocks[--a];
736 bh = sb_getblk(inode->i_sb, (ext4_fsblk_t)newblock);
743 err = ext4_journal_get_create_access(handle, bh);
747 neh = ext_block_hdr(bh);
748 neh->eh_entries = cpu_to_le16(1);
749 neh->eh_magic = EXT4_EXT_MAGIC;
750 neh->eh_max = cpu_to_le16(ext4_ext_space_block_idx(inode));
751 neh->eh_depth = cpu_to_le16(depth - i);
752 fidx = EXT_FIRST_INDEX(neh);
753 fidx->ei_block = border;
754 ext4_idx_store_pblock(fidx, oldblock);
756 ext_debug("int.index at %d (block %llu): %lu -> %llu\n", i,
757 newblock, (unsigned long) le32_to_cpu(border),
763 ext_debug("cur 0x%p, last 0x%p\n", path[i].p_idx,
764 EXT_MAX_INDEX(path[i].p_hdr));
765 BUG_ON(EXT_MAX_INDEX(path[i].p_hdr) !=
766 EXT_LAST_INDEX(path[i].p_hdr));
767 while (path[i].p_idx <= EXT_MAX_INDEX(path[i].p_hdr)) {
768 ext_debug("%d: move %d:%d in new index %llu\n", i,
769 le32_to_cpu(path[i].p_idx->ei_block),
770 idx_pblock(path[i].p_idx),
772 /*memmove(++fidx, path[i].p_idx++,
773 sizeof(struct ext4_extent_idx));
775 BUG_ON(neh->eh_entries > neh->eh_max);*/
780 memmove(++fidx, path[i].p_idx - m,
781 sizeof(struct ext4_extent_idx) * m);
783 cpu_to_le16(le16_to_cpu(neh->eh_entries) + m);
785 set_buffer_uptodate(bh);
788 err = ext4_journal_dirty_metadata(handle, bh);
794 /* correct old index */
796 err = ext4_ext_get_access(handle, inode, path + i);
799 path[i].p_hdr->eh_entries = cpu_to_le16(le16_to_cpu(path[i].p_hdr->eh_entries)-m);
800 err = ext4_ext_dirty(handle, inode, path + i);
808 /* insert new index */
809 err = ext4_ext_insert_index(handle, inode, path + at,
810 le32_to_cpu(border), newblock);
814 if (buffer_locked(bh))
820 /* free all allocated blocks in error case */
821 for (i = 0; i < depth; i++) {
824 ext4_free_blocks(handle, inode, ablocks[i], 1);
833 * ext4_ext_grow_indepth:
834 * implements tree growing procedure:
835 * - allocates new block
836 * - moves top-level data (index block or leaf) into the new block
837 * - initializes new top-level, creating index that points to the
840 static int ext4_ext_grow_indepth(handle_t *handle, struct inode *inode,
841 struct ext4_ext_path *path,
842 struct ext4_extent *newext)
844 struct ext4_ext_path *curp = path;
845 struct ext4_extent_header *neh;
846 struct ext4_extent_idx *fidx;
847 struct buffer_head *bh;
848 ext4_fsblk_t newblock;
851 newblock = ext4_ext_new_block(handle, inode, path, newext, &err);
855 bh = sb_getblk(inode->i_sb, newblock);
858 ext4_std_error(inode->i_sb, err);
863 err = ext4_journal_get_create_access(handle, bh);
869 /* move top-level index/leaf into new block */
870 memmove(bh->b_data, curp->p_hdr, sizeof(EXT4_I(inode)->i_data));
872 /* set size of new block */
873 neh = ext_block_hdr(bh);
874 /* old root could have indexes or leaves
875 * so calculate eh_max the right way */
876 if (ext_depth(inode))
877 neh->eh_max = cpu_to_le16(ext4_ext_space_block_idx(inode));
879 neh->eh_max = cpu_to_le16(ext4_ext_space_block(inode));
880 neh->eh_magic = EXT4_EXT_MAGIC;
881 set_buffer_uptodate(bh);
884 err = ext4_journal_dirty_metadata(handle, bh);
888 /* create index in new top-level index: num,max,pointer */
889 err = ext4_ext_get_access(handle, inode, curp);
893 curp->p_hdr->eh_magic = EXT4_EXT_MAGIC;
894 curp->p_hdr->eh_max = cpu_to_le16(ext4_ext_space_root_idx(inode));
895 curp->p_hdr->eh_entries = cpu_to_le16(1);
896 curp->p_idx = EXT_FIRST_INDEX(curp->p_hdr);
897 /* FIXME: it works, but actually path[0] can be index */
898 curp->p_idx->ei_block = EXT_FIRST_EXTENT(path[0].p_hdr)->ee_block;
899 ext4_idx_store_pblock(curp->p_idx, newblock);
901 neh = ext_inode_hdr(inode);
902 fidx = EXT_FIRST_INDEX(neh);
903 ext_debug("new root: num %d(%d), lblock %d, ptr %llu\n",
904 le16_to_cpu(neh->eh_entries), le16_to_cpu(neh->eh_max),
905 le32_to_cpu(fidx->ei_block), idx_pblock(fidx));
907 neh->eh_depth = cpu_to_le16(path->p_depth + 1);
908 err = ext4_ext_dirty(handle, inode, curp);
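/*
 * Editor's note: after ext4_ext_grow_indepth() the old root's contents
 * live in the newly allocated block, and the in-inode root shrinks to a
 * one-entry index pointing at it; only eh_depth grows, so the root never
 * moves on disk.
 */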
916 * ext4_ext_create_new_leaf:
917 * finds empty index and adds new leaf.
918 * if no free index is found, then it requests in-depth growing.
920 static int ext4_ext_create_new_leaf(handle_t *handle, struct inode *inode,
921 struct ext4_ext_path *path,
922 struct ext4_extent *newext)
924 struct ext4_ext_path *curp;
925 int depth, i, err = 0;
928 i = depth = ext_depth(inode);
930 /* walk up the tree and look for a free index entry */
932 while (i > 0 && !EXT_HAS_FREE_INDEX(curp)) {
937 /* we use already allocated block for index block,
938 * so subsequent data blocks should be contiguous */
939 if (EXT_HAS_FREE_INDEX(curp)) {
940 /* if we found index with free entry, then use that
941 * entry: create all needed subtree and add new leaf */
942 err = ext4_ext_split(handle, inode, path, newext, i);
945 ext4_ext_drop_refs(path);
946 path = ext4_ext_find_extent(inode,
947 le32_to_cpu(newext->ee_block),
952 /* tree is full, time to grow in depth */
953 err = ext4_ext_grow_indepth(handle, inode, path, newext);
958 ext4_ext_drop_refs(path);
959 path = ext4_ext_find_extent(inode,
960 le32_to_cpu(newext->ee_block),
968 * only the first grow (depth 0 -> 1) produces free space;
969 * in all other cases we have to split the grown tree
971 depth = ext_depth(inode);
972 if (path[depth].p_hdr->eh_entries == path[depth].p_hdr->eh_max) {
973 /* now we need to split */
983 * ext4_ext_next_allocated_block:
984 * returns allocated block in subsequent extent or EXT_MAX_BLOCK.
985 * NOTE: it considers block number from index entry as
986 * allocated block. Thus, index entries have to be consistent
990 ext4_ext_next_allocated_block(struct ext4_ext_path *path)
994 BUG_ON(path == NULL);
995 depth = path->p_depth;
997 if (depth == 0 && path->p_ext == NULL)
998 return EXT_MAX_BLOCK;
1000 while (depth >= 0) {
1001 if (depth == path->p_depth) {
1003 if (path[depth].p_ext !=
1004 EXT_LAST_EXTENT(path[depth].p_hdr))
1005 return le32_to_cpu(path[depth].p_ext[1].ee_block);
1008 if (path[depth].p_idx !=
1009 EXT_LAST_INDEX(path[depth].p_hdr))
1010 return le32_to_cpu(path[depth].p_idx[1].ei_block);
1015 return EXT_MAX_BLOCK;
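/*
 * Editor's note: the walk above climbs from the leaf towards the root,
 * returning the logical start of the first extent or index entry to the
 * right of the current position; EXT_MAX_BLOCK means nothing further is
 * allocated.
 */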
1019 * ext4_ext_next_leaf_block:
1020 * returns first allocated block from next leaf or EXT_MAX_BLOCK
1022 static unsigned ext4_ext_next_leaf_block(struct inode *inode,
1023 struct ext4_ext_path *path)
1027 BUG_ON(path == NULL);
1028 depth = path->p_depth;
1030 /* zero-tree has no leaf blocks at all */
1032 return EXT_MAX_BLOCK;
1034 /* go to index block */
1037 while (depth >= 0) {
1038 if (path[depth].p_idx !=
1039 EXT_LAST_INDEX(path[depth].p_hdr))
1040 return le32_to_cpu(path[depth].p_idx[1].ei_block);
1044 return EXT_MAX_BLOCK;
1048 * ext4_ext_correct_indexes:
1049 * if leaf gets modified and modified extent is first in the leaf,
1050 * then we have to correct all indexes above.
1051 * TODO: do we need to correct tree in all cases?
1053 int ext4_ext_correct_indexes(handle_t *handle, struct inode *inode,
1054 struct ext4_ext_path *path)
1056 struct ext4_extent_header *eh;
1057 int depth = ext_depth(inode);
1058 struct ext4_extent *ex;
1062 eh = path[depth].p_hdr;
1063 ex = path[depth].p_ext;
1068 /* there is no tree at all */
1072 if (ex != EXT_FIRST_EXTENT(eh)) {
1073 /* we only correct the tree if the first leaf was modified */
1078 * TODO: we need correction if border is smaller than current one
1081 border = path[depth].p_ext->ee_block;
1082 err = ext4_ext_get_access(handle, inode, path + k);
1085 path[k].p_idx->ei_block = border;
1086 err = ext4_ext_dirty(handle, inode, path + k);
1091 /* change all left-side indexes */
1092 if (path[k+1].p_idx != EXT_FIRST_INDEX(path[k+1].p_hdr))
1094 err = ext4_ext_get_access(handle, inode, path + k);
1097 path[k].p_idx->ei_block = border;
1098 err = ext4_ext_dirty(handle, inode, path + k);
1107 ext4_can_extents_be_merged(struct inode *inode, struct ext4_extent *ex1,
1108 struct ext4_extent *ex2)
1110 unsigned short ext1_ee_len, ext2_ee_len, max_len;
1113 * Make sure that either both extents are uninitialized, or
1116 if (ext4_ext_is_uninitialized(ex1) ^ ext4_ext_is_uninitialized(ex2))
1119 if (ext4_ext_is_uninitialized(ex1))
1120 max_len = EXT_UNINIT_MAX_LEN;
1122 max_len = EXT_INIT_MAX_LEN;
1124 ext1_ee_len = ext4_ext_get_actual_len(ex1);
1125 ext2_ee_len = ext4_ext_get_actual_len(ex2);
1127 if (le32_to_cpu(ex1->ee_block) + ext1_ee_len !=
1128 le32_to_cpu(ex2->ee_block))
1132 * To allow future support for preallocated extents to be added
1133 * as an RO_COMPAT feature, refuse to merge two extents if
1134 * this can result in the top bit of ee_len being set.
1136 if (ext1_ee_len + ext2_ee_len > max_len)
1138 #ifdef AGGRESSIVE_TEST
1139 if (le16_to_cpu(ex1->ee_len) >= 4)
1143 if (ext_pblock(ex1) + ext1_ee_len == ext_pblock(ex2))
1149 * This function tries to merge the "ex" extent to the next extent in the tree.
1150 * It always tries to merge towards right. If you want to merge towards
1151 * left, pass "ex - 1" as argument instead of "ex".
1152 * Returns 0 if the extents (ex and ex+1) were _not_ merged and returns
1153 * 1 if they got merged.
1155 int ext4_ext_try_to_merge(struct inode *inode,
1156 struct ext4_ext_path *path,
1157 struct ext4_extent *ex)
1159 struct ext4_extent_header *eh;
1160 unsigned int depth, len;
1162 int uninitialized = 0;
1164 depth = ext_depth(inode);
1165 BUG_ON(path[depth].p_hdr == NULL);
1166 eh = path[depth].p_hdr;
1168 while (ex < EXT_LAST_EXTENT(eh)) {
1169 if (!ext4_can_extents_be_merged(inode, ex, ex + 1))
1171 /* merge with next extent! */
1172 if (ext4_ext_is_uninitialized(ex))
1174 ex->ee_len = cpu_to_le16(ext4_ext_get_actual_len(ex)
1175 + ext4_ext_get_actual_len(ex + 1));
1177 ext4_ext_mark_uninitialized(ex);
1179 if (ex + 1 < EXT_LAST_EXTENT(eh)) {
1180 len = (EXT_LAST_EXTENT(eh) - ex - 1)
1181 * sizeof(struct ext4_extent);
1182 memmove(ex + 1, ex + 2, len);
1184 eh->eh_entries = cpu_to_le16(le16_to_cpu(eh->eh_entries) - 1);
1186 WARN_ON(eh->eh_entries == 0);
1187 if (!eh->eh_entries)
1188 ext4_error(inode->i_sb, "ext4_ext_try_to_merge",
1189 "inode#%lu, eh->eh_entries = 0!", inode->i_ino);
1196 * check if a portion of the "newext" extent overlaps with an
1199 * If there is an overlap discovered, it updates the length of the newext
1200 * such that there will be no overlap, and then returns 1.
1201 * If there is no overlap found, it returns 0.
1203 unsigned int ext4_ext_check_overlap(struct inode *inode,
1204 struct ext4_extent *newext,
1205 struct ext4_ext_path *path)
1207 unsigned long b1, b2;
1208 unsigned int depth, len1;
1209 unsigned int ret = 0;
1211 b1 = le32_to_cpu(newext->ee_block);
1212 len1 = ext4_ext_get_actual_len(newext);
1213 depth = ext_depth(inode);
1214 if (!path[depth].p_ext)
1216 b2 = le32_to_cpu(path[depth].p_ext->ee_block);
1219 * get the next allocated block if the extent in the path
1220 * is before the requested block(s)
1223 b2 = ext4_ext_next_allocated_block(path);
1224 if (b2 == EXT_MAX_BLOCK)
1228 /* check for wrap through zero */
1229 if (b1 + len1 < b1) {
1230 len1 = EXT_MAX_BLOCK - b1;
1231 newext->ee_len = cpu_to_le16(len1);
1235 /* check for overlap */
1236 if (b1 + len1 > b2) {
1237 newext->ee_len = cpu_to_le16(b2 - b1);
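/*
 * Example (editor): a newext covering logical blocks 100..119 (b1 = 100,
 * len1 = 20) with the next allocation at b2 = 110 is trimmed to
 * ee_len = 110 - 100 = 10 and the function returns 1.
 */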
1245 * ext4_ext_insert_extent:
1246 * tries to merge the requested extent into the existing extent or
1247 * inserts requested extent as new one into the tree,
1248 * creating new leaf in the no-space case.
1250 int ext4_ext_insert_extent(handle_t *handle, struct inode *inode,
1251 struct ext4_ext_path *path,
1252 struct ext4_extent *newext)
1254 struct ext4_extent_header * eh;
1255 struct ext4_extent *ex, *fex;
1256 struct ext4_extent *nearex; /* nearest extent */
1257 struct ext4_ext_path *npath = NULL;
1258 int depth, len, err, next;
1259 unsigned uninitialized = 0;
1261 BUG_ON(ext4_ext_get_actual_len(newext) == 0);
1262 depth = ext_depth(inode);
1263 ex = path[depth].p_ext;
1264 BUG_ON(path[depth].p_hdr == NULL);
1266 /* try to insert block into found extent and return */
1267 if (ex && ext4_can_extents_be_merged(inode, ex, newext)) {
1268 ext_debug("append %d block to %d:%d (from %llu)\n",
1269 ext4_ext_get_actual_len(newext),
1270 le32_to_cpu(ex->ee_block),
1271 ext4_ext_get_actual_len(ex), ext_pblock(ex));
1272 err = ext4_ext_get_access(handle, inode, path + depth);
1277 * ext4_can_extents_be_merged should have checked that either
1278 * both extents are uninitialized, or both aren't. Thus we
1279 * need to check only one of them here.
1281 if (ext4_ext_is_uninitialized(ex))
1283 ex->ee_len = cpu_to_le16(ext4_ext_get_actual_len(ex)
1284 + ext4_ext_get_actual_len(newext));
1286 ext4_ext_mark_uninitialized(ex);
1287 eh = path[depth].p_hdr;
1293 depth = ext_depth(inode);
1294 eh = path[depth].p_hdr;
1295 if (le16_to_cpu(eh->eh_entries) < le16_to_cpu(eh->eh_max))
1298 /* probably next leaf has space for us? */
1299 fex = EXT_LAST_EXTENT(eh);
1300 next = ext4_ext_next_leaf_block(inode, path);
1301 if (le32_to_cpu(newext->ee_block) > le32_to_cpu(fex->ee_block)
1302 && next != EXT_MAX_BLOCK) {
1303 ext_debug("next leaf block - %d\n", next);
1304 BUG_ON(npath != NULL);
1305 npath = ext4_ext_find_extent(inode, next, NULL);
1307 return PTR_ERR(npath);
1308 BUG_ON(npath->p_depth != path->p_depth);
1309 eh = npath[depth].p_hdr;
1310 if (le16_to_cpu(eh->eh_entries) < le16_to_cpu(eh->eh_max)) {
1311 ext_debug("next leaf isn't full(%d)\n",
1312 le16_to_cpu(eh->eh_entries));
1316 ext_debug("next leaf has no free space(%d,%d)\n",
1317 le16_to_cpu(eh->eh_entries), le16_to_cpu(eh->eh_max));
1321 * There is no free space in the found leaf.
1322 * We're gonna add a new leaf in the tree.
1324 err = ext4_ext_create_new_leaf(handle, inode, path, newext);
1327 depth = ext_depth(inode);
1328 eh = path[depth].p_hdr;
1331 nearex = path[depth].p_ext;
1333 err = ext4_ext_get_access(handle, inode, path + depth);
1338 /* there is no extent in this leaf, create first one */
1339 ext_debug("first extent in the leaf: %d:%llu:%d\n",
1340 le32_to_cpu(newext->ee_block),
1342 ext4_ext_get_actual_len(newext));
1343 path[depth].p_ext = EXT_FIRST_EXTENT(eh);
1344 } else if (le32_to_cpu(newext->ee_block)
1345 > le32_to_cpu(nearex->ee_block)) {
1346 /* BUG_ON(newext->ee_block == nearex->ee_block); */
1347 if (nearex != EXT_LAST_EXTENT(eh)) {
1348 len = EXT_MAX_EXTENT(eh) - nearex;
1349 len = (len - 1) * sizeof(struct ext4_extent);
1350 len = len < 0 ? 0 : len;
1351 ext_debug("insert %d:%llu:%d after: nearest 0x%p, "
1352 "move %d from 0x%p to 0x%p\n",
1353 le32_to_cpu(newext->ee_block),
1355 ext4_ext_get_actual_len(newext),
1356 nearex, len, nearex + 1, nearex + 2);
1357 memmove(nearex + 2, nearex + 1, len);
1359 path[depth].p_ext = nearex + 1;
1361 BUG_ON(newext->ee_block == nearex->ee_block);
1362 len = (EXT_MAX_EXTENT(eh) - nearex) * sizeof(struct ext4_extent);
1363 len = len < 0 ? 0 : len;
1364 ext_debug("insert %d:%llu:%d before: nearest 0x%p, "
1365 "move %d from 0x%p to 0x%p\n",
1366 le32_to_cpu(newext->ee_block),
1368 ext4_ext_get_actual_len(newext),
1369 nearex, len, nearex + 1, nearex + 2);
1370 memmove(nearex + 1, nearex, len);
1371 path[depth].p_ext = nearex;
1374 eh->eh_entries = cpu_to_le16(le16_to_cpu(eh->eh_entries)+1);
1375 nearex = path[depth].p_ext;
1376 nearex->ee_block = newext->ee_block;
1377 nearex->ee_start = newext->ee_start;
1378 nearex->ee_start_hi = newext->ee_start_hi;
1379 nearex->ee_len = newext->ee_len;
1382 /* try to merge extents to the right */
1383 ext4_ext_try_to_merge(inode, path, nearex);
1385 /* try to merge extents to the left */
1387 /* time to correct all indexes above */
1388 err = ext4_ext_correct_indexes(handle, inode, path);
1392 err = ext4_ext_dirty(handle, inode, path + depth);
1396 ext4_ext_drop_refs(npath);
1399 ext4_ext_tree_changed(inode);
1400 ext4_ext_invalidate_cache(inode);
1404 int ext4_ext_walk_space(struct inode *inode, unsigned long block,
1405 unsigned long num, ext_prepare_callback func,
1408 struct ext4_ext_path *path = NULL;
1409 struct ext4_ext_cache cbex;
1410 struct ext4_extent *ex;
1411 unsigned long next, start = 0, end = 0;
1412 unsigned long last = block + num;
1413 int depth, exists, err = 0;
1415 BUG_ON(func == NULL);
1416 BUG_ON(inode == NULL);
1418 while (block < last && block != EXT_MAX_BLOCK) {
1420 /* find extent for this block */
1421 path = ext4_ext_find_extent(inode, block, path);
1423 err = PTR_ERR(path);
1428 depth = ext_depth(inode);
1429 BUG_ON(path[depth].p_hdr == NULL);
1430 ex = path[depth].p_ext;
1431 next = ext4_ext_next_allocated_block(path);
1435 /* there is no extent yet, so try to allocate
1436 * all requested space */
1439 } else if (le32_to_cpu(ex->ee_block) > block) {
1440 /* need to allocate space before found extent */
1442 end = le32_to_cpu(ex->ee_block);
1443 if (block + num < end)
1445 } else if (block >= le32_to_cpu(ex->ee_block)
1446 + ext4_ext_get_actual_len(ex)) {
1447 /* need to allocate space after found extent */
1452 } else if (block >= le32_to_cpu(ex->ee_block)) {
1454 * some part of requested space is covered
1458 end = le32_to_cpu(ex->ee_block)
1459 + ext4_ext_get_actual_len(ex);
1460 if (block + num < end)
1466 BUG_ON(end <= start);
1469 cbex.ec_block = start;
1470 cbex.ec_len = end - start;
1472 cbex.ec_type = EXT4_EXT_CACHE_GAP;
1474 cbex.ec_block = le32_to_cpu(ex->ee_block);
1475 cbex.ec_len = ext4_ext_get_actual_len(ex);
1476 cbex.ec_start = ext_pblock(ex);
1477 cbex.ec_type = EXT4_EXT_CACHE_EXTENT;
1480 BUG_ON(cbex.ec_len == 0);
1481 err = func(inode, path, &cbex, cbdata);
1482 ext4_ext_drop_refs(path);
1486 if (err == EXT_REPEAT)
1488 else if (err == EXT_BREAK) {
1493 if (ext_depth(inode) != depth) {
1494 /* depth was changed. we have to realloc path */
1499 block = cbex.ec_block + cbex.ec_len;
1503 ext4_ext_drop_refs(path);
1511 ext4_ext_put_in_cache(struct inode *inode, __u32 block,
1512 __u32 len, __u32 start, int type)
1514 struct ext4_ext_cache *cex;
1516 cex = &EXT4_I(inode)->i_cached_extent;
1517 cex->ec_type = type;
1518 cex->ec_block = block;
1520 cex->ec_start = start;
1524 * ext4_ext_put_gap_in_cache:
1525 * calculate boundaries of the gap that the requested block fits into
1526 * and cache this gap
1529 ext4_ext_put_gap_in_cache(struct inode *inode, struct ext4_ext_path *path,
1530 unsigned long block)
1532 int depth = ext_depth(inode);
1533 unsigned long lblock, len;
1534 struct ext4_extent *ex;
1536 ex = path[depth].p_ext;
1538 /* there is no extent yet, so gap is [0;-] */
1540 len = EXT_MAX_BLOCK;
1541 ext_debug("cache gap(whole file):");
1542 } else if (block < le32_to_cpu(ex->ee_block)) {
1544 len = le32_to_cpu(ex->ee_block) - block;
1545 ext_debug("cache gap(before): %lu [%lu:%lu]",
1546 (unsigned long) block,
1547 (unsigned long) le32_to_cpu(ex->ee_block),
1548 (unsigned long) ext4_ext_get_actual_len(ex));
1549 } else if (block >= le32_to_cpu(ex->ee_block)
1550 + ext4_ext_get_actual_len(ex)) {
1551 lblock = le32_to_cpu(ex->ee_block)
1552 + ext4_ext_get_actual_len(ex);
1553 len = ext4_ext_next_allocated_block(path);
1554 ext_debug("cache gap(after): [%lu:%lu] %lu",
1555 (unsigned long) le32_to_cpu(ex->ee_block),
1556 (unsigned long) ext4_ext_get_actual_len(ex),
1557 (unsigned long) block);
1558 BUG_ON(len == lblock);
1565 ext_debug(" -> %lu:%lu\n", (unsigned long) lblock, len);
1566 ext4_ext_put_in_cache(inode, lblock, len, 0, EXT4_EXT_CACHE_GAP);
1570 ext4_ext_in_cache(struct inode *inode, unsigned long block,
1571 struct ext4_extent *ex)
1573 struct ext4_ext_cache *cex;
1575 cex = &EXT4_I(inode)->i_cached_extent;
1577 /* does the cache hold valid data? */
1578 if (cex->ec_type == EXT4_EXT_CACHE_NO)
1579 return EXT4_EXT_CACHE_NO;
1581 BUG_ON(cex->ec_type != EXT4_EXT_CACHE_GAP &&
1582 cex->ec_type != EXT4_EXT_CACHE_EXTENT);
1583 if (block >= cex->ec_block && block < cex->ec_block + cex->ec_len) {
1584 ex->ee_block = cpu_to_le32(cex->ec_block);
1585 ext4_ext_store_pblock(ex, cex->ec_start);
1586 ex->ee_len = cpu_to_le16(cex->ec_len);
1587 ext_debug("%lu cached by %lu:%lu:%llu\n",
1588 (unsigned long) block,
1589 (unsigned long) cex->ec_block,
1590 (unsigned long) cex->ec_len,
1592 return cex->ec_type;
1596 return EXT4_EXT_CACHE_NO;
1601 * removes index from the index block.
1602 * It's used in truncate case only, thus all requests are for
1603 * last index in the block only.
1605 int ext4_ext_rm_idx(handle_t *handle, struct inode *inode,
1606 struct ext4_ext_path *path)
1608 struct buffer_head *bh;
1612 /* free index block */
1614 leaf = idx_pblock(path->p_idx);
1615 BUG_ON(path->p_hdr->eh_entries == 0);
1616 err = ext4_ext_get_access(handle, inode, path);
1619 path->p_hdr->eh_entries = cpu_to_le16(le16_to_cpu(path->p_hdr->eh_entries)-1);
1620 err = ext4_ext_dirty(handle, inode, path);
1623 ext_debug("index is empty, remove it, free block %llu\n", leaf);
1624 bh = sb_find_get_block(inode->i_sb, leaf);
1625 ext4_forget(handle, 1, inode, bh, leaf);
1626 ext4_free_blocks(handle, inode, leaf, 1);
1631 * ext4_ext_calc_credits_for_insert:
1632 * This routine returns max. credits that the extent tree can consume.
1633 * It should be OK for low-performance paths like ->writepage()
1634 * To allow many writing processes to fit into a single transaction,
1635 * the caller should calculate credits under truncate_mutex and
1636 * pass the actual path.
1638 int ext4_ext_calc_credits_for_insert(struct inode *inode,
1639 struct ext4_ext_path *path)
1644 /* probably there is space in leaf? */
1645 depth = ext_depth(inode);
1646 if (le16_to_cpu(path[depth].p_hdr->eh_entries)
1647 < le16_to_cpu(path[depth].p_hdr->eh_max))
1652 * given 32-bit logical block (4294967296 blocks), max. tree
1653 * can be 4 levels in depth -- 4 * 340^4 == 53453440000.
1654 * Let's also add one more level for imbalance.
1658 /* allocation of new data block(s) */
1662 * tree can be full, so it would need to grow in depth:
1663 * we need one credit to modify old root, credits for
1664 * new root will be added in split accounting
1669 * Index split can happen, we would need:
1670 * allocate intermediate indexes (bitmap + group)
1671 * + change two blocks at each level, but root (already included)
1673 needed += (depth * 2) + (depth * 2);
1675 /* any allocation modifies superblock */
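/*
 * Example (editor's arithmetic for the line above): at depth 2 the
 * index-split term alone contributes (2 * 2) + (2 * 2) = 8 credits, on
 * top of the root, data-block and superblock credits accounted nearby.
 */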
1681 static int ext4_remove_blocks(handle_t *handle, struct inode *inode,
1682 struct ext4_extent *ex,
1683 unsigned long from, unsigned long to)
1685 struct buffer_head *bh;
1686 unsigned short ee_len = ext4_ext_get_actual_len(ex);
1689 #ifdef EXTENTS_STATS
1691 struct ext4_sb_info *sbi = EXT4_SB(inode->i_sb);
1692 spin_lock(&sbi->s_ext_stats_lock);
1693 sbi->s_ext_blocks += ee_len;
1694 sbi->s_ext_extents++;
1695 if (ee_len < sbi->s_ext_min)
1696 sbi->s_ext_min = ee_len;
1697 if (ee_len > sbi->s_ext_max)
1698 sbi->s_ext_max = ee_len;
1699 if (ext_depth(inode) > sbi->s_depth_max)
1700 sbi->s_depth_max = ext_depth(inode);
1701 spin_unlock(&sbi->s_ext_stats_lock);
1704 if (from >= le32_to_cpu(ex->ee_block)
1705 && to == le32_to_cpu(ex->ee_block) + ee_len - 1) {
1709 num = le32_to_cpu(ex->ee_block) + ee_len - from;
1710 start = ext_pblock(ex) + ee_len - num;
1711 ext_debug("free last %lu blocks starting %llu\n", num, start);
1712 for (i = 0; i < num; i++) {
1713 bh = sb_find_get_block(inode->i_sb, start + i);
1714 ext4_forget(handle, 0, inode, bh, start + i);
1716 ext4_free_blocks(handle, inode, start, num);
1717 } else if (from == le32_to_cpu(ex->ee_block)
1718 && to <= le32_to_cpu(ex->ee_block) + ee_len - 1) {
1719 printk("strange request: removal %lu-%lu from %u:%u\n",
1720 from, to, le32_to_cpu(ex->ee_block), ee_len);
1722 printk("strange request: removal(2) %lu-%lu from %u:%u\n",
1723 from, to, le32_to_cpu(ex->ee_block), ee_len);
1729 ext4_ext_rm_leaf(handle_t *handle, struct inode *inode,
1730 struct ext4_ext_path *path, unsigned long start)
1732 int err = 0, correct_index = 0;
1733 int depth = ext_depth(inode), credits;
1734 struct ext4_extent_header *eh;
1735 unsigned a, b, block, num;
1736 unsigned long ex_ee_block;
1737 unsigned short ex_ee_len;
1738 unsigned uninitialized = 0;
1739 struct ext4_extent *ex;
1741 ext_debug("truncate since %lu in leaf\n", start);
1742 if (!path[depth].p_hdr)
1743 path[depth].p_hdr = ext_block_hdr(path[depth].p_bh);
1744 eh = path[depth].p_hdr;
1746 BUG_ON(le16_to_cpu(eh->eh_entries) > le16_to_cpu(eh->eh_max));
1747 BUG_ON(eh->eh_magic != EXT4_EXT_MAGIC);
1749 /* find where to start removing */
1750 ex = EXT_LAST_EXTENT(eh);
1752 ex_ee_block = le32_to_cpu(ex->ee_block);
1753 if (ext4_ext_is_uninitialized(ex))
1755 ex_ee_len = ext4_ext_get_actual_len(ex);
1757 while (ex >= EXT_FIRST_EXTENT(eh) &&
1758 ex_ee_block + ex_ee_len > start) {
1759 ext_debug("remove ext %lu:%u\n", ex_ee_block, ex_ee_len);
1760 path[depth].p_ext = ex;
1762 a = ex_ee_block > start ? ex_ee_block : start;
1763 b = ex_ee_block + ex_ee_len - 1 < EXT_MAX_BLOCK ?
1764 ex_ee_block + ex_ee_len - 1 : EXT_MAX_BLOCK;
1766 ext_debug(" border %u:%u\n", a, b);
1768 if (a != ex_ee_block && b != ex_ee_block + ex_ee_len - 1) {
1772 } else if (a != ex_ee_block) {
1773 /* remove tail of the extent */
1774 block = ex_ee_block;
1776 } else if (b != ex_ee_block + ex_ee_len - 1) {
1777 /* remove head of the extent */
1780 /* there is no "make a hole" API yet */
1783 /* remove whole extent: excellent! */
1784 block = ex_ee_block;
1786 BUG_ON(a != ex_ee_block);
1787 BUG_ON(b != ex_ee_block + ex_ee_len - 1);
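/*
 * Example (editor): truncating from start = 110 an extent covering
 * blocks 100..119 gives a = 110, b = 119, i.e. the "remove tail" case:
 * blocks 110..119 are freed and the extent keeps num = 10 blocks.
 */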
1790 /* at present, extent can't cross block group: */
1791 /* leaf + bitmap + group desc + sb + inode */
1793 if (ex == EXT_FIRST_EXTENT(eh)) {
1795 credits += (ext_depth(inode)) + 1;
1798 credits += 2 * EXT4_QUOTA_TRANS_BLOCKS(inode->i_sb);
1801 handle = ext4_ext_journal_restart(handle, credits);
1802 if (IS_ERR(handle)) {
1803 err = PTR_ERR(handle);
1807 err = ext4_ext_get_access(handle, inode, path + depth);
1811 err = ext4_remove_blocks(handle, inode, ex, a, b);
1816 /* this extent is removed; mark slot entirely unused */
1817 ext4_ext_store_pblock(ex, 0);
1818 eh->eh_entries = cpu_to_le16(le16_to_cpu(eh->eh_entries)-1);
1821 ex->ee_block = cpu_to_le32(block);
1822 ex->ee_len = cpu_to_le16(num);
1824 * Do not mark uninitialized if all the blocks in the
1825 * extent have been removed.
1827 if (uninitialized && num)
1828 ext4_ext_mark_uninitialized(ex);
1830 err = ext4_ext_dirty(handle, inode, path + depth);
1834 ext_debug("new extent: %u:%u:%llu\n", block, num,
1837 ex_ee_block = le32_to_cpu(ex->ee_block);
1838 ex_ee_len = ext4_ext_get_actual_len(ex);
1841 if (correct_index && eh->eh_entries)
1842 err = ext4_ext_correct_indexes(handle, inode, path);
1844 /* if this leaf is free, then we should
1845 * remove it from index block above */
1846 if (err == 0 && eh->eh_entries == 0 && path[depth].p_bh != NULL)
1847 err = ext4_ext_rm_idx(handle, inode, path + depth);
1854 * ext4_ext_more_to_rm:
1855 * returns 1 if current index has to be freed (even partial)
1858 ext4_ext_more_to_rm(struct ext4_ext_path *path)
1860 BUG_ON(path->p_idx == NULL);
1862 if (path->p_idx < EXT_FIRST_INDEX(path->p_hdr))
1866 * if truncate on deeper level happened, it wasn't partial,
1867 * so we have to consider current index for truncation
1869 if (le16_to_cpu(path->p_hdr->eh_entries) == path->p_block)
1874 int ext4_ext_remove_space(struct inode *inode, unsigned long start)
1876 struct super_block *sb = inode->i_sb;
1877 int depth = ext_depth(inode);
1878 struct ext4_ext_path *path;
1882 ext_debug("truncate since %lu\n", start);
1884 /* probably first extent we're gonna free will be last in block */
1885 handle = ext4_journal_start(inode, depth + 1);
1887 return PTR_ERR(handle);
1889 ext4_ext_invalidate_cache(inode);
1892 * We start scanning from right side, freeing all the blocks
1893 * after i_size and walking into the tree depth-wise.
1895 path = kzalloc(sizeof(struct ext4_ext_path) * (depth + 1), GFP_KERNEL);
1897 ext4_journal_stop(handle);
1900 path[0].p_hdr = ext_inode_hdr(inode);
1901 if (ext4_ext_check_header(__FUNCTION__, inode, path[0].p_hdr)) {
1905 path[0].p_depth = depth;
1907 while (i >= 0 && err == 0) {
1909 /* this is leaf block */
1910 err = ext4_ext_rm_leaf(handle, inode, path, start);
1911 /* root level has p_bh == NULL, brelse() eats this */
1912 brelse(path[i].p_bh);
1913 path[i].p_bh = NULL;
1918 /* this is index block */
1919 if (!path[i].p_hdr) {
1920 ext_debug("initialize header\n");
1921 path[i].p_hdr = ext_block_hdr(path[i].p_bh);
1922 if (ext4_ext_check_header(__FUNCTION__, inode,
1929 BUG_ON(le16_to_cpu(path[i].p_hdr->eh_entries)
1930 > le16_to_cpu(path[i].p_hdr->eh_max));
1931 BUG_ON(path[i].p_hdr->eh_magic != EXT4_EXT_MAGIC);
1933 if (!path[i].p_idx) {
1934 /* this level hasn't been touched yet */
1935 path[i].p_idx = EXT_LAST_INDEX(path[i].p_hdr);
1936 path[i].p_block = le16_to_cpu(path[i].p_hdr->eh_entries)+1;
1937 ext_debug("init index ptr: hdr 0x%p, num %d\n",
1939 le16_to_cpu(path[i].p_hdr->eh_entries));
1941 /* we were already here, move on to the next index */
1945 ext_debug("level %d - index, first 0x%p, cur 0x%p\n",
1946 i, EXT_FIRST_INDEX(path[i].p_hdr),
1948 if (ext4_ext_more_to_rm(path + i)) {
1949 /* go to the next level */
1950 ext_debug("move to level %d (block %llu)\n",
1951 i + 1, idx_pblock(path[i].p_idx));
1952 memset(path + i + 1, 0, sizeof(*path));
1954 sb_bread(sb, idx_pblock(path[i].p_idx));
1955 if (!path[i+1].p_bh) {
1956 /* should we reset i_size? */
1961 /* save actual number of indexes since this
1962 * number is changed at the next iteration */
1963 path[i].p_block = le16_to_cpu(path[i].p_hdr->eh_entries);
1966 /* we finished processing this index, go up */
1967 if (path[i].p_hdr->eh_entries == 0 && i > 0) {
1968 /* index is empty, remove it;
1969 * handle must already be prepared by
1970 * ext4_ext_rm_leaf() */
1971 err = ext4_ext_rm_idx(handle, inode, path + i);
1973 /* root level has p_bh == NULL, brelse() eats this */
1974 brelse(path[i].p_bh);
1975 path[i].p_bh = NULL;
1977 ext_debug("return to level %d\n", i);
1981 /* TODO: flexible tree reduction should be here */
1982 if (path->p_hdr->eh_entries == 0) {
1984 * truncate to zero freed the whole tree,
1985 * so we need to correct eh_depth
1987 err = ext4_ext_get_access(handle, inode, path);
1989 ext_inode_hdr(inode)->eh_depth = 0;
1990 ext_inode_hdr(inode)->eh_max =
1991 cpu_to_le16(ext4_ext_space_root(inode));
1992 err = ext4_ext_dirty(handle, inode, path);
1996 ext4_ext_tree_changed(inode);
1997 ext4_ext_drop_refs(path);
1999 ext4_journal_stop(handle);
2005 * called at mount time
2007 void ext4_ext_init(struct super_block *sb)
2010 * possible initialization would be here
2013 if (test_opt(sb, EXTENTS)) {
2014 printk("EXT4-fs: file extents enabled");
2015 #ifdef AGGRESSIVE_TEST
2016 printk(", aggressive tests");
2018 #ifdef CHECK_BINSEARCH
2019 printk(", check binsearch");
2021 #ifdef EXTENTS_STATS
2025 #ifdef EXTENTS_STATS
2026 spin_lock_init(&EXT4_SB(sb)->s_ext_stats_lock);
2027 EXT4_SB(sb)->s_ext_min = 1 << 30;
2028 EXT4_SB(sb)->s_ext_max = 0;
2034 * called at umount time
2036 void ext4_ext_release(struct super_block *sb)
2038 if (!test_opt(sb, EXTENTS))
2041 #ifdef EXTENTS_STATS
2042 if (EXT4_SB(sb)->s_ext_blocks && EXT4_SB(sb)->s_ext_extents) {
2043 struct ext4_sb_info *sbi = EXT4_SB(sb);
2044 printk(KERN_ERR "EXT4-fs: %lu blocks in %lu extents (%lu ave)\n",
2045 sbi->s_ext_blocks, sbi->s_ext_extents,
2046 sbi->s_ext_blocks / sbi->s_ext_extents);
2047 printk(KERN_ERR "EXT4-fs: extents: %lu min, %lu max, max depth %lu\n",
2048 sbi->s_ext_min, sbi->s_ext_max, sbi->s_depth_max);
2054 * This function is called by ext4_ext_get_blocks() if someone tries to write
2055 * to an uninitialized extent. It may result in splitting the uninitialized
2056 * extent into multiple extents (up to three - one initialized and two
2058 * There are three possibilities:
2059 * a> There is no split required: Entire extent should be initialized
2060 * b> Splits in two extents: Write is happening at either end of the extent
2061 * c> Splits in three extents: Someone is writing in the middle of the extent
2063 int ext4_ext_convert_to_initialized(handle_t *handle, struct inode *inode,
2064 struct ext4_ext_path *path,
2065 ext4_fsblk_t iblock,
2066 unsigned long max_blocks)
2068 struct ext4_extent *ex, newex;
2069 struct ext4_extent *ex1 = NULL;
2070 struct ext4_extent *ex2 = NULL;
2071 struct ext4_extent *ex3 = NULL;
2072 struct ext4_extent_header *eh;
2073 unsigned int allocated, ee_block, ee_len, depth;
2074 ext4_fsblk_t newblock;
2078 depth = ext_depth(inode);
2079 eh = path[depth].p_hdr;
2080 ex = path[depth].p_ext;
2081 ee_block = le32_to_cpu(ex->ee_block);
2082 ee_len = ext4_ext_get_actual_len(ex);
2083 allocated = ee_len - (iblock - ee_block);
2084 newblock = iblock - ee_block + ext_pblock(ex);
2087 /* ex1: ee_block to iblock - 1 : uninitialized */
2088 if (iblock > ee_block) {
2090 ex1->ee_len = cpu_to_le16(iblock - ee_block);
2091 ext4_ext_mark_uninitialized(ex1);
2095 * for sanity, update the length of the ex2 extent before
2096 * we insert ex3, if ex1 is NULL. This is to avoid temporary
2097 * overlap of blocks.
2099 if (!ex1 && allocated > max_blocks)
2100 ex2->ee_len = cpu_to_le16(max_blocks);
2101 /* ex3: to ee_block + ee_len : uninitialized */
2102 if (allocated > max_blocks) {
2103 unsigned int newdepth;
2105 ex3->ee_block = cpu_to_le32(iblock + max_blocks);
2106 ext4_ext_store_pblock(ex3, newblock + max_blocks);
2107 ex3->ee_len = cpu_to_le16(allocated - max_blocks);
2108 ext4_ext_mark_uninitialized(ex3);
2109 err = ext4_ext_insert_extent(handle, inode, path, ex3);
2113 * The depth, and hence eh & ex might change
2114 * as part of the insert above.
2116 newdepth = ext_depth(inode);
2117 if (newdepth != depth) {
2119 path = ext4_ext_find_extent(inode, iblock, NULL);
2121 err = PTR_ERR(path);
2125 eh = path[depth].p_hdr;
2126 ex = path[depth].p_ext;
2130 allocated = max_blocks;
2133 * If there was a change of depth as part of the
2134 * insertion of ex3 above, we need to update the length
2135 * of the ex1 extent again here
2137 if (ex1 && ex1 != ex) {
2139 ex1->ee_len = cpu_to_le16(iblock - ee_block);
2140 ext4_ext_mark_uninitialized(ex1);
2143 /* ex2: iblock to iblock + max_blocks-1 : initialized */
2144 ex2->ee_block = cpu_to_le32(iblock);
2145 ex2->ee_start = cpu_to_le32(newblock);
2146 ext4_ext_store_pblock(ex2, newblock);
2147 ex2->ee_len = cpu_to_le16(allocated);
2150 err = ext4_ext_get_access(handle, inode, path + depth);
2154 * New (initialized) extent starts from the first block
2155 * in the current extent. i.e., ex2 == ex
2156 * We have to see if it can be merged with the extent
2159 if (ex2 > EXT_FIRST_EXTENT(eh)) {
2161 * To merge left, pass "ex2 - 1" to try_to_merge(),
2162 * since it merges towards right _only_.
2164 ret = ext4_ext_try_to_merge(inode, path, ex2 - 1);
2166 err = ext4_ext_correct_indexes(handle, inode, path);
2169 depth = ext_depth(inode);
2174 * Try to merge towards the right. This might be required
2175 * only when the whole extent is being written to.
2176 * i.e. ex2 == ex and ex3 == NULL.
2179 ret = ext4_ext_try_to_merge(inode, path, ex2);
2181 err = ext4_ext_correct_indexes(handle, inode, path);
2186 /* Mark modified extent as dirty */
2187 err = ext4_ext_dirty(handle, inode, path + depth);
2190 err = ext4_ext_insert_extent(handle, inode, path, &newex);
2192 return err ? err : allocated;
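/*
 * Editor's note: in the full three-way case above, an uninitialized
 * extent [ee_block, ee_block + ee_len) written in the middle becomes
 * ex1 = [ee_block, iblock) uninitialized,
 * ex2 = [iblock, iblock + max_blocks) initialized, and
 * ex3 = [iblock + max_blocks, ee_block + ee_len) uninitialized again.
 */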
2195 int ext4_ext_get_blocks(handle_t *handle, struct inode *inode,
2196 ext4_fsblk_t iblock,
2197 unsigned long max_blocks, struct buffer_head *bh_result,
2198 int create, int extend_disksize)
2200 struct ext4_ext_path *path = NULL;
2201 struct ext4_extent_header *eh;
2202 struct ext4_extent newex, *ex;
2203 ext4_fsblk_t goal, newblock;
2204 int err = 0, depth, ret;
2205 unsigned long allocated = 0;
2207 __clear_bit(BH_New, &bh_result->b_state);
2208 ext_debug("blocks %d/%lu requested for inode %u\n", (int) iblock,
2209 max_blocks, (unsigned) inode->i_ino);
2210 mutex_lock(&EXT4_I(inode)->truncate_mutex);
2212 /* check in cache */
2213 goal = ext4_ext_in_cache(inode, iblock, &newex);
2215 if (goal == EXT4_EXT_CACHE_GAP) {
2218 * block isn't allocated yet and
2219 * user doesn't want to allocate it
2223 /* we should allocate requested block */
2224 } else if (goal == EXT4_EXT_CACHE_EXTENT) {
2225 /* block is already allocated */
2227 - le32_to_cpu(newex.ee_block)
2228 + ext_pblock(&newex);
2229 /* number of remaining blocks in the extent */
2230 allocated = le16_to_cpu(newex.ee_len) -
2231 (iblock - le32_to_cpu(newex.ee_block));
2238 /* find extent for this block */
2239 path = ext4_ext_find_extent(inode, iblock, NULL);
2241 err = PTR_ERR(path);
2246 depth = ext_depth(inode);
2249 * consistent leaf must not be empty;
2250 * this situation is possible, though, _during_ tree modification;
2251 * this is why assert can't be put in ext4_ext_find_extent()
2253 BUG_ON(path[depth].p_ext == NULL && depth != 0);
2254 eh = path[depth].p_hdr;
2256 ex = path[depth].p_ext;
2258 unsigned long ee_block = le32_to_cpu(ex->ee_block);
2259 ext4_fsblk_t ee_start = ext_pblock(ex);
2260 unsigned short ee_len;
2263 * Uninitialized extents are treated as holes, except that
2264 * we split out initialized portions during a write.
2266 ee_len = ext4_ext_get_actual_len(ex);
2267 /* if found extent covers block, simply return it */
2268 if (iblock >= ee_block && iblock < ee_block + ee_len) {
2269 newblock = iblock - ee_block + ee_start;
2270 /* number of remaining blocks in the extent */
2271 allocated = ee_len - (iblock - ee_block);
2272 ext_debug("%d fit into %lu:%d -> %llu\n", (int) iblock,
2273 ee_block, ee_len, newblock);
2275 /* Do not put uninitialized extent in the cache */
2276 if (!ext4_ext_is_uninitialized(ex)) {
2277 ext4_ext_put_in_cache(inode, ee_block,
2279 EXT4_EXT_CACHE_EXTENT);
2282 if (create == EXT4_CREATE_UNINITIALIZED_EXT)
2287 ret = ext4_ext_convert_to_initialized(handle, inode,
2299 * requested block isn't allocated yet;
2300 * we must not try to create one if the create flag is zero
2304 * put just found gap into cache to speed up
2305 * subsequent requests
2307 ext4_ext_put_gap_in_cache(inode, path, iblock);
2311 * Okay, we need to do block allocation. Lazily initialize the block
2312 * allocation info here if necessary.
2314 if (S_ISREG(inode->i_mode) && (!EXT4_I(inode)->i_block_alloc_info))
2315 ext4_init_block_alloc_info(inode);
2317 /* allocate new block */
2318 goal = ext4_ext_find_goal(inode, path, iblock);
2321 * See if request is beyond maximum number of blocks we can have in
2322 * a single extent. For an initialized extent this limit is
2323 * EXT_INIT_MAX_LEN and for an uninitialized extent this limit is
2324 * EXT_UNINIT_MAX_LEN.
2326 if (max_blocks > EXT_INIT_MAX_LEN &&
2327 create != EXT4_CREATE_UNINITIALIZED_EXT)
2328 max_blocks = EXT_INIT_MAX_LEN;
2329 else if (max_blocks > EXT_UNINIT_MAX_LEN &&
2330 create == EXT4_CREATE_UNINITIALIZED_EXT)
2331 max_blocks = EXT_UNINIT_MAX_LEN;
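/*
 * Editor's note: ee_len is 16 bits with the top bit reserved to mark an
 * extent uninitialized, so an initialized extent can cover at most
 * 32768 blocks and an uninitialized one at most 32767.
 */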
2333 /* Check if we can really insert (iblock)::(iblock+max_blocks) extent */
2334 newex.ee_block = cpu_to_le32(iblock);
2335 newex.ee_len = cpu_to_le16(max_blocks);
2336 err = ext4_ext_check_overlap(inode, &newex, path);
2338 allocated = le16_to_cpu(newex.ee_len);
2340 allocated = max_blocks;
2341 newblock = ext4_new_blocks(handle, inode, goal, &allocated, &err);
2344 ext_debug("allocate new block: goal %llu, found %llu/%lu\n",
2345 goal, newblock, allocated);
2347 /* try to insert new extent into found leaf and return */
2348 ext4_ext_store_pblock(&newex, newblock);
2349 newex.ee_len = cpu_to_le16(allocated);
2350 if (create == EXT4_CREATE_UNINITIALIZED_EXT) /* Mark uninitialized */
2351 ext4_ext_mark_uninitialized(&newex);
2352 err = ext4_ext_insert_extent(handle, inode, path, &newex);
2354 /* free data blocks we just allocated */
2355 ext4_free_blocks(handle, inode, ext_pblock(&newex),
2356 le16_to_cpu(newex.ee_len));
2360 if (extend_disksize && inode->i_size > EXT4_I(inode)->i_disksize)
2361 EXT4_I(inode)->i_disksize = inode->i_size;
2363 /* previous routine could use block we allocated */
2364 newblock = ext_pblock(&newex);
2366 __set_bit(BH_New, &bh_result->b_state);
2368 /* Cache only when it is _not_ an uninitialized extent */
2369 if (create != EXT4_CREATE_UNINITIALIZED_EXT)
2370 ext4_ext_put_in_cache(inode, iblock, allocated, newblock,
2371 EXT4_EXT_CACHE_EXTENT);
2373 if (allocated > max_blocks)
2374 allocated = max_blocks;
2375 ext4_ext_show_leaf(inode, path);
2376 __set_bit(BH_Mapped, &bh_result->b_state);
2377 bh_result->b_bdev = inode->i_sb->s_bdev;
2378 bh_result->b_blocknr = newblock;
2381 ext4_ext_drop_refs(path);
2384 mutex_unlock(&EXT4_I(inode)->truncate_mutex);
2386 return err ? err : allocated;
2389 void ext4_ext_truncate(struct inode * inode, struct page *page)
2391 struct address_space *mapping = inode->i_mapping;
2392 struct super_block *sb = inode->i_sb;
2393 unsigned long last_block;
2398 * probably first extent we're gonna free will be last in block
2400 err = ext4_writepage_trans_blocks(inode) + 3;
2401 handle = ext4_journal_start(inode, err);
2402 if (IS_ERR(handle)) {
2404 clear_highpage(page);
2405 flush_dcache_page(page);
2407 page_cache_release(page);
2413 ext4_block_truncate_page(handle, page, mapping, inode->i_size);
2415 mutex_lock(&EXT4_I(inode)->truncate_mutex);
2416 ext4_ext_invalidate_cache(inode);
2419 * TODO: optimization is possible here.
2420 * Probably we need not scan at all,
2421 * because page truncation is enough.
2423 if (ext4_orphan_add(handle, inode))
2426 /* we have to know where to truncate from in crash case */
2427 EXT4_I(inode)->i_disksize = inode->i_size;
2428 ext4_mark_inode_dirty(handle, inode);
2430 last_block = (inode->i_size + sb->s_blocksize - 1)
2431 >> EXT4_BLOCK_SIZE_BITS(sb);
2432 err = ext4_ext_remove_space(inode, last_block);
2434 /* In a multi-transaction truncate, we only make the final
2435 * transaction synchronous.
2442 * If this was a simple ftruncate() and the file will remain alive,
2443 * then we need to clear up the orphan record which we created above.
2444 * However, if this was a real unlink then we were called by
2445 * ext4_delete_inode(), and we allow that function to clean up the
2446 * orphan info for us.
2449 ext4_orphan_del(handle, inode);
2451 mutex_unlock(&EXT4_I(inode)->truncate_mutex);
2452 ext4_journal_stop(handle);
2456 * ext4_ext_writepage_trans_blocks:
2457 * calculate max number of blocks we could modify
2458 * in order to allocate new block for an inode
2460 int ext4_ext_writepage_trans_blocks(struct inode *inode, int num)
2464 needed = ext4_ext_calc_credits_for_insert(inode, NULL);
2466 /* caller wants to allocate num blocks, but note it includes sb */
2467 needed = needed * num - (num - 1);
2470 needed += 2 * EXT4_QUOTA_TRANS_BLOCKS(inode->i_sb);
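/*
 * Example (editor): if one insert costs C credits including the
 * superblock, num inserts share that single superblock credit, hence
 * needed = num * C - (num - 1); e.g. three inserts at a hypothetical
 * C = 12 need 3 * 12 - 2 = 34 credits, before the quota term above.
 */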
2477 * preallocate space for a file. This implements ext4's fallocate inode
2478 * operation, which gets called from sys_fallocate system call.
2479 * For block-mapped files, posix_fallocate should fall back to the method
2480 * of writing zeroes to the required new blocks (the same behavior which is
2481 * expected for file systems which do not support fallocate() system call).
2483 long ext4_fallocate(struct inode *inode, int mode, loff_t offset, loff_t len)
2486 ext4_fsblk_t block, max_blocks;
2487 ext4_fsblk_t nblocks = 0;
2491 struct buffer_head map_bh;
2492 unsigned int credits, blkbits = inode->i_blkbits;
2495 * currently supporting (pre)allocate mode for extent-based
2498 if (!(EXT4_I(inode)->i_flags & EXT4_EXTENTS_FL))
2501 /* preallocation to directories is currently not supported */
2502 if (S_ISDIR(inode->i_mode))
2505 block = offset >> blkbits;
2506 max_blocks = (EXT4_BLOCK_ALIGN(len + offset, blkbits) >> blkbits)
2510 * credits to insert 1 extent into extent tree + buffers to be able to
2511 * modify 1 super block, 1 block bitmap and 1 group descriptor.
2513 credits = EXT4_DATA_TRANS_BLOCKS(inode->i_sb) + 3;
2515 while (ret >= 0 && ret < max_blocks) {
2516 block = block + ret;
2517 max_blocks = max_blocks - ret;
2518 handle = ext4_journal_start(inode, credits);
2519 if (IS_ERR(handle)) {
2520 ret = PTR_ERR(handle);
2524 ret = ext4_ext_get_blocks(handle, inode, block,
2525 max_blocks, &map_bh,
2526 EXT4_CREATE_UNINITIALIZED_EXT, 0);
2529 ext4_error(inode->i_sb, "ext4_fallocate",
2530 "ext4_ext_get_blocks returned 0! inode#%lu"
2531 ", block=%llu, max_blocks=%llu",
2532 inode->i_ino, block, max_blocks);
2534 ext4_mark_inode_dirty(handle, inode);
2535 ret2 = ext4_journal_stop(handle);
2539 /* check wrap through sign-bit/zero here */
2540 if ((block + ret) < 0 || (block + ret) < block) {
2542 ext4_mark_inode_dirty(handle, inode);
2543 ret2 = ext4_journal_stop(handle);
2546 if (buffer_new(&map_bh) && ((block + ret) >
2547 (EXT4_BLOCK_ALIGN(i_size_read(inode), blkbits)
2549 nblocks = nblocks + ret;
2552 /* Update ctime if new blocks get allocated */
2554 struct timespec now;
2556 now = current_fs_time(inode->i_sb);
2557 if (!timespec_equal(&inode->i_ctime, &now))
2558 inode->i_ctime = now;
2561 ext4_mark_inode_dirty(handle, inode);
2562 ret2 = ext4_journal_stop(handle);
2567 if (ret == -ENOSPC && ext4_should_retry_alloc(inode->i_sb, &retries))
2571 * Time to update the file size.
2572 * Update only when preallocation was requested beyond the file size.
2574 if (!(mode & FALLOC_FL_KEEP_SIZE) &&
2575 (offset + len) > i_size_read(inode)) {
2578 * if no error, we assume preallocation succeeded
2581 mutex_lock(&inode->i_mutex);
2582 i_size_write(inode, offset + len);
2583 EXT4_I(inode)->i_disksize = i_size_read(inode);
2584 mutex_unlock(&inode->i_mutex);
2585 } else if (ret < 0 && nblocks) {
2586 /* Handle partial allocation scenario */
2589 mutex_lock(&inode->i_mutex);
2590 newsize = (nblocks << blkbits) + i_size_read(inode);
2591 i_size_write(inode, EXT4_BLOCK_ALIGN(newsize, blkbits));
2592 EXT4_I(inode)->i_disksize = i_size_read(inode);
2593 mutex_unlock(&inode->i_mutex);
2597 return ret > 0 ? ret2 : ret;