1 /*
2  *  linux/fs/ext4/balloc.c
3  *
4  * Copyright (C) 1992, 1993, 1994, 1995
5  * Remy Card (card@masi.ibp.fr)
6  * Laboratoire MASI - Institut Blaise Pascal
7  * Universite Pierre et Marie Curie (Paris VI)
8  *
9  *  Enhanced block allocation by Stephen Tweedie (sct@redhat.com), 1993
10  *  Big-endian to little-endian byte-swapping/bitmaps by
11  *        David S. Miller (davem@caip.rutgers.edu), 1995
12  */
13
14 #include <linux/time.h>
15 #include <linux/capability.h>
16 #include <linux/fs.h>
17 #include <linux/jbd2.h>
18 #include <linux/ext4_fs.h>
19 #include <linux/ext4_jbd2.h>
20 #include <linux/quotaops.h>
21 #include <linux/buffer_head.h>
22
23 /*
24  * balloc.c contains the blocks allocation and deallocation routines
25  */
26
27 /*
28  * Calculate the block group number and offset, given a block number
29  */
30 void ext4_get_group_no_and_offset(struct super_block *sb, ext4_fsblk_t blocknr,
31                 unsigned long *blockgrpp, ext4_grpblk_t *offsetp)
32 {
33         struct ext4_super_block *es = EXT4_SB(sb)->s_es;
34         ext4_grpblk_t offset;
35
36         blocknr = blocknr - le32_to_cpu(es->s_first_data_block);
37         offset = do_div(blocknr, EXT4_BLOCKS_PER_GROUP(sb));
38         if (offsetp)
39                 *offsetp = offset;
40         if (blockgrpp)
41                 *blockgrpp = blocknr;
42
43 }
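
/*
 * A worked example of the mapping above (illustrative only, assuming a
 * filesystem with 4KB blocks, s_first_data_block == 0 and 32768 blocks
 * per group): block 100000 yields 100000 / 32768 = group 3 and
 * 100000 - 3 * 32768 = offset 1696 within that group.  On a 1KB-block
 * filesystem s_first_data_block is 1, so the block number is decremented
 * before the division.
 */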
44
45 /*
46  * The free blocks are managed by bitmaps.  A file system contains several
47  * block groups.  Each group contains 1 bitmap block for blocks, 1 bitmap
48  * block for inodes, N blocks for the inode table and data blocks.
49  *
50  * The file system contains group descriptors which are located after the
51  * super block.  Each descriptor contains the number of the bitmap block and
52  * the free blocks count in the group.  The descriptors are loaded in memory
53  * when a file system is mounted (see ext4_fill_super).
54  */
55
56
57 #define in_range(b, first, len) ((b) >= (first) && (b) <= (first) + (len) - 1)
58
59 /**
60  * ext4_get_group_desc() -- load group descriptor from disk
61  * @sb:                 super block
62  * @block_group:        given block group
63  * @bh:                 pointer to the buffer head to store the block
64  *                      group descriptor
65  */
66 struct ext4_group_desc * ext4_get_group_desc(struct super_block * sb,
67                                              unsigned int block_group,
68                                              struct buffer_head ** bh)
69 {
70         unsigned long group_desc;
71         unsigned long offset;
72         struct ext4_group_desc * desc;
73         struct ext4_sb_info *sbi = EXT4_SB(sb);
74
75         if (block_group >= sbi->s_groups_count) {
76                 ext4_error (sb, "ext4_get_group_desc",
77                             "block_group >= groups_count - "
78                             "block_group = %d, groups_count = %lu",
79                             block_group, sbi->s_groups_count);
80
81                 return NULL;
82         }
83         smp_rmb();
84
85         group_desc = block_group >> EXT4_DESC_PER_BLOCK_BITS(sb);
86         offset = block_group & (EXT4_DESC_PER_BLOCK(sb) - 1);
87         if (!sbi->s_group_desc[group_desc]) {
88                 ext4_error (sb, "ext4_get_group_desc",
89                             "Group descriptor not loaded - "
90                             "block_group = %d, group_desc = %lu, desc = %lu",
91                              block_group, group_desc, offset);
92                 return NULL;
93         }
94
95         desc = (struct ext4_group_desc *)(
96                 (__u8 *)sbi->s_group_desc[group_desc]->b_data +
97                 offset * EXT4_DESC_SIZE(sb));
98         if (bh)
99                 *bh = sbi->s_group_desc[group_desc];
100         return desc;
101 }
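
/*
 * Illustrative example of the descriptor lookup above (not part of the
 * original code): with 4KB blocks and the traditional 32-byte group
 * descriptors, EXT4_DESC_PER_BLOCK(sb) is 128, so block group 300 is
 * found in descriptor block s_group_desc[300 >> 7] == s_group_desc[2],
 * at offset 300 & 127 == 44 within that block.
 */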
102
103 static inline int
104 block_in_use(ext4_fsblk_t block, struct super_block *sb, unsigned char *map)
105 {
106         ext4_grpblk_t offset;
107
108         ext4_get_group_no_and_offset(sb, block, NULL, &offset);
109         return ext4_test_bit (offset, map);
110 }
111
112 /**
113  * read_block_bitmap()
114  * @sb:                 super block
115  * @block_group:        given block group
116  *
117  * Read the block bitmap for a given block_group, and check that the
118  * bitmap, inode bitmap and inode table blocks are marked in use in it.
119  *
120  * Return buffer_head on success or NULL in case of failure.
121  */
122 static struct buffer_head *
123 read_block_bitmap(struct super_block *sb, unsigned int block_group)
124 {
125         int i;
126         struct ext4_group_desc * desc;
127         struct buffer_head * bh = NULL;
128         ext4_fsblk_t bitmap_blk;
129
130         desc = ext4_get_group_desc (sb, block_group, NULL);
131         if (!desc)
132                 return NULL;
133         bitmap_blk = ext4_block_bitmap(sb, desc);
134         bh = sb_bread(sb, bitmap_blk);
135         if (!bh) {
136                 ext4_error(sb, __FUNCTION__, "Cannot read block bitmap - "
137                             "block_group = %d, block_bitmap = %llu",
138                             block_group, bitmap_blk);
139                 return NULL;
140         }
141         /* check whether block bitmap block number is set */
142         if (!block_in_use(bitmap_blk, sb, bh->b_data)) {
143                 /* bad block bitmap */
144                 goto error_out;
145         }
146
147         /* check whether the inode bitmap block number is set */
148         bitmap_blk = ext4_inode_bitmap(sb, desc);
149         if (!block_in_use(bitmap_blk, sb, bh->b_data)) {
150                 /* bad block bitmap */
151                 goto error_out;
152         }
153         /* check whether the inode table block number is set */
154         bitmap_blk = ext4_inode_table(sb, desc);
155         for (i = 0; i < EXT4_SB(sb)->s_itb_per_group; i++, bitmap_blk++) {
156                 if (!block_in_use(bitmap_blk, sb, bh->b_data)) {
157                         /* bad block bitmap */
158                         goto error_out;
159                 }
160         }
161
162         return bh;
163
164 error_out:
165         brelse(bh);
166         ext4_error(sb, __FUNCTION__,
167                         "Invalid block bitmap - "
168                         "block_group = %d, block = %llu",
169                         block_group, bitmap_blk);
170         return NULL;
171
172 }
173 /*
174  * The reservation window structure operations
175  * --------------------------------------------
176  * Operations include:
177  * dump, find, add, remove, is_empty, find_next_reservable_window, etc.
178  *
179  * We use a red-black tree to represent per-filesystem reservation
180  * windows.
181  *
182  */
183
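/*
 * Rough sketch of the data structure assumed below (see the ext4 headers
 * for the authoritative definitions): each ext4_reserve_window_node embeds
 * an rb_node plus a window [rsv_start, rsv_end] in filesystem block
 * numbers, a rsv_goal_size and a rsv_alloc_hit counter.  The tree is
 * ordered by rsv_start and windows are expected never to overlap;
 * __rsv_window_dump() below flags any node that violates that invariant.
 */
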
184 /**
185  * __rsv_window_dump() -- Dump the filesystem block allocation reservation map
186  * @rb_root:            root of per-filesystem reservation rb tree
187  * @verbose:            verbose mode
188  * @fn:                 function which wishes to dump the reservation map
189  *
190  * If verbose is turned on, it will print the whole block reservation
191  * windows (start, end). Otherwise, it will only print out the "bad" windows,
192  * those windows that overlap with their immediate neighbors.
193  */
194 #if 1
195 static void __rsv_window_dump(struct rb_root *root, int verbose,
196                               const char *fn)
197 {
198         struct rb_node *n;
199         struct ext4_reserve_window_node *rsv, *prev;
200         int bad;
201
202 restart:
203         n = rb_first(root);
204         bad = 0;
205         prev = NULL;
206
207         printk("Block Allocation Reservation Windows Map (%s):\n", fn);
208         while (n) {
209                 rsv = rb_entry(n, struct ext4_reserve_window_node, rsv_node);
210                 if (verbose)
211                         printk("reservation window 0x%p "
212                                "start:  %llu, end:  %llu\n",
213                                rsv, rsv->rsv_start, rsv->rsv_end);
214                 if (rsv->rsv_start && rsv->rsv_start >= rsv->rsv_end) {
215                         printk("Bad reservation %p (start >= end)\n",
216                                rsv);
217                         bad = 1;
218                 }
219                 if (prev && prev->rsv_end >= rsv->rsv_start) {
220                         printk("Bad reservation %p (prev->end >= start)\n",
221                                rsv);
222                         bad = 1;
223                 }
224                 if (bad) {
225                         if (!verbose) {
226                                 printk("Restarting reservation walk in verbose mode\n");
227                                 verbose = 1;
228                                 goto restart;
229                         }
230                 }
231                 n = rb_next(n);
232                 prev = rsv;
233         }
234         printk("Window map complete.\n");
235         if (bad)
236                 BUG();
237 }
238 #define rsv_window_dump(root, verbose) \
239         __rsv_window_dump((root), (verbose), __FUNCTION__)
240 #else
241 #define rsv_window_dump(root, verbose) do {} while (0)
242 #endif
243
244 /**
245  * goal_in_my_reservation()
246  * @rsv:                inode's reservation window
247  * @grp_goal:           given goal block relative to the allocation block group
248  * @group:              the current allocation block group
249  * @sb:                 filesystem super block
250  *
251  * Test if the given goal block (group relative) is within the file's
252  * own block reservation window range.
253  *
254  * If the reservation window is outside the goal allocation group, return 0;
255  * grp_goal (given goal block) could be -1, which means no specific
256  * goal block. In this case, always return 1.
257  * If the goal block is within the reservation window, return 1;
258  * otherwise, return 0;
259  */
260 static int
261 goal_in_my_reservation(struct ext4_reserve_window *rsv, ext4_grpblk_t grp_goal,
262                         unsigned int group, struct super_block * sb)
263 {
264         ext4_fsblk_t group_first_block, group_last_block;
265
266         group_first_block = ext4_group_first_block_no(sb, group);
267         group_last_block = group_first_block + (EXT4_BLOCKS_PER_GROUP(sb) - 1);
268
269         if ((rsv->_rsv_start > group_last_block) ||
270             (rsv->_rsv_end < group_first_block))
271                 return 0;
272         if ((grp_goal >= 0) && ((grp_goal + group_first_block < rsv->_rsv_start)
273                 || (grp_goal + group_first_block > rsv->_rsv_end)))
274                 return 0;
275         return 1;
276 }
277
278 /**
279  * search_reserve_window()
280  * @rb_root:            root of reservation tree
281  * @goal:               target allocation block
282  *
283  * Find the reserved window which includes the goal, or the previous one
284  * if the goal is not in any window.
285  * Returns NULL if there are no windows or if all windows start after the goal.
286  */
287 static struct ext4_reserve_window_node *
288 search_reserve_window(struct rb_root *root, ext4_fsblk_t goal)
289 {
290         struct rb_node *n = root->rb_node;
291         struct ext4_reserve_window_node *rsv;
292
293         if (!n)
294                 return NULL;
295
296         do {
297                 rsv = rb_entry(n, struct ext4_reserve_window_node, rsv_node);
298
299                 if (goal < rsv->rsv_start)
300                         n = n->rb_left;
301                 else if (goal > rsv->rsv_end)
302                         n = n->rb_right;
303                 else
304                         return rsv;
305         } while (n);
306         /*
307          * We've fallen off the end of the tree: the goal wasn't inside
308          * any particular node.  OK, the previous node must be to one
309          * side of the interval containing the goal.  If it's the RHS,
310          * we need to back up one.
311          */
312         if (rsv->rsv_start > goal) {
313                 n = rb_prev(&rsv->rsv_node);
314                 rsv = rb_entry(n, struct ext4_reserve_window_node, rsv_node);
315         }
316         return rsv;
317 }
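
/*
 * Example of the lookup semantics above (illustrative): with two windows
 * [10, 20] and [40, 50] in the tree, a goal of 15 returns the [10, 20]
 * node, a goal of 30 also returns [10, 20] (the nearest preceding window),
 * and a goal of 5 returns NULL since every window starts after the goal.
 */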
318
319 /**
320  * ext4_rsv_window_add() -- Insert a window to the block reservation rb tree.
321  * @sb:                 super block
322  * @rsv:                reservation window to add
323  *
324  * Must be called with rsv_lock held.
325  */
326 void ext4_rsv_window_add(struct super_block *sb,
327                     struct ext4_reserve_window_node *rsv)
328 {
329         struct rb_root *root = &EXT4_SB(sb)->s_rsv_window_root;
330         struct rb_node *node = &rsv->rsv_node;
331         ext4_fsblk_t start = rsv->rsv_start;
332
333         struct rb_node ** p = &root->rb_node;
334         struct rb_node * parent = NULL;
335         struct ext4_reserve_window_node *this;
336
337         while (*p)
338         {
339                 parent = *p;
340                 this = rb_entry(parent, struct ext4_reserve_window_node, rsv_node);
341
342                 if (start < this->rsv_start)
343                         p = &(*p)->rb_left;
344                 else if (start > this->rsv_end)
345                         p = &(*p)->rb_right;
346                 else {
347                         rsv_window_dump(root, 1);
348                         BUG();
349                 }
350         }
351
352         rb_link_node(node, parent, p);
353         rb_insert_color(node, root);
354 }
355
356 /**
357  * rsv_window_remove() -- unlink a window from the reservation rb tree
358  * @sb:                 super block
359  * @rsv:                reservation window to remove
360  *
361  * Mark the block reservation window as not allocated, and unlink it
362  * from the filesystem reservation window rb tree. Must be called with
363  * rsv_lock held.
364  */
365 static void rsv_window_remove(struct super_block *sb,
366                               struct ext4_reserve_window_node *rsv)
367 {
368         rsv->rsv_start = EXT4_RESERVE_WINDOW_NOT_ALLOCATED;
369         rsv->rsv_end = EXT4_RESERVE_WINDOW_NOT_ALLOCATED;
370         rsv->rsv_alloc_hit = 0;
371         rb_erase(&rsv->rsv_node, &EXT4_SB(sb)->s_rsv_window_root);
372 }
373
374 /*
375  * rsv_is_empty() -- Check whether the reservation window is unallocated.
376  * @rsv:                given reservation window to check
377  *
378  * returns 1 if the end block is EXT4_RESERVE_WINDOW_NOT_ALLOCATED.
379  */
380 static inline int rsv_is_empty(struct ext4_reserve_window *rsv)
381 {
382         /* a valid reservation end block could not be 0 */
383         return rsv->_rsv_end == EXT4_RESERVE_WINDOW_NOT_ALLOCATED;
384 }
385
386 /**
387  * ext4_init_block_alloc_info()
388  * @inode:              file inode structure
389  *
390  * Allocate and initialize the reservation window structure, and
391  * then link the window to the ext4 inode structure.
392  *
393  * The reservation window structure is only dynamically allocated
394  * and linked to the ext4 inode the first time the open file
395  * needs a new block. So, before every ext4_new_block(s) call, for
396  * regular files, we should check whether the reservation window
397  * structure exists or not. If it does not, this function is called.
398  * Failing to do so will result in block reservation being turned off
399  * for that open file.
400  *
401  * This function is called from ext4_get_blocks_handle(), and also
402  * when setting the reservation window size through ioctl before the file
403  * is opened for write (needs block allocation).
404  *
405  * Needs truncate_mutex protection prior to calling this function.
406  */
407 void ext4_init_block_alloc_info(struct inode *inode)
408 {
409         struct ext4_inode_info *ei = EXT4_I(inode);
410         struct ext4_block_alloc_info *block_i = ei->i_block_alloc_info;
411         struct super_block *sb = inode->i_sb;
412
413         block_i = kmalloc(sizeof(*block_i), GFP_NOFS);
414         if (block_i) {
415                 struct ext4_reserve_window_node *rsv = &block_i->rsv_window_node;
416
417                 rsv->rsv_start = EXT4_RESERVE_WINDOW_NOT_ALLOCATED;
418                 rsv->rsv_end = EXT4_RESERVE_WINDOW_NOT_ALLOCATED;
419
420                 /*
421                  * if filesystem is mounted with NORESERVATION, the goal
422                  * reservation window size is set to zero to indicate
423                  * block reservation is off
424                  */
425                 if (!test_opt(sb, RESERVATION))
426                         rsv->rsv_goal_size = 0;
427                 else
428                         rsv->rsv_goal_size = EXT4_DEFAULT_RESERVE_BLOCKS;
429                 rsv->rsv_alloc_hit = 0;
430                 block_i->last_alloc_logical_block = 0;
431                 block_i->last_alloc_physical_block = 0;
432         }
433         ei->i_block_alloc_info = block_i;
434 }
435
436 /**
437  * ext4_discard_reservation()
438  * @inode:              inode
439  *
440  * Discard (free) the block reservation window on last file close,
441  * on truncate, or at last iput().
442  *
443  * It is called in three cases:
444  *      ext4_release_file(): last writer closes the file
445  *      ext4_clear_inode(): last iput(), when nobody links to this file.
446  *      ext4_truncate(): when the block indirect map is about to change.
447  *
448  */
449 void ext4_discard_reservation(struct inode *inode)
450 {
451         struct ext4_inode_info *ei = EXT4_I(inode);
452         struct ext4_block_alloc_info *block_i = ei->i_block_alloc_info;
453         struct ext4_reserve_window_node *rsv;
454         spinlock_t *rsv_lock = &EXT4_SB(inode->i_sb)->s_rsv_window_lock;
455
456         if (!block_i)
457                 return;
458
459         rsv = &block_i->rsv_window_node;
460         if (!rsv_is_empty(&rsv->rsv_window)) {
461                 spin_lock(rsv_lock);
462                 if (!rsv_is_empty(&rsv->rsv_window))
463                         rsv_window_remove(inode->i_sb, rsv);
464                 spin_unlock(rsv_lock);
465         }
466 }
467
468 /**
469  * ext4_free_blocks_sb() -- Free given blocks and update quota
470  * @handle:                     handle to this transaction
471  * @sb:                         super block
472  * @block:                      start physical block to free
473  * @count:                      number of blocks to free
474  * @pdquot_freed_blocks:        pointer to quota
475  */
476 void ext4_free_blocks_sb(handle_t *handle, struct super_block *sb,
477                          ext4_fsblk_t block, unsigned long count,
478                          unsigned long *pdquot_freed_blocks)
479 {
480         struct buffer_head *bitmap_bh = NULL;
481         struct buffer_head *gd_bh;
482         unsigned long block_group;
483         ext4_grpblk_t bit;
484         unsigned long i;
485         unsigned long overflow;
486         struct ext4_group_desc * desc;
487         struct ext4_super_block * es;
488         struct ext4_sb_info *sbi;
489         int err = 0, ret;
490         ext4_grpblk_t group_freed;
491
492         *pdquot_freed_blocks = 0;
493         sbi = EXT4_SB(sb);
494         es = sbi->s_es;
495         if (block < le32_to_cpu(es->s_first_data_block) ||
496             block + count < block ||
497             block + count > ext4_blocks_count(es)) {
498                 ext4_error (sb, "ext4_free_blocks",
499                             "Freeing blocks not in datazone - "
500                             "block = %llu, count = %lu", block, count);
501                 goto error_return;
502         }
503
504         ext4_debug ("freeing block(s) %llu-%llu\n", block, block + count - 1);
505
506 do_more:
507         overflow = 0;
508         ext4_get_group_no_and_offset(sb, block, &block_group, &bit);
509         /*
510          * Check to see if we are freeing blocks across a group
511          * boundary.
512          */
513         if (bit + count > EXT4_BLOCKS_PER_GROUP(sb)) {
514                 overflow = bit + count - EXT4_BLOCKS_PER_GROUP(sb);
515                 count -= overflow;
516         }
517         brelse(bitmap_bh);
518         bitmap_bh = read_block_bitmap(sb, block_group);
519         if (!bitmap_bh)
520                 goto error_return;
521         desc = ext4_get_group_desc (sb, block_group, &gd_bh);
522         if (!desc)
523                 goto error_return;
524
525         if (in_range(ext4_block_bitmap(sb, desc), block, count) ||
526             in_range(ext4_inode_bitmap(sb, desc), block, count) ||
527             in_range(block, ext4_inode_table(sb, desc), sbi->s_itb_per_group) ||
528             in_range(block + count - 1, ext4_inode_table(sb, desc),
529                      sbi->s_itb_per_group))
530                 ext4_error (sb, "ext4_free_blocks",
531                             "Freeing blocks in system zones - "
532                             "Block = %llu, count = %lu",
533                             block, count);
534
535         /*
536          * We are about to start releasing blocks in the bitmap,
537          * so we need undo access.
538          */
539         /* @@@ check errors */
540         BUFFER_TRACE(bitmap_bh, "getting undo access");
541         err = ext4_journal_get_undo_access(handle, bitmap_bh);
542         if (err)
543                 goto error_return;
544
545         /*
546          * We are about to modify some metadata.  Call the journal APIs
547          * to unshare ->b_data if a currently-committing transaction is
548          * using it
549          */
550         BUFFER_TRACE(gd_bh, "get_write_access");
551         err = ext4_journal_get_write_access(handle, gd_bh);
552         if (err)
553                 goto error_return;
554
555         jbd_lock_bh_state(bitmap_bh);
556
557         for (i = 0, group_freed = 0; i < count; i++) {
558                 /*
559                  * An HJ special.  This is expensive...
560                  */
561 #ifdef CONFIG_JBD2_DEBUG
562                 jbd_unlock_bh_state(bitmap_bh);
563                 {
564                         struct buffer_head *debug_bh;
565                         debug_bh = sb_find_get_block(sb, block + i);
566                         if (debug_bh) {
567                                 BUFFER_TRACE(debug_bh, "Deleted!");
568                                 if (!bh2jh(bitmap_bh)->b_committed_data)
569                                         BUFFER_TRACE(debug_bh,
570                                                 "No committed data in bitmap");
571                                 BUFFER_TRACE2(debug_bh, bitmap_bh, "bitmap");
572                                 __brelse(debug_bh);
573                         }
574                 }
575                 jbd_lock_bh_state(bitmap_bh);
576 #endif
577                 if (need_resched()) {
578                         jbd_unlock_bh_state(bitmap_bh);
579                         cond_resched();
580                         jbd_lock_bh_state(bitmap_bh);
581                 }
582                 /* @@@ This prevents newly-allocated data from being
583                  * freed and then reallocated within the same
584                  * transaction.
585                  *
586                  * Ideally we would want to allow that to happen, but to
587                  * do so requires making jbd2_journal_forget() capable of
588                  * revoking the queued write of a data block, which
589                  * implies blocking on the journal lock.  *forget()
590                  * cannot block due to truncate races.
591                  *
592                  * Eventually we can fix this by making jbd2_journal_forget()
593                  * return a status indicating whether or not it was able
594                  * to revoke the buffer.  On successful revoke, it is
595                  * safe not to set the allocation bit in the committed
596                  * bitmap, because we know that there is no outstanding
597                  * activity on the buffer any more and so it is safe to
598                  * reallocate it.
599                  */
600                 BUFFER_TRACE(bitmap_bh, "set in b_committed_data");
601                 J_ASSERT_BH(bitmap_bh,
602                                 bh2jh(bitmap_bh)->b_committed_data != NULL);
603                 ext4_set_bit_atomic(sb_bgl_lock(sbi, block_group), bit + i,
604                                 bh2jh(bitmap_bh)->b_committed_data);
605
606                 /*
607                  * We clear the bit in the bitmap after setting the committed
608                  * data bit, because this is the reverse order to that which
609                  * the allocator uses.
610                  */
611                 BUFFER_TRACE(bitmap_bh, "clear bit");
612                 if (!ext4_clear_bit_atomic(sb_bgl_lock(sbi, block_group),
613                                                 bit + i, bitmap_bh->b_data)) {
614                         jbd_unlock_bh_state(bitmap_bh);
615                         ext4_error(sb, __FUNCTION__,
616                                    "bit already cleared for block %llu",
617                                    (ext4_fsblk_t)(block + i));
618                         jbd_lock_bh_state(bitmap_bh);
619                         BUFFER_TRACE(bitmap_bh, "bit already cleared");
620                 } else {
621                         group_freed++;
622                 }
623         }
624         jbd_unlock_bh_state(bitmap_bh);
625
626         spin_lock(sb_bgl_lock(sbi, block_group));
627         desc->bg_free_blocks_count =
628                 cpu_to_le16(le16_to_cpu(desc->bg_free_blocks_count) +
629                         group_freed);
630         spin_unlock(sb_bgl_lock(sbi, block_group));
631         percpu_counter_add(&sbi->s_freeblocks_counter, count);
632
633         /* We dirtied the bitmap block */
634         BUFFER_TRACE(bitmap_bh, "dirtied bitmap block");
635         err = ext4_journal_dirty_metadata(handle, bitmap_bh);
636
637         /* And the group descriptor block */
638         BUFFER_TRACE(gd_bh, "dirtied group descriptor block");
639         ret = ext4_journal_dirty_metadata(handle, gd_bh);
640         if (!err) err = ret;
641         *pdquot_freed_blocks += group_freed;
642
643         if (overflow && !err) {
644                 block += count;
645                 count = overflow;
646                 goto do_more;
647         }
648         sb->s_dirt = 1;
649 error_return:
650         brelse(bitmap_bh);
651         ext4_std_error(sb, err);
652         return;
653 }
654
655 /**
656  * ext4_free_blocks() -- Free given blocks and update quota
657  * @handle:             handle for this transaction
658  * @inode:              inode
659  * @block:              start physical block to free
660  * @count:              number of blocks to free
661  */
662 void ext4_free_blocks(handle_t *handle, struct inode *inode,
663                         ext4_fsblk_t block, unsigned long count)
664 {
665         struct super_block * sb;
666         unsigned long dquot_freed_blocks;
667
668         sb = inode->i_sb;
669         if (!sb) {
670                 printk("ext4_free_blocks: nonexistent device\n");
671                 return;
672         }
673         ext4_free_blocks_sb(handle, sb, block, count, &dquot_freed_blocks);
674         if (dquot_freed_blocks)
675                 DQUOT_FREE_BLOCK(inode, dquot_freed_blocks);
676         return;
677 }
678
679 /**
680  * ext4_test_allocatable()
681  * @nr:                 given block number (group relative) to test
682  * @bh:                 bufferhead containing the bitmap of the given block group
683  *
684  * For ext4 allocations, we must not reuse any blocks which are
685  * allocated in the bitmap buffer's "last committed data" copy.  This
686  * prevents deletes from freeing up the page for reuse until we have
687  * committed the delete transaction.
688  *
689  * If we didn't do this, then deleting something and reallocating it as
690  * data would allow the old block to be overwritten before the
691  * transaction committed (because we force data to disk before commit).
692  * This would lead to corruption if we crashed between overwriting the
693  * data and committing the delete.
694  *
695  * @@@ We may want to make this allocation behaviour conditional on
696  * data-writes at some point, and disable it for metadata allocations or
697  * sync-data inodes.
698  */
699 static int ext4_test_allocatable(ext4_grpblk_t nr, struct buffer_head *bh)
700 {
701         int ret;
702         struct journal_head *jh = bh2jh(bh);
703
704         if (ext4_test_bit(nr, bh->b_data))
705                 return 0;
706
707         jbd_lock_bh_state(bh);
708         if (!jh->b_committed_data)
709                 ret = 1;
710         else
711                 ret = !ext4_test_bit(nr, jh->b_committed_data);
712         jbd_unlock_bh_state(bh);
713         return ret;
714 }
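
/*
 * In other words (explanatory note): a bit that is clear in the on-disk
 * bitmap but still set in the journal's b_committed_data copy belongs to
 * a block freed earlier in the current transaction; it is reported as not
 * allocatable so it cannot be reused before that transaction commits.
 */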
715
716 /**
717  * bitmap_search_next_usable_block()
718  * @start:              the starting block (group relative) of the search
719  * @bh:                 bufferhead contains the block group bitmap
720  * @maxblocks:          the ending block (group relative) of the reservation
721  *
722  * The bitmap search --- search forward alternately through the actual
723  * bitmap on disk and the last-committed copy in journal, until we find a
724  * bit free in both bitmaps.
725  */
726 static ext4_grpblk_t
727 bitmap_search_next_usable_block(ext4_grpblk_t start, struct buffer_head *bh,
728                                         ext4_grpblk_t maxblocks)
729 {
730         ext4_grpblk_t next;
731         struct journal_head *jh = bh2jh(bh);
732
733         while (start < maxblocks) {
734                 next = ext4_find_next_zero_bit(bh->b_data, maxblocks, start);
735                 if (next >= maxblocks)
736                         return -1;
737                 if (ext4_test_allocatable(next, bh))
738                         return next;
739                 jbd_lock_bh_state(bh);
740                 if (jh->b_committed_data)
741                         start = ext4_find_next_zero_bit(jh->b_committed_data,
742                                                         maxblocks, next);
743                 jbd_unlock_bh_state(bh);
744         }
745         return -1;
746 }
747
748 /**
749  * find_next_usable_block()
750  * @start:              the starting block (group relative) to find next
751  *                      allocatable block in bitmap.
752  * @bh:                 bufferhead contains the block group bitmap
753  * @maxblocks:          the ending block (group relative) for the search
754  *
755  * Find an allocatable block in a bitmap.  We honor both the bitmap and
756  * its last-committed copy (if that exists), and perform the "most
757  * appropriate allocation" algorithm of looking for a free block near
758  * the initial goal; then for a free byte somewhere in the bitmap; then
759  * for any free bit in the bitmap.
760  */
761 static ext4_grpblk_t
762 find_next_usable_block(ext4_grpblk_t start, struct buffer_head *bh,
763                         ext4_grpblk_t maxblocks)
764 {
765         ext4_grpblk_t here, next;
766         char *p, *r;
767
768         if (start > 0) {
769                 /*
770                  * The goal was occupied; search forward for a free
771                  * block within the next XX blocks.
772                  *
773                  * end_goal is more or less random, but it has to be
774                  * less than EXT4_BLOCKS_PER_GROUP. Aligning up to the
775                  * next 64-bit boundary is simple..
776                  */
777                 ext4_grpblk_t end_goal = (start + 63) & ~63;
778                 if (end_goal > maxblocks)
779                         end_goal = maxblocks;
780                 here = ext4_find_next_zero_bit(bh->b_data, end_goal, start);
781                 if (here < end_goal && ext4_test_allocatable(here, bh))
782                         return here;
783                 ext4_debug("Bit not found near goal\n");
784         }
785
786         here = start;
787         if (here < 0)
788                 here = 0;
789
790         p = ((char *)bh->b_data) + (here >> 3);
791         r = memscan(p, 0, ((maxblocks + 7) >> 3) - (here >> 3));
792         next = (r - ((char *)bh->b_data)) << 3;
793
794         if (next < maxblocks && next >= start && ext4_test_allocatable(next, bh))
795                 return next;
796
797         /*
798          * The bitmap search --- search forward alternately through the actual
799          * bitmap and the last-committed copy until we find a bit free in
800          * both
801          */
802         here = bitmap_search_next_usable_block(here, bh, maxblocks);
803         return here;
804 }
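
/*
 * Note on the middle phase above (explanatory, not in the original):
 * memscan(p, 0, len) returns the address of the first zero byte, i.e. a
 * byte-aligned run of 8 free blocks in the on-disk bitmap; the
 * "(r - b_data) << 3" conversion turns that byte offset back into a bit
 * number, which is then re-checked against the committed copy via
 * ext4_test_allocatable() before it is used.
 */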
805
806 /**
807  * claim_block()
808  * @block:              the free block (group relative) to allocate
809  * @bh:                 the bufferhead containing the block group bitmap
810  *
811  * We think we can allocate this block in this bitmap.  Try to set the bit.
812  * If that succeeds then check that nobody has allocated and then freed the
813  * block since we saw that it was not marked in b_committed_data.  If it _was_
814  * allocated and freed then clear the bit in the bitmap again and return
815  * zero (failure).
816  */
817 static inline int
818 claim_block(spinlock_t *lock, ext4_grpblk_t block, struct buffer_head *bh)
819 {
820         struct journal_head *jh = bh2jh(bh);
821         int ret;
822
823         if (ext4_set_bit_atomic(lock, block, bh->b_data))
824                 return 0;
825         jbd_lock_bh_state(bh);
826         if (jh->b_committed_data && ext4_test_bit(block,jh->b_committed_data)) {
827                 ext4_clear_bit_atomic(lock, block, bh->b_data);
828                 ret = 0;
829         } else {
830                 ret = 1;
831         }
832         jbd_unlock_bh_state(bh);
833         return ret;
834 }
835
836 /**
837  * ext4_try_to_allocate()
838  * @sb:                 superblock
839  * @handle:             handle to this transaction
840  * @group:              given allocation block group
841  * @bitmap_bh:          bufferhead holds the block bitmap
842  * @grp_goal:           given target block within the group
843  * @count:              target number of blocks to allocate
844  * @my_rsv:             reservation window
845  *
846  * Attempt to allocate blocks within a given range. Set the range of allocation
847  * first, then find the first free bit(s) from the bitmap (within the range),
848  * and finally allocate the blocks by claiming the found free bits as allocated.
849  *
850  * To set the range of this allocation:
851  *      if there is a reservation window, only try to allocate block(s) from the
852  *      file's own reservation window;
853  *      otherwise, the allocation range starts from the given goal block and ends
854  *      at the block group's last block.
855  *
856  * If we failed to allocate the desired block then we may end up crossing to a
857  * new bitmap.  In that case we must release write access to the old one via
858  * ext4_journal_release_buffer(), else we'll run out of credits.
859  */
860 static ext4_grpblk_t
861 ext4_try_to_allocate(struct super_block *sb, handle_t *handle, int group,
862                         struct buffer_head *bitmap_bh, ext4_grpblk_t grp_goal,
863                         unsigned long *count, struct ext4_reserve_window *my_rsv)
864 {
865         ext4_fsblk_t group_first_block;
866         ext4_grpblk_t start, end;
867         unsigned long num = 0;
868
869         /* we do allocation within the reservation window if we have a window */
870         if (my_rsv) {
871                 group_first_block = ext4_group_first_block_no(sb, group);
872                 if (my_rsv->_rsv_start >= group_first_block)
873                         start = my_rsv->_rsv_start - group_first_block;
874                 else
875                         /* reservation window cross group boundary */
876                         start = 0;
877                 end = my_rsv->_rsv_end - group_first_block + 1;
878                 if (end > EXT4_BLOCKS_PER_GROUP(sb))
879                         /* reservation window crosses group boundary */
880                         end = EXT4_BLOCKS_PER_GROUP(sb);
881                 if ((start <= grp_goal) && (grp_goal < end))
882                         start = grp_goal;
883                 else
884                         grp_goal = -1;
885         } else {
886                 if (grp_goal > 0)
887                         start = grp_goal;
888                 else
889                         start = 0;
890                 end = EXT4_BLOCKS_PER_GROUP(sb);
891         }
892
893         BUG_ON(start > EXT4_BLOCKS_PER_GROUP(sb));
894
895 repeat:
896         if (grp_goal < 0 || !ext4_test_allocatable(grp_goal, bitmap_bh)) {
897                 grp_goal = find_next_usable_block(start, bitmap_bh, end);
898                 if (grp_goal < 0)
899                         goto fail_access;
900                 if (!my_rsv) {
901                         int i;
902
903                         for (i = 0; i < 7 && grp_goal > start &&
904                                         ext4_test_allocatable(grp_goal - 1,
905                                                                 bitmap_bh);
906                                         i++, grp_goal--)
907                                 ;
908                 }
909         }
910         start = grp_goal;
911
912         if (!claim_block(sb_bgl_lock(EXT4_SB(sb), group),
913                 grp_goal, bitmap_bh)) {
914                 /*
915                  * The block was allocated by another thread, or it was
916                  * allocated and then freed by another thread
917                  */
918                 start++;
919                 grp_goal++;
920                 if (start >= end)
921                         goto fail_access;
922                 goto repeat;
923         }
924         num++;
925         grp_goal++;
926         while (num < *count && grp_goal < end
927                 && ext4_test_allocatable(grp_goal, bitmap_bh)
928                 && claim_block(sb_bgl_lock(EXT4_SB(sb), group),
929                                 grp_goal, bitmap_bh)) {
930                 num++;
931                 grp_goal++;
932         }
933         *count = num;
934         return grp_goal - num;
935 fail_access:
936         *count = num;
937         return -1;
938 }
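
/*
 * Worked example of the window clipping above (illustrative numbers):
 * if the group starts at block 98304 with 32768 blocks per group and the
 * reservation window is [98300, 98400], then start becomes 0 (the window
 * begins before this group) and end becomes 98400 - 98304 + 1 = 97, so
 * only the in-group portion of the window is searched.  A grp_goal of 50
 * falls inside [0, 97) and is kept; a grp_goal of 200 would be dropped
 * (set to -1).
 */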
939
940 /**
941  *      find_next_reservable_window():
942  *              find a reservable space within the given range.
943  *              It does not allocate the reservation window for now:
944  *              alloc_new_reservation() will do the work later.
945  *
946  *      @search_head: the head of the searching list;
947  *              This is not necessarily the list head of the whole filesystem
948  *
949  *              We have both head and start_block to assist the search
950  *              for the reservable space. The list starts from head,
951  *              but we will shift to the place where start_block is,
952  *              then start from there, when looking for a reservable space.
953  *
954  *      @size: the target new reservation window size
955  *
956  *      @group_first_block: the first block we consider to start
957  *                      the real search from
958  *
959  *      @last_block:
960  *              the maximum block number that our goal reservable space
961  *              could start from. This is normally the last block in this
962  *              group. The search ends when we find that the start of the
963  *              next possible reservable space is beyond this boundary.
964  *              This handles requests for a reservation window that crosses
965  *              a group boundary.
966  *
967  *      Basically we search the given range (start_block, last_block),
968  *      rather than the whole per-filesystem reservation tree, to find a
969  *      free region that is of the requested size and has not
970  *      been reserved.
971  *
972  */
973 static int find_next_reservable_window(
974                                 struct ext4_reserve_window_node *search_head,
975                                 struct ext4_reserve_window_node *my_rsv,
976                                 struct super_block * sb,
977                                 ext4_fsblk_t start_block,
978                                 ext4_fsblk_t last_block)
979 {
980         struct rb_node *next;
981         struct ext4_reserve_window_node *rsv, *prev;
982         ext4_fsblk_t cur;
983         int size = my_rsv->rsv_goal_size;
984
985         /* TODO: make the start of the reservation window byte-aligned */
986         /* cur = *start_block & ~7;*/
987         cur = start_block;
988         rsv = search_head;
989         if (!rsv)
990                 return -1;
991
992         while (1) {
993                 if (cur <= rsv->rsv_end)
994                         cur = rsv->rsv_end + 1;
995
996                 /* TODO?
997                  * in the case we could not find a reservable space
998                  * that is what is expected, during the re-search, we could
999                  * remember what's the largest reservable space we could have
1000                  * and return that one.
1001                  *
1002                  * For now it will fail if we could not find the reservable
1003                  * space with expected-size (or more)...
1004                  */
1005                 if (cur > last_block)
1006                         return -1;              /* fail */
1007
1008                 prev = rsv;
1009                 next = rb_next(&rsv->rsv_node);
1010                 rsv = rb_entry(next,struct ext4_reserve_window_node,rsv_node);
1011
1012                 /*
1013                  * Reached the last reservation, we can just append to the
1014                  * previous one.
1015                  */
1016                 if (!next)
1017                         break;
1018
1019                 if (cur + size <= rsv->rsv_start) {
1020                         /*
1021                          * Found a reservable space big enough.  We could
1022                          * have a reservation across the group boundary here
1023                          */
1024                         break;
1025                 }
1026         }
1027         /*
1028          * We come here either:
1029          * when we reach the end of the whole list and there is empty
1030          * reservable space after the last entry in the list, in which
1031          * case we append to the end of the list;
1032          *
1033          * or when we find a reservable space in the middle of the list,
1034          * in which case we return the reservation window that we could
1035          * append to. Either way we succeed.
1036          */
1037
1038         if ((prev != my_rsv) && (!rsv_is_empty(&my_rsv->rsv_window)))
1039                 rsv_window_remove(sb, my_rsv);
1040
1041         /*
1042          * Let's book the whole available window for now.  We will check the
1043          * disk bitmap later and then, if there are free blocks, we adjust
1044          * the window size if it's larger than requested.
1045          * Otherwise, we will remove this node from the tree the next time
1046          * we call find_next_reservable_window().
1047          */
1048         my_rsv->rsv_start = cur;
1049         my_rsv->rsv_end = cur + size - 1;
1050         my_rsv->rsv_alloc_hit = 0;
1051
1052         if (prev != my_rsv)
1053                 ext4_rsv_window_add(sb, my_rsv);
1054
1055         return 0;
1056 }
1057
1058 /**
1059  *      alloc_new_reservation()--allocate a new reservation window
1060  *
1061  *              To make a new reservation, we search part of the filesystem
1062  *              reservation list (the list inside the group). We try to
1063  *              allocate a new reservation window near the allocation goal,
1064  *              or the beginning of the group, if there is no goal.
1065  *
1066  *              We first find a reservable space after the goal, then from
1067  *              there, we check the bitmap for the first free block after
1068  *              it. If there is no free block until the end of the group, the
1069  *              whole group is full and we fail. Otherwise, we check whether the
1070  *              free block is inside the expected reservable space; if so, we
1071  *              succeed.
1072  *              If the first free block is outside the reservable space, then
1073  *              start from the first free block, we search for next available
1074  *              space, and go on.
1075  *
1076  *      On success, a new reservation will be found and inserted into the list.
1077  *      It contains at least one free block, and it does not overlap with other
1078  *      reservation windows.
1079  *
1080  *      On failure, we failed to find a reservation window in this group.
1081  *
1082  *      @rsv: the reservation
1083  *
1084  *      @grp_goal: The goal (group-relative).  It is where the search for a
1085  *              free reservable space should start from.
1086  *              If we have a grp_goal (grp_goal > 0), then start from there;
1087  *              with no grp_goal (grp_goal = -1), we start from the first block
1088  *              of the group.
1089  *
1090  *      @sb: the super block
1091  *      @group: the group we are trying to allocate in
1092  *      @bitmap_bh: the block group block bitmap
1093  *
1094  */
1095 static int alloc_new_reservation(struct ext4_reserve_window_node *my_rsv,
1096                 ext4_grpblk_t grp_goal, struct super_block *sb,
1097                 unsigned int group, struct buffer_head *bitmap_bh)
1098 {
1099         struct ext4_reserve_window_node *search_head;
1100         ext4_fsblk_t group_first_block, group_end_block, start_block;
1101         ext4_grpblk_t first_free_block;
1102         struct rb_root *fs_rsv_root = &EXT4_SB(sb)->s_rsv_window_root;
1103         unsigned long size;
1104         int ret;
1105         spinlock_t *rsv_lock = &EXT4_SB(sb)->s_rsv_window_lock;
1106
1107         group_first_block = ext4_group_first_block_no(sb, group);
1108         group_end_block = group_first_block + (EXT4_BLOCKS_PER_GROUP(sb) - 1);
1109
1110         if (grp_goal < 0)
1111                 start_block = group_first_block;
1112         else
1113                 start_block = grp_goal + group_first_block;
1114
1115         size = my_rsv->rsv_goal_size;
1116
1117         if (!rsv_is_empty(&my_rsv->rsv_window)) {
1118                 /*
1119                  * If the old reservation crosses the group boundary
1120                  * and the goal is inside the old reservation window,
1121                  * we will come here when we just failed to allocate from
1122                  * the first part of the window. We still have another part
1123                  * that belongs to the next group. In this case, there is no
1124                  * point in discarding our window and trying to allocate a
1125                  * new one in this group (which will fail); we should
1126                  * keep the reservation window and simply move on.
1127                  *
1128                  * Maybe we could shift the start block of the reservation
1129                  * window to the first block of next group.
1130                  */
1131
1132                 if ((my_rsv->rsv_start <= group_end_block) &&
1133                                 (my_rsv->rsv_end > group_end_block) &&
1134                                 (start_block >= my_rsv->rsv_start))
1135                         return -1;
1136
1137                 if ((my_rsv->rsv_alloc_hit >
1138                      (my_rsv->rsv_end - my_rsv->rsv_start + 1) / 2)) {
1139                         /*
1140                          * if the previous allocation hit ratio is
1141                          * greater than 1/2, then we double the size of
1142                          * the reservation window the next time,
1143                          * otherwise we keep the same size window
1144                          */
1145                         size = size * 2;
1146                         if (size > EXT4_MAX_RESERVE_BLOCKS)
1147                                 size = EXT4_MAX_RESERVE_BLOCKS;
1148                         my_rsv->rsv_goal_size = size;
1149                 }
1150         }
1151
1152         spin_lock(rsv_lock);
1153         /*
1154          * shift the search start to the window near the goal block
1155          */
1156         search_head = search_reserve_window(fs_rsv_root, start_block);
1157
1158         /*
1159          * find_next_reservable_window() simply finds a reservable window
1160          * inside the given range(start_block, group_end_block).
1161          *
1162          * To make sure the reservation window has a free bit inside it, we
1163          * need to check the bitmap after we found a reservable window.
1164          */
1165 retry:
1166         ret = find_next_reservable_window(search_head, my_rsv, sb,
1167                                                 start_block, group_end_block);
1168
1169         if (ret == -1) {
1170                 if (!rsv_is_empty(&my_rsv->rsv_window))
1171                         rsv_window_remove(sb, my_rsv);
1172                 spin_unlock(rsv_lock);
1173                 return -1;
1174         }
1175
1176         /*
1177          * On success, find_next_reservable_window() returns the
1178          * reservation window where there is a reservable space after it.
1179          * Before we reserve this reservable space, we need
1180          * to make sure there is at least a free block inside this region.
1181          *
1182          * Search the first free bit on the block bitmap and the copy of the
1183          * last committed bitmap alternately, until we find an allocatable
1184          * block. The search starts from the start block of the reservable
1185          * space we just found.
1186          */
1187         spin_unlock(rsv_lock);
1188         first_free_block = bitmap_search_next_usable_block(
1189                         my_rsv->rsv_start - group_first_block,
1190                         bitmap_bh, group_end_block - group_first_block + 1);
1191
1192         if (first_free_block < 0) {
1193                 /*
1194                  * no free block left on the bitmap, no point
1195                  * to reserve the space. return failed.
1196                  */
1197                 spin_lock(rsv_lock);
1198                 if (!rsv_is_empty(&my_rsv->rsv_window))
1199                         rsv_window_remove(sb, my_rsv);
1200                 spin_unlock(rsv_lock);
1201                 return -1;              /* failed */
1202         }
1203
1204         start_block = first_free_block + group_first_block;
1205         /*
1206          * check if the first free block is within the
1207          * free space we just reserved
1208          */
1209         if (start_block >= my_rsv->rsv_start && start_block <= my_rsv->rsv_end)
1210                 return 0;               /* success */
1211         /*
1212          * if the first free bit we found is out of the reservable space
1213          * continue search for next reservable space,
1214          * start from where the free block is,
1215          * we also shift the list head to where we stopped last time
1216          */
1217         search_head = my_rsv;
1218         spin_lock(rsv_lock);
1219         goto retry;
1220 }
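
/*
 * Summary of the loop above (explanatory note): we pick a candidate
 * window with find_next_reservable_window(), drop rsv_lock, and verify
 * with bitmap_search_next_usable_block() that the window actually
 * contains a free block.  If the first free block found lies beyond the
 * candidate window, the search restarts from that block with my_rsv as
 * the new search head; if no free block remains in the group, the window
 * is removed and -1 is returned.
 */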
1221
1222 /**
1223  * try_to_extend_reservation()
1224  * @my_rsv:             given reservation window
1225  * @sb:                 super block
1226  * @size:               the delta to extend
1227  *
1228  * Attempt to expand the reservation window so that it is large enough
1229  * to hold the required number of free blocks.
1230  *
1231  * Since ext4_try_to_allocate() will always allocate blocks within
1232  * the reservation window range, if the window size is too small,
1233  * multiple block allocation has to stop at the end of the reservation
1234  * window. To make this more efficient, given the total number of
1235  * blocks needed and the current size of the window, we try to
1236  * expand the reservation window size if necessary on a best-effort
1237  * basis before ext4_new_blocks() tries to allocate blocks.
1238  */
1239 static void try_to_extend_reservation(struct ext4_reserve_window_node *my_rsv,
1240                         struct super_block *sb, int size)
1241 {
1242         struct ext4_reserve_window_node *next_rsv;
1243         struct rb_node *next;
1244         spinlock_t *rsv_lock = &EXT4_SB(sb)->s_rsv_window_lock;
1245
1246         if (!spin_trylock(rsv_lock))
1247                 return;
1248
1249         next = rb_next(&my_rsv->rsv_node);
1250
1251         if (!next)
1252                 my_rsv->rsv_end += size;
1253         else {
1254                 next_rsv = rb_entry(next, struct ext4_reserve_window_node, rsv_node);
1255
1256                 if ((next_rsv->rsv_start - my_rsv->rsv_end - 1) >= size)
1257                         my_rsv->rsv_end += size;
1258                 else
1259                         my_rsv->rsv_end = next_rsv->rsv_start - 1;
1260         }
1261         spin_unlock(rsv_lock);
1262 }
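
/*
 * Example of the extension logic above (illustrative): if the window ends
 * at block 1000, the next window starts at 1100 and 20 more blocks are
 * needed, the gap of 99 blocks is large enough and rsv_end simply grows
 * by 20.  If the next window started at 1010, the gap of 9 blocks would
 * be too small and the window would only be extended up to block 1009.
 */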
1263
1264 /**
1265  * ext4_try_to_allocate_with_rsv()
1266  * @sb:                 superblock
1267  * @handle:             handle to this transaction
1268  * @group:              given allocation block group
1269  * @bitmap_bh:          bufferhead holds the block bitmap
1270  * @grp_goal:           given target block within the group
1271  * @count:              target number of blocks to allocate
1272  * @my_rsv:             reservation window
1273  * @errp:               pointer to store the error code
1274  *
1275  * This is the main function used to allocate a new block and its reservation
1276  * window.
1277  *
1278  * Each time a new block allocation is needed, we first try to allocate from
1279  * the inode's own reservation.  If it does not have a reservation window,
1280  * then instead of looking for a free bit in the bitmap first and then looking
1281  * up the reservation list to see if that bit is inside somebody else's
1282  * reservation window, we try to allocate a reservation window for the inode
1283  * starting from the goal, and then do the block allocation within that window.
1284  *
1285  * This will avoid keeping on searching the reservation list again and
1286  * again when somebody is looking for a free block (without
1287  * reservation), and there are lots of free blocks, but they are all
1288  * being reserved.
1289  *
1290  * We use a red-black tree for the per-filesystem reservation list.
1291  *
1292  */
1293 static ext4_grpblk_t
1294 ext4_try_to_allocate_with_rsv(struct super_block *sb, handle_t *handle,
1295                         unsigned int group, struct buffer_head *bitmap_bh,
1296                         ext4_grpblk_t grp_goal,
1297                         struct ext4_reserve_window_node * my_rsv,
1298                         unsigned long *count, int *errp)
1299 {
1300         ext4_fsblk_t group_first_block, group_last_block;
1301         ext4_grpblk_t ret = 0;
1302         int fatal;
1303         unsigned long num = *count;
1304
1305         *errp = 0;
1306
1307         /*
1308          * Make sure we use undo access for the bitmap, because it is critical
1309          * that we do the frozen_data COW on bitmap buffers in all cases even
1310          * if the buffer is in BJ_Forget state in the committing transaction.
1311          */
1312         BUFFER_TRACE(bitmap_bh, "get undo access for new block");
1313         fatal = ext4_journal_get_undo_access(handle, bitmap_bh);
1314         if (fatal) {
1315                 *errp = fatal;
1316                 return -1;
1317         }
1318
1319         /*
1320          * We don't deal with reservations when
1321          * the filesystem is mounted without reservations,
1322          * or the file is not a regular file,
1323          * or the last attempt to allocate a block with reservation turned on failed.
1324          */
1325         if (my_rsv == NULL ) {
1326                 ret = ext4_try_to_allocate(sb, handle, group, bitmap_bh,
1327                                                 grp_goal, count, NULL);
1328                 goto out;
1329         }
1330         /*
1331          * grp_goal is a group relative block number (if there is a goal)
1332          * 0 <= grp_goal < EXT4_BLOCKS_PER_GROUP(sb)
1333          * group_first_block is a filesystem-wide block number; it is
1334          * the block number of the first block in this group
1335          */
1336         group_first_block = ext4_group_first_block_no(sb, group);
1337         group_last_block = group_first_block + (EXT4_BLOCKS_PER_GROUP(sb) - 1);
1338
1339         /*
1340          * Basically we will allocate a new block from the inode's reservation
1341          * window.
1342          *
1343          * We need to allocate a new reservation window if:
1344          * a) the inode does not have a reservation window; or
1345          * b) the last attempt to allocate a block from the existing reservation
1346          *    failed; or
1347          * c) we come here with a goal that is outside the existing window.
1348          *
1349          * We do not need to allocate a new reservation window if we come here
1350          * at the beginning with a goal and the goal is inside the window, or
1351          * we don't have a goal but already have a reservation window;
1352          * in that case we can allocate from the reservation window directly.
1353          */
1354         while (1) {
1355                 if (rsv_is_empty(&my_rsv->rsv_window) || (ret < 0) ||
1356                         !goal_in_my_reservation(&my_rsv->rsv_window,
1357                                                 grp_goal, group, sb)) {
1358                         if (my_rsv->rsv_goal_size < *count)
1359                                 my_rsv->rsv_goal_size = *count;
1360                         ret = alloc_new_reservation(my_rsv, grp_goal, sb,
1361                                                         group, bitmap_bh);
1362                         if (ret < 0)
1363                                 break;                  /* failed */
1364
1365                         if (!goal_in_my_reservation(&my_rsv->rsv_window,
1366                                                         grp_goal, group, sb))
1367                                 grp_goal = -1;
1368                 } else if (grp_goal >= 0) {
1369                         int curr = my_rsv->rsv_end -
1370                                         (grp_goal + group_first_block) + 1;
1371
1372                         if (curr < *count)
1373                                 try_to_extend_reservation(my_rsv, sb,
1374                                                         *count - curr);
1375                 }
1376
1377                 if ((my_rsv->rsv_start > group_last_block) ||
1378                                 (my_rsv->rsv_end < group_first_block)) {
1379                         rsv_window_dump(&EXT4_SB(sb)->s_rsv_window_root, 1);
1380                         BUG();
1381                 }
1382                 ret = ext4_try_to_allocate(sb, handle, group, bitmap_bh,
1383                                            grp_goal, &num, &my_rsv->rsv_window);
1384                 if (ret >= 0) {
1385                         my_rsv->rsv_alloc_hit += num;
1386                         *count = num;
1387                         break;                          /* succeed */
1388                 }
1389                 num = *count;
1390         }
1391 out:
1392         if (ret >= 0) {
1393                 BUFFER_TRACE(bitmap_bh, "journal_dirty_metadata for "
1394                                         "bitmap block");
1395                 fatal = ext4_journal_dirty_metadata(handle, bitmap_bh);
1396                 if (fatal) {
1397                         *errp = fatal;
1398                         return -1;
1399                 }
1400                 return ret;
1401         }
1402
1403         BUFFER_TRACE(bitmap_bh, "journal_release_buffer");
1404         ext4_journal_release_buffer(handle, bitmap_bh);
1405         return ret;
1406 }
1407
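/*
 * Illustrative sketch: a hypothetical caller of this helper would treat a
 * negative return as "no block allocated in this group" and a non-negative
 * return as the group-relative number of the first block of the allocated
 * extent, with *count trimmed to the number of blocks actually obtained:
 *
 *	grp_alloc_blk = ext4_try_to_allocate_with_rsv(sb, handle, group_no,
 *				bitmap_bh, grp_target_blk, my_rsv, &num, &fatal);
 *	if (fatal)
 *		goto out;
 *	if (grp_alloc_blk >= 0)
 *		goto allocated;
 */
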
1408 /**
1409  * ext4_has_free_blocks()
1410  * @sbi:                in-core super block structure.
1411  *
1412  * Check if filesystem has at least 1 free block available for allocation.
1413  */
1414 static int ext4_has_free_blocks(struct ext4_sb_info *sbi)
1415 {
1416         ext4_fsblk_t free_blocks, root_blocks;
1417
1418         free_blocks = percpu_counter_read_positive(&sbi->s_freeblocks_counter);
1419         root_blocks = ext4_r_blocks_count(sbi->s_es);
1420         if (free_blocks < root_blocks + 1 && !capable(CAP_SYS_RESOURCE) &&
1421                 sbi->s_resuid != current->fsuid &&
1422                 (sbi->s_resgid == 0 || !in_group_p (sbi->s_resgid))) {
1423                 return 0;
1424         }
1425         return 1;
1426 }
1427
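/*
 * Worked example: if s_r_blocks_count is 100 and only 100 free blocks
 * remain, free_blocks (100) < root_blocks + 1 (101), so an ordinary task
 * that is neither CAP_SYS_RESOURCE-capable nor the configured resuid/resgid
 * gets 0 here; root and the reserved uid/gid may still consume those last
 * reserved blocks.
 */
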
1428 /**
1429  * ext4_should_retry_alloc()
1430  * @sb:                 super block
1431  * @retries:            number of attempts that have been made
1432  *
1433  * ext4_should_retry_alloc() is called when ENOSPC is returned.  If it
1434  * is profitable to retry the operation, this function will wait
1435  * for the current or committing transaction to complete, and then
1436  * return TRUE.
1437  *
1438  * If the total number of retries exceeds three, return FALSE.
1439  */
1440 int ext4_should_retry_alloc(struct super_block *sb, int *retries)
1441 {
1442         if (!ext4_has_free_blocks(EXT4_SB(sb)) || (*retries)++ > 3)
1443                 return 0;
1444
1445         jbd_debug(1, "%s: retrying operation after ENOSPC\n", sb->s_id);
1446
1447         return jbd2_journal_force_commit_nested(EXT4_SB(sb)->s_journal);
1448 }
1449
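/*
 * Illustrative caller pattern (a sketch; do_the_allocation() is a
 * placeholder, not a real function): code that hits -ENOSPC typically loops
 * while this helper says a retry is worthwhile, e.g.:
 *
 *	int err, retries = 0;
 * retry:
 *	err = do_the_allocation(...);
 *	if (err == -ENOSPC && ext4_should_retry_alloc(sb, &retries))
 *		goto retry;
 */
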
1450 /**
1451  * ext4_new_blocks() -- core block(s) allocation function
1452  * @handle:             handle to this transaction
1453  * @inode:              file inode
1454  * @goal:               given target block(filesystem wide)
1455  * @count:              target number of blocks to allocate
1456  * @errp:               error code
1457  *
1458  * ext4_new_blocks uses a goal block to assist allocation.  It first tries to
1459  * allocate block(s) from the block group that contains the goal block. If that
1460  * fails, it will try to allocate block(s) from other block groups without
1461  * any specific goal block.
1462  *
1463  */
1464 ext4_fsblk_t ext4_new_blocks(handle_t *handle, struct inode *inode,
1465                         ext4_fsblk_t goal, unsigned long *count, int *errp)
1466 {
1467         struct buffer_head *bitmap_bh = NULL;
1468         struct buffer_head *gdp_bh;
1469         unsigned long group_no;
1470         int goal_group;
1471         ext4_grpblk_t grp_target_blk;   /* blockgroup-relative goal block */
1472         ext4_grpblk_t grp_alloc_blk;    /* blockgroup-relative allocated block */
1473         ext4_fsblk_t ret_block;         /* filesystem-wide allocated block */
1474         int bgi;                        /* blockgroup iteration index */
1475         int fatal = 0, err;
1476         int performed_allocation = 0;
1477         ext4_grpblk_t free_blocks;      /* number of free blocks in a group */
1478         struct super_block *sb;
1479         struct ext4_group_desc *gdp;
1480         struct ext4_super_block *es;
1481         struct ext4_sb_info *sbi;
1482         struct ext4_reserve_window_node *my_rsv = NULL;
1483         struct ext4_block_alloc_info *block_i;
1484         unsigned short windowsz = 0;
1485 #ifdef EXT4FS_DEBUG
1486         static int goal_hits, goal_attempts;
1487 #endif
1488         unsigned long ngroups;
1489         unsigned long num = *count;
1490
1491         *errp = -ENOSPC;
1492         sb = inode->i_sb;
1493         if (!sb) {
1494                 printk("ext4_new_block: nonexistent device");
1495                 return 0;
1496         }
1497
1498         /*
1499          * Check quota for allocation of this block.
1500          */
1501         if (DQUOT_ALLOC_BLOCK(inode, num)) {
1502                 *errp = -EDQUOT;
1503                 return 0;
1504         }
1505
1506         sbi = EXT4_SB(sb);
1507         es = EXT4_SB(sb)->s_es;
1508         ext4_debug("goal=%lu.\n", goal);
1509         /*
1510          * Allocate a block from the reservation only when the
1511          * filesystem is mounted with reservations (the default, -o reservation),
1512          * it's a regular file, and
1513          * the desired window size is greater than 0 (one could use the ioctl
1514          * command EXT4_IOC_SETRSVSZ to set the window size to 0 to turn off
1515          * reservation on that particular file).
1516          */
1517         block_i = EXT4_I(inode)->i_block_alloc_info;
1518         if (block_i && ((windowsz = block_i->rsv_window_node.rsv_goal_size) > 0))
1519                 my_rsv = &block_i->rsv_window_node;
1520
1521         if (!ext4_has_free_blocks(sbi)) {
1522                 *errp = -ENOSPC;
1523                 goto out;
1524         }
1525
1526         /*
1527          * First, test whether the goal block is free.
1528          */
1529         if (goal < le32_to_cpu(es->s_first_data_block) ||
1530             goal >= ext4_blocks_count(es))
1531                 goal = le32_to_cpu(es->s_first_data_block);
1532         ext4_get_group_no_and_offset(sb, goal, &group_no, &grp_target_blk);
1533         goal_group = group_no;
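        /*
         * Worked example (assuming a 4KiB block size, so
         * EXT4_BLOCKS_PER_GROUP(sb) == 32768, and s_first_data_block == 0):
         * a goal of 100000 yields group_no = 3 and grp_target_blk = 1696,
         * because 100000 == 3 * 32768 + 1696.
         */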
1534 retry_alloc:
1535         gdp = ext4_get_group_desc(sb, group_no, &gdp_bh);
1536         if (!gdp)
1537                 goto io_error;
1538
1539         free_blocks = le16_to_cpu(gdp->bg_free_blocks_count);
1540         /*
1541          * If there are not enough free blocks to make a new reservation,
1542          * turn off reservations for this allocation.
1543          */
1544         if (my_rsv && (free_blocks < windowsz)
1545                 && (rsv_is_empty(&my_rsv->rsv_window)))
1546                 my_rsv = NULL;
1547
1548         if (free_blocks > 0) {
1549                 bitmap_bh = read_block_bitmap(sb, group_no);
1550                 if (!bitmap_bh)
1551                         goto io_error;
1552                 grp_alloc_blk = ext4_try_to_allocate_with_rsv(sb, handle,
1553                                         group_no, bitmap_bh, grp_target_blk,
1554                                         my_rsv, &num, &fatal);
1555                 if (fatal)
1556                         goto out;
1557                 if (grp_alloc_blk >= 0)
1558                         goto allocated;
1559         }
1560
1561         ngroups = EXT4_SB(sb)->s_groups_count;
1562         smp_rmb();
1563
1564         /*
1565          * Now search the rest of the groups.  We assume that
1566          * group_no and gdp correctly point to the last group visited.
1567          */
1568         for (bgi = 0; bgi < ngroups; bgi++) {
1569                 group_no++;
1570                 if (group_no >= ngroups)
1571                         group_no = 0;
1572                 gdp = ext4_get_group_desc(sb, group_no, &gdp_bh);
1573                 if (!gdp)
1574                         goto io_error;
1575                 free_blocks = le16_to_cpu(gdp->bg_free_blocks_count);
1576                 /*
1577                  * skip this group if the number of
1578                  * free blocks is less than half of the reservation
1579                  * window size.
1580                  */
1581                 if (free_blocks <= (windowsz/2))
1582                         continue;
1583
1584                 brelse(bitmap_bh);
1585                 bitmap_bh = read_block_bitmap(sb, group_no);
1586                 if (!bitmap_bh)
1587                         goto io_error;
1588                 /*
1589                  * try to allocate block(s) from this group, without a goal (-1).
1590                  */
1591                 grp_alloc_blk = ext4_try_to_allocate_with_rsv(sb, handle,
1592                                         group_no, bitmap_bh, -1, my_rsv,
1593                                         &num, &fatal);
1594                 if (fatal)
1595                         goto out;
1596                 if (grp_alloc_blk >= 0)
1597                         goto allocated;
1598         }
1599         /*
1600          * We may end up with a bogus earlier ENOSPC error because the
1601          * filesystem is "full" of reservations, while there may in fact
1602          * be free blocks available on disk.  In this case, we just forget
1603          * about the reservations and do the block allocation as if there
1604          * were no reservations.
1605          */
1606         if (my_rsv) {
1607                 my_rsv = NULL;
1608                 windowsz = 0;
1609                 group_no = goal_group;
1610                 goto retry_alloc;
1611         }
1612         /* No space left on the device */
1613         *errp = -ENOSPC;
1614         goto out;
1615
1616 allocated:
1617
1618         ext4_debug("using block group %d(%d)\n",
1619                         group_no, gdp->bg_free_blocks_count);
1620
1621         BUFFER_TRACE(gdp_bh, "get_write_access");
1622         fatal = ext4_journal_get_write_access(handle, gdp_bh);
1623         if (fatal)
1624                 goto out;
1625
1626         ret_block = grp_alloc_blk + ext4_group_first_block_no(sb, group_no);
1627
1628         if (in_range(ext4_block_bitmap(sb, gdp), ret_block, num) ||
1629             in_range(ext4_inode_bitmap(sb, gdp), ret_block, num) ||
1630             in_range(ret_block, ext4_inode_table(sb, gdp),
1631                      EXT4_SB(sb)->s_itb_per_group) ||
1632             in_range(ret_block + num - 1, ext4_inode_table(sb, gdp),
1633                      EXT4_SB(sb)->s_itb_per_group))
1634                 ext4_error(sb, "ext4_new_block",
1635                             "Allocating block in system zone - "
1636                             "blocks from %llu, length %lu",
1637                              ret_block, num);
1638
1639         performed_allocation = 1;
1640
1641 #ifdef CONFIG_JBD2_DEBUG
1642         {
1643                 struct buffer_head *debug_bh;
1644
1645                 /* Record bitmap buffer state in the newly allocated block */
1646                 debug_bh = sb_find_get_block(sb, ret_block);
1647                 if (debug_bh) {
1648                         BUFFER_TRACE(debug_bh, "state when allocated");
1649                         BUFFER_TRACE2(debug_bh, bitmap_bh, "bitmap state");
1650                         brelse(debug_bh);
1651                 }
1652         }
1653         jbd_lock_bh_state(bitmap_bh);
1654         spin_lock(sb_bgl_lock(sbi, group_no));
1655         if (buffer_jbd(bitmap_bh) && bh2jh(bitmap_bh)->b_committed_data) {
1656                 int i;
1657
1658                 for (i = 0; i < num; i++) {
1659                         if (ext4_test_bit(grp_alloc_blk+i,
1660                                         bh2jh(bitmap_bh)->b_committed_data)) {
1661                                 printk("%s: block was unexpectedly set in "
1662                                         "b_committed_data\n", __FUNCTION__);
1663                         }
1664                 }
1665         }
1666         ext4_debug("found bit %d\n", grp_alloc_blk);
1667         spin_unlock(sb_bgl_lock(sbi, group_no));
1668         jbd_unlock_bh_state(bitmap_bh);
1669 #endif
1670
1671         if (ret_block + num - 1 >= ext4_blocks_count(es)) {
1672                 ext4_error(sb, "ext4_new_block",
1673                             "block(%llu) >= blocks count(%llu) - "
1674                             "block_group = %lu, es == %p ", ret_block,
1675                         ext4_blocks_count(es), group_no, es);
1676                 goto out;
1677         }
1678
1679         /*
1680          * It is up to the caller to add the new buffer to a journal
1681          * list of some description.  We don't know in advance whether
1682          * the caller wants to use it as metadata or data.
1683          */
1684         ext4_debug("allocating block %lu. Goal hits %d of %d.\n",
1685                         ret_block, goal_hits, goal_attempts);
1686
1687         spin_lock(sb_bgl_lock(sbi, group_no));
1688         gdp->bg_free_blocks_count =
1689                         cpu_to_le16(le16_to_cpu(gdp->bg_free_blocks_count)-num);
1690         spin_unlock(sb_bgl_lock(sbi, group_no));
1691         percpu_counter_sub(&sbi->s_freeblocks_counter, num);
1692
1693         BUFFER_TRACE(gdp_bh, "journal_dirty_metadata for group descriptor");
1694         err = ext4_journal_dirty_metadata(handle, gdp_bh);
1695         if (!fatal)
1696                 fatal = err;
1697
1698         sb->s_dirt = 1;
1699         if (fatal)
1700                 goto out;
1701
1702         *errp = 0;
1703         brelse(bitmap_bh);
1704         DQUOT_FREE_BLOCK(inode, *count-num);
1705         *count = num;
1706         return ret_block;
1707
1708 io_error:
1709         *errp = -EIO;
1710 out:
1711         if (fatal) {
1712                 *errp = fatal;
1713                 ext4_std_error(sb, fatal);
1714         }
1715         /*
1716          * Undo the block allocation
1717          */
1718         if (!performed_allocation)
1719                 DQUOT_FREE_BLOCK(inode, *count);
1720         brelse(bitmap_bh);
1721         return 0;
1722 }
1723
1724 ext4_fsblk_t ext4_new_block(handle_t *handle, struct inode *inode,
1725                         ext4_fsblk_t goal, int *errp)
1726 {
1727         unsigned long count = 1;
1728
1729         return ext4_new_blocks(handle, inode, goal, &count, errp);
1730 }
1731
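/*
 * Illustrative use (a hypothetical caller): ext4_new_block() returns 0 on
 * failure and stores the error code, e.g. -ENOSPC, -EDQUOT or -EIO, through
 * errp, so a single-block allocation might look like:
 *
 *	int err;
 *	ext4_fsblk_t blk = ext4_new_block(handle, inode, goal, &err);
 *	if (!blk)
 *		return err;
 */
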
1732 /**
1733  * ext4_count_free_blocks() -- count filesystem free blocks
1734  * @sb:         superblock
1735  *
1736  * Adds up the number of free blocks from each block group.
1737  */
1738 ext4_fsblk_t ext4_count_free_blocks(struct super_block *sb)
1739 {
1740         ext4_fsblk_t desc_count;
1741         struct ext4_group_desc *gdp;
1742         int i;
1743         unsigned long ngroups = EXT4_SB(sb)->s_groups_count;
1744 #ifdef EXT4FS_DEBUG
1745         struct ext4_super_block *es;
1746         ext4_fsblk_t bitmap_count;
1747         unsigned long x;
1748         struct buffer_head *bitmap_bh = NULL;
1749
1750         es = EXT4_SB(sb)->s_es;
1751         desc_count = 0;
1752         bitmap_count = 0;
1753         gdp = NULL;
1754
1755         smp_rmb();
1756         for (i = 0; i < ngroups; i++) {
1757                 gdp = ext4_get_group_desc(sb, i, NULL);
1758                 if (!gdp)
1759                         continue;
1760                 desc_count += le16_to_cpu(gdp->bg_free_blocks_count);
1761                 brelse(bitmap_bh);
1762                 bitmap_bh = read_block_bitmap(sb, i);
1763                 if (bitmap_bh == NULL)
1764                         continue;
1765
1766                 x = ext4_count_free(bitmap_bh, sb->s_blocksize);
1767                 printk("group %d: stored = %d, counted = %lu\n",
1768                         i, le16_to_cpu(gdp->bg_free_blocks_count), x);
1769                 bitmap_count += x;
1770         }
1771         brelse(bitmap_bh);
1772         printk("ext4_count_free_blocks: stored = %llu"
1773                 ", computed = %llu, %llu\n",
1774                EXT4_FREE_BLOCKS_COUNT(es),
1775                 desc_count, bitmap_count);
1776         return bitmap_count;
1777 #else
1778         desc_count = 0;
1779         smp_rmb();
1780         for (i = 0; i < ngroups; i++) {
1781                 gdp = ext4_get_group_desc(sb, i, NULL);
1782                 if (!gdp)
1783                         continue;
1784                 desc_count += le16_to_cpu(gdp->bg_free_blocks_count);
1785         }
1786
1787         return desc_count;
1788 #endif
1789 }
1790
1791 static inline int test_root(int a, int b)
1792 {
1793         int num = b;
1794
1795         while (a > num)
1796                 num *= b;
1797         return num == a;
1798 }
1799
1800 static int ext4_group_sparse(int group)
1801 {
1802         if (group <= 1)
1803                 return 1;
1804         if (!(group & 1))
1805                 return 0;
1806         return (test_root(group, 7) || test_root(group, 5) ||
1807                 test_root(group, 3));
1808 }
1809
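/*
 * Worked example: with the sparse_super feature, backup superblocks and
 * group descriptors live only in group 0, group 1 and groups whose number is
 * a power of 3, 5 or 7.  Among the first 100 groups, ext4_group_sparse()
 * therefore returns 1 only for groups 0, 1, 3, 5, 7, 9, 25, 27, 49 and 81.
 */
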
1810 /**
1811  *      ext4_bg_has_super - number of blocks used by the superblock in group
1812  *      @sb: superblock for filesystem
1813  *      @group: group number to check
1814  *
1815  *      Return the number of blocks used by the superblock (primary or backup)
1816  *      in this group.  Currently this will be only 0 or 1.
1817  */
1818 int ext4_bg_has_super(struct super_block *sb, int group)
1819 {
1820         if (EXT4_HAS_RO_COMPAT_FEATURE(sb,
1821                                 EXT4_FEATURE_RO_COMPAT_SPARSE_SUPER) &&
1822                         !ext4_group_sparse(group))
1823                 return 0;
1824         return 1;
1825 }
1826
1827 static unsigned long ext4_bg_num_gdb_meta(struct super_block *sb, int group)
1828 {
1829         unsigned long metagroup = group / EXT4_DESC_PER_BLOCK(sb);
1830         unsigned long first = metagroup * EXT4_DESC_PER_BLOCK(sb);
1831         unsigned long last = first + EXT4_DESC_PER_BLOCK(sb) - 1;
1832
1833         if (group == first || group == first + 1 || group == last)
1834                 return 1;
1835         return 0;
1836 }
1837
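/*
 * Worked example (assuming a 4KiB block size and the classic 32-byte group
 * descriptors, giving EXT4_DESC_PER_BLOCK(sb) == 128): metagroup 0 covers
 * groups 0..127, and its single descriptor block is kept in group 0 with
 * backups in groups 1 and 127, so ext4_bg_num_gdb_meta() returns 1 for those
 * three groups and 0 for the rest of the metagroup.
 */
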
1838 static unsigned long ext4_bg_num_gdb_nometa(struct super_block *sb, int group)
1839 {
1840         if (EXT4_HAS_RO_COMPAT_FEATURE(sb,
1841                                 EXT4_FEATURE_RO_COMPAT_SPARSE_SUPER) &&
1842                         !ext4_group_sparse(group))
1843                 return 0;
1844         return EXT4_SB(sb)->s_gdb_count;
1845 }
1846
1847 /**
1848  *      ext4_bg_num_gdb - number of blocks used by the group table in group
1849  *      @sb: superblock for filesystem
1850  *      @group: group number to check
1851  *
1852  *      Return the number of blocks used by the group descriptor table
1853  *      (primary or backup) in this group.  In the future there may be a
1854  *      different number of descriptor blocks in each group.
1855  */
1856 unsigned long ext4_bg_num_gdb(struct super_block *sb, int group)
1857 {
1858         unsigned long first_meta_bg =
1859                         le32_to_cpu(EXT4_SB(sb)->s_es->s_first_meta_bg);
1860         unsigned long metagroup = group / EXT4_DESC_PER_BLOCK(sb);
1861
1862         if (!EXT4_HAS_INCOMPAT_FEATURE(sb,EXT4_FEATURE_INCOMPAT_META_BG) ||
1863                         metagroup < first_meta_bg)
1864                 return ext4_bg_num_gdb_nometa(sb,group);
1865
1866         return ext4_bg_num_gdb_meta(sb,group);
1867
1868 }