/*
 * linux/fs/jbd/commit.c
 *
 * Written by Stephen C. Tweedie <sct@redhat.com>, 1998
 *
 * Copyright 1998 Red Hat corp --- All Rights Reserved
 *
 * This file is part of the Linux kernel and is made available under
 * the terms of the GNU General Public License, version 2, or at your
 * option, any later version, incorporated herein by reference.
 *
 * Journal commit routines for the generic filesystem journaling code;
 * part of the ext2fs journaling system.
 */
#include <linux/time.h>
#include <linux/jbd.h>
#include <linux/errno.h>
#include <linux/slab.h>
#include <linux/pagemap.h>
/*
 * Default IO end handler for temporary BJ_IO buffer_heads.
 */
static void journal_end_buffer_io_sync(struct buffer_head *bh, int uptodate)
{
        if (uptodate)
                set_buffer_uptodate(bh);
        else
                clear_buffer_uptodate(bh);
        unlock_buffer(bh);
}
/*
 * When an ext3-ordered file is truncated, it is possible that many pages are
 * not successfully freed, because they are attached to a committing
 * transaction.  After the transaction commits, these pages are left on the
 * LRU, with no ->mapping, and with attached buffers.  These pages are
 * trivially reclaimable by the VM, but their apparent absence upsets the VM
 * accounting, and it makes the numbers in /proc/meminfo look odd.
 *
 * So here, we have a buffer which has just come off the forget list.  Look to
 * see if we can strip all buffers from the backing page.
 *
 * Called under journal->j_list_lock.  The caller provided us with a ref
 * against the buffer, and we drop that here.
 */
static void release_buffer_page(struct buffer_head *bh)
{
        struct page *page;

        if (buffer_dirty(bh))
                goto nope;
        if (atomic_read(&bh->b_count) != 1)
                goto nope;
        page = bh->b_page;
        if (!page)
                goto nope;
        if (page->mapping)
                goto nope;

        /* OK, it's a truncated page */
        if (TestSetPageLocked(page))
                goto nope;

        page_cache_get(page);
        __brelse(bh);
        try_to_free_buffers(page);
        unlock_page(page);
        page_cache_release(page);
        return;

nope:
        __brelse(bh);
}
/*
 * Decrement reference counter for data buffer. If it has been marked
 * 'BH_Freed', release it and the page to which it belongs if possible.
 */
static void release_data_buffer(struct buffer_head *bh)
{
        if (buffer_freed(bh)) {
                clear_buffer_freed(bh);
                release_buffer_page(bh);
        } else
                put_bh(bh);
}
/*
 * Try to acquire jbd_lock_bh_state() against the buffer, when j_list_lock is
 * held.  For ranking reasons we must trylock.  If we lose, schedule away and
 * return 0.  j_list_lock is dropped in this case.
 */
static int inverted_lock(journal_t *journal, struct buffer_head *bh)
{
        if (!jbd_trylock_bh_state(bh)) {
                spin_unlock(&journal->j_list_lock);
                schedule();
                return 0;
        }
        return 1;
}
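
/*
 * Note on lock ordering: jbd takes the buffer's bh_state lock before
 * j_list_lock, which is why only a trylock is safe while j_list_lock is
 * already held.  Callers that lose the race retake both locks in the
 * correct order before continuing, as the commit code below does:
 *
 *	if (!inverted_lock(journal, bh)) {
 *		jbd_lock_bh_state(bh);
 *		spin_lock(&journal->j_list_lock);
 *	}
 */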
/* Done it all: now write the commit record.  We should have
 * cleaned up our previous buffers by now, so if we are in abort
 * mode we can now just skip the rest of the journal write
 * entirely.
 *
 * Returns 1 if the journal needs to be aborted or 0 on success
 */
static int journal_write_commit_record(journal_t *journal,
                                        transaction_t *commit_transaction)
{
        struct journal_head *descriptor;
        struct buffer_head *bh;
        journal_header_t *header;
        int ret;
        int barrier_done = 0;

        if (is_journal_aborted(journal))
                return 0;

        descriptor = journal_get_descriptor_buffer(journal);
        if (!descriptor)
                return 1;

        bh = jh2bh(descriptor);

        header = (journal_header_t *)(bh->b_data);
        header->h_magic = cpu_to_be32(JFS_MAGIC_NUMBER);
        header->h_blocktype = cpu_to_be32(JFS_COMMIT_BLOCK);
        header->h_sequence = cpu_to_be32(commit_transaction->t_tid);

        JBUFFER_TRACE(descriptor, "write commit block");
        set_buffer_dirty(bh);
        if (journal->j_flags & JFS_BARRIER) {
                set_buffer_ordered(bh);
                barrier_done = 1;
        }
        ret = sync_dirty_buffer(bh);
        if (barrier_done)
                clear_buffer_ordered(bh);
        /* is it possible for another commit to fail at roughly
         * the same time as this one?  If so, we don't want to
         * trust the barrier flag in the super, but instead want
         * to remember if we sent a barrier request
         */
        if (ret == -EOPNOTSUPP && barrier_done) {
                char b[BDEVNAME_SIZE];

                printk(KERN_WARNING
                        "JBD: barrier-based sync failed on %s - "
                        "disabling barriers\n",
                        bdevname(journal->j_dev, b));
                spin_lock(&journal->j_state_lock);
                journal->j_flags &= ~JFS_BARRIER;
                spin_unlock(&journal->j_state_lock);

                /* And try again, without the barrier */
                set_buffer_uptodate(bh);
                set_buffer_dirty(bh);
                ret = sync_dirty_buffer(bh);
        }
        put_bh(bh);             /* One for getblk() */
        journal_put_journal_head(descriptor);

        return (ret == -EIO);
}
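
/*
 * Once the commit block written above reaches stable storage the
 * transaction is durable: journal recovery replays a transaction only if
 * a commit block carrying the matching sequence number is found in the log.
 */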
static void journal_do_submit_data(struct buffer_head **wbuf, int bufs)
{
        int i;

        for (i = 0; i < bufs; i++) {
                wbuf[i]->b_end_io = end_buffer_write_sync;
                /* We use up our safety reference in submit_bh() */
                submit_bh(WRITE, wbuf[i]);
        }
}
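
/*
 * The "safety reference" mentioned above is the get_bh() taken by
 * journal_submit_data_buffers() on each data buffer; it is handed over to
 * the write here and dropped by end_buffer_write_sync() when the IO
 * completes.
 */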
/*
 *  Submit all the data buffers to disk
 */
static void journal_submit_data_buffers(journal_t *journal,
                                        transaction_t *commit_transaction)
{
        struct journal_head *jh;
        struct buffer_head *bh;
        int locked;
        int bufs = 0;
        struct buffer_head **wbuf = journal->j_wbuf;

        /*
         * Whenever we unlock the journal and sleep, things can get added
         * onto ->t_sync_datalist, so we have to keep looping back to
         * write_out_data until we *know* that the list is empty.
         *
         * Clean up any flushed data buffers from the data list.  Even in
         * abort mode, we want to flush this out as soon as possible.
         */
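        /*
         * Dirty buffers are collected into the journal's wbuf[] array and
         * submitted in batches of up to j_wbufsize; buffers whose IO has
         * already completed are simply unfiled and released.
         */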
write_out_data:
        cond_resched();
        spin_lock(&journal->j_list_lock);

        while (commit_transaction->t_sync_datalist) {
                jh = commit_transaction->t_sync_datalist;
                bh = jh2bh(jh);
                locked = 0;

                /* Get reference just to make sure buffer does not disappear
                 * when we are forced to drop various locks */
                get_bh(bh);
                /* If the buffer is dirty, we need to submit IO and hence
                 * we need the buffer lock. We try to lock the buffer without
                 * blocking. If we fail, we need to drop j_list_lock and do
                 * blocking lock_buffer().
                 */
                if (buffer_dirty(bh)) {
                        if (test_set_buffer_locked(bh)) {
                                BUFFER_TRACE(bh, "needs blocking lock");
                                spin_unlock(&journal->j_list_lock);
                                /* Write out all data to prevent deadlocks */
                                journal_do_submit_data(wbuf, bufs);
                                bufs = 0;
                                lock_buffer(bh);
                                spin_lock(&journal->j_list_lock);
                        }
                        locked = 1;
                }
                /* We have to get bh_state lock. Again out of order, sigh. */
                if (!inverted_lock(journal, bh)) {
                        jbd_lock_bh_state(bh);
                        spin_lock(&journal->j_list_lock);
                }
                /* Someone already cleaned up the buffer? */
                if (!buffer_jbd(bh)
                        || jh->b_transaction != commit_transaction
                        || jh->b_jlist != BJ_SyncData) {
                        jbd_unlock_bh_state(bh);
                        if (locked)
                                unlock_buffer(bh);
                        BUFFER_TRACE(bh, "already cleaned up");
                        release_data_buffer(bh);
                        continue;
                }
                if (locked && test_clear_buffer_dirty(bh)) {
                        BUFFER_TRACE(bh, "needs writeout, adding to array");
                        wbuf[bufs++] = bh;
                        __journal_file_buffer(jh, commit_transaction,
                                                BJ_Locked);
                        jbd_unlock_bh_state(bh);
                        if (bufs == journal->j_wbufsize) {
                                spin_unlock(&journal->j_list_lock);
                                journal_do_submit_data(wbuf, bufs);
                                bufs = 0;
                                goto write_out_data;
                        }
                } else if (!locked && buffer_locked(bh)) {
                        __journal_file_buffer(jh, commit_transaction,
                                                BJ_Locked);
                        jbd_unlock_bh_state(bh);
                        put_bh(bh);
                } else {
                        BUFFER_TRACE(bh, "writeout complete: unfile");
                        __journal_unfile_buffer(jh);
                        jbd_unlock_bh_state(bh);
                        if (locked)
                                unlock_buffer(bh);
                        journal_remove_journal_head(bh);
                        /* One for our safety reference, other for
                         * journal_remove_journal_head() */
                        put_bh(bh);
                        release_data_buffer(bh);
                }

                if (need_resched() || spin_needbreak(&journal->j_list_lock)) {
                        spin_unlock(&journal->j_list_lock);
                        goto write_out_data;
                }
        }
        spin_unlock(&journal->j_list_lock);
        journal_do_submit_data(wbuf, bufs);
}
/*
 * journal_commit_transaction
 *
 * The primary function for committing a transaction to the log.  This
 * function is called by the journal thread to begin a complete commit.
 */
void journal_commit_transaction(journal_t *journal)
{
        transaction_t *commit_transaction;
        struct journal_head *jh, *new_jh, *descriptor;
        struct buffer_head **wbuf = journal->j_wbuf;
        int i;
        int err;
        unsigned long blocknr;
        char *tagp = NULL;
        journal_header_t *header;
        journal_block_tag_t *tag = NULL;
        int space_left = 0;
        int first_tag = 0;
        int tag_flag;
        int flags;
        int bufs;

        /*
         * First job: lock down the current transaction and wait for
         * all outstanding updates to complete.
         */

#ifdef COMMIT_STATS
        spin_lock(&journal->j_list_lock);
        summarise_journal_usage(journal);
        spin_unlock(&journal->j_list_lock);
#endif
        /* Do we need to erase the effects of a prior journal_flush? */
        if (journal->j_flags & JFS_FLUSHED) {
                jbd_debug(3, "super block updated\n");
                journal_update_superblock(journal, 1);
        } else {
                jbd_debug(3, "superblock not updated\n");
        }

        J_ASSERT(journal->j_running_transaction != NULL);
        J_ASSERT(journal->j_committing_transaction == NULL);

        commit_transaction = journal->j_running_transaction;
        J_ASSERT(commit_transaction->t_state == T_RUNNING);

        jbd_debug(1, "JBD: starting commit of transaction %d\n",
                        commit_transaction->t_tid);
        spin_lock(&journal->j_state_lock);
        commit_transaction->t_state = T_LOCKED;

        spin_lock(&commit_transaction->t_handle_lock);
        while (commit_transaction->t_updates) {
                DEFINE_WAIT(wait);

                prepare_to_wait(&journal->j_wait_updates, &wait,
                                TASK_UNINTERRUPTIBLE);
                if (commit_transaction->t_updates) {
                        spin_unlock(&commit_transaction->t_handle_lock);
                        spin_unlock(&journal->j_state_lock);
                        schedule();
                        spin_lock(&journal->j_state_lock);
                        spin_lock(&commit_transaction->t_handle_lock);
                }
                finish_wait(&journal->j_wait_updates, &wait);
        }
        spin_unlock(&commit_transaction->t_handle_lock);

        J_ASSERT (commit_transaction->t_outstanding_credits <=
                        journal->j_max_transaction_buffers);

        /*
         * First thing we are allowed to do is to discard any remaining
         * BJ_Reserved buffers.  Note, it is _not_ permissible to assume
         * that there are no such buffers: if a large filesystem
         * operation like a truncate needs to split itself over multiple
         * transactions, then it may try to do a journal_restart() while
         * there are still BJ_Reserved buffers outstanding.  These must
         * be released cleanly from the current transaction.
         *
         * In this case, the filesystem must still reserve write access
         * again before modifying the buffer in the new transaction, but
         * we do not require it to remember exactly which old buffers it
         * has reserved.  This is consistent with the existing behaviour
         * that multiple journal_get_write_access() calls to the same
         * buffer are perfectly permissible.
         */
        while (commit_transaction->t_reserved_list) {
                jh = commit_transaction->t_reserved_list;
                JBUFFER_TRACE(jh, "reserved, unused: refile");
                /*
                 * A journal_get_undo_access()+journal_release_buffer() may
                 * leave undo-committed data.
                 */
                if (jh->b_committed_data) {
                        struct buffer_head *bh = jh2bh(jh);

                        jbd_lock_bh_state(bh);
                        jbd_free(jh->b_committed_data, bh->b_size);
                        jh->b_committed_data = NULL;
                        jbd_unlock_bh_state(bh);
                }
                journal_refile_buffer(journal, jh);
        }

        /*
         * Now try to drop any written-back buffers from the journal's
         * checkpoint lists.  We do this *before* commit because it potentially
         * frees some memory.
         */
        spin_lock(&journal->j_list_lock);
        __journal_clean_checkpoint_list(journal);
        spin_unlock(&journal->j_list_lock);
        jbd_debug (3, "JBD: commit phase 1\n");

        /*
         * Switch to a new revoke table.
         */
        journal_switch_revoke_table(journal);

        commit_transaction->t_state = T_FLUSH;
        journal->j_committing_transaction = commit_transaction;
        journal->j_running_transaction = NULL;
        commit_transaction->t_log_start = journal->j_head;
        wake_up(&journal->j_wait_transaction_locked);
        spin_unlock(&journal->j_state_lock);

        jbd_debug (3, "JBD: commit phase 2\n");

        /*
         * Now start flushing things to disk, in the order they appear
         * on the transaction lists.  Data blocks go first.
         */
        err = 0;
        journal_submit_data_buffers(journal, commit_transaction);
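
        /*
         * For ordered-mode data this is what guarantees that file data
         * reaches disk before the commit record: the sync data list has
         * just been submitted and is waited upon below, before any
         * metadata or the commit block are written to the log.
         */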
        /*
         * Wait for all previously submitted IO to complete.
         */
        spin_lock(&journal->j_list_lock);
        while (commit_transaction->t_locked_list) {
                struct buffer_head *bh;

                jh = commit_transaction->t_locked_list->b_tprev;
                bh = jh2bh(jh);
                get_bh(bh);
                if (buffer_locked(bh)) {
                        spin_unlock(&journal->j_list_lock);
                        wait_on_buffer(bh);
                        if (unlikely(!buffer_uptodate(bh)))
                                err = -EIO;
                        spin_lock(&journal->j_list_lock);
                }
                if (!inverted_lock(journal, bh)) {
                        put_bh(bh);
                        spin_lock(&journal->j_list_lock);
                        continue;
                }
                if (buffer_jbd(bh) && jh->b_jlist == BJ_Locked) {
                        __journal_unfile_buffer(jh);
                        jbd_unlock_bh_state(bh);
                        journal_remove_journal_head(bh);
                        put_bh(bh);
                } else {
                        jbd_unlock_bh_state(bh);
                }
                release_data_buffer(bh);
                cond_resched_lock(&journal->j_list_lock);
        }
        spin_unlock(&journal->j_list_lock);
        if (err)
                journal_abort(journal, err);

        journal_write_revoke_records(journal, commit_transaction);

        /*
         * If we found any dirty or locked buffers, then we should have
         * looped back up to the write_out_data label.  If there weren't
         * any then journal_clean_data_list should have wiped the list
         * clean by now, so check that it is in fact empty.
         */
        J_ASSERT (commit_transaction->t_sync_datalist == NULL);

        jbd_debug (3, "JBD: commit phase 3\n");

        /*
         * Way to go: we have now written out all of the data for a
         * transaction!  Now comes the tricky part: we need to write out
         * metadata.  Loop over the transaction's entire buffer list:
         */
        spin_lock(&journal->j_state_lock);
        commit_transaction->t_state = T_COMMIT;
        spin_unlock(&journal->j_state_lock);

        J_ASSERT(commit_transaction->t_nr_buffers <=
                 commit_transaction->t_outstanding_credits);
        descriptor = NULL;
        bufs = 0;
        while (commit_transaction->t_buffers) {

                /* Find the next buffer to be journaled... */

                jh = commit_transaction->t_buffers;

                /* If we're in abort mode, we just un-journal the buffer and
                   release it for background writing. */

                if (is_journal_aborted(journal)) {
                        JBUFFER_TRACE(jh, "journal is aborting: refile");
                        journal_refile_buffer(journal, jh);
                        /* If that was the last one, we need to clean up
                         * any descriptor buffers which may have been
                         * already allocated, even if we are now
                         * aborting. */
                        if (!commit_transaction->t_buffers)
                                goto start_journal_io;
                        continue;
                }
                /* Make sure we have a descriptor block in which to
                   record the metadata buffer. */

                if (!descriptor) {
                        struct buffer_head *bh;

                        J_ASSERT (bufs == 0);

                        jbd_debug(4, "JBD: get descriptor\n");

                        descriptor = journal_get_descriptor_buffer(journal);
                        if (!descriptor) {
                                journal_abort(journal, -EIO);
                                continue;
                        }

                        bh = jh2bh(descriptor);
                        jbd_debug(4, "JBD: got buffer %llu (%p)\n",
                                (unsigned long long)bh->b_blocknr, bh->b_data);
                        header = (journal_header_t *)&bh->b_data[0];
                        header->h_magic = cpu_to_be32(JFS_MAGIC_NUMBER);
                        header->h_blocktype = cpu_to_be32(JFS_DESCRIPTOR_BLOCK);
                        header->h_sequence = cpu_to_be32(commit_transaction->t_tid);

                        tagp = &bh->b_data[sizeof(journal_header_t)];
                        space_left = bh->b_size - sizeof(journal_header_t);
                        first_tag = 1;
                        set_buffer_jwrite(bh);
                        set_buffer_dirty(bh);
                        wbuf[bufs++] = bh;

                        /* Record it so that we can wait for IO
                           completion later */
                        BUFFER_TRACE(bh, "ph3: file as descriptor");
                        journal_file_buffer(descriptor, commit_transaction,
                                        BJ_LogCtl);
                }
                /* Where is the buffer to be written? */

                err = journal_next_log_block(journal, &blocknr);
                /* If the block mapping failed, just abandon the buffer
                   and repeat this loop: we'll fall into the
                   refile-on-abort condition above. */

                if (err) {
                        journal_abort(journal, err);
                        continue;
                }

                /*
                 * start_this_handle() uses t_outstanding_credits to determine
                 * the free space in the log, but this counter is changed
                 * by journal_next_log_block() also.
                 */
                commit_transaction->t_outstanding_credits--;

                /* Bump b_count to prevent truncate from stumbling over
                   the shadowed buffer!  @@@ This can go if we ever get
                   rid of the BJ_IO/BJ_Shadow pairing of buffers. */
                atomic_inc(&jh2bh(jh)->b_count);
                /* Make a temporary IO buffer with which to write it out
                   (this will requeue both the metadata buffer and the
                   temporary IO buffer). new_bh goes on BJ_IO */

                set_bit(BH_JWrite, &jh2bh(jh)->b_state);
                /*
                 * akpm: journal_write_metadata_buffer() sets
                 * new_bh->b_transaction to commit_transaction.
                 * We need to clean this up before we release new_bh
                 * (which is of type BJ_IO)
                 */
                JBUFFER_TRACE(jh, "ph3: write metadata");
                flags = journal_write_metadata_buffer(commit_transaction,
                                                      jh, &new_jh, blocknr);
                set_bit(BH_JWrite, &jh2bh(new_jh)->b_state);
                wbuf[bufs++] = jh2bh(new_jh);
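
                /*
                 * journal_write_metadata_buffer() returns flags describing
                 * the copy it made; bit 0 is set when the block had to be
                 * "escaped" because its first word matched the JBD magic
                 * number, which is what the JFS_FLAG_ESCAPE tag below
                 * records for recovery.
                 */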
                /* Record the new block's tag in the current descriptor
                   buffer */

                tag_flag = 0;
                if (flags & 1)
                        tag_flag |= JFS_FLAG_ESCAPE;
                if (!first_tag)
                        tag_flag |= JFS_FLAG_SAME_UUID;

                tag = (journal_block_tag_t *) tagp;
                tag->t_blocknr = cpu_to_be32(jh2bh(jh)->b_blocknr);
                tag->t_flags = cpu_to_be32(tag_flag);
                tagp += sizeof(journal_block_tag_t);
                space_left -= sizeof(journal_block_tag_t);

                if (first_tag) {
                        memcpy (tagp, journal->j_uuid, 16);
                        tagp += 16;
                        space_left -= 16;
                        first_tag = 0;
                }
                /* If there's no more to do, or if the descriptor is full,
                   let the IO rip! */

                if (bufs == journal->j_wbufsize ||
                    commit_transaction->t_buffers == NULL ||
                    space_left < sizeof(journal_block_tag_t) + 16) {

                        jbd_debug(4, "JBD: Submit %d IOs\n", bufs);

                        /* Write an end-of-descriptor marker before
                           submitting the IOs.  "tag" still points to
                           the last tag we set up. */

                        tag->t_flags |= cpu_to_be32(JFS_FLAG_LAST_TAG);

start_journal_io:
                        for (i = 0; i < bufs; i++) {
                                struct buffer_head *bh = wbuf[i];
                                lock_buffer(bh);
                                clear_buffer_dirty(bh);
                                set_buffer_uptodate(bh);
                                bh->b_end_io = journal_end_buffer_io_sync;
                                submit_bh(WRITE, bh);
                        }
                        cond_resched();

                        /* Force a new descriptor to be generated next
                           time round the loop. */
                        descriptor = NULL;
                        bufs = 0;
                }
        }
        /* Lo and behold: we have just managed to send a transaction to
           the log.  Before we can commit it, wait for the IO so far to
           complete.  Control buffers being written are on the
           transaction's t_log_list queue, and metadata buffers are on
           the t_iobuf_list queue.

           Wait for the buffers in reverse order.  That way we are
           less likely to be woken up until all IOs have completed, and
           so we incur less scheduling load.
         */

        jbd_debug(3, "JBD: commit phase 4\n");

        /*
         * akpm: these are BJ_IO, and j_list_lock is not needed.
         * See __journal_try_to_free_buffer.
         */
wait_for_iobuf:
        while (commit_transaction->t_iobuf_list != NULL) {
                struct buffer_head *bh;

                jh = commit_transaction->t_iobuf_list->b_tprev;
                bh = jh2bh(jh);
                if (buffer_locked(bh)) {
                        wait_on_buffer(bh);
                        goto wait_for_iobuf;
                }
                if (cond_resched())
                        goto wait_for_iobuf;

                if (unlikely(!buffer_uptodate(bh)))
                        err = -EIO;

                clear_buffer_jwrite(bh);

                JBUFFER_TRACE(jh, "ph4: unfile after journal write");
                journal_unfile_buffer(journal, jh);

                /*
                 * ->t_iobuf_list should contain only dummy buffer_heads
                 * which were created by journal_write_metadata_buffer().
                 */
                BUFFER_TRACE(bh, "dumping temporary bh");
                journal_put_journal_head(jh);
                __brelse(bh);
                J_ASSERT_BH(bh, atomic_read(&bh->b_count) == 0);
                free_buffer_head(bh);
                /* We also have to unlock and free the corresponding
                   shadowed buffer */
                jh = commit_transaction->t_shadow_list->b_tprev;
                bh = jh2bh(jh);
                clear_bit(BH_JWrite, &bh->b_state);
                J_ASSERT_BH(bh, buffer_jbddirty(bh));

                /* The metadata is now released for reuse, but we need
                   to remember it against this transaction so that when
                   we finally commit, we can do any checkpointing
                   required. */
                JBUFFER_TRACE(jh, "file as BJ_Forget");
                journal_file_buffer(jh, commit_transaction, BJ_Forget);
                /* Wake up any transactions which were waiting for this
                   IO to complete */
                wake_up_bit(&bh->b_state, BH_Unshadow);
                JBUFFER_TRACE(jh, "brelse shadowed buffer");
                __brelse(bh);
        }

        J_ASSERT (commit_transaction->t_shadow_list == NULL);
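
        /*
         * Every metadata buffer has now been written to the log through its
         * temporary BJ_IO copy, and the originals sit on BJ_Forget awaiting
         * the checkpoint processing in phase 7 below.
         */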
        jbd_debug(3, "JBD: commit phase 5\n");

        /* Here we wait for the revoke record and descriptor record buffers */
wait_for_ctlbuf:
        while (commit_transaction->t_log_list != NULL) {
                struct buffer_head *bh;

                jh = commit_transaction->t_log_list->b_tprev;
                bh = jh2bh(jh);
                if (buffer_locked(bh)) {
                        wait_on_buffer(bh);
                        goto wait_for_ctlbuf;
                }
                if (cond_resched())
                        goto wait_for_ctlbuf;

                if (unlikely(!buffer_uptodate(bh)))
                        err = -EIO;

                BUFFER_TRACE(bh, "ph5: control buffer writeout done: unfile");
                clear_buffer_jwrite(bh);
                journal_unfile_buffer(journal, jh);
                journal_put_journal_head(jh);
                __brelse(bh);           /* One for getblk */
                /* AKPM: bforget here */
        }
        jbd_debug(3, "JBD: commit phase 6\n");

        if (journal_write_commit_record(journal, commit_transaction))
                err = -EIO;

        if (err)
                journal_abort(journal, err);
        /* End of a transaction!  Finally, we can do checkpoint
           processing: any buffers committed as a result of this
           transaction can be removed from any checkpoint list it was on
           before. */

        jbd_debug(3, "JBD: commit phase 7\n");

        J_ASSERT(commit_transaction->t_sync_datalist == NULL);
        J_ASSERT(commit_transaction->t_buffers == NULL);
        J_ASSERT(commit_transaction->t_checkpoint_list == NULL);
        J_ASSERT(commit_transaction->t_iobuf_list == NULL);
        J_ASSERT(commit_transaction->t_shadow_list == NULL);
        J_ASSERT(commit_transaction->t_log_list == NULL);

restart_loop:
        /*
         * As there are other places (journal_unmap_buffer()) adding buffers
         * to this list we have to be careful and hold the j_list_lock.
         */
        spin_lock(&journal->j_list_lock);
        while (commit_transaction->t_forget) {
                transaction_t *cp_transaction;
                struct buffer_head *bh;

                jh = commit_transaction->t_forget;
                spin_unlock(&journal->j_list_lock);
                bh = jh2bh(jh);
                jbd_lock_bh_state(bh);
                J_ASSERT_JH(jh, jh->b_transaction == commit_transaction ||
                        jh->b_transaction == journal->j_running_transaction);

                /*
                 * If there is undo-protected committed data against
                 * this buffer, then we can remove it now.  If it is a
                 * buffer needing such protection, the old frozen_data
                 * field now points to a committed version of the
                 * buffer, so rotate that field to the new committed
                 * data.
                 *
                 * Otherwise, we can just throw away the frozen data now.
                 */
                if (jh->b_committed_data) {
                        jbd_free(jh->b_committed_data, bh->b_size);
                        jh->b_committed_data = NULL;
                        if (jh->b_frozen_data) {
                                jh->b_committed_data = jh->b_frozen_data;
                                jh->b_frozen_data = NULL;
                        }
                } else if (jh->b_frozen_data) {
                        jbd_free(jh->b_frozen_data, bh->b_size);
                        jh->b_frozen_data = NULL;
                }
                spin_lock(&journal->j_list_lock);
                cp_transaction = jh->b_cp_transaction;
                if (cp_transaction) {
                        JBUFFER_TRACE(jh, "remove from old cp transaction");
                        __journal_remove_checkpoint(jh);
                }

                /* Only re-checkpoint the buffer_head if it is marked
                 * dirty.  If the buffer was added to the BJ_Forget list
                 * by journal_forget, it may no longer be dirty and
                 * there's no point in keeping a checkpoint record for
                 * it. */

                /* A buffer which has been freed while still being
                 * journaled by a previous transaction may end up still
                 * being dirty here, but we want to avoid writing back
                 * that buffer in the future now that the last use has
                 * been committed.  That's not only a performance gain,
                 * it also stops aliasing problems if the buffer is left
                 * behind for writeback and gets reallocated for another
                 * use in a different page. */
                if (buffer_freed(bh)) {
                        clear_buffer_freed(bh);
                        clear_buffer_jbddirty(bh);
                }

                if (buffer_jbddirty(bh)) {
                        JBUFFER_TRACE(jh, "add to new checkpointing trans");
                        __journal_insert_checkpoint(jh, commit_transaction);
                        JBUFFER_TRACE(jh, "refile for checkpoint writeback");
                        __journal_refile_buffer(jh);
                        jbd_unlock_bh_state(bh);
                } else {
                        J_ASSERT_BH(bh, !buffer_dirty(bh));
                        /* The buffer on BJ_Forget list and not jbddirty means
                         * it has been freed by this transaction and hence it
                         * could not have been reallocated until this
                         * transaction has committed. *BUT* it could be
                         * reallocated once we have written all the data to
                         * disk and before we process the buffer on BJ_Forget
                         * list. */
                        JBUFFER_TRACE(jh, "refile or unfile freed buffer");
                        __journal_refile_buffer(jh);
                        if (!jh->b_transaction) {
                                jbd_unlock_bh_state(bh);
                                journal_remove_journal_head(bh);
                                release_buffer_page(bh);
                        } else
                                jbd_unlock_bh_state(bh);
                }
                cond_resched_lock(&journal->j_list_lock);
        }
        spin_unlock(&journal->j_list_lock);
        /*
         * This is a bit sleazy.  We use j_list_lock to protect transition
         * of a transaction into T_FINISHED state and calling
         * __journal_drop_transaction(). Otherwise we could race with
         * other checkpointing code processing the transaction...
         */
        spin_lock(&journal->j_state_lock);
        spin_lock(&journal->j_list_lock);
        /*
         * Now recheck if some buffers did not get attached to the transaction
         * while the lock was dropped...
         */
        if (commit_transaction->t_forget) {
                spin_unlock(&journal->j_list_lock);
                spin_unlock(&journal->j_state_lock);
                goto restart_loop;
        }

        /* Done with this transaction! */

        jbd_debug(3, "JBD: commit phase 8\n");

        J_ASSERT(commit_transaction->t_state == T_COMMIT);

        commit_transaction->t_state = T_FINISHED;
        J_ASSERT(commit_transaction == journal->j_committing_transaction);
        journal->j_commit_sequence = commit_transaction->t_tid;
        journal->j_committing_transaction = NULL;
        spin_unlock(&journal->j_state_lock);
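
        /*
         * If the transaction still has buffers that need checkpointing,
         * link it into the journal's circular list of checkpointing
         * transactions; otherwise it can be dropped right away.
         */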
        if (commit_transaction->t_checkpoint_list == NULL &&
            commit_transaction->t_checkpoint_io_list == NULL) {
                __journal_drop_transaction(journal, commit_transaction);
        } else {
                if (journal->j_checkpoint_transactions == NULL) {
                        journal->j_checkpoint_transactions = commit_transaction;
                        commit_transaction->t_cpnext = commit_transaction;
                        commit_transaction->t_cpprev = commit_transaction;
                } else {
                        commit_transaction->t_cpnext =
                                journal->j_checkpoint_transactions;
                        commit_transaction->t_cpprev =
                                commit_transaction->t_cpnext->t_cpprev;
                        commit_transaction->t_cpnext->t_cpprev =
                                commit_transaction;
                        commit_transaction->t_cpprev->t_cpnext =
                                commit_transaction;
                }
        }
        spin_unlock(&journal->j_list_lock);

        jbd_debug(1, "JBD: commit %d complete, head %d\n",
                  journal->j_commit_sequence, journal->j_tail_sequence);

        wake_up(&journal->j_wait_done_commit);
}