Merge branch 'master' of master.kernel.org:/pub/scm/linux/kernel/git/davem/net-2.6
diff --git a/fs/jbd/journal.c b/fs/jbd/journal.c
index 06ab3c10b1b894af6cc03210a82c08f137c96024..5d9fec0b7ebd561501ed7973ff0c226c1db1aa2d 100644
--- a/fs/jbd/journal.c
+++ b/fs/jbd/journal.c
@@ -83,7 +83,6 @@ EXPORT_SYMBOL(journal_force_commit);
 
 static int journal_convert_superblock_v1(journal_t *, journal_superblock_t *);
 static void __journal_abort_soft (journal_t *journal, int errno);
-static int journal_create_jbd_slab(size_t slab_size);
 
 /*
  * Helper function used to manage commit timeouts
@@ -218,7 +217,7 @@ static int journal_start_thread(journal_t *journal)
        if (IS_ERR(t))
                return PTR_ERR(t);
 
-       wait_event(journal->j_wait_done_commit, journal->j_task != 0);
+       wait_event(journal->j_wait_done_commit, journal->j_task != NULL);
        return 0;
 }
 
@@ -230,7 +229,8 @@ static void journal_kill_thread(journal_t *journal)
        while (journal->j_task) {
                wake_up(&journal->j_wait_commit);
                spin_unlock(&journal->j_state_lock);
-               wait_event(journal->j_wait_done_commit, journal->j_task == 0);
+               wait_event(journal->j_wait_done_commit,
+                               journal->j_task == NULL);
                spin_lock(&journal->j_state_lock);
        }
        spin_unlock(&journal->j_state_lock);
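
These two hunks only swap the sentinel in the wait conditions: j_task is a task_struct pointer, so testing it against NULL instead of 0 is the idiomatic (and sparse-clean) spelling; behavior is unchanged. The handshake itself deserves a note: journal_start_thread() blocks until the commit thread has published itself in j_task, and journal_kill_thread() keeps waking the thread until it clears j_task on its way out. A minimal userspace sketch of that start/stop handshake, with pthreads and condition variables standing in for kthreads and wait queues (an illustrative analogy, not kernel code; all names hypothetical):

/* Build: cc -pthread handshake.c -o handshake */
#include <pthread.h>

static pthread_mutex_t j_state_lock = PTHREAD_MUTEX_INITIALIZER;
static pthread_cond_t j_wait_commit = PTHREAD_COND_INITIALIZER;      /* kicks the thread */
static pthread_cond_t j_wait_done_commit = PTHREAD_COND_INITIALIZER; /* thread's replies */
static pthread_t *j_task;       /* NULL until the thread has published itself */
static int unmount_requested;

static void *commit_thread(void *self)
{
	pthread_mutex_lock(&j_state_lock);
	j_task = self;                          /* like j_task = current */
	pthread_cond_broadcast(&j_wait_done_commit);
	while (!unmount_requested)              /* the real loop would commit here */
		pthread_cond_wait(&j_wait_commit, &j_state_lock);
	j_task = NULL;                          /* announce exit */
	pthread_cond_broadcast(&j_wait_done_commit);
	pthread_mutex_unlock(&j_state_lock);
	return NULL;
}

int main(void)
{
	pthread_t t;

	pthread_create(&t, NULL, commit_thread, &t);

	pthread_mutex_lock(&j_state_lock);      /* journal_start_thread() */
	while (j_task == NULL)
		pthread_cond_wait(&j_wait_done_commit, &j_state_lock);

	unmount_requested = 1;                  /* journal_kill_thread() */
	while (j_task != NULL) {
		pthread_cond_broadcast(&j_wait_commit);
		pthread_cond_wait(&j_wait_done_commit, &j_state_lock);
	}
	pthread_mutex_unlock(&j_state_lock);
	return pthread_join(t, NULL);
}

As in the kernel code, every transition of j_task happens under the state lock, so neither side can miss a wakeup.
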
@@ -334,10 +334,10 @@ repeat:
                char *tmp;
 
                jbd_unlock_bh_state(bh_in);
-               tmp = jbd_slab_alloc(bh_in->b_size, GFP_NOFS);
+               tmp = jbd_alloc(bh_in->b_size, GFP_NOFS);
                jbd_lock_bh_state(bh_in);
                if (jh_in->b_frozen_data) {
-                       jbd_slab_free(tmp, bh_in->b_size);
+                       jbd_free(tmp, bh_in->b_size);
                        goto repeat;
                }
 
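
The rename from jbd_slab_alloc()/jbd_slab_free() to jbd_alloc()/jbd_free() leaves the surrounding pattern untouched, and the pattern is the interesting part: the buffer is allocated with the bh state lock dropped (the allocation may sleep), the lock is retaken, and b_frozen_data is rechecked; if another thread installed a frozen buffer in that window, the fresh allocation is freed and the code loops. A minimal sketch of the same optimistic allocate-recheck-free shape in userspace C (hypothetical struct and names; pthreads and malloc stand in):

#include <pthread.h>
#include <stdlib.h>

struct jh {
	pthread_mutex_t lock;   /* analog of the bh state lock */
	void *frozen_data;      /* set once, by whoever wins the race */
};

/* Return jh->frozen_data, allocating it outside the lock if needed. */
static void *get_frozen(struct jh *jh, size_t size)
{
	void *tmp;

repeat:
	pthread_mutex_lock(&jh->lock);
	if (jh->frozen_data) {
		void *ret = jh->frozen_data;
		pthread_mutex_unlock(&jh->lock);
		return ret;
	}
	pthread_mutex_unlock(&jh->lock);        /* drop the lock: malloc may block */
	tmp = malloc(size);
	if (tmp == NULL)
		return NULL;
	pthread_mutex_lock(&jh->lock);
	if (jh->frozen_data) {                  /* lost the race in the window */
		pthread_mutex_unlock(&jh->lock);
		free(tmp);                      /* like jbd_free(tmp, ...) */
		goto repeat;
	}
	jh->frozen_data = tmp;
	pthread_mutex_unlock(&jh->lock);
	return tmp;
}

Dropping the lock around the allocation is the point of the dance: it keeps a potentially sleeping allocation out of the critical section, at the cost of occasionally allocating twice.
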
@@ -654,7 +654,7 @@ static journal_t * journal_init_common (void)
        journal_t *journal;
        int err;
 
-       journal = jbd_kmalloc(sizeof(*journal), GFP_KERNEL);
+       journal = kmalloc(sizeof(*journal), GFP_KERNEL);
        if (!journal)
                goto fail;
        memset(journal, 0, sizeof(*journal));
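
With the jbd_kmalloc() wrapper gone (its only job was the optional __GFP_NOFAIL retry, per the __jbd_kmalloc() removal further down), this site is a plain kmalloc() followed by memset(). That pair is the open-coded form of kzalloc(), which already existed in kernels of this vintage, so a natural follow-up cleanup would be (a sketch, not part of this patch):

	journal = kzalloc(sizeof(*journal), GFP_KERNEL);
	if (!journal)
		goto fail;
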
@@ -1095,13 +1095,6 @@ int journal_load(journal_t *journal)
                }
        }
 
-       /*
-        * Create a slab for this blocksize
-        */
-       err = journal_create_jbd_slab(be32_to_cpu(sb->s_blocksize));
-       if (err)
-               return err;
-
        /* Let the recovery code check whether it needs to recover any
         * data from the journal. */
        if (journal_recover(journal))
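
journal_load() no longer has to create a per-blocksize slab before recovery can run, because frozen and commit buffers now come from jbd_alloc()/jbd_free(). Their definitions are not part of this diff (they live in include/linux/jbd.h); a plausible sketch, assuming they are thin wrappers over the page allocator, is:

/* Plausible definitions, in the spirit of include/linux/jbd.h (unverified): */
static inline void *jbd_alloc(size_t size, gfp_t flags)
{
	return (void *)__get_free_pages(flags, get_order(size));
}

static inline void jbd_free(void *ptr, size_t size)
{
	free_pages((unsigned long)ptr, get_order(size));
}

Since __get_free_pages() hands back whole, naturally aligned pages, even a 1k buffer occupies (and partly wastes) a full page, but it can never cross a page boundary, which is the invariant the removed slabs existed to provide.
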
@@ -1614,86 +1607,6 @@ int journal_blocks_per_page(struct inode *inode)
        return 1 << (PAGE_CACHE_SHIFT - inode->i_sb->s_blocksize_bits);
 }
 
-/*
- * Simple support for retrying memory allocations.  Introduced to help to
- * debug different VM deadlock avoidance strategies.
- */
-void * __jbd_kmalloc (const char *where, size_t size, gfp_t flags, int retry)
-{
-       return kmalloc(size, flags | (retry ? __GFP_NOFAIL : 0));
-}
-
-/*
- * jbd slab management: create 1k, 2k, 4k, 8k slabs as needed
- * and allocate frozen and commit buffers from these slabs.
- *
- * Reason for doing this is to avoid, SLAB_DEBUG - since it could
- * cause bh to cross page boundary.
- */
-
-#define JBD_MAX_SLABS 5
-#define JBD_SLAB_INDEX(size)  (size >> 11)
-
-static struct kmem_cache *jbd_slab[JBD_MAX_SLABS];
-static const char *jbd_slab_names[JBD_MAX_SLABS] = {
-       "jbd_1k", "jbd_2k", "jbd_4k", NULL, "jbd_8k"
-};
-
-static void journal_destroy_jbd_slabs(void)
-{
-       int i;
-
-       for (i = 0; i < JBD_MAX_SLABS; i++) {
-               if (jbd_slab[i])
-                       kmem_cache_destroy(jbd_slab[i]);
-               jbd_slab[i] = NULL;
-       }
-}
-
-static int journal_create_jbd_slab(size_t slab_size)
-{
-       int i = JBD_SLAB_INDEX(slab_size);
-
-       BUG_ON(i >= JBD_MAX_SLABS);
-
-       /*
-        * Check if we already have a slab created for this size
-        */
-       if (jbd_slab[i])
-               return 0;
-
-       /*
-        * Create a slab and force alignment to be same as slabsize -
-        * this will make sure that allocations won't cross the page
-        * boundary.
-        */
-       jbd_slab[i] = kmem_cache_create(jbd_slab_names[i],
-                               slab_size, slab_size, 0, NULL);
-       if (!jbd_slab[i]) {
-               printk(KERN_EMERG "JBD: no memory for jbd_slab cache\n");
-               return -ENOMEM;
-       }
-       return 0;
-}
-
-void * jbd_slab_alloc(size_t size, gfp_t flags)
-{
-       int idx;
-
-       idx = JBD_SLAB_INDEX(size);
-       BUG_ON(jbd_slab[idx] == NULL);
-       return kmem_cache_alloc(jbd_slab[idx], flags | __GFP_NOFAIL);
-}
-
-void jbd_slab_free(void *ptr,  size_t size)
-{
-       int idx;
-
-       idx = JBD_SLAB_INDEX(size);
-       BUG_ON(jbd_slab[idx] == NULL);
-       kmem_cache_free(jbd_slab[idx], ptr);
-}
-
 /*
  * Journal_head storage management
  */
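
For reference, the machinery removed above sized its cache array with JBD_SLAB_INDEX(size) = size >> 11, so the supported blocksizes map as 1k -> 0, 2k -> 1, 4k -> 2 and 8k -> 4; no power-of-two blocksize shifts to 3, which is why jbd_slab_names has a NULL hole at that index. Forcing each slab's alignment equal to its object size is what guaranteed a sub-page buffer never straddles a page (the generic kmalloc caches lose that guarantee under SLAB_DEBUG red-zoning, per the removed comment). A small userspace check of both the index math and the alignment invariant (aligned_alloc() stands in for the slab's alignment guarantee):

#include <assert.h>
#include <stdint.h>
#include <stdio.h>
#include <stdlib.h>

#define JBD_SLAB_INDEX(size)	((size) >> 11)	/* the removed macro, parenthesized */
#define PAGE_SIZE		4096u

int main(void)
{
	static const size_t sizes[] = { 1024, 2048, 4096, 8192 };

	for (int i = 0; i < 4; i++) {
		size_t sz = sizes[i];
		/* aligned_alloc(sz, sz) mimics "alignment == slabsize" */
		char *p = aligned_alloc(sz, sz);

		assert(p);
		printf("%5zu -> slab index %zu\n", sz, JBD_SLAB_INDEX(sz));
		/* a size-aligned object of size <= PAGE_SIZE cannot span pages */
		if (sz <= PAGE_SIZE)
			assert((uintptr_t)p / PAGE_SIZE ==
			       ((uintptr_t)p + sz - 1) / PAGE_SIZE);
		free(p);
	}
	return 0;
}
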
@@ -1710,7 +1623,7 @@ static int journal_init_journal_head_cache(void)
        journal_head_cache = kmem_cache_create("journal_head",
                                sizeof(struct journal_head),
                                0,              /* offset */
-                               0,              /* flags */
+                               SLAB_TEMPORARY, /* flags */
                                NULL);          /* ctor */
        retval = 0;
        if (journal_head_cache == 0) {
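
SLAB_TEMPORARY, here and in the journal_handle hunk below, hints that objects from this cache are short-lived. In kernels of this era it appears to be a straight alias (an assumption worth verifying against include/linux/slab.h of this tree):

#define SLAB_TEMPORARY	SLAB_RECLAIM_ACCOUNT	/* objects are short-lived */

so pages backing these caches would be accounted as reclaimable slab (SReclaimable in /proc/meminfo), a sensible classification for journal_head and handle_t objects that churn as fast as these do.
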
@@ -1739,14 +1652,14 @@ static struct journal_head *journal_alloc_journal_head(void)
        atomic_inc(&nr_journal_heads);
 #endif
        ret = kmem_cache_alloc(journal_head_cache, GFP_NOFS);
-       if (ret == 0) {
+       if (ret == NULL) {
                jbd_debug(1, "out of memory for journal_head\n");
                if (time_after(jiffies, last_warning + 5*HZ)) {
                        printk(KERN_NOTICE "ENOMEM in %s, retrying.\n",
                               __FUNCTION__);
                        last_warning = jiffies;
                }
-               while (ret == 0) {
+               while (ret == NULL) {
                        yield();
                        ret = kmem_cache_alloc(journal_head_cache, GFP_NOFS);
                }
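
The == 0 to == NULL conversions above sit inside JBD's allocation path of last resort: if the journal_head cache is exhausted, the code warns at most once every five seconds, then yields and retries indefinitely rather than failing the journal operation. A userspace sketch of that retry-with-rate-limited-warning shape (hypothetical names; sched_yield() and CLOCK_MONOTONIC stand in for yield() and jiffies):

#include <sched.h>
#include <stdio.h>
#include <stdlib.h>
#include <time.h>

static void *try_alloc(void)    /* stand-in for kmem_cache_alloc(..., GFP_NOFS) */
{
	return malloc(128);
}

static void *alloc_head_retry(void)
{
	static time_t last_warning;     /* analog of the jiffies timestamp */
	void *ret = try_alloc();

	if (ret == NULL) {
		struct timespec now;

		clock_gettime(CLOCK_MONOTONIC, &now);
		if (now.tv_sec > last_warning + 5) {    /* at most one warning per 5s */
			fprintf(stderr, "out of memory, retrying\n");
			last_warning = now.tv_sec;
		}
		while (ret == NULL) {
			sched_yield();  /* let reclaim / other threads run */
			ret = try_alloc();
		}
	}
	return ret;
}

int main(void)
{
	free(alloc_head_retry());
	return 0;
}
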
@@ -1881,13 +1794,13 @@ static void __journal_remove_journal_head(struct buffer_head *bh)
                                printk(KERN_WARNING "%s: freeing "
                                                "b_frozen_data\n",
                                                __FUNCTION__);
-                               jbd_slab_free(jh->b_frozen_data, bh->b_size);
+                               jbd_free(jh->b_frozen_data, bh->b_size);
                        }
                        if (jh->b_committed_data) {
                                printk(KERN_WARNING "%s: freeing "
                                                "b_committed_data\n",
                                                __FUNCTION__);
-                               jbd_slab_free(jh->b_committed_data, bh->b_size);
+                               jbd_free(jh->b_committed_data, bh->b_size);
                        }
                        bh->b_private = NULL;
                        jh->b_bh = NULL;        /* debug, really */
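
A note on why these two sites warn before freeing: by the time __journal_remove_journal_head() runs, b_frozen_data and b_committed_data should already have been released by the commit path, so the jbd_free() calls here are leak recovery, and the printk exists to flag the underlying bug. The same defensive-teardown shape in userspace C (hypothetical names):

#include <stdio.h>
#include <stdlib.h>

struct head {
	void *frozen_data;      /* should both be NULL by teardown time */
	void *committed_data;
};

static void destroy_head(struct head *h)
{
	/* Teardown frees leftovers, but loudly: a non-NULL pointer here
	 * means an earlier path forgot to release it. */
	if (h->frozen_data) {
		fprintf(stderr, "%s: freeing frozen_data\n", __func__);
		free(h->frozen_data);
	}
	if (h->committed_data) {
		fprintf(stderr, "%s: freeing committed_data\n", __func__);
		free(h->committed_data);
	}
	free(h);
}

int main(void)
{
	struct head *h = calloc(1, sizeof(*h));

	if (!h)
		return 1;
	h->frozen_data = malloc(16);    /* simulate a leak */
	destroy_head(h);                /* warns, then recovers the memory */
	return 0;
}
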
@@ -2006,7 +1919,7 @@ static int __init journal_init_handle_cache(void)
        jbd_handle_cache = kmem_cache_create("journal_handle",
                                sizeof(handle_t),
                                0,              /* offset */
-                               0,              /* flags */
+                               SLAB_TEMPORARY, /* flags */
                                NULL);          /* ctor */
        if (jbd_handle_cache == NULL) {
                printk(KERN_EMERG "JBD: failed to create handle cache\n");
@@ -2042,7 +1955,6 @@ static void journal_destroy_caches(void)
        journal_destroy_revoke_caches();
        journal_destroy_journal_head_cache();
        journal_destroy_handle_cache();
-       journal_destroy_jbd_slabs();
 }
 
 static int __init journal_init(void)