[PATCH] Manage jbd allocations from its own slabs
author Badari Pulavarty <pbadari@us.ibm.com>
Sun, 27 Aug 2006 08:23:52 +0000 (01:23 -0700)
committer Linus Torvalds <torvalds@g5.osdl.org>
Sun, 27 Aug 2006 18:01:32 +0000 (11:01 -0700)
JBD currently allocates commit and frozen buffers from slabs.  With
CONFIG_SLAB_DEBUG enabled, it is possible for an allocation to cross a
page boundary, causing I/O problems.

https://bugzilla.redhat.com/bugzilla/show_bug.cgi?id=200127

So, instead of allocating these from regular slabs, manage the
allocations from JBD's own slabs and disable slab debugging for those
slabs.

[akpm@osdl.org: cleanups]
Signed-off-by: Badari Pulavarty <pbadari@us.ibm.com>
Signed-off-by: Andrew Morton <akpm@osdl.org>
Signed-off-by: Linus Torvalds <torvalds@osdl.org>
fs/jbd/commit.c
fs/jbd/journal.c
fs/jbd/transaction.c
include/linux/jbd.h
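
Why forcing the slab's alignment to equal its object size is sufficient: a
power-of-two buffer no larger than the page size that starts on a multiple of
its own size cannot straddle a page boundary.  The userspace sketch below is
illustrative only, assuming 4k pages; aligned_object() is a made-up stand-in
for an allocation from such a cache, not code from this patch.

#include <assert.h>
#include <stdint.h>
#include <stdio.h>
#include <stdlib.h>

#define DEMO_PAGE_SIZE 4096UL		/* assumed page size for the demo */

/*
 * Made-up stand-in for an object handed out by a cache created with
 * kmem_cache_create(name, size, size, 0, NULL, NULL): posix_memalign()
 * gives the same guarantee that the start address is a multiple of size.
 */
static void *aligned_object(size_t size)
{
	void *p = NULL;

	if (posix_memalign(&p, size, size) != 0)
		return NULL;
	return p;
}

int main(void)
{
	size_t sizes[] = { 1024, 2048, 4096 };	/* block sizes <= page size */
	size_t i;

	for (i = 0; i < sizeof(sizes) / sizeof(sizes[0]); i++) {
		void *obj = aligned_object(sizes[i]);
		uintptr_t start, end;

		if (!obj)
			return 1;
		start = (uintptr_t)obj;
		end = start + sizes[i] - 1;
		/*
		 * start is a multiple of size and size divides the page
		 * size, so the first and last byte share one page.
		 */
		assert(start / DEMO_PAGE_SIZE == end / DEMO_PAGE_SIZE);
		printf("%zu-byte object at %p stays within one page\n",
		       sizes[i], obj);
		free(obj);
	}
	return 0;
}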

fs/jbd/commit.c
index 0971814c38b8572eca8ca90bbce4d7f307b69fb9..42da6078431192aafdd68dbfe712afad1b9faa3e 100644
@@ -261,7 +261,7 @@ void journal_commit_transaction(journal_t *journal)
                        struct buffer_head *bh = jh2bh(jh);
 
                        jbd_lock_bh_state(bh);
-                       kfree(jh->b_committed_data);
+                       jbd_slab_free(jh->b_committed_data, bh->b_size);
                        jh->b_committed_data = NULL;
                        jbd_unlock_bh_state(bh);
                }
@@ -745,14 +745,14 @@ restart_loop:
                 * Otherwise, we can just throw away the frozen data now.
                 */
                if (jh->b_committed_data) {
-                       kfree(jh->b_committed_data);
+                       jbd_slab_free(jh->b_committed_data, bh->b_size);
                        jh->b_committed_data = NULL;
                        if (jh->b_frozen_data) {
                                jh->b_committed_data = jh->b_frozen_data;
                                jh->b_frozen_data = NULL;
                        }
                } else if (jh->b_frozen_data) {
-                       kfree(jh->b_frozen_data);
+                       jbd_slab_free(jh->b_frozen_data, bh->b_size);
                        jh->b_frozen_data = NULL;
                }
 
fs/jbd/journal.c
index 8c9b28dff1197d85a3a720f2988b25834f7c495b..f66724ce443a6ab7be44db95aa68f9359c451427 100644
@@ -84,6 +84,7 @@ EXPORT_SYMBOL(journal_force_commit);
 
 static int journal_convert_superblock_v1(journal_t *, journal_superblock_t *);
 static void __journal_abort_soft (journal_t *journal, int errno);
+static int journal_create_jbd_slab(size_t slab_size);
 
 /*
  * Helper function used to manage commit timeouts
@@ -328,10 +329,10 @@ repeat:
                char *tmp;
 
                jbd_unlock_bh_state(bh_in);
-               tmp = jbd_rep_kmalloc(bh_in->b_size, GFP_NOFS);
+               tmp = jbd_slab_alloc(bh_in->b_size, GFP_NOFS);
                jbd_lock_bh_state(bh_in);
                if (jh_in->b_frozen_data) {
-                       kfree(tmp);
+                       jbd_slab_free(tmp, bh_in->b_size);
                        goto repeat;
                }
 
@@ -1069,17 +1070,17 @@ static int load_superblock(journal_t *journal)
 int journal_load(journal_t *journal)
 {
        int err;
+       journal_superblock_t *sb;
 
        err = load_superblock(journal);
        if (err)
                return err;
 
+       sb = journal->j_superblock;
        /* If this is a V2 superblock, then we have to check the
         * features flags on it. */
 
        if (journal->j_format_version >= 2) {
-               journal_superblock_t *sb = journal->j_superblock;
-
                if ((sb->s_feature_ro_compat &
                     ~cpu_to_be32(JFS_KNOWN_ROCOMPAT_FEATURES)) ||
                    (sb->s_feature_incompat &
@@ -1090,6 +1091,13 @@ int journal_load(journal_t *journal)
                }
        }
 
+       /*
+        * Create a slab for this blocksize
+        */
+       err = journal_create_jbd_slab(cpu_to_be32(sb->s_blocksize));
+       if (err)
+               return err;
+
        /* Let the recovery code check whether it needs to recover any
         * data from the journal. */
        if (journal_recover(journal))
@@ -1611,6 +1619,77 @@ void * __jbd_kmalloc (const char *where, size_t size, gfp_t flags, int retry)
        return kmalloc(size, flags | (retry ? __GFP_NOFAIL : 0));
 }
 
+/*
+ * jbd slab management: create 1k, 2k, 4k, 8k slabs as needed
+ * and allocate frozen and commit buffers from these slabs.
+ *
+ * Reason for doing this is to avoid, SLAB_DEBUG - since it could
+ * cause bh to cross page boundary.
+ */
+
+#define JBD_MAX_SLABS 5
+#define JBD_SLAB_INDEX(size)  (size >> 11)
+
+static kmem_cache_t *jbd_slab[JBD_MAX_SLABS];
+static const char *jbd_slab_names[JBD_MAX_SLABS] = {
+       "jbd_1k", "jbd_2k", "jbd_4k", NULL, "jbd_8k"
+};
+
+static void journal_destroy_jbd_slabs(void)
+{
+       int i;
+
+       for (i = 0; i < JBD_MAX_SLABS; i++) {
+               if (jbd_slab[i])
+                       kmem_cache_destroy(jbd_slab[i]);
+               jbd_slab[i] = NULL;
+       }
+}
+
+static int journal_create_jbd_slab(size_t slab_size)
+{
+       int i = JBD_SLAB_INDEX(slab_size);
+
+       BUG_ON(i >= JBD_MAX_SLABS);
+
+       /*
+        * Check if we already have a slab created for this size
+        */
+       if (jbd_slab[i])
+               return 0;
+
+       /*
+        * Create a slab and force alignment to be same as slabsize -
+        * this will make sure that allocations won't cross the page
+        * boundary.
+        */
+       jbd_slab[i] = kmem_cache_create(jbd_slab_names[i],
+                               slab_size, slab_size, 0, NULL, NULL);
+       if (!jbd_slab[i]) {
+               printk(KERN_EMERG "JBD: no memory for jbd_slab cache\n");
+               return -ENOMEM;
+       }
+       return 0;
+}
+
+void * jbd_slab_alloc(size_t size, gfp_t flags)
+{
+       int idx;
+
+       idx = JBD_SLAB_INDEX(size);
+       BUG_ON(jbd_slab[idx] == NULL);
+       return kmem_cache_alloc(jbd_slab[idx], flags | __GFP_NOFAIL);
+}
+
+void jbd_slab_free(void *ptr,  size_t size)
+{
+       int idx;
+
+       idx = JBD_SLAB_INDEX(size);
+       BUG_ON(jbd_slab[idx] == NULL);
+       kmem_cache_free(jbd_slab[idx], ptr);
+}
+
 /*
  * Journal_head storage management
  */
@@ -1799,13 +1878,13 @@ static void __journal_remove_journal_head(struct buffer_head *bh)
                                printk(KERN_WARNING "%s: freeing "
                                                "b_frozen_data\n",
                                                __FUNCTION__);
-                               kfree(jh->b_frozen_data);
+                               jbd_slab_free(jh->b_frozen_data, bh->b_size);
                        }
                        if (jh->b_committed_data) {
                                printk(KERN_WARNING "%s: freeing "
                                                "b_committed_data\n",
                                                __FUNCTION__);
-                               kfree(jh->b_committed_data);
+                               jbd_slab_free(jh->b_committed_data, bh->b_size);
                        }
                        bh->b_private = NULL;
                        jh->b_bh = NULL;        /* debug, really */
@@ -1961,6 +2040,7 @@ static void journal_destroy_caches(void)
        journal_destroy_revoke_caches();
        journal_destroy_journal_head_cache();
        journal_destroy_handle_cache();
+       journal_destroy_jbd_slabs();
 }
 
 static int __init journal_init(void)
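
A detail of the slab-array layout above that is easy to miss:
JBD_SLAB_INDEX(size) is simply size >> 11, so 1k, 2k, 4k and 8k buffers map to
slots 0, 1, 2 and 4 of jbd_slab[], which is why jbd_slab_names[] holds a NULL
at index 3 (a 6k block size never occurs).  A quick, throwaway check of that
mapping, illustrative only and not taken from the patch:

#include <stdio.h>

/* Same shift as the patch's JBD_SLAB_INDEX(), with parentheses added. */
#define DEMO_SLAB_INDEX(size)	((size) >> 11)

int main(void)
{
	unsigned int sizes[] = { 1024, 2048, 4096, 8192 };
	unsigned int i;

	for (i = 0; i < sizeof(sizes) / sizeof(sizes[0]); i++)
		printf("block size %5u -> jbd_slab[%u]\n",
		       sizes[i], DEMO_SLAB_INDEX(sizes[i]));
	return 0;
}
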
fs/jbd/transaction.c
index 508b2ea91f43d090dca79617ac2060b5e36f635a..de2e4cbbf79a7e0bfdec1b4751072c5ebeab16a4 100644
@@ -666,8 +666,9 @@ repeat:
                        if (!frozen_buffer) {
                                JBUFFER_TRACE(jh, "allocate memory for buffer");
                                jbd_unlock_bh_state(bh);
-                               frozen_buffer = jbd_kmalloc(jh2bh(jh)->b_size,
-                                                           GFP_NOFS);
+                               frozen_buffer =
+                                       jbd_slab_alloc(jh2bh(jh)->b_size,
+                                                        GFP_NOFS);
                                if (!frozen_buffer) {
                                        printk(KERN_EMERG
                                               "%s: OOM for frozen_buffer\n",
@@ -879,7 +880,7 @@ int journal_get_undo_access(handle_t *handle, struct buffer_head *bh)
 
 repeat:
        if (!jh->b_committed_data) {
-               committed_data = jbd_kmalloc(jh2bh(jh)->b_size, GFP_NOFS);
+               committed_data = jbd_slab_alloc(jh2bh(jh)->b_size, GFP_NOFS);
                if (!committed_data) {
                        printk(KERN_EMERG "%s: No memory for committed data\n",
                                __FUNCTION__);
@@ -906,7 +907,7 @@ repeat:
 out:
        journal_put_journal_head(jh);
        if (unlikely(committed_data))
-               kfree(committed_data);
+               jbd_slab_free(committed_data, bh->b_size);
        return err;
 }
 
include/linux/jbd.h
index 20eb34403d0ce08fbde7126987645c86b4fa697c..a04c154c5207954c39d58ba2ce95169b37ea57ae 100644
@@ -72,6 +72,9 @@ extern int journal_enable_debug;
 #endif
 
 extern void * __jbd_kmalloc (const char *where, size_t size, gfp_t flags, int retry);
+extern void * jbd_slab_alloc(size_t size, gfp_t flags);
+extern void jbd_slab_free(void *ptr, size_t size);
+
 #define jbd_kmalloc(size, flags) \
        __jbd_kmalloc(__FUNCTION__, (size), (flags), journal_oom_retry)
 #define jbd_rep_kmalloc(size, flags) \