index 86f5d6e995bbb98ac56785efab48091d4c611133..2c655532f5efcefac182e20cd2bb325decb92407 100644
--- a/mm/slab.c
+++ b/mm/slab.c
 #include       <linux/nodemask.h>
 #include       <linux/mempolicy.h>
 #include       <linux/mutex.h>
+#include       <linux/fault-inject.h>
 #include       <linux/rtmutex.h>
 
 #include       <asm/cacheflush.h>
@@ -945,7 +946,8 @@ static void __devinit start_cpu_timer(int cpu)
        if (keventd_up() && reap_work->work.func == NULL) {
                init_reap_node(cpu);
                INIT_DELAYED_WORK(reap_work, cache_reap);
-               schedule_delayed_work_on(cpu, reap_work, HZ + 3 * cpu);
+               schedule_delayed_work_on(cpu, reap_work,
+                                       __round_jiffies_relative(HZ, cpu));
        }
 }
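
Like the cache_reap() changes further down, this hunk drops the arbitrary per-CPU stagger of HZ + 3 * cpu ticks and instead schedules the reap timer with __round_jiffies_relative(HZ, cpu), which snaps the expiry to a whole-second boundary plus a small per-CPU skew. Timers rounded this way tend to expire together on each CPU, so the system takes fewer wakeups, while the skew keeps all CPUs from reaping at exactly the same instant. Below is only a conceptual sketch of that rounding; example_round_relative() is a made-up name, and the real __round_jiffies_relative() additionally handles rounding direction and the case where rounding would consume the whole timeout.

#include <linux/jiffies.h>	/* for jiffies and HZ */

/* Illustration only, not the kernel's implementation. */
static unsigned long example_round_relative(unsigned long delay, int cpu)
{
	unsigned long j = jiffies + delay + cpu * 3;	/* absolute expiry plus per-CPU skew */

	j += HZ - 1;
	j -= j % HZ;					/* snap up to the next second boundary */

	return j - cpu * 3 - jiffies;			/* drop the skew, convert back to relative */
}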
 
@@ -3088,12 +3090,89 @@ static void *cache_alloc_debugcheck_after(struct kmem_cache *cachep,
 #define cache_alloc_debugcheck_after(a,b,objp,d) (objp)
 #endif
 
+#ifdef CONFIG_FAILSLAB
+
+static struct failslab_attr {
+
+       struct fault_attr attr;
+
+       u32 ignore_gfp_wait;
+#ifdef CONFIG_FAULT_INJECTION_DEBUG_FS
+       struct dentry *ignore_gfp_wait_file;
+#endif
+
+} failslab = {
+       .attr = FAULT_ATTR_INITIALIZER,
+       .ignore_gfp_wait = 1,
+};
+
+static int __init setup_failslab(char *str)
+{
+       return setup_fault_attr(&failslab.attr, str);
+}
+__setup("failslab=", setup_failslab);
+
+static int should_failslab(struct kmem_cache *cachep, gfp_t flags)
+{
+       if (cachep == &cache_cache)
+               return 0;
+       if (flags & __GFP_NOFAIL)
+               return 0;
+       if (failslab.ignore_gfp_wait && (flags & __GFP_WAIT))
+               return 0;
+
+       return should_fail(&failslab.attr, obj_size(cachep));
+}
+
+#ifdef CONFIG_FAULT_INJECTION_DEBUG_FS
+
+static int __init failslab_debugfs(void)
+{
+       mode_t mode = S_IFREG | S_IRUSR | S_IWUSR;
+       struct dentry *dir;
+       int err;
+
+       err = init_fault_attr_dentries(&failslab.attr, "failslab");
+       if (err)
+               return err;
+       dir = failslab.attr.dentries.dir;
+
+       failslab.ignore_gfp_wait_file =
+               debugfs_create_bool("ignore-gfp-wait", mode, dir,
+                                     &failslab.ignore_gfp_wait);
+
+       if (!failslab.ignore_gfp_wait_file) {
+               err = -ENOMEM;
+               debugfs_remove(failslab.ignore_gfp_wait_file);
+               cleanup_fault_attr_dentries(&failslab.attr);
+       }
+
+       return err;
+}
+
+late_initcall(failslab_debugfs);
+
+#endif /* CONFIG_FAULT_INJECTION_DEBUG_FS */
+
+#else /* CONFIG_FAILSLAB */
+
+static inline int should_failslab(struct kmem_cache *cachep, gfp_t flags)
+{
+       return 0;
+}
+
+#endif /* CONFIG_FAILSLAB */
+
 static inline void *____cache_alloc(struct kmem_cache *cachep, gfp_t flags)
 {
        void *objp;
        struct array_cache *ac;
 
        check_irq_off();
+
+       if (should_failslab(cachep, flags))
+               return NULL;
+
        ac = cpu_cache_get(cachep);
        if (likely(ac->avail)) {
                STATS_INC_ALLOCHIT(cachep);
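
The CONFIG_FAILSLAB block added above wires the slab allocator into the generic fault-injection framework: ____cache_alloc() now calls should_failslab() first, which uses should_fail() to decide whether to fail the allocation artificially, while always exempting the bootstrap cache_cache, __GFP_NOFAIL allocations and, by default, __GFP_WAIT allocations. The attribute is configured either with the failslab= boot parameter, handed to setup_fault_attr() (the generic <interval>,<probability>,<space>,<times> string), or, with CONFIG_FAULT_INJECTION_DEBUG_FS, through the debugfs entries created in failslab_debugfs(). The sketch below shows the same pattern reused for some other hypothetical subsystem; the my_* names are made up, and only the <linux/fault-inject.h> calls already used by this patch are assumed.

#include <linux/fault-inject.h>

/* Hypothetical subsystem hook, mirroring the failslab pattern above. */
static struct my_fail_attr {
	struct fault_attr attr;
	u32 ignore_gfp_wait;
} my_fail = {
	.attr = FAULT_ATTR_INITIALIZER,
	.ignore_gfp_wait = 1,		/* by default only fail atomic allocations */
};

static int __init setup_my_fail(char *str)
{
	return setup_fault_attr(&my_fail.attr, str);	/* "my_fail=" boot parameter */
}
__setup("my_fail=", setup_my_fail);

static int my_should_fail(gfp_t flags, size_t size)
{
	if (flags & __GFP_NOFAIL)
		return 0;				/* caller cannot handle failure */
	if (my_fail.ignore_gfp_wait && (flags & __GFP_WAIT))
		return 0;				/* allocation may sleep and retry */
	return should_fail(&my_fail.attr, size);	/* let the fault_attr decide */
}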
@@ -3182,7 +3261,7 @@ retry:
        for (z = zonelist->zones; *z && !obj; z++) {
                nid = zone_to_nid(*z);
 
-               if (cpuset_zone_allowed(*z, flags) &&
+               if (cpuset_zone_allowed(*z, flags | __GFP_HARDWALL) &&
                        cache->nodelists[nid] &&
                        cache->nodelists[nid]->free_objects)
                                obj = ____cache_alloc_node(cache,
@@ -3928,7 +4007,7 @@ static void cache_reap(struct work_struct *unused)
        if (!mutex_trylock(&cache_chain_mutex)) {
                /* Give up. Setup the next iteration. */
                schedule_delayed_work(&__get_cpu_var(reap_work),
-                                     REAPTIMEOUT_CPUC);
+                                     round_jiffies_relative(REAPTIMEOUT_CPUC));
                return;
        }
 
@@ -3974,7 +4053,8 @@ next:
        next_reap_node();
        refresh_cpu_vm_stats(smp_processor_id());
        /* Set up the next iteration */
-       schedule_delayed_work(&__get_cpu_var(reap_work), REAPTIMEOUT_CPUC);
+       schedule_delayed_work(&__get_cpu_var(reap_work),
+               round_jiffies_relative(REAPTIMEOUT_CPUC));
 }
 
 #ifdef CONFIG_PROC_FS
@@ -4142,7 +4222,7 @@ static int s_show(struct seq_file *m, void *p)
  * + further values on SMP and with statistics enabled
  */
 
-struct seq_operations slabinfo_op = {
+const struct seq_operations slabinfo_op = {
        .start = s_start,
        .next = s_next,
        .stop = s_stop,
@@ -4340,7 +4420,7 @@ static int leaks_show(struct seq_file *m, void *p)
        return 0;
 }
 
-struct seq_operations slabstats_op = {
+const struct seq_operations slabstats_op = {
        .start = leaks_start,
        .next = s_next,
        .stop = s_stop,