Make vm statistics update interval configurable
diff --git a/mm/slab.c b/mm/slab.c
index acda7e2d66e4e9f4f6b7513a7c00cf91bf0873c4..e50908b2bfac8d33948fdb70422f98030e74f5d9 100644
--- a/mm/slab.c
+++ b/mm/slab.c
@@ -1186,8 +1186,11 @@ static int __cpuinit cpuup_callback(struct notifier_block *nfb,
        int memsize = sizeof(struct kmem_list3);
 
        switch (action) {
-       case CPU_UP_PREPARE:
+       case CPU_LOCK_ACQUIRE:
                mutex_lock(&cache_chain_mutex);
+               break;
+       case CPU_UP_PREPARE:
+       case CPU_UP_PREPARE_FROZEN:
                /*
                 * We need to do this right in the beginning since
                 * alloc_arraycache's are going to use this list.
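
The first hunk splits lock handling out of CPU_UP_PREPARE: the new CPU_LOCK_ACQUIRE event takes cache_chain_mutex before any per-state work runs, and each state gains a _FROZEN twin so the same handler also covers notifications raised during suspend/resume. Below is a minimal userspace sketch of that bracketing pattern; the event names and the pthread mutex are stand-ins, not kernel APIs.

/* Sketch only: models the LOCK_ACQUIRE ... LOCK_RELEASE bracket
 * around the per-state cases; enum values are illustrative. */
#include <pthread.h>
#include <stdio.h>

enum hp_event { LOCK_ACQUIRE, UP_PREPARE, ONLINE, LOCK_RELEASE };

static pthread_mutex_t chain_mutex = PTHREAD_MUTEX_INITIALIZER;

static void notifier(enum hp_event ev)
{
        switch (ev) {
        case LOCK_ACQUIRE:              /* first event of the sequence */
                pthread_mutex_lock(&chain_mutex);
                break;
        case UP_PREPARE:                /* both run with chain_mutex held */
        case ONLINE:
                printf("event %d handled under lock\n", ev);
                break;
        case LOCK_RELEASE:              /* last event of the sequence */
                pthread_mutex_unlock(&chain_mutex);
                break;
        }
}

int main(void)
{
        enum hp_event seq[] = { LOCK_ACQUIRE, UP_PREPARE, ONLINE, LOCK_RELEASE };

        for (unsigned i = 0; i < sizeof(seq) / sizeof(seq[0]); i++)
                notifier(seq[i]);
        return 0;
}
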
@@ -1274,17 +1277,28 @@ static int __cpuinit cpuup_callback(struct notifier_block *nfb,
                }
                break;
        case CPU_ONLINE:
-               mutex_unlock(&cache_chain_mutex);
+       case CPU_ONLINE_FROZEN:
                start_cpu_timer(cpu);
                break;
 #ifdef CONFIG_HOTPLUG_CPU
-       case CPU_DOWN_PREPARE:
-               mutex_lock(&cache_chain_mutex);
-               break;
-       case CPU_DOWN_FAILED:
-               mutex_unlock(&cache_chain_mutex);
-               break;
+       case CPU_DOWN_PREPARE:
+       case CPU_DOWN_PREPARE_FROZEN:
+               /*
+                * Shutdown cache reaper. Note that the cache_chain_mutex is
+                * held so that if cache_reap() is invoked it cannot do
+                * anything expensive but will only modify reap_work
+                * and reschedule the timer.
+                */
+               cancel_rearming_delayed_work(&per_cpu(reap_work, cpu));
+               /* Now the cache_reaper is guaranteed not to be running. */
+               per_cpu(reap_work, cpu).work.func = NULL;
+               break;
+       case CPU_DOWN_FAILED:
+       case CPU_DOWN_FAILED_FROZEN:
+               start_cpu_timer(cpu);
+               break;
        case CPU_DEAD:
+       case CPU_DEAD_FROZEN:
                /*
                 * Even if all the cpus of a node are down, we don't free the
                 * kmem_list3 of any cache. This is to avoid a race between
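
The DOWN_PREPARE path now shuts the per-cpu cache reaper down instead of just taking the mutex: cancel_rearming_delayed_work() waits until the self-rearming reap_work can no longer run, and clearing work.func lets a later start_cpu_timer() (on CPU_DOWN_FAILED) re-arm it cleanly. A hedged userspace model of that stop-then-join idiom follows; the pthread names are illustrative, the kernel uses the delayed-work API.

/* Sketch only: a periodic worker that checks a stop flag under the
 * same lock the canceller takes, so it cannot requeue itself after
 * shutdown has been requested. */
#include <pthread.h>
#include <stdbool.h>
#include <stdio.h>
#include <unistd.h>

static pthread_mutex_t lock = PTHREAD_MUTEX_INITIALIZER;
static bool stop;

static void *reaper(void *arg)
{
        for (;;) {
                pthread_mutex_lock(&lock);
                if (stop) {                     /* cheap exit path, like reap_work */
                        pthread_mutex_unlock(&lock);
                        return NULL;
                }
                /* expensive cache reaping would happen here */
                pthread_mutex_unlock(&lock);
                usleep(1000);                   /* stand-in for the rearming delay */
        }
}

int main(void)
{
        pthread_t t;

        pthread_create(&t, NULL, reaper, NULL);
        pthread_mutex_lock(&lock);
        stop = true;                            /* DOWN_PREPARE: request shutdown */
        pthread_mutex_unlock(&lock);
        pthread_join(t, NULL);                  /* worker guaranteed not running */
        puts("reaper stopped");
        return 0;
}
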
@@ -1296,6 +1310,7 @@ static int __cpuinit cpuup_callback(struct notifier_block *nfb,
                /* fall thru */
 #endif
        case CPU_UP_CANCELED:
+       case CPU_UP_CANCELED_FROZEN:
                list_for_each_entry(cachep, &cache_chain, next) {
                        struct array_cache *nc;
                        struct array_cache *shared;
@@ -1354,6 +1369,8 @@ free_array_cache:
                                continue;
                        drain_freelist(cachep, l3, l3->free_objects);
                }
+               break;
+       case CPU_LOCK_RELEASE:
                mutex_unlock(&cache_chain_mutex);
                break;
        }
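
CPU_DEAD intentionally falls through into the CPU_UP_CANCELED teardown, so both events share the code that frees the per-cpu array caches and drains the free lists, while the final mutex_unlock() moves to its own CPU_LOCK_RELEASE case. A small sketch of that fall-through sharing; the event names and messages are placeholders, not the kernel code.

#include <stdio.h>

enum ev { UP_CANCELED, DEAD, LOCK_RELEASE };

static void handle(enum ev e)
{
        switch (e) {
        case DEAD:
                puts("node-level cleanup for the dead cpu");
                /* fall thru */
        case UP_CANCELED:
                puts("free array caches, drain free lists");
                break;
        case LOCK_RELEASE:
                puts("drop cache_chain_mutex");
                break;
        }
}

int main(void)
{
        handle(DEAD);           /* runs both teardown steps */
        handle(UP_CANCELED);    /* runs only the shared step */
        return 0;
}
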
@@ -3742,7 +3759,6 @@ EXPORT_SYMBOL(__kmalloc);
 
 /**
  * krealloc - reallocate memory. The contents will remain unchanged.
- *
  * @p: object to reallocate memory for.
  * @new_size: how many bytes of memory are required.
  * @flags: the type of memory to allocate.
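
The kerneldoc hunk only drops a stray blank line, but the contract it documents is worth noting: krealloc() preserves the old contents, and on allocation failure it returns NULL while leaving the original block valid. Here is a runnable userspace analogue of the safe-resize idiom, using realloc() as a stand-in for krealloc().

/* Sketch only: keep the old pointer until the new one is known good. */
#include <stdio.h>
#include <stdlib.h>
#include <string.h>

int main(void)
{
        char *p = malloc(8);
        char *tmp;

        if (!p)
                return 1;
        strcpy(p, "slab");

        tmp = realloc(p, 64);   /* like krealloc(): old block untouched on failure */
        if (!tmp) {
                free(p);        /* p is still valid here */
                return 1;
        }
        p = tmp;                /* contents "slab" preserved across the resize */
        puts(p);
        free(p);
        return 0;
}
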
@@ -4140,7 +4156,6 @@ next:
        check_irq_on();
        mutex_unlock(&cache_chain_mutex);
        next_reap_node();
-       refresh_cpu_vm_stats(smp_processor_id());
 out:
        /* Set up the next iteration */
        schedule_delayed_work(work, round_jiffies_relative(REAPTIMEOUT_CPUC));
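
The last hunk removes the refresh_cpu_vm_stats() call from cache_reap(); per the commit subject, the vm statistics refresh moves to its own worker with a configurable interval, so the slab reaper no longer piggybacks it. Rescheduling still goes through round_jiffies_relative(), which nudges the timeout so periodic timers on many CPUs tend to expire on whole-second boundaries. A simplified model of that rounding idea; the HZ value and helper below are assumptions, not the kernel implementation.

/* Sketch only: align a relative timeout to the next whole second. */
#include <stdio.h>

#define HZ 250  /* assumed tick rate */

static unsigned long round_relative(unsigned long now, unsigned long delta)
{
        unsigned long expires = now + delta;
        unsigned long rounded = expires - (expires % HZ);       /* whole second */

        return rounded > now ? rounded - now : delta;
}

int main(void)
{
        unsigned long now = 12345;

        printf("raw delta 4*HZ -> rounded %lu ticks\n",
               round_relative(now, 4 * HZ));
        return 0;
}
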