slub: Update statistics handling for variable order slabs
author     Christoph Lameter <clameter@sgi.com>
           Mon, 14 Apr 2008 16:11:40 +0000 (19:11 +0300)
committer  Pekka Enberg <penberg@cs.helsinki.fi>
           Sun, 27 Apr 2008 15:28:17 +0000 (18:28 +0300)
Change the statistics to account for the fact that slabs of the same
slabcache can contain different numbers of objects, since the slabs may
be of different order.
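
For illustration only (not part of this patch), a standalone sketch of why
a single per-cache objects-per-slab constant no longer works once slabs can
be allocated at different orders; the page size and object size below are
made-up example values:

	#include <stdio.h>

	#define EXAMPLE_PAGE_SIZE 4096UL	/* assumed page size, example only */

	/* Capacity of one slab of 2^order pages, ignoring metadata and padding. */
	static unsigned long objs_in_slab(unsigned int order, unsigned long obj_size)
	{
		return (EXAMPLE_PAGE_SIZE << order) / obj_size;
	}

	int main(void)
	{
		unsigned long obj_size = 192;	/* hypothetical object size */
		unsigned int order;

		for (order = 0; order <= 3; order++)
			printf("order %u slab holds %lu objects\n",
			       order, objs_in_slab(order, obj_size));
		return 0;
	}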

Provide a new sysfs field

total_objects

which shows the total number of objects that the allocated slabs of a
slabcache could hold.
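
A userspace usage sketch, assuming the caches are exposed under
/sys/kernel/slab/ as SLUB does; the cache name kmalloc-192 is only an
example. It reads the existing objects field together with the new
total_objects field to see how much of the allocated slab capacity is
actually in use:

	#include <stdio.h>

	static unsigned long read_slab_stat(const char *cache, const char *field)
	{
		char path[256];
		unsigned long val = 0;
		FILE *f;

		snprintf(path, sizeof(path), "/sys/kernel/slab/%s/%s", cache, field);
		f = fopen(path, "r");
		if (!f)
			return 0;
		/* Both fields print the total first; ignore any per-node suffix. */
		if (fscanf(f, "%lu", &val) != 1)
			val = 0;
		fclose(f);
		return val;
	}

	int main(void)
	{
		const char *cache = "kmalloc-192";	/* example cache name */
		unsigned long objects = read_slab_stat(cache, "objects");
		unsigned long capacity = read_slab_stat(cache, "total_objects");

		if (capacity)
			printf("%s: %lu of %lu object slots in use\n",
			       cache, objects, capacity);
		return 0;
	}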

Add a max field that holds the largest slab order that was ever used
for a slab cache.
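
The hunks below size the buffers in kmem_cache_shrink() and
validate_slab_cache() from oo_objects(s->max) instead of oo_objects(s->oo),
since s->oo can later be recomputed smaller while larger slabs still exist.
A standalone sketch of that monotone update (simplified types, not the
kernel's packed kmem_cache_order_objects):

	#include <stdio.h>

	/* Simplified stand-in for the kernel's packed order/objects value. */
	struct order_objects {
		unsigned int order;	/* page order of a slab */
		unsigned int objects;	/* objects such a slab can hold */
	};

	/* Mirrors the calculate_sizes() update in the patch: max never shrinks. */
	static void update_max(struct order_objects *max, struct order_objects oo)
	{
		if (oo.objects > max->objects)
			*max = oo;
	}

	int main(void)
	{
		struct order_objects max = { 0, 0 };
		struct order_objects configs[] = { { 3, 170 }, { 1, 42 }, { 2, 85 } };
		unsigned int i;

		for (i = 0; i < sizeof(configs) / sizeof(configs[0]); i++)
			update_max(&max, configs[i]);

		printf("largest ever: order %u, %u objects\n", max.order, max.objects);
		return 0;
	}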

Signed-off-by: Christoph Lameter <clameter@sgi.com>
Signed-off-by: Pekka Enberg <penberg@cs.helsinki.fi>
Documentation/vm/slabinfo.c
include/linux/slub_def.h
mm/slub.c

index 22d7e3e4d60ce06df281d77d2b5638b76ded932b..d3ce295bffac59ce6ca9a4be032194210e5ac41f 100644
--- a/Documentation/vm/slabinfo.c
+++ b/Documentation/vm/slabinfo.c
@@ -31,7 +31,7 @@ struct slabinfo {
        int hwcache_align, object_size, objs_per_slab;
        int sanity_checks, slab_size, store_user, trace;
        int order, poison, reclaim_account, red_zone;
-       unsigned long partial, objects, slabs;
+       unsigned long partial, objects, slabs, objects_partial, objects_total;
        unsigned long alloc_fastpath, alloc_slowpath;
        unsigned long free_fastpath, free_slowpath;
        unsigned long free_frozen, free_add_partial, free_remove_partial;
@@ -540,7 +540,8 @@ void slabcache(struct slabinfo *s)
                return;
 
        store_size(size_str, slab_size(s));
-       snprintf(dist_str, 40, "%lu/%lu/%d", s->slabs, s->partial, s->cpu_slabs);
+       snprintf(dist_str, 40, "%lu/%lu/%d", s->slabs - s->cpu_slabs,
+                                               s->partial, s->cpu_slabs);
 
        if (!line++)
                first_line();
@@ -776,7 +777,6 @@ void totals(void)
                unsigned long used;
                unsigned long long wasted;
                unsigned long long objwaste;
-               long long objects_in_partial_slabs;
                unsigned long percentage_partial_slabs;
                unsigned long percentage_partial_objs;
 
@@ -790,18 +790,11 @@ void totals(void)
                wasted = size - used;
                objwaste = s->slab_size - s->object_size;
 
-               objects_in_partial_slabs = s->objects -
-                       (s->slabs - s->partial - s ->cpu_slabs) *
-                       s->objs_per_slab;
-
-               if (objects_in_partial_slabs < 0)
-                       objects_in_partial_slabs = 0;
-
                percentage_partial_slabs = s->partial * 100 / s->slabs;
                if (percentage_partial_slabs > 100)
                        percentage_partial_slabs = 100;
 
-               percentage_partial_objs = objects_in_partial_slabs * 100
+               percentage_partial_objs = s->objects_partial * 100
                                                        / s->objects;
 
                if (percentage_partial_objs > 100)
@@ -823,8 +816,8 @@ void totals(void)
                        min_objects = s->objects;
                if (used < min_used)
                        min_used = used;
-               if (objects_in_partial_slabs < min_partobj)
-                       min_partobj = objects_in_partial_slabs;
+               if (s->objects_partial < min_partobj)
+                       min_partobj = s->objects_partial;
                if (percentage_partial_slabs < min_ppart)
                        min_ppart = percentage_partial_slabs;
                if (percentage_partial_objs < min_ppartobj)
@@ -848,8 +841,8 @@ void totals(void)
                        max_objects = s->objects;
                if (used > max_used)
                        max_used = used;
-               if (objects_in_partial_slabs > max_partobj)
-                       max_partobj = objects_in_partial_slabs;
+               if (s->objects_partial > max_partobj)
+                       max_partobj = s->objects_partial;
                if (percentage_partial_slabs > max_ppart)
                        max_ppart = percentage_partial_slabs;
                if (percentage_partial_objs > max_ppartobj)
@@ -864,7 +857,7 @@ void totals(void)
 
                total_objects += s->objects;
                total_used += used;
-               total_partobj += objects_in_partial_slabs;
+               total_partobj += s->objects_partial;
                total_ppart += percentage_partial_slabs;
                total_ppartobj += percentage_partial_objs;
 
@@ -1160,6 +1153,8 @@ void read_slab_dir(void)
                        slab->hwcache_align = get_obj("hwcache_align");
                        slab->object_size = get_obj("object_size");
                        slab->objects = get_obj("objects");
+                       slab->objects_partial = get_obj("objects_partial");
+                       slab->objects_total = get_obj("objects_total");
                        slab->objs_per_slab = get_obj("objs_per_slab");
                        slab->order = get_obj("order");
                        slab->partial = get_obj("partial");
index 4131e5fbd18be5239820273fbe3798362e54d6de..4236b5dee8122c6fd8bb5aa0b1ae5c0954f257c4 100644
--- a/include/linux/slub_def.h
+++ b/include/linux/slub_def.h
@@ -48,6 +48,7 @@ struct kmem_cache_node {
        struct list_head partial;
 #ifdef CONFIG_SLUB_DEBUG
        atomic_long_t nr_slabs;
+       atomic_long_t total_objects;
        struct list_head full;
 #endif
 };
@@ -79,6 +80,7 @@ struct kmem_cache {
        struct kmem_cache_node local_node;
 
        /* Allocation and freeing of slabs */
+       struct kmem_cache_order_objects max;
        gfp_t allocflags;       /* gfp flags to use on each alloc */
        int refcount;           /* Refcount for slab cache destroy */
        void (*ctor)(struct kmem_cache *, void *);
index 0a220df5ed7c7ee0720a5e96f9af65407e45f2c9..c8514e93ffdf3324f7d64ee7bee1da5fb60e068d 100644
--- a/mm/slub.c
+++ b/mm/slub.c
@@ -886,7 +886,7 @@ static inline unsigned long slabs_node(struct kmem_cache *s, int node)
        return atomic_long_read(&n->nr_slabs);
 }
 
-static inline void inc_slabs_node(struct kmem_cache *s, int node)
+static inline void inc_slabs_node(struct kmem_cache *s, int node, int objects)
 {
        struct kmem_cache_node *n = get_node(s, node);
 
@@ -896,14 +896,17 @@ static inline void inc_slabs_node(struct kmem_cache *s, int node)
         * dilemma by deferring the increment of the count during
         * bootstrap (see early_kmem_cache_node_alloc).
         */
-       if (!NUMA_BUILD || n)
+       if (!NUMA_BUILD || n) {
                atomic_long_inc(&n->nr_slabs);
+               atomic_long_add(objects, &n->total_objects);
+       }
 }
-static inline void dec_slabs_node(struct kmem_cache *s, int node)
+static inline void dec_slabs_node(struct kmem_cache *s, int node, int objects)
 {
        struct kmem_cache_node *n = get_node(s, node);
 
        atomic_long_dec(&n->nr_slabs);
+       atomic_long_sub(objects, &n->total_objects);
 }
 
 /* Object debug checks for alloc/free paths */
@@ -1101,9 +1104,12 @@ static inline unsigned long kmem_cache_flags(unsigned long objsize,
 
 static inline unsigned long slabs_node(struct kmem_cache *s, int node)
                                                        { return 0; }
-static inline void inc_slabs_node(struct kmem_cache *s, int node) {}
-static inline void dec_slabs_node(struct kmem_cache *s, int node) {}
+static inline void inc_slabs_node(struct kmem_cache *s, int node,
+                                                       int objects) {}
+static inline void dec_slabs_node(struct kmem_cache *s, int node,
+                                                       int objects) {}
 #endif
+
 /*
  * Slab allocation and freeing
  */
@@ -1155,7 +1161,7 @@ static struct page *new_slab(struct kmem_cache *s, gfp_t flags, int node)
        if (!page)
                goto out;
 
-       inc_slabs_node(s, page_to_nid(page));
+       inc_slabs_node(s, page_to_nid(page), page->objects);
        page->slab = s;
        page->flags |= 1 << PG_slab;
        if (s->flags & (SLAB_DEBUG_FREE | SLAB_RED_ZONE | SLAB_POISON |
@@ -1230,7 +1236,7 @@ static void free_slab(struct kmem_cache *s, struct page *page)
 
 static void discard_slab(struct kmem_cache *s, struct page *page)
 {
-       dec_slabs_node(s, page_to_nid(page));
+       dec_slabs_node(s, page_to_nid(page), page->objects);
        free_slab(s, page);
 }
 
@@ -2144,7 +2150,7 @@ static struct kmem_cache_node *early_kmem_cache_node_alloc(gfp_t gfpflags,
        init_tracking(kmalloc_caches, n);
 #endif
        init_kmem_cache_node(n);
-       inc_slabs_node(kmalloc_caches, node);
+       inc_slabs_node(kmalloc_caches, node, page->objects);
 
        /*
         * lockdep requires consistent irq usage for each lock
@@ -2341,6 +2347,8 @@ static int calculate_sizes(struct kmem_cache *s)
         * Determine the number of objects per slab
         */
        s->oo = oo_make(order, size);
+       if (oo_objects(s->oo) > oo_objects(s->max))
+               s->max = s->oo;
 
        return !!oo_objects(s->oo);
 
@@ -2813,7 +2821,7 @@ int kmem_cache_shrink(struct kmem_cache *s)
        struct kmem_cache_node *n;
        struct page *page;
        struct page *t;
-       int objects = oo_objects(s->oo);
+       int objects = oo_objects(s->max);
        struct list_head *slabs_by_inuse =
                kmalloc(sizeof(struct list_head) * objects, GFP_KERNEL);
        unsigned long flags;
@@ -3276,7 +3284,8 @@ void *__kmalloc_node_track_caller(size_t size, gfp_t gfpflags,
 }
 
 #if (defined(CONFIG_SYSFS) && defined(CONFIG_SLUB_DEBUG)) || defined(CONFIG_SLABINFO)
-static unsigned long count_partial(struct kmem_cache_node *n)
+static unsigned long count_partial(struct kmem_cache_node *n,
+                                       int (*get_count)(struct page *))
 {
        unsigned long flags;
        unsigned long x = 0;
@@ -3284,10 +3293,25 @@ static unsigned long count_partial(struct kmem_cache_node *n)
 
        spin_lock_irqsave(&n->list_lock, flags);
        list_for_each_entry(page, &n->partial, lru)
-               x += page->inuse;
+               x += get_count(page);
        spin_unlock_irqrestore(&n->list_lock, flags);
        return x;
 }
+
+static int count_inuse(struct page *page)
+{
+       return page->inuse;
+}
+
+static int count_total(struct page *page)
+{
+       return page->objects;
+}
+
+static int count_free(struct page *page)
+{
+       return page->objects - page->inuse;
+}
 #endif
 
 #if defined(CONFIG_SYSFS) && defined(CONFIG_SLUB_DEBUG)
@@ -3376,7 +3400,7 @@ static long validate_slab_cache(struct kmem_cache *s)
 {
        int node;
        unsigned long count = 0;
-       unsigned long *map = kmalloc(BITS_TO_LONGS(oo_objects(s->oo)) *
+       unsigned long *map = kmalloc(BITS_TO_LONGS(oo_objects(s->max)) *
                                sizeof(unsigned long), GFP_KERNEL);
 
        if (!map)
@@ -3676,22 +3700,23 @@ static int list_locations(struct kmem_cache *s, char *buf,
 }
 
 enum slab_stat_type {
-       SL_FULL,
-       SL_PARTIAL,
-       SL_CPU,
-       SL_OBJECTS
+       SL_ALL,                 /* All slabs */
+       SL_PARTIAL,             /* Only partially allocated slabs */
+       SL_CPU,                 /* Only slabs used for cpu caches */
+       SL_OBJECTS,             /* Determine allocated objects not slabs */
+       SL_TOTAL                /* Determine object capacity not slabs */
 };
 
-#define SO_FULL                (1 << SL_FULL)
+#define SO_ALL         (1 << SL_ALL)
 #define SO_PARTIAL     (1 << SL_PARTIAL)
 #define SO_CPU         (1 << SL_CPU)
 #define SO_OBJECTS     (1 << SL_OBJECTS)
+#define SO_TOTAL       (1 << SL_TOTAL)
 
 static ssize_t show_slab_objects(struct kmem_cache *s,
                            char *buf, unsigned long flags)
 {
        unsigned long total = 0;
-       int cpu;
        int node;
        int x;
        unsigned long *nodes;
@@ -3702,56 +3727,60 @@ static ssize_t show_slab_objects(struct kmem_cache *s,
                return -ENOMEM;
        per_cpu = nodes + nr_node_ids;
 
-       for_each_possible_cpu(cpu) {
-               struct page *page;
-               struct kmem_cache_cpu *c = get_cpu_slab(s, cpu);
+       if (flags & SO_CPU) {
+               int cpu;
 
-               if (!c)
-                       continue;
+               for_each_possible_cpu(cpu) {
+                       struct kmem_cache_cpu *c = get_cpu_slab(s, cpu);
 
-               page = c->page;
-               node = c->node;
-               if (node < 0)
-                       continue;
-               if (page) {
-                       if (flags & SO_CPU) {
-                               if (flags & SO_OBJECTS)
-                                       x = page->inuse;
+                       if (!c || c->node < 0)
+                               continue;
+
+                       if (c->page) {
+                                       if (flags & SO_TOTAL)
+                                               x = c->page->objects;
+                               else if (flags & SO_OBJECTS)
+                                       x = c->page->inuse;
                                else
                                        x = 1;
+
                                total += x;
-                               nodes[node] += x;
+                               nodes[c->node] += x;
                        }
-                       per_cpu[node]++;
+                       per_cpu[c->node]++;
                }
        }
 
-       for_each_node_state(node, N_NORMAL_MEMORY) {
-               struct kmem_cache_node *n = get_node(s, node);
+       if (flags & SO_ALL) {
+               for_each_node_state(node, N_NORMAL_MEMORY) {
+                       struct kmem_cache_node *n = get_node(s, node);
+
+               if (flags & SO_TOTAL)
+                       x = atomic_long_read(&n->total_objects);
+               else if (flags & SO_OBJECTS)
+                       x = atomic_long_read(&n->total_objects) -
+                               count_partial(n, count_free);
 
-               if (flags & SO_PARTIAL) {
-                       if (flags & SO_OBJECTS)
-                               x = count_partial(n);
                        else
-                               x = n->nr_partial;
+                               x = atomic_long_read(&n->nr_slabs);
                        total += x;
                        nodes[node] += x;
                }
 
-               if (flags & SO_FULL) {
-                       int full_slabs = atomic_long_read(&n->nr_slabs)
-                                       - per_cpu[node]
-                                       - n->nr_partial;
+       } else if (flags & SO_PARTIAL) {
+               for_each_node_state(node, N_NORMAL_MEMORY) {
+                       struct kmem_cache_node *n = get_node(s, node);
 
-                       if (flags & SO_OBJECTS)
-                               x = full_slabs * oo_objects(s->oo);
+                       if (flags & SO_TOTAL)
+                               x = count_partial(n, count_total);
+                       else if (flags & SO_OBJECTS)
+                               x = count_partial(n, count_inuse);
                        else
-                               x = full_slabs;
+                               x = n->nr_partial;
                        total += x;
                        nodes[node] += x;
                }
        }
-
        x = sprintf(buf, "%lu", total);
 #ifdef CONFIG_NUMA
        for_each_node_state(node, N_NORMAL_MEMORY)
@@ -3852,7 +3881,7 @@ SLAB_ATTR_RO(aliases);
 
 static ssize_t slabs_show(struct kmem_cache *s, char *buf)
 {
-       return show_slab_objects(s, buf, SO_FULL|SO_PARTIAL|SO_CPU);
+       return show_slab_objects(s, buf, SO_ALL);
 }
 SLAB_ATTR_RO(slabs);
 
@@ -3870,10 +3899,22 @@ SLAB_ATTR_RO(cpu_slabs);
 
 static ssize_t objects_show(struct kmem_cache *s, char *buf)
 {
-       return show_slab_objects(s, buf, SO_FULL|SO_PARTIAL|SO_CPU|SO_OBJECTS);
+       return show_slab_objects(s, buf, SO_ALL|SO_OBJECTS);
 }
 SLAB_ATTR_RO(objects);
 
+static ssize_t objects_partial_show(struct kmem_cache *s, char *buf)
+{
+       return show_slab_objects(s, buf, SO_PARTIAL|SO_OBJECTS);
+}
+SLAB_ATTR_RO(objects_partial);
+
+static ssize_t total_objects_show(struct kmem_cache *s, char *buf)
+{
+       return show_slab_objects(s, buf, SO_ALL|SO_TOTAL);
+}
+SLAB_ATTR_RO(total_objects);
+
 static ssize_t sanity_checks_show(struct kmem_cache *s, char *buf)
 {
        return sprintf(buf, "%d\n", !!(s->flags & SLAB_DEBUG_FREE));
@@ -4131,6 +4172,8 @@ static struct attribute *slab_attrs[] = {
        &objs_per_slab_attr.attr,
        &order_attr.attr,
        &objects_attr.attr,
+       &objects_partial_attr.attr,
+       &total_objects_attr.attr,
        &slabs_attr.attr,
        &partial_attr.attr,
        &cpu_slabs_attr.attr,
@@ -4459,7 +4502,8 @@ static int s_show(struct seq_file *m, void *p)
        unsigned long nr_partials = 0;
        unsigned long nr_slabs = 0;
        unsigned long nr_inuse = 0;
-       unsigned long nr_objs;
+       unsigned long nr_objs = 0;
+       unsigned long nr_free = 0;
        struct kmem_cache *s;
        int node;
 
@@ -4473,11 +4517,11 @@ static int s_show(struct seq_file *m, void *p)
 
                nr_partials += n->nr_partial;
                nr_slabs += atomic_long_read(&n->nr_slabs);
-               nr_inuse += count_partial(n);
+               nr_objs += atomic_long_read(&n->total_objects);
+               nr_free += count_partial(n, count_free);
        }
 
-       nr_objs = nr_slabs * oo_objects(s->oo);
-       nr_inuse += (nr_slabs - nr_partials) * oo_objects(s->oo);
+       nr_inuse = nr_objs - nr_free;
 
        seq_printf(m, "%-17s %6lu %6lu %6u %4u %4d", s->name, nr_inuse,
                   nr_objs, s->size, oo_objects(s->oo),