diff --git a/include/linux/slub_def.h b/include/linux/slub_def.h
index 92e10cf6d0..57deecc79d 100644
--- a/include/linux/slub_def.h
+++ b/include/linux/slub_def.h
@@ -11,13 +11,36 @@
 #include <linux/workqueue.h>
 #include <linux/kobject.h>
 
+enum stat_item {
+	ALLOC_FASTPATH,		/* Allocation from cpu slab */
+	ALLOC_SLOWPATH,		/* Allocation by getting a new cpu slab */
+	FREE_FASTPATH,		/* Free to cpu slab */
+	FREE_SLOWPATH,		/* Freeing not to cpu slab */
+	FREE_FROZEN,		/* Freeing to frozen slab */
+	FREE_ADD_PARTIAL,	/* Freeing moves slab to partial list */
+	FREE_REMOVE_PARTIAL,	/* Freeing removes last object */
+	ALLOC_FROM_PARTIAL,	/* Cpu slab acquired from partial list */
+	ALLOC_SLAB,		/* Cpu slab acquired from page allocator */
+	ALLOC_REFILL,		/* Refill cpu slab from slab freelist */
+	FREE_SLAB,		/* Slab freed to the page allocator */
+	CPUSLAB_FLUSH,		/* Abandoning of the cpu slab */
+	DEACTIVATE_FULL,	/* Cpu slab was full when deactivated */
+	DEACTIVATE_EMPTY,	/* Cpu slab was empty when deactivated */
+	DEACTIVATE_TO_HEAD,	/* Cpu slab was moved to the head of partials */
+	DEACTIVATE_TO_TAIL,	/* Cpu slab was moved to the tail of partials */
+	DEACTIVATE_REMOTE_FREES,/* Slab contained remotely freed objects */
+	NR_SLUB_STAT_ITEMS };
+
 struct kmem_cache_cpu {
-	void **freelist;
-	struct page *page;
-	int node;
-	unsigned int offset;
-	/* Lots of wasted space */
-} ____cacheline_aligned_in_smp;
+	void **freelist;	/* Pointer to first free per cpu object */
+	struct page *page;	/* The slab from which we are allocating */
+	int node;		/* The node of the page (or -1 for debug) */
+	unsigned int offset;	/* Freepointer offset (in word units) */
+	unsigned int objsize;	/* Size of an object (from kmem_cache) */
+#ifdef CONFIG_SLUB_STATS
+	unsigned stat[NR_SLUB_STAT_ITEMS];
+#endif
+};
 
 struct kmem_cache_node {
 	spinlock_t list_lock;	/* Protect partial list and nr_partial */
@@ -48,8 +71,9 @@ struct kmem_cache {
 	/* Allocation and freeing of slabs */
 	int objects;		/* Number of objects in slab */
+	gfp_t allocflags;	/* gfp flags to use on each alloc */
 	int refcount;		/* Refcount for slab cache destroy */
-	void (*ctor)(void *, struct kmem_cache *, unsigned long);
+	void (*ctor)(struct kmem_cache *, void *);
 	int inuse;		/* Offset to metadata */
 	int align;		/* Alignment */
 	const char *name;	/* Name (only for display!) */
@@ -59,10 +83,17 @@ struct kmem_cache {
 #endif
 
 #ifdef CONFIG_NUMA
-	int defrag_ratio;
+	/*
+	 * Defragmentation by allocating from a remote node.
+	 */
+	int remote_node_defrag_ratio;
 	struct kmem_cache_node *node[MAX_NUMNODES];
 #endif
-	struct kmem_cache_cpu cpu_slab[NR_CPUS];
+#ifdef CONFIG_SMP
+	struct kmem_cache_cpu *cpu_slab[NR_CPUS];
+#else
+	struct kmem_cache_cpu cpu_slab;
+#endif
 };
 
 /*
@@ -80,7 +111,7 @@ struct kmem_cache {
  * We keep the general caches in an array of slab caches that are used for
  * 2^x bytes of allocations.
  */
-extern struct kmem_cache kmalloc_caches[PAGE_SHIFT];
+extern struct kmem_cache kmalloc_caches[PAGE_SHIFT + 1];
 
 /*
  * Sorry that the following has to be that ugly but some versions of GCC
@@ -158,12 +189,16 @@ static __always_inline struct kmem_cache *kmalloc_slab(size_t size)
 void *kmem_cache_alloc(struct kmem_cache *, gfp_t);
 void *__kmalloc(size_t size, gfp_t flags);
 
+static __always_inline void *kmalloc_large(size_t size, gfp_t flags)
+{
+	return (void *)__get_free_pages(flags | __GFP_COMP, get_order(size));
+}
+
 static __always_inline void *kmalloc(size_t size, gfp_t flags)
 {
 	if (__builtin_constant_p(size)) {
-		if (size > PAGE_SIZE / 2)
-			return (void *)__get_free_pages(flags | __GFP_COMP,
-							get_order(size));
+		if (size > PAGE_SIZE)
+			return kmalloc_large(size, flags);
 
 		if (!(flags & SLUB_DMA)) {
 			struct kmem_cache *s = kmalloc_slab(size);
@@ -184,7 +219,7 @@ void *kmem_cache_alloc_node(struct kmem_cache *, gfp_t flags, int node);
 static __always_inline void *kmalloc_node(size_t size, gfp_t flags, int node)
 {
 	if (__builtin_constant_p(size) &&
-		size <= PAGE_SIZE / 2 && !(flags & SLUB_DMA)) {
+		size <= PAGE_SIZE && !(flags & SLUB_DMA)) {
 		struct kmem_cache *s = kmalloc_slab(size);
 
 		if (!s)
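
A note on the cutoff change in the two kmalloc hunks above: raising the compile-time threshold from PAGE_SIZE / 2 to PAGE_SIZE means a constant-size request of exactly one page is now served from a kmalloc cache instead of falling through to the page allocator, while anything larger takes the new kmalloc_large() helper straight to __get_free_pages(). That is also why kmalloc_caches grows to PAGE_SHIFT + 1 entries: a 2^PAGE_SHIFT (one page) size class needs index PAGE_SHIFT, so the array must cover indices 0 through PAGE_SHIFT. What follows is a minimal user-space model of that dispatch, not kernel code; model_kmalloc, cache_alloc, page_alloc, and MODEL_PAGE_SIZE are hypothetical names, and a 4096-byte page is assumed.

/*
 * User-space model of the constant-size kmalloc() dispatch above.
 * Hypothetical names throughout; this only illustrates the policy.
 */
#include <stdio.h>
#include <stdlib.h>

#define MODEL_PAGE_SIZE 4096	/* assumes PAGE_SIZE == 4096 */

/* Stand-in for the kmalloc_large()/__get_free_pages() path. */
static void *page_alloc(size_t size)
{
	size_t pages = (size + MODEL_PAGE_SIZE - 1) / MODEL_PAGE_SIZE;
	printf("%5zu bytes -> page allocator, %zu page(s)\n", size, pages);
	return malloc(pages * MODEL_PAGE_SIZE);
}

/* Stand-in for kmalloc_slab(): pick the smallest power-of-two class. */
static void *cache_alloc(size_t size)
{
	size_t slot = MODEL_PAGE_SIZE;
	while (slot / 2 >= size && slot > 8)
		slot /= 2;
	printf("%5zu bytes -> kmalloc-%zu cache\n", size, slot);
	return malloc(slot);
}

static void *model_kmalloc(size_t size)
{
	if (size > MODEL_PAGE_SIZE)		/* was PAGE_SIZE / 2 before */
		return page_alloc(size);	/* kmalloc_large() path */
	return cache_alloc(size);		/* size-class cache path */
}

int main(void)
{
	void *a = model_kmalloc(100);	/* kmalloc-128 */
	void *b = model_kmalloc(4096);	/* one-page class: new with this patch */
	void *c = model_kmalloc(8000);	/* still the page-allocator path */
	free(a);
	free(b);
	free(c);
	return 0;
}

Before this change, the 4096-byte request above would have bypassed the caches entirely; routing it through a slab size class avoids a round trip to the page allocator for a very common allocation size.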
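
The stat[] array added to struct kmem_cache_cpu is bumped along the paths named by enum stat_item: fastpath versus slowpath allocation and free, partial-list traffic, and the deactivation cases. In the kernel series this diff belongs to, those counters are exported read-only through sysfs, one file per item under /sys/kernel/slab/<cache>/ (e.g. alloc_fastpath). A minimal sketch for reading a few of them from user space, assuming CONFIG_SLUB_STATS is enabled and that this sysfs layout holds on the running kernel:

/*
 * Read a handful of SLUB stat counters for one cache.  Assumes
 * CONFIG_SLUB_STATS and the /sys/kernel/slab/<cache>/<stat> layout;
 * the files are simply absent when the option is off.
 */
#include <stdio.h>

int main(void)
{
	static const char *stats[] = {
		"alloc_fastpath", "alloc_slowpath",
		"free_fastpath", "free_slowpath",
	};
	char path[256], line[256];
	unsigned int i;

	for (i = 0; i < sizeof(stats) / sizeof(stats[0]); i++) {
		FILE *f;

		snprintf(path, sizeof(path),
			 "/sys/kernel/slab/kmalloc-128/%s", stats[i]);
		f = fopen(path, "r");
		if (!f)
			continue;	/* no CONFIG_SLUB_STATS, or no such cache */
		if (fgets(line, sizeof(line), f))
			printf("%-16s %s", stats[i], line);
		fclose(f);
	}
	return 0;
}

Keeping the counters inside the per-cpu structure is what makes them cheap enough for the fast paths: each CPU increments only its own stat[] slot, so no shared cacheline bounces between processors, and any cache-wide total can be summed at read time.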