X-Git-Url: https://err.no/cgi-bin/gitweb.cgi?a=blobdiff_plain;f=mm%2Fslab.c;h=c9e706db46340f81f723e899d45510cdfe78d50c;hb=242e54686257493f0b10ac557e730419d9af7d24;hp=ec660d85ddd769c1f94b3385ca571ebb8b316005;hpb=1da177e4c3f41524e886b7f1b8a0c1fc7321cac2;p=linux-2.6

diff --git a/mm/slab.c b/mm/slab.c
index ec660d85dd..c9e706db46 100644
--- a/mm/slab.c
+++ b/mm/slab.c
@@ -92,6 +92,7 @@
 #include <linux/sysctl.h>
 #include <linux/module.h>
 #include <linux/rcupdate.h>
+#include <linux/string.h>
 #include <linux/nodemask.h>
 
 #include <asm/uaccess.h>
@@ -583,7 +584,8 @@ static inline struct array_cache *ac_data(kmem_cache_t *cachep)
 	return cachep->array[smp_processor_id()];
 }
 
-static inline kmem_cache_t *kmem_find_general_cachep(size_t size, int gfpflags)
+static inline kmem_cache_t *__find_general_cachep(size_t size,
+						unsigned int __nocast gfpflags)
 {
 	struct cache_sizes *csizep = malloc_sizes;
 
@@ -607,6 +609,13 @@ static inline kmem_cache_t *kmem_find_general_cachep(size_t size, int gfpflags)
 	return csizep->cs_cachep;
 }
 
+kmem_cache_t *kmem_find_general_cachep(size_t size,
+		unsigned int __nocast gfpflags)
+{
+	return __find_general_cachep(size, gfpflags);
+}
+EXPORT_SYMBOL(kmem_find_general_cachep);
+
 /* Cal the num objs, wastage, and bytes left over for a given slab size. */
 static void cache_estimate(unsigned long gfporder, size_t size, size_t align,
 		int flags, size_t *left_over, unsigned int *num)
@@ -672,14 +681,11 @@ static struct array_cache *alloc_arraycache(int cpu, int entries,
 	int memsize = sizeof(void*)*entries+sizeof(struct array_cache);
 	struct array_cache *nc = NULL;
 
-	if (cpu != -1) {
-		kmem_cache_t *cachep;
-		cachep = kmem_find_general_cachep(memsize, GFP_KERNEL);
-		if (cachep)
-			nc = kmem_cache_alloc_node(cachep, cpu_to_node(cpu));
-	}
-	if (!nc)
+	if (cpu == -1)
 		nc = kmalloc(memsize, GFP_KERNEL);
+	else
+		nc = kmalloc_node(memsize, GFP_KERNEL, cpu_to_node(cpu));
+
 	if (nc) {
 		nc->avail = 0;
 		nc->limit = entries;
@@ -1663,7 +1669,7 @@ int kmem_cache_destroy(kmem_cache_t * cachep)
 	}
 
 	if (unlikely(cachep->flags & SLAB_DESTROY_BY_RCU))
-		synchronize_kernel();
+		synchronize_rcu();
 
 	/* no cpu_online check required here since we clear the percpu
 	 * array on cpu offline and set this to NULL.
@@ -2096,7 +2102,7 @@ cache_alloc_debugcheck_before(kmem_cache_t *cachep, unsigned int __nocast flags)
 #if DEBUG
 static void *
 cache_alloc_debugcheck_after(kmem_cache_t *cachep,
-			unsigned long flags, void *objp, void *caller)
+			unsigned int __nocast flags, void *objp, void *caller)
 {
 	if (!objp)
 		return objp;
@@ -2361,13 +2367,16 @@ out:
  * and can sleep. And it will allocate memory on the given node, which
  * can improve the performance for cpu bound structures.
  */
-void *kmem_cache_alloc_node(kmem_cache_t *cachep, int nodeid)
+void *kmem_cache_alloc_node(kmem_cache_t *cachep, int flags, int nodeid)
 {
 	int loop;
 	void *objp;
 	struct slab *slabp;
 	kmem_bufctl_t next;
 
+	if (nodeid == -1)
+		return kmem_cache_alloc(cachep, flags);
+
 	for (loop = 0;;loop++) {
 		struct list_head *q;
 
@@ -2393,7 +2402,7 @@ void *kmem_cache_alloc_node(kmem_cache_t *cachep, int nodeid)
 		spin_unlock_irq(&cachep->spinlock);
 
 		local_irq_disable();
-		if (!cache_grow(cachep, GFP_KERNEL, nodeid)) {
+		if (!cache_grow(cachep, flags, nodeid)) {
 			local_irq_enable();
 			return NULL;
 		}
@@ -2435,6 +2444,16 @@ got_slabp:
 }
 EXPORT_SYMBOL(kmem_cache_alloc_node);
 
+void *kmalloc_node(size_t size, unsigned int __nocast flags, int node)
+{
+	kmem_cache_t *cachep;
+
+	cachep = kmem_find_general_cachep(size, flags);
+	if (unlikely(cachep == NULL))
+		return NULL;
+	return kmem_cache_alloc_node(cachep, flags, node);
+}
+EXPORT_SYMBOL(kmalloc_node);
 #endif
 
 /**
@@ -2462,7 +2481,12 @@ void *__kmalloc(size_t size, unsigned int __nocast flags)
 {
 	kmem_cache_t *cachep;
 
-	cachep = kmem_find_general_cachep(size, flags);
+	/* If you want to save a few bytes .text space: replace
+	 * __ with kmem_.
+	 * Then kmalloc uses the uninlined functions instead of the inline
+	 * functions.
+	 */
+	cachep = __find_general_cachep(size, flags);
 	if (unlikely(cachep == NULL))
 		return NULL;
 	return __cache_alloc(cachep, flags);
@@ -2489,9 +2513,8 @@ void *__alloc_percpu(size_t size, size_t align)
 	for (i = 0; i < NR_CPUS; i++) {
 		if (!cpu_possible(i))
 			continue;
-		pdata->ptrs[i] = kmem_cache_alloc_node(
-				kmem_find_general_cachep(size, GFP_KERNEL),
-				cpu_to_node(i));
+		pdata->ptrs[i] = kmalloc_node(size, GFP_KERNEL,
+						cpu_to_node(i));
 
 		if (!pdata->ptrs[i])
 			goto unwind_oom;
@@ -2603,6 +2626,12 @@ unsigned int kmem_cache_size(kmem_cache_t *cachep)
 }
 EXPORT_SYMBOL(kmem_cache_size);
 
+const char *kmem_cache_name(kmem_cache_t *cachep)
+{
+	return cachep->name;
+}
+EXPORT_SYMBOL_GPL(kmem_cache_name);
+
 struct ccupdate_struct {
 	kmem_cache_t *cachep;
 	struct array_cache *new[NR_CPUS];
@@ -2828,6 +2857,7 @@ next:
 	}
 	check_irq_on();
 	up(&cache_chain_sem);
+	drain_remote_pages();
 	/* Setup the next iteration */
 	schedule_delayed_work(&__get_cpu_var(reap_work), REAPTIMEOUT_CPUC + smp_processor_id());
 }
@@ -3058,3 +3088,26 @@ unsigned int ksize(const void *objp)
 
 	return size;
 }
+
+
+/*
+ * kstrdup - allocate space for and copy an existing string
+ *
+ * @s: the string to duplicate
+ * @gfp: the GFP mask used in the kmalloc() call when allocating memory
+ */
+char *kstrdup(const char *s, unsigned int __nocast gfp)
+{
+	size_t len;
+	char *buf;
+
+	if (!s)
+		return NULL;
+
+	len = strlen(s) + 1;
+	buf = kmalloc(len, gfp);
+	if (buf)
+		memcpy(buf, s, len);
+	return buf;
+}
+EXPORT_SYMBOL(kstrdup);
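
The hunks above add kmalloc_node() and a gfp-flags argument to
kmem_cache_alloc_node(), uninline and export kmem_find_general_cachep(),
add kmem_cache_name() and kstrdup(), replace synchronize_kernel() with
synchronize_rcu() in kmem_cache_destroy(), and have the cache reaper call
drain_remote_pages(). As a usage illustration only, here is a minimal
sketch of how kernel code of this era could call the new allocators;
example_setup(), scratch, and name_copy are hypothetical names, not part
of this diff:

	#include <linux/slab.h>
	#include <linux/string.h>
	#include <linux/errno.h>

	static void *scratch;
	static char *name_copy;

	static int example_setup(const char *name, int node)
	{
		/* Allocate near a specific NUMA node; kmalloc_node() looks
		 * up the general cache for the size, and node == -1 falls
		 * back to plain kmem_cache_alloc(), as the hunk at line
		 * 2367 above shows. */
		scratch = kmalloc_node(512, GFP_KERNEL, node);
		if (!scratch)
			return -ENOMEM;

		/* kstrdup() kmallocs strlen(name) + 1 bytes with the given
		 * gfp mask and copies the string; it returns NULL on
		 * allocation failure or when name itself is NULL. */
		name_copy = kstrdup(name, GFP_KERNEL);
		if (!name_copy) {
			kfree(scratch);
			return -ENOMEM;
		}
		return 0;
	}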