X-Git-Url: https://err.no/cgi-bin/gitweb.cgi?a=blobdiff_plain;f=include%2Flinux%2Fslab.h;h=2271886744f87a5fb43d5f307b7c062dc54528c1;hb=f0eef25339f92f7cd4aeea23d9ae97987a5a1e82;hp=e67314e4a0a016e6009ac766baf6be0104656fd9;hpb=b86c089b83b8ae2bc814db865057768a9ba787b5;p=linux-2.6

diff --git a/include/linux/slab.h b/include/linux/slab.h
index e67314e4a0..2271886744 100644
--- a/include/linux/slab.h
+++ b/include/linux/slab.h
@@ -7,27 +7,17 @@
 #ifndef _LINUX_SLAB_H
 #define _LINUX_SLAB_H
 
-#if defined(__KERNEL__)
+#ifdef __KERNEL__
 
-typedef struct kmem_cache kmem_cache_t;
+#include <linux/gfp.h>
+#include <linux/init.h>
+#include <linux/types.h>
+#include <asm/page.h>		/* kmalloc_sizes.h needs PAGE_SIZE */
+#include <asm/cache.h>		/* kmalloc_sizes.h needs L1_CACHE_BYTES */
+#include <linux/compiler.h>
 
-#include <linux/gfp.h>
-#include <linux/init.h>
-#include <linux/types.h>
-#include <asm/page.h>		/* kmalloc_sizes.h needs PAGE_SIZE */
-#include <asm/cache.h>		/* kmalloc_sizes.h needs L1_CACHE_BYTES */
-
-/* flags for kmem_cache_alloc() */
-#define	SLAB_NOFS	GFP_NOFS
-#define	SLAB_NOIO	GFP_NOIO
-#define	SLAB_ATOMIC	GFP_ATOMIC
-#define	SLAB_USER	GFP_USER
-#define	SLAB_KERNEL	GFP_KERNEL
-#define	SLAB_DMA	GFP_DMA
-
-#define SLAB_LEVEL_MASK	GFP_LEVEL_MASK
-
-#define	SLAB_NO_GROW	__GFP_NO_GROW	/* don't grow a cache */
+/* kmem_cache_t exists for legacy reasons and is not used by code in mm */
+typedef struct kmem_cache kmem_cache_t __deprecated;
 
 /* flags to pass to kmem_cache_create().
  * The first 3 are only valid when the allocator as been build
@@ -57,22 +47,23 @@ typedef struct kmem_cache kmem_cache_t;
 
 /* prototypes */
 extern void __init kmem_cache_init(void);
-extern kmem_cache_t *kmem_cache_create(const char *, size_t, size_t, unsigned long,
-				       void (*)(void *, kmem_cache_t *, unsigned long),
-				       void (*)(void *, kmem_cache_t *, unsigned long));
-extern void kmem_cache_destroy(kmem_cache_t *);
-extern int kmem_cache_shrink(kmem_cache_t *);
-extern void *kmem_cache_alloc(kmem_cache_t *, gfp_t);
+extern struct kmem_cache *kmem_cache_create(const char *, size_t, size_t,
+			unsigned long,
+			void (*)(void *, struct kmem_cache *, unsigned long),
+			void (*)(void *, struct kmem_cache *, unsigned long));
+extern void kmem_cache_destroy(struct kmem_cache *);
+extern int kmem_cache_shrink(struct kmem_cache *);
+extern void *kmem_cache_alloc(struct kmem_cache *, gfp_t);
 extern void *kmem_cache_zalloc(struct kmem_cache *, gfp_t);
-extern void kmem_cache_free(kmem_cache_t *, void *);
-extern unsigned int kmem_cache_size(kmem_cache_t *);
-extern const char *kmem_cache_name(kmem_cache_t *);
+extern void kmem_cache_free(struct kmem_cache *, void *);
+extern unsigned int kmem_cache_size(struct kmem_cache *);
+extern const char *kmem_cache_name(struct kmem_cache *);
 
 /* Size description struct for general caches.
  */
 struct cache_sizes {
-	size_t		 cs_size;
-	kmem_cache_t	*cs_cachep;
-	kmem_cache_t	*cs_dmacachep;
+	size_t		 	cs_size;
+	struct kmem_cache	*cs_cachep;
+	struct kmem_cache	*cs_dmacachep;
 };
 extern struct cache_sizes malloc_sizes[];
@@ -211,7 +202,7 @@ extern unsigned int ksize(const void *);
 extern int slab_is_available(void);
 
 #ifdef CONFIG_NUMA
-extern void *kmem_cache_alloc_node(kmem_cache_t *, gfp_t flags, int node);
+extern void *kmem_cache_alloc_node(struct kmem_cache *, gfp_t flags, int node);
 extern void *__kmalloc_node(size_t size, gfp_t flags, int node);
 
 static inline void *kmalloc_node(size_t size, gfp_t flags, int node)
@@ -255,7 +246,8 @@ extern void *__kmalloc_node_track_caller(size_t, gfp_t, int, void *);
 			__builtin_return_address(0))
 #endif
 #else /* CONFIG_NUMA */
-static inline void *kmem_cache_alloc_node(kmem_cache_t *cachep, gfp_t flags, int node)
+static inline void *kmem_cache_alloc_node(struct kmem_cache *cachep,
+			gfp_t flags, int node)
 {
 	return kmem_cache_alloc(cachep, flags);
 }
@@ -269,7 +261,7 @@ static inline void *kmalloc_node(size_t size, gfp_t flags, int node)
 #endif
 
 extern int FASTCALL(kmem_cache_reap(int));
-extern int FASTCALL(kmem_ptr_validate(kmem_cache_t *cachep, void *ptr));
+extern int FASTCALL(kmem_ptr_validate(struct kmem_cache *cachep, void *ptr));
 
 #else /* CONFIG_SLOB */
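
For reference, below is a minimal caller-side sketch of the interface after this change. It is illustrative only: the struct foo, foo_cachep and foo_* function names are made up for the example and are not part of the patch. It uses the non-deprecated struct kmem_cache spelling together with the kmem_cache_create()/kmem_cache_alloc()/kmem_cache_free()/kmem_cache_destroy() prototypes shown in the hunks above, including the constructor and destructor arguments that kmem_cache_create() still takes at this point (passed as NULL here).

/*
 * Illustrative sketch only: "struct foo" and foo_cachep are hypothetical
 * names, not part of this patch.  It exercises the prototypes from the
 * hunks above using struct kmem_cache rather than kmem_cache_t.
 */
#include <linux/slab.h>
#include <linux/errno.h>

struct foo {
	int bar;
};

static struct kmem_cache *foo_cachep;	/* previously: kmem_cache_t *foo_cachep; */

static int foo_cache_init(void)
{
	/* name, object size, alignment, flags, constructor, destructor */
	foo_cachep = kmem_cache_create("foo_cache", sizeof(struct foo),
				       0, 0, NULL, NULL);
	if (!foo_cachep)
		return -ENOMEM;
	return 0;
}

static void *foo_alloc(gfp_t flags)
{
	return kmem_cache_alloc(foo_cachep, flags);
}

static void foo_free(void *obj)
{
	kmem_cache_free(foo_cachep, obj);
}

static void foo_cache_exit(void)
{
	kmem_cache_destroy(foo_cachep);
}

Code that still spells the cache type as kmem_cache_t continues to compile, but the __deprecated marker on the typedef above is intended to make such uses produce a compiler warning.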