/*
 * SLOB Allocator: Simple List Of Blocks
 *
 * Matt Mackall <mpm@selenic.com> 12/30/03
 *
 * The core of SLOB is a traditional K&R style heap allocator, with
 * support for returning aligned objects. The granularity of this
 * allocator is 4 bytes on 32-bit and 8 bytes on 64-bit, though it
 * could be as low as 2 if the compiler alignment requirements allow.
 *
 * The slob heap is a linked list of pages from __get_free_page, and
 * within each page, there is a singly-linked list of free blocks (slob_t).
 * The heap is grown on demand and allocation from the heap is currently
 * first-fit.
 *
 * Above this is an implementation of kmalloc/kfree. Blocks returned
 * from kmalloc are 4-byte aligned and prepended with a 4-byte header.
 * If kmalloc is asked for objects of PAGE_SIZE or larger, it calls
 * __get_free_pages directly, allocating compound pages so the page order
 * does not have to be separately tracked, and also stores the exact
 * allocation size in page->private so that it can be used to accurately
 * provide ksize(). These objects are detected in kfree() because slob_page()
 * is false for them.
 *
 * SLAB is emulated on top of SLOB by simply calling constructors and
 * destructors for every SLAB allocation. Objects are returned with the
 * 4-byte alignment unless the SLAB_HWCACHE_ALIGN flag is set, in which
 * case the low-level allocator will fragment blocks to create the proper
 * alignment. Again, objects of page-size or greater are allocated by
 * calling __get_free_pages. As SLAB objects know their size, no separate
 * size bookkeeping is necessary and there is essentially no allocation
 * space overhead, and compound pages aren't needed for multi-page
 * allocations.
 */

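/*
 * Worked example of the kmalloc/kfree path described above (illustrative
 * only; numbers assume a 32-bit build with SLOB_UNIT == 4 and
 * PAGE_SIZE == 4096):
 *
 *	p = kmalloc(100, GFP_KERNEL);	// slob_alloc(104): 4-byte header
 *					// stores units = 100, returns header + 1
 *	ksize(p);			// reads the header: 100 + SLOB_UNIT = 104
 *	kfree(p);			// slob_free(p - 4, 104)
 *
 *	q = kmalloc(8192, GFP_KERNEL);	// >= PAGE_SIZE: __get_free_pages(order 1),
 *					// page->private = 8192, so ksize(q) == 8192
 */
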
#include <linux/kernel.h>
#include <linux/slab.h>
#include <linux/mm.h>
#include <linux/cache.h>
#include <linux/init.h>
#include <linux/module.h>
#include <linux/rcupdate.h>
#include <linux/list.h>
#include <asm/atomic.h>

/* SLOB_MIN_ALIGN == sizeof(long) */
#if BITS_PER_LONG == 32
#define SLOB_MIN_ALIGN 4
#else
#define SLOB_MIN_ALIGN 8
#endif

/*
 * slob_block has a field 'units', which indicates size of block if +ve,
 * or offset of next block if -ve (in SLOB_UNITs).
 *
 * Free blocks of size 1 unit simply contain the offset of the next block.
 * Those with larger size contain their size in the first SLOB_UNIT of
 * memory, and the offset of the next free block in the second SLOB_UNIT.
 */
#if PAGE_SIZE <= (32767 * SLOB_MIN_ALIGN)
typedef s16 slobidx_t;
#else
typedef s32 slobidx_t;
#endif

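/*
 * Illustrative encoding example (not part of the interface; assumes
 * SLOB_UNIT == 4): a free block of 3 units whose next free block starts
 * at byte offset 64 in the page (unit index 16) is stored as
 * s[0].units = 3, s[1].units = 16. A 1-unit free block with the same
 * successor is stored as s[0].units = -16.
 */
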
/*
 * Align struct slob_block to long for now, but can some embedded
 * architectures get away with less?
 */
struct slob_block {
        slobidx_t units;
} __attribute__((aligned(SLOB_MIN_ALIGN)));
typedef struct slob_block slob_t;

/*
 * We use struct page fields to manage some slob allocation aspects,
 * however to avoid the horrible mess in include/linux/mm_types.h, we'll
 * just define our own struct page type variant here.
 */
struct slob_page {
        union {
                struct {
                        unsigned long flags;    /* mandatory */
                        atomic_t _count;        /* mandatory */
                        slobidx_t units;        /* free units left in page */
                        unsigned long pad[2];
                        slob_t *free;           /* first free slob_t in page */
                        struct list_head list;  /* linked list of free pages */
                };
                struct page page;
        };
};
static inline void struct_slob_page_wrong_size(void)
{ BUILD_BUG_ON(sizeof(struct slob_page) != sizeof(struct page)); }

/*
 * free_slob_page: call before a slob_page is returned to the page allocator.
 */
static inline void free_slob_page(struct slob_page *sp)
{
        reset_page_mapcount(&sp->page);
        sp->page.mapping = NULL;
}

/*
 * All (partially) free slob pages go on this list.
 */
static LIST_HEAD(free_slob_pages);

/*
 * slob_page: True for all slob pages (false for bigblock pages)
 */
static inline int slob_page(struct slob_page *sp)
{
        return test_bit(PG_active, &sp->flags);
}

static inline void set_slob_page(struct slob_page *sp)
{
        __set_bit(PG_active, &sp->flags);
}

static inline void clear_slob_page(struct slob_page *sp)
{
        __clear_bit(PG_active, &sp->flags);
}

/*
 * slob_page_free: true for pages on free_slob_pages list.
 */
static inline int slob_page_free(struct slob_page *sp)
{
        return test_bit(PG_private, &sp->flags);
}

static inline void set_slob_page_free(struct slob_page *sp)
{
        list_add(&sp->list, &free_slob_pages);
        __set_bit(PG_private, &sp->flags);
}

static inline void clear_slob_page_free(struct slob_page *sp)
{
        list_del(&sp->list);
        __clear_bit(PG_private, &sp->flags);
}

#define SLOB_UNIT sizeof(slob_t)
#define SLOB_UNITS(size) (((size) + SLOB_UNIT - 1)/SLOB_UNIT)
#define SLOB_ALIGN L1_CACHE_BYTES

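/*
 * Example of the unit arithmetic above (illustrative; assumes a 32-bit
 * build where sizeof(slob_t) == 4): SLOB_UNITS(100) == 25, so a 100-byte
 * request consumes 25 4-byte units; on a 64-bit build with 8-byte units
 * the same request rounds up to SLOB_UNITS(100) == 13.
 */
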
/*
 * struct slob_rcu is inserted at the tail of allocated slob blocks, which
 * were created with a SLAB_DESTROY_BY_RCU slab. slob_rcu is used to free
 * the block using call_rcu.
 */
struct slob_rcu {
        struct rcu_head head;
        int size;
};

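/*
 * Resulting object layout for a SLAB_DESTROY_BY_RCU cache (illustrative):
 * kmem_cache_create() grows c->size by sizeof(struct slob_rcu), so an
 * object of size S is laid out as
 *
 *	[ S bytes of object | struct slob_rcu ]
 *
 * and kmem_cache_free() fills in the footer at
 * b + (c->size - sizeof(struct slob_rcu)) before handing it to call_rcu().
 */
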
/*
 * slob_lock protects all slob allocator structures.
 */
static DEFINE_SPINLOCK(slob_lock);

/*
 * Encode the given size and next info into a free slob block s.
 */
static void set_slob(slob_t *s, slobidx_t size, slob_t *next)
{
        slob_t *base = (slob_t *)((unsigned long)s & PAGE_MASK);
        slobidx_t offset = next - base;

        if (size > 1) {
                s[0].units = size;
                s[1].units = offset;
        } else
                s[0].units = -offset;
}

/*
 * Return the size of a slob block.
 */
static slobidx_t slob_units(slob_t *s)
{
        if (s->units > 0)
                return s->units;
        return 1;
}

/*
 * Return the next free slob block pointer after this one.
 */
static slob_t *slob_next(slob_t *s)
{
        slob_t *base = (slob_t *)((unsigned long)s & PAGE_MASK);
        slobidx_t next;

        if (s[0].units < 0)
                next = -s[0].units;
        else
                next = s[1].units;
        return base + next;
}

/*
 * Returns true if s is the last free block in its page.
 */
static int slob_last(slob_t *s)
{
        return !((unsigned long)slob_next(s) & ~PAGE_MASK);
}

/*
 * Allocate a slob block within a given slob_page sp.
 */
static void *slob_page_alloc(struct slob_page *sp, size_t size, int align)
{
        slob_t *prev, *cur, *aligned = 0;
        int delta = 0, units = SLOB_UNITS(size);

        for (prev = NULL, cur = sp->free; ; prev = cur, cur = slob_next(cur)) {
                slobidx_t avail = slob_units(cur);

                if (align) {
                        aligned = (slob_t *)ALIGN((unsigned long)cur, align);
                        delta = aligned - cur;
                }
                if (avail >= units + delta) { /* room enough? */
                        slob_t *next;

                        if (delta) { /* need to fragment head to align? */
                                next = slob_next(cur);
                                set_slob(aligned, avail - delta, next);
                                set_slob(cur, delta, aligned);
                                prev = cur;
                                cur = aligned;
                                avail = slob_units(cur);
                        }

                        next = slob_next(cur);
                        if (avail == units) { /* exact fit? unlink. */
                                if (prev)
                                        set_slob(prev, slob_units(prev), next);
                                else
                                        sp->free = next;
                        } else { /* fragment */
                                if (prev)
                                        set_slob(prev, slob_units(prev), cur + units);
                                else
                                        sp->free = cur + units;
                                set_slob(cur + units, avail - units, next);
                        }

                        sp->units -= units;
                        if (!sp->units)
                                clear_slob_page_free(sp);
                        return cur;
                }
                if (slob_last(cur))
                        return NULL;
        }
}

/*
 * slob_alloc: entry point into the slob allocator.
 */
static void *slob_alloc(size_t size, gfp_t gfp, int align)
{
        struct slob_page *sp;
        slob_t *b = NULL;
        unsigned long flags;

        spin_lock_irqsave(&slob_lock, flags);
        /* Iterate through each partially free page, try to find room */
        list_for_each_entry(sp, &free_slob_pages, list) {
                if (sp->units >= SLOB_UNITS(size)) {
                        b = slob_page_alloc(sp, size, align);
                        if (b)
                                break;
                }
        }
        spin_unlock_irqrestore(&slob_lock, flags);

        /* Not enough space: must allocate a new page */
        if (!b) {
                b = (slob_t *)__get_free_page(gfp);
                if (!b)
                        return NULL;
                sp = (struct slob_page *)virt_to_page(b);
                set_slob_page(sp);

                spin_lock_irqsave(&slob_lock, flags);
                sp->units = SLOB_UNITS(PAGE_SIZE);
                sp->free = b;
                INIT_LIST_HEAD(&sp->list);
                set_slob(b, SLOB_UNITS(PAGE_SIZE), b + SLOB_UNITS(PAGE_SIZE));
                set_slob_page_free(sp);
                b = slob_page_alloc(sp, size, align);
                BUG_ON(!b);
                spin_unlock_irqrestore(&slob_lock, flags);
        }
        return b;
}

/*
 * slob_free: entry point into the slob allocator.
 */
static void slob_free(void *block, int size)
{
        struct slob_page *sp;
        slob_t *prev, *next, *b = (slob_t *)block;
        slobidx_t units;
        unsigned long flags;

        if (!block)
                return;
        BUG_ON(!size);

        sp = (struct slob_page *)virt_to_page(block);
        units = SLOB_UNITS(size);

        spin_lock_irqsave(&slob_lock, flags);

        if (sp->units + units == SLOB_UNITS(PAGE_SIZE)) {
                /* Go directly to page allocator. Do not pass slob allocator */
                if (slob_page_free(sp))
                        clear_slob_page_free(sp);
                clear_slob_page(sp);
                free_slob_page(sp);
                free_page((unsigned long)b);
                goto out;
        }

        if (!slob_page_free(sp)) {
                /* This slob page is about to become partially free. Easy! */
                sp->units = units;
                sp->free = b;
                set_slob(b, units,
                        (void *)((unsigned long)(b +
                                        SLOB_UNITS(PAGE_SIZE)) & PAGE_MASK));
                set_slob_page_free(sp);
                goto out;
        }

        /*
         * Otherwise the page is already partially free, so find reinsertion
         * point.
         */
        sp->units += units;

        if (b < sp->free) {
                set_slob(b, units, sp->free);
                sp->free = b;
        } else {
                prev = sp->free;
                next = slob_next(prev);
                while (b > next) {
                        prev = next;
                        next = slob_next(prev);
                }

                if (!slob_last(prev) && b + units == next) {
                        units += slob_units(next);
                        set_slob(b, units, slob_next(next));
                } else
                        set_slob(b, units, next);

                if (prev + slob_units(prev) == b) {
                        units = slob_units(b) + slob_units(prev);
                        set_slob(prev, units, slob_next(b));
                } else
                        set_slob(prev, slob_units(prev), b);
        }
out:
        spin_unlock_irqrestore(&slob_lock, flags);
}

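/*
 * Page-lifetime arithmetic in slob_free() above, as a worked example
 * (illustrative; assumes PAGE_SIZE == 4096 and SLOB_UNIT == 4): a fresh
 * page starts with sp->units == SLOB_UNITS(PAGE_SIZE) == 1024 free units.
 * Each slob_page_alloc() subtracts the units it hands out; each
 * slob_free() adds them back. Once sp->units + units again equals 1024,
 * every block in the page is free, so the page is unlinked and returned
 * straight to the page allocator.
 */
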
/*
 * End of slob allocator proper. Begin kmem_cache_alloc and kmalloc frontend.
 */

void *__kmalloc(size_t size, gfp_t gfp)
{
        if (size < PAGE_SIZE - SLOB_UNIT) {
                slob_t *m;

                m = slob_alloc(size + SLOB_UNIT, gfp, 0);
                if (!m)
                        return NULL;
                m->units = size;
                return m + 1;
        } else {
                void *ret;

                ret = (void *) __get_free_pages(gfp | __GFP_COMP,
                                                get_order(size));
                if (ret) {
                        struct page *page;
                        page = virt_to_page(ret);
                        page->private = size;
                }
                return ret;
        }
}
EXPORT_SYMBOL(__kmalloc);

/**
 * krealloc - reallocate memory. The contents will remain unchanged.
 * @p: object to reallocate memory for.
 * @new_size: how many bytes of memory are required.
 * @flags: the type of memory to allocate.
 *
 * The contents of the object pointed to are preserved up to the
 * lesser of the new and old sizes. If @p is %NULL, krealloc()
 * behaves exactly like kmalloc(). If @new_size is 0 and @p is not a
 * %NULL pointer, the object pointed to is freed.
 */
void *krealloc(const void *p, size_t new_size, gfp_t flags)
{
        void *ret;

        if (unlikely(!p))
                return kmalloc_track_caller(new_size, flags);

        if (unlikely(!new_size)) {
                kfree(p);
                return NULL;
        }

        ret = kmalloc_track_caller(new_size, flags);
        if (ret) {
                memcpy(ret, p, min(new_size, ksize(p)));
                kfree(p);
        }
        return ret;
}
EXPORT_SYMBOL(krealloc);

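/*
 * Typical calling pattern (illustrative usage, not part of this file's
 * interface): on allocation failure the original buffer is left untouched,
 * so the caller must not lose the old pointer.
 *
 *	new = krealloc(buf, 2 * len, GFP_KERNEL);
 *	if (!new)
 *		kfree(buf);
 *	else
 *		buf = new;
 */
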
void kfree(const void *block)
{
        struct slob_page *sp;

        if (!block)
                return;

        sp = (struct slob_page *)virt_to_page(block);
        if (slob_page(sp)) {
                slob_t *m = (slob_t *)block - 1;
                slob_free(m, m->units + SLOB_UNIT);
        } else
                put_page(&sp->page);
}
EXPORT_SYMBOL(kfree);

/* can't use ksize for kmem_cache_alloc memory, only kmalloc */
size_t ksize(const void *block)
{
        struct slob_page *sp;

        if (!block)
                return 0;

        sp = (struct slob_page *)virt_to_page(block);
        if (slob_page(sp))
                return ((slob_t *)block - 1)->units + SLOB_UNIT;
        else
                return sp->page.private;
}

struct kmem_cache {
        unsigned int size, align;
        unsigned long flags;
        const char *name;
        void (*ctor)(void *, struct kmem_cache *, unsigned long);
};

struct kmem_cache *kmem_cache_create(const char *name, size_t size,
        size_t align, unsigned long flags,
        void (*ctor)(void*, struct kmem_cache *, unsigned long),
        void (*dtor)(void*, struct kmem_cache *, unsigned long))
{
        struct kmem_cache *c;

        c = slob_alloc(sizeof(struct kmem_cache), flags, 0);

        if (c) {
                c->name = name;
                c->size = size;
                if (flags & SLAB_DESTROY_BY_RCU) {
                        /* leave room for rcu footer at the end of object */
                        c->size += sizeof(struct slob_rcu);
                }
                c->flags = flags;
                c->ctor = ctor;
                /* ignore alignment unless it's forced */
                c->align = (flags & SLAB_HWCACHE_ALIGN) ? SLOB_ALIGN : 0;
                if (c->align < align)
                        c->align = align;
        } else if (flags & SLAB_PANIC)
                panic("Cannot create slab cache %s\n", name);

        return c;
}
EXPORT_SYMBOL(kmem_cache_create);

void kmem_cache_destroy(struct kmem_cache *c)
{
        slob_free(c, sizeof(struct kmem_cache));
}
EXPORT_SYMBOL(kmem_cache_destroy);

void *kmem_cache_alloc(struct kmem_cache *c, gfp_t flags)
{
        void *b;

        if (c->size < PAGE_SIZE)
                b = slob_alloc(c->size, flags, c->align);
        else
                b = (void *)__get_free_pages(flags, get_order(c->size));

        if (c->ctor)
                c->ctor(b, c, 0);

        return b;
}
EXPORT_SYMBOL(kmem_cache_alloc);

void *kmem_cache_zalloc(struct kmem_cache *c, gfp_t flags)
{
        void *ret = kmem_cache_alloc(c, flags);
        if (ret)
                memset(ret, 0, c->size);

        return ret;
}
EXPORT_SYMBOL(kmem_cache_zalloc);

static void __kmem_cache_free(void *b, int size)
{
        if (size < PAGE_SIZE)
                slob_free(b, size);
        else
                free_pages((unsigned long)b, get_order(size));
}

static void kmem_rcu_free(struct rcu_head *head)
{
        struct slob_rcu *slob_rcu = (struct slob_rcu *)head;
        void *b = (void *)slob_rcu - (slob_rcu->size - sizeof(struct slob_rcu));

        __kmem_cache_free(b, slob_rcu->size);
}

void kmem_cache_free(struct kmem_cache *c, void *b)
{
        if (unlikely(c->flags & SLAB_DESTROY_BY_RCU)) {
                struct slob_rcu *slob_rcu;
                slob_rcu = b + (c->size - sizeof(struct slob_rcu));
                INIT_RCU_HEAD(&slob_rcu->head);
                slob_rcu->size = c->size;
                call_rcu(&slob_rcu->head, kmem_rcu_free);
        } else {
                __kmem_cache_free(b, c->size);
        }
}
EXPORT_SYMBOL(kmem_cache_free);

unsigned int kmem_cache_size(struct kmem_cache *c)
{
        return c->size;
}
EXPORT_SYMBOL(kmem_cache_size);

const char *kmem_cache_name(struct kmem_cache *c)
{
        return c->name;
}
EXPORT_SYMBOL(kmem_cache_name);

int kmem_cache_shrink(struct kmem_cache *d)
{
        return 0;
}
EXPORT_SYMBOL(kmem_cache_shrink);

int kmem_ptr_validate(struct kmem_cache *a, const void *b)
{
        return 0;
}

void __init kmem_cache_init(void)