static inline int sysfs_slab_add(struct kmem_cache *s) { return 0; }
static inline int sysfs_slab_alias(struct kmem_cache *s, const char *p)
							{ return 0; }
if (s->flags & __OBJECT_POISON) {
	memset(p, POISON_FREE, s->objsize - 1);
(!check_bytes_and_report(s, page, p, "Poison", p,
		POISON_FREE, s->objsize - 1) ||
	!check_bytes_and_report(s, page, p, "Poison",
object_err(s, page, object,
		"page slab pointer corrupt.");
goto fail;
mod_zone_page_state(page_zone(page),
	(s->flags & SLAB_RECLAIM_ACCOUNT) ?
	NR_SLAB_RECLAIMABLE : NR_SLAB_UNRECLAIMABLE,
-static void add_partial_tail(struct kmem_cache_node *n, struct page *page)
+static void add_partial(struct kmem_cache_node *n,
+				struct page *page, int tail)
-	list_add_tail(&page->lru, &n->partial);
-	spin_unlock(&n->list_lock);
-}
-
-static void add_partial(struct kmem_cache_node *n, struct page *page)
-{
-	spin_lock(&n->list_lock);
-	n->nr_partial++;
-	list_add(&page->lru, &n->partial);
+	if (tail)
+		list_add_tail(&page->lru, &n->partial);
+	else
+		list_add(&page->lru, &n->partial);
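Folding the two helpers into one with a placement flag removes the duplicated locking and bookkeeping. Assembled from the kept and removed lines, the consolidated helper presumably reads as follows once the hunk is applied (a sketch; the spin_lock/nr_partial lines are inferred from the deleted add_partial body):

static void add_partial(struct kmem_cache_node *n,
				struct page *page, int tail)
{
	spin_lock(&n->list_lock);
	n->nr_partial++;
	if (tail)
		/* queue at the end of the partial list */
		list_add_tail(&page->lru, &n->partial);
	else
		list_add(&page->lru, &n->partial);
	spin_unlock(&n->list_lock);
}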
static void deactivate_slab(struct kmem_cache *s, struct kmem_cache_cpu *c)
{
	struct page *page = c->page;
/*
* Merge cpu freelist into freelist. Typically we get here
* because both freelists are empty. So this is unlikely
-	add_partial_tail(get_node(s, page_to_nid(page)), page);
+	add_partial(get_node(s, page_to_nid(page)), page, 1);
* If fastpath is not possible then fall back to __slab_free where we deal
* with all sorts of special processing.
*/
		struct page *page, void *x, void *addr)
{
	void **object = (void *)x;
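The excerpt breaks off after the first local. Going by the comment above it, the fastpath presumably continues along these lines in this version of the file (a sketch, not part of the patch; get_cpu_slab, c->offset and the c->node test are assumptions about the surrounding code):

	struct kmem_cache_cpu *c;
	unsigned long flags;

	local_irq_save(flags);
	c = get_cpu_slab(s, smp_processor_id());
	if (likely(page == c->page && c->node >= 0)) {
		/* fastpath: push the object onto the cpu freelist */
		object[c->offset] = c->freelist;
		c->freelist = object;
	} else
		/* slowpath: debug checks, partial-list handling */
		__slab_free(s, page, x, addr, c->offset);
	local_irq_restore(flags);
}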
+	/*
+	 * lockdep requires consistent irq usage for each lock
+	 * so even though there cannot be a race this early in
+	 * the boot sequence, we still disable irqs.
+	 */
+	local_irq_save(flags);
+	add_partial(n, page, 0);
+	local_irq_restore(flags);
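The comment is about lockdep's usage rules, not a real race: lockdep assigns each lock a class and requires its irq state to be consistent everywhere the lock is taken, because inconsistent usage is how a classic self-deadlock starts. A sketch of the pattern the rule forbids (illustrative only, not from the patch):

/* One path takes the lock with interrupts still enabled ... */
spin_lock(&n->list_lock);

/*
 * ... and an interrupt on the same CPU runs a handler that takes
 * it too. The handler spins on a lock held by the context it just
 * preempted, so the CPU is stuck. lockdep reports the inconsistent
 * irq usage up front, which is why even this race-free boot path
 * disables irqs around add_partial().
 */
spin_lock(&n->list_lock);	/* never succeeds */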
+static unsigned long count_partial(struct kmem_cache_node *n)
+{
+	unsigned long flags;
+	unsigned long x = 0;
+	struct page *page;
+
+	spin_lock_irqsave(&n->list_lock, flags);
+	list_for_each_entry(page, &n->partial, lru)
+		x += page->inuse;
+	spin_unlock_irqrestore(&n->list_lock, flags);
+	return x;
+}
+
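count_partial() is moved, not new: the identical body is deleted from its old spot further down. Hoisting it here lets earlier code call it without a forward declaration. It walks one node's partial list under list_lock and sums page->inuse, the count of live objects in each partially filled slab. A hedged usage sketch (the helper and the per-node loop are illustrative, not part of the patch):

static unsigned long count_all_partial(struct kmem_cache *s)
{
	unsigned long x = 0;
	int node;

	/* total objects currently allocated out of partial slabs */
	for_each_online_node(node)
		x += count_partial(get_node(s, node));
	return x;
}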
/*
* kmem_cache_shrink removes empty slabs from the partial lists and sorts
* the remaining slabs by the number of items in use. The slabs with the
-static struct notifier_block __cpuinitdata slab_notifier =
-	{ &slab_cpuup_callback, NULL, 0 };
+static struct notifier_block __cpuinitdata slab_notifier = {
+	&slab_cpuup_callback, NULL, 0
+};
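Only the brace placement changes here. With designated initializers, which kernel style generally prefers for ops and notifier structs, the same definition could drop the positional NULL and 0 entirely (a sketch, not part of the patch):

static struct notifier_block __cpuinitdata slab_notifier = {
	.notifier_call = slab_cpuup_callback,
	/* .next and .priority default to NULL and 0 */
};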
	return slab_alloc(s, gfpflags, node, caller);
}
-static unsigned long count_partial(struct kmem_cache_node *n)
-{
-	unsigned long flags;
-	unsigned long x = 0;
-	struct page *page;
-
-	spin_lock_irqsave(&n->list_lock, flags);
-	list_for_each_entry(page, &n->partial, lru)
-		x += page->inuse;
-	spin_unlock_irqrestore(&n->list_lock, flags);
-	return x;
-}
-
#if defined(CONFIG_SYSFS) && defined(CONFIG_SLUB_DEBUG)
static int validate_slab(struct kmem_cache *s, struct page *page,
						unsigned long *map)
	l->min_time,
	div_long_long_rem(l->sum_time, l->count, &remainder),
	l->max_time);
} else
-			n < PAGE_SIZE - 60) {
-		n += sprintf(buf + n, " cpus=");
-		n += cpulist_scnprintf(buf + n, PAGE_SIZE - n - 50,
+			len < PAGE_SIZE - 60) {
+		len += sprintf(buf + len, " cpus=");
+		len += cpulist_scnprintf(buf + len, PAGE_SIZE - len - 50,
-			n < PAGE_SIZE - 60) {
-		n += sprintf(buf + n, " nodes=");
-		n += nodelist_scnprintf(buf + n, PAGE_SIZE - n - 50,
+			len < PAGE_SIZE - 60) {
+		len += sprintf(buf + len, " nodes=");
+		len += nodelist_scnprintf(buf + len, PAGE_SIZE - len - 50,
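Both hunks rename the running offset from n to len, presumably to stop it colliding with another local, while keeping the same bounded-append idiom: check that headroom remains, sprintf a short fixed prefix, then give the list formatter only the space left (minus slack for what still follows). The idiom in isolation (a sketch; the helper is illustrative, not part of the patch):

static int append_str(char *buf, int len, const char *s)
{
	/*
	 * scnprintf never writes more than the size it is given and
	 * returns the length actually emitted, so len remains a valid
	 * offset into the PAGE_SIZE buffer on every call.
	 */
	if (len < PAGE_SIZE - 60)
		len += scnprintf(buf + len, PAGE_SIZE - len, "%s", s);
	return len;
}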
		const char *buf, size_t length)
{
	int n = simple_strtoul(buf, NULL, 10);

	if (n < 100)
static struct sysfs_ops slab_sysfs_ops = {
	.show = slab_attr_show,
	.store = slab_attr_store,
static struct kobj_type slab_ktype = {
	.sysfs_ops = &slab_sysfs_ops,
* This is typically the case for debug situations. In that
* case we can catch duplicate names easily.
*/
-	kobj_set_kset_s(s, slab_subsys);
-	kobject_set_name(&s->kobj, name);
-	kobject_init(&s->kobj);
-	err = kobject_add(&s->kobj);
-	if (err)
+	s->kobj.kset = slab_kset;
+	err = kobject_init_and_add(&s->kobj, &slab_ktype, NULL, name);
+	if (err) {
+		kobject_put(&s->kobj);
-	sysfs_remove_link(&slab_subsys.kobj, name);
-	return sysfs_create_link(&slab_subsys.kobj,
-				&s->kobj, name);
+	sysfs_remove_link(&slab_kset->kobj, name);
+	return sysfs_create_link(&slab_kset->kobj, &s->kobj, name);
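The old four-step kobject dance (set the kset, set the name, init, add) collapses into kobject_init_and_add(), which takes the ktype and name up front, and the alias links now hang off slab_kset instead of the removed slab_subsys. On failure the caller owns a half-registered kobject and must drop it with kobject_put() so its release hook runs rather than leaking it. Assembled from the added lines, the converted registration presumably reads (a sketch; the return statement is assumed):

	s->kobj.kset = slab_kset;
	err = kobject_init_and_add(&s->kobj, &slab_ktype, NULL, name);
	if (err) {
		/* drop the reference taken by kobject_init_and_add() */
		kobject_put(&s->kobj);
		return err;
	}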