From dacd2a1a5cf621288833aa3c6e815b86a1536538 Mon Sep 17 00:00:00 2001
From: Yasuyuki Kozakai
Date: Sat, 7 Jul 2007 22:25:51 -0700
Subject: [PATCH] [NETFILTER]: nf_conntrack: remove old memory allocator of conntrack

Now memory space for helper and NAT is allocated by the extension
infrastructure.

Signed-off-by: Yasuyuki Kozakai
Signed-off-by: Patrick McHardy
Signed-off-by: David S. Miller
---
 include/net/netfilter/nf_conntrack.h         |  14 --
 include/net/netfilter/nf_conntrack_l3proto.h |   2 -
 .../netfilter/nf_conntrack_l3proto_ipv4.c    |   6 -
 .../netfilter/nf_conntrack_l3proto_ipv6.c    |   6 -
 net/netfilter/nf_conntrack_core.c            | 222 ++----------------
 net/netfilter/nf_conntrack_l3proto_generic.c |   7 -
 6 files changed, 15 insertions(+), 242 deletions(-)

diff --git a/include/net/netfilter/nf_conntrack.h b/include/net/netfilter/nf_conntrack.h
index b2083d386f..71386e5c4b 100644
--- a/include/net/netfilter/nf_conntrack.h
+++ b/include/net/netfilter/nf_conntrack.h
@@ -117,9 +117,6 @@ struct nf_conn
 	/* Unique ID that identifies this conntrack*/
 	unsigned int id;
 
-	/* features - nat, helper, ... used by allocating system */
-	u_int32_t features;
-
 #if defined(CONFIG_NF_CONNTRACK_MARK)
 	u_int32_t mark;
 #endif
@@ -133,9 +130,6 @@ struct nf_conn
 
 	/* Extensions */
 	struct nf_ct_ext *ext;
-
-	/* features dynamically at the end: helper, nat (both optional) */
-	char data[0];
 };
 
 static inline struct nf_conn *
@@ -265,14 +259,6 @@ do {							\
 	local_bh_enable();				\
 } while (0)
 
-/* no helper, no nat */
-#define NF_CT_F_BASIC	0
-/* for helper */
-#define NF_CT_F_HELP	1
-/* for nat. */
-#define NF_CT_F_NAT	2
-#define NF_CT_F_NUM	4
-
 extern int
 nf_conntrack_register_cache(u_int32_t features, const char *name, size_t size);
 extern void
diff --git a/include/net/netfilter/nf_conntrack_l3proto.h b/include/net/netfilter/nf_conntrack_l3proto.h
index 96a58d8e1d..890752d7f6 100644
--- a/include/net/netfilter/nf_conntrack_l3proto.h
+++ b/include/net/netfilter/nf_conntrack_l3proto.h
@@ -64,8 +64,6 @@ struct nf_conntrack_l3proto
 	int (*prepare)(struct sk_buff **pskb, unsigned int hooknum,
 		       unsigned int *dataoff, u_int8_t *protonum);
 
-	u_int32_t (*get_features)(const struct nf_conntrack_tuple *tuple);
-
 	int (*tuple_to_nfattr)(struct sk_buff *skb,
 			       const struct nf_conntrack_tuple *t);
 
diff --git a/net/ipv4/netfilter/nf_conntrack_l3proto_ipv4.c b/net/ipv4/netfilter/nf_conntrack_l3proto_ipv4.c
index 7411dd16d7..129a8cccf4 100644
--- a/net/ipv4/netfilter/nf_conntrack_l3proto_ipv4.c
+++ b/net/ipv4/netfilter/nf_conntrack_l3proto_ipv4.c
@@ -103,11 +103,6 @@ ipv4_prepare(struct sk_buff **pskb, unsigned int hooknum, unsigned int *dataoff,
 	return NF_ACCEPT;
 }
 
-static u_int32_t ipv4_get_features(const struct nf_conntrack_tuple *tuple)
-{
-	return NF_CT_F_BASIC;
-}
-
 static unsigned int ipv4_confirm(unsigned int hooknum,
 				 struct sk_buff **pskb,
 				 const struct net_device *in,
@@ -419,7 +414,6 @@ struct nf_conntrack_l3proto nf_conntrack_l3proto_ipv4 = {
 	.print_tuple	 = ipv4_print_tuple,
 	.print_conntrack = ipv4_print_conntrack,
 	.prepare	 = ipv4_prepare,
-	.get_features	 = ipv4_get_features,
 #if defined(CONFIG_NF_CT_NETLINK) || defined(CONFIG_NF_CT_NETLINK_MODULE)
 	.tuple_to_nfattr = ipv4_tuple_to_nfattr,
 	.nfattr_to_tuple = ipv4_nfattr_to_tuple,
diff --git a/net/ipv6/netfilter/nf_conntrack_l3proto_ipv6.c b/net/ipv6/netfilter/nf_conntrack_l3proto_ipv6.c
index 1b1797f1f3..747b01e531 100644
--- a/net/ipv6/netfilter/nf_conntrack_l3proto_ipv6.c
+++ b/net/ipv6/netfilter/nf_conntrack_l3proto_ipv6.c
@@ -147,11 +147,6 @@ ipv6_prepare(struct sk_buff **pskb, unsigned int hooknum, unsigned int *dataoff,
 	return NF_ACCEPT;
 }
 
-static u_int32_t ipv6_get_features(const struct nf_conntrack_tuple *tuple)
-{
-	return NF_CT_F_BASIC;
-}
-
 static unsigned int ipv6_confirm(unsigned int hooknum,
 				 struct sk_buff **pskb,
 				 const struct net_device *in,
@@ -397,7 +392,6 @@ struct nf_conntrack_l3proto nf_conntrack_l3proto_ipv6 = {
 	.ctl_table_path	  = nf_net_netfilter_sysctl_path,
 	.ctl_table	  = nf_ct_ipv6_sysctl_table,
 #endif
-	.get_features	  = ipv6_get_features,
 	.me		  = THIS_MODULE,
 };
 
diff --git a/net/netfilter/nf_conntrack_core.c b/net/netfilter/nf_conntrack_core.c
index 914506e6c7..a713666529 100644
--- a/net/netfilter/nf_conntrack_core.c
+++ b/net/netfilter/nf_conntrack_core.c
@@ -71,39 +71,12 @@ EXPORT_SYMBOL_GPL(nf_conntrack_untracked);
 unsigned int nf_ct_log_invalid __read_mostly;
 LIST_HEAD(unconfirmed);
 static int nf_conntrack_vmalloc __read_mostly;
-
+static struct kmem_cache *nf_conntrack_cachep __read_mostly;
 static unsigned int nf_conntrack_next_id;
 
 DEFINE_PER_CPU(struct ip_conntrack_stat, nf_conntrack_stat);
 EXPORT_PER_CPU_SYMBOL(nf_conntrack_stat);
 
-/*
- * This scheme offers various size of "struct nf_conn" dependent on
- * features(helper, nat, ...)
- */
-
-#define NF_CT_FEATURES_NAMELEN	256
-static struct {
-	/* name of slab cache. printed in /proc/slabinfo */
-	char *name;
-
-	/* size of slab cache */
-	size_t size;
-
-	/* slab cache pointer */
-	struct kmem_cache *cachep;
-
-	/* allocated slab cache + modules which uses this slab cache */
-	int use;
-
-} nf_ct_cache[NF_CT_F_NUM];
-
-/* protect members of nf_ct_cache except of "use" */
-DEFINE_RWLOCK(nf_ct_cache_lock);
-
-/* This avoids calling kmem_cache_create() with same name simultaneously */
-static DEFINE_MUTEX(nf_ct_cache_mutex);
-
 static int nf_conntrack_hash_rnd_initted;
 static unsigned int nf_conntrack_hash_rnd;
 
@@ -126,122 +99,6 @@ static inline u_int32_t hash_conntrack(const struct nf_conntrack_tuple *tuple)
 				nf_conntrack_hash_rnd);
 }
 
-int nf_conntrack_register_cache(u_int32_t features, const char *name,
-				size_t size)
-{
-	int ret = 0;
-	char *cache_name;
-	struct kmem_cache *cachep;
-
-	DEBUGP("nf_conntrack_register_cache: features=0x%x, name=%s, size=%d\n",
-	       features, name, size);
-
-	if (features < NF_CT_F_BASIC || features >= NF_CT_F_NUM) {
-		DEBUGP("nf_conntrack_register_cache: invalid features.: 0x%x\n",
-			features);
-		return -EINVAL;
-	}
-
-	mutex_lock(&nf_ct_cache_mutex);
-
-	write_lock_bh(&nf_ct_cache_lock);
-	/* e.g: multiple helpers are loaded */
-	if (nf_ct_cache[features].use > 0) {
-		DEBUGP("nf_conntrack_register_cache: already resisterd.\n");
-		if ((!strncmp(nf_ct_cache[features].name, name,
-			      NF_CT_FEATURES_NAMELEN))
-		    && nf_ct_cache[features].size == size) {
-			DEBUGP("nf_conntrack_register_cache: reusing.\n");
-			nf_ct_cache[features].use++;
-			ret = 0;
-		} else
-			ret = -EBUSY;
-
-		write_unlock_bh(&nf_ct_cache_lock);
-		mutex_unlock(&nf_ct_cache_mutex);
-		return ret;
-	}
-	write_unlock_bh(&nf_ct_cache_lock);
-
-	/*
-	 * The memory space for name of slab cache must be alive until
-	 * cache is destroyed.
-	 */
-	cache_name = kmalloc(sizeof(char)*NF_CT_FEATURES_NAMELEN, GFP_ATOMIC);
-	if (cache_name == NULL) {
-		DEBUGP("nf_conntrack_register_cache: can't alloc cache_name\n");
-		ret = -ENOMEM;
-		goto out_up_mutex;
-	}
-
-	if (strlcpy(cache_name, name, NF_CT_FEATURES_NAMELEN)
-						>= NF_CT_FEATURES_NAMELEN) {
-		printk("nf_conntrack_register_cache: name too long\n");
-		ret = -EINVAL;
-		goto out_free_name;
-	}
-
-	cachep = kmem_cache_create(cache_name, size, 0, 0,
-				   NULL, NULL);
-	if (!cachep) {
-		printk("nf_conntrack_register_cache: Can't create slab cache "
-		       "for the features = 0x%x\n", features);
-		ret = -ENOMEM;
-		goto out_free_name;
-	}
-
-	write_lock_bh(&nf_ct_cache_lock);
-	nf_ct_cache[features].use = 1;
-	nf_ct_cache[features].size = size;
-	nf_ct_cache[features].cachep = cachep;
-	nf_ct_cache[features].name = cache_name;
-	write_unlock_bh(&nf_ct_cache_lock);
-
-	goto out_up_mutex;
-
-out_free_name:
-	kfree(cache_name);
-out_up_mutex:
-	mutex_unlock(&nf_ct_cache_mutex);
-	return ret;
-}
-EXPORT_SYMBOL_GPL(nf_conntrack_register_cache);
-
-/* FIXME: In the current, only nf_conntrack_cleanup() can call this function. */
-void nf_conntrack_unregister_cache(u_int32_t features)
-{
-	struct kmem_cache *cachep;
-	char *name;
-
-	/*
-	 * This assures that kmem_cache_create() isn't called before destroying
-	 * slab cache.
-	 */
-	DEBUGP("nf_conntrack_unregister_cache: 0x%04x\n", features);
-	mutex_lock(&nf_ct_cache_mutex);
-
-	write_lock_bh(&nf_ct_cache_lock);
-	if (--nf_ct_cache[features].use > 0) {
-		write_unlock_bh(&nf_ct_cache_lock);
-		mutex_unlock(&nf_ct_cache_mutex);
-		return;
-	}
-	cachep = nf_ct_cache[features].cachep;
-	name = nf_ct_cache[features].name;
-	nf_ct_cache[features].cachep = NULL;
-	nf_ct_cache[features].name = NULL;
-	nf_ct_cache[features].size = 0;
-	write_unlock_bh(&nf_ct_cache_lock);
-
-	synchronize_net();
-
-	kmem_cache_destroy(cachep);
-	kfree(name);
-
-	mutex_unlock(&nf_ct_cache_mutex);
-}
-EXPORT_SYMBOL_GPL(nf_conntrack_unregister_cache);
-
 int
 nf_ct_get_tuple(const struct sk_buff *skb,
 		unsigned int nhoff,
@@ -559,11 +416,8 @@ static int early_drop(struct list_head *chain)
 	return dropped;
 }
 
-static struct nf_conn *
-__nf_conntrack_alloc(const struct nf_conntrack_tuple *orig,
-		     const struct nf_conntrack_tuple *repl,
-		     const struct nf_conntrack_l3proto *l3proto,
-		     u_int32_t features)
+struct nf_conn *nf_conntrack_alloc(const struct nf_conntrack_tuple *orig,
+				   const struct nf_conntrack_tuple *repl)
 {
 	struct nf_conn *conntrack = NULL;
 
@@ -589,65 +443,28 @@ __nf_conntrack_alloc(const struct nf_conntrack_tuple *orig,
 		}
 	}
 
-	/* find features needed by this conntrack. */
-	features |= l3proto->get_features(orig);
-
-	DEBUGP("nf_conntrack_alloc: features=0x%x\n", features);
-
-	read_lock_bh(&nf_ct_cache_lock);
-
-	if (unlikely(!nf_ct_cache[features].use)) {
-		DEBUGP("nf_conntrack_alloc: not supported features = 0x%x\n",
-		       features);
-		goto out;
-	}
-
-	conntrack = kmem_cache_alloc(nf_ct_cache[features].cachep, GFP_ATOMIC);
+	conntrack = kmem_cache_zalloc(nf_conntrack_cachep, GFP_ATOMIC);
 	if (conntrack == NULL) {
-		DEBUGP("nf_conntrack_alloc: Can't alloc conntrack from cache\n");
-		goto out;
+		DEBUGP("nf_conntrack_alloc: Can't alloc conntrack.\n");
+		atomic_dec(&nf_conntrack_count);
+		return ERR_PTR(-ENOMEM);
 	}
 
-	memset(conntrack, 0, nf_ct_cache[features].size);
-	conntrack->features = features;
 	atomic_set(&conntrack->ct_general.use, 1);
 	conntrack->tuplehash[IP_CT_DIR_ORIGINAL].tuple = *orig;
 	conntrack->tuplehash[IP_CT_DIR_REPLY].tuple = *repl;
 	/* Don't set timer yet: wait for confirmation */
 	setup_timer(&conntrack->timeout, death_by_timeout,
 		    (unsigned long)conntrack);
-	read_unlock_bh(&nf_ct_cache_lock);
-	return conntrack;
-out:
-	read_unlock_bh(&nf_ct_cache_lock);
-	atomic_dec(&nf_conntrack_count);
 	return conntrack;
 }
-
-struct nf_conn *nf_conntrack_alloc(const struct nf_conntrack_tuple *orig,
-				   const struct nf_conntrack_tuple *repl)
-{
-	struct nf_conntrack_l3proto *l3proto;
-	struct nf_conn *ct;
-
-	rcu_read_lock();
-	l3proto = __nf_ct_l3proto_find(orig->src.l3num);
-	ct = __nf_conntrack_alloc(orig, repl, l3proto, 0);
-	rcu_read_unlock();
-
-	return ct;
-}
 EXPORT_SYMBOL_GPL(nf_conntrack_alloc);
 
 void nf_conntrack_free(struct nf_conn *conntrack)
 {
-	u_int32_t features = conntrack->features;
-	NF_CT_ASSERT(features >= NF_CT_F_BASIC && features < NF_CT_F_NUM);
 	nf_ct_ext_free(conntrack);
-	DEBUGP("nf_conntrack_free: features = 0x%x, conntrack=%p\n", features,
-	       conntrack);
-	kmem_cache_free(nf_ct_cache[features].cachep, conntrack);
+	kmem_cache_free(nf_conntrack_cachep, conntrack);
 	atomic_dec(&nf_conntrack_count);
 }
 EXPORT_SYMBOL_GPL(nf_conntrack_free);
@@ -665,14 +482,13 @@ init_conntrack(const struct nf_conntrack_tuple *tuple,
 	struct nf_conn_help *help;
 	struct nf_conntrack_tuple repl_tuple;
 	struct nf_conntrack_expect *exp;
-	u_int32_t features = 0;
 
 	if (!nf_ct_invert_tuple(&repl_tuple, tuple, l3proto, l4proto)) {
 		DEBUGP("Can't invert tuple.\n");
 		return NULL;
 	}
 
-	conntrack = __nf_conntrack_alloc(tuple, &repl_tuple, l3proto, features);
+	conntrack = nf_conntrack_alloc(tuple, &repl_tuple);
 	if (conntrack == NULL || IS_ERR(conntrack)) {
 		DEBUGP("Can't allocate conntrack.\n");
 		return (struct nf_conntrack_tuple_hash *)conntrack;
@@ -1128,8 +944,6 @@ EXPORT_SYMBOL_GPL(nf_conntrack_flush);
    supposed to kill the mall. */
 void nf_conntrack_cleanup(void)
 {
-	int i;
-
 	rcu_assign_pointer(ip_ct_attach, NULL);
 
 	/* This makes sure all current packets have passed through
@@ -1150,14 +964,7 @@ void nf_conntrack_cleanup(void)
 
 	rcu_assign_pointer(nf_ct_destroy, NULL);
 
-	for (i = 0; i < NF_CT_F_NUM; i++) {
-		if (nf_ct_cache[i].use == 0)
-			continue;
-
-		NF_CT_ASSERT(nf_ct_cache[i].use == 1);
-		nf_ct_cache[i].use = 1;
-		nf_conntrack_unregister_cache(i);
-	}
+	kmem_cache_destroy(nf_conntrack_cachep);
 	kmem_cache_destroy(nf_conntrack_expect_cachep);
 	free_conntrack_hash(nf_conntrack_hash, nf_conntrack_vmalloc,
 			    nf_conntrack_htable_size);
@@ -1267,9 +1074,10 @@ int __init nf_conntrack_init(void)
 		goto err_out;
 	}
 
-	ret = nf_conntrack_register_cache(NF_CT_F_BASIC, "nf_conntrack:basic",
-					  sizeof(struct nf_conn));
-	if (ret < 0) {
+	nf_conntrack_cachep = kmem_cache_create("nf_conntrack",
+						sizeof(struct nf_conn),
+						0, 0, NULL, NULL);
+	if (!nf_conntrack_cachep) {
 		printk(KERN_ERR "Unable to create nf_conn slab cache\n");
 		goto err_free_hash;
 	}
@@ -1307,7 +1115,7 @@ out_fini_proto:
 out_free_expect_slab:
 	kmem_cache_destroy(nf_conntrack_expect_cachep);
 err_free_conntrack_slab:
-	nf_conntrack_unregister_cache(NF_CT_F_BASIC);
+	kmem_cache_destroy(nf_conntrack_cachep);
 err_free_hash:
 	free_conntrack_hash(nf_conntrack_hash, nf_conntrack_vmalloc,
 			    nf_conntrack_htable_size);
diff --git a/net/netfilter/nf_conntrack_l3proto_generic.c b/net/netfilter/nf_conntrack_l3proto_generic.c
index cbd96f3c1b..2fd0f11b8f 100644
--- a/net/netfilter/nf_conntrack_l3proto_generic.c
+++ b/net/netfilter/nf_conntrack_l3proto_generic.c
@@ -76,12 +76,6 @@ generic_prepare(struct sk_buff **pskb, unsigned int hooknum,
 
 }
 
-static u_int32_t generic_get_features(const struct nf_conntrack_tuple *tuple)
-
-{
-	return NF_CT_F_BASIC;
-}
-
 struct nf_conntrack_l3proto nf_conntrack_l3proto_generic = {
 	.l3proto	 = PF_UNSPEC,
 	.name		 = "unknown",
@@ -90,6 +84,5 @@ struct nf_conntrack_l3proto nf_conntrack_l3proto_generic = {
 	.print_tuple	 = generic_print_tuple,
 	.print_conntrack = generic_print_conntrack,
 	.prepare	 = generic_prepare,
-	.get_features	 = generic_get_features,
 };
 EXPORT_SYMBOL_GPL(nf_conntrack_l3proto_generic);
-- 
2.39.5
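
For readers following the series: the allocator removed above is replaced by the
conntrack extension infrastructure added earlier in this series. Below is a rough
sketch of the consumer side, assuming the nf_ct_ext_add()/nfct_help() API from
that earlier patch; example_attach_help() is a made-up wrapper for illustration,
simplified from real callers such as init_conntrack(), and is not part of this
patch.

#include <net/netfilter/nf_conntrack.h>
#include <net/netfilter/nf_conntrack_extend.h>
#include <net/netfilter/nf_conntrack_helper.h>

/* Illustrative only: attach helper storage to a not-yet-confirmed conntrack.
 * With the old allocator this space had to come from the NF_CT_F_HELP kmem
 * cache, whose object size was fixed when the cache was registered. */
static struct nf_conn_help *example_attach_help(struct nf_conn *ct)
{
	struct nf_conn_help *help;

	/* Allocate the helper area and link it into ct->ext. */
	help = nf_ct_ext_add(ct, NF_CT_EXT_HELPER, GFP_ATOMIC);
	if (help == NULL)
		return NULL;	/* allocation failed; ct itself stays valid */

	/* Later lookups go through the extension id instead of assuming a
	 * fixed offset behind struct nf_conn. */
	return nfct_help(ct);
}

nf_ct_ext_free() in nf_conntrack_free() (see the nf_conntrack_core.c hunk above)
releases whatever extensions were attached, which is why the per-feature caches,
the features field and the ->get_features() hook can all go away.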