[NETFILTER]: nf_conntrack: split out protocol handling
diff --git a/net/netfilter/nf_conntrack_core.c b/net/netfilter/nf_conntrack_core.c
index cf6face67af448097093337ac9a36f1d7f075b30..09c0e63110443ccc6358c2255dff3b9006830708 100644
--- a/net/netfilter/nf_conntrack_core.c
+++ b/net/netfilter/nf_conntrack_core.c
@@ -73,8 +73,6 @@ DEFINE_RWLOCK(nf_conntrack_lock);
 atomic_t nf_conntrack_count = ATOMIC_INIT(0);
 
 void (*nf_conntrack_destroyed)(struct nf_conn *conntrack) = NULL;
-struct nf_conntrack_protocol **nf_ct_protos[PF_MAX] __read_mostly;
-struct nf_conntrack_l3proto *nf_ct_l3protos[PF_MAX] __read_mostly;
 unsigned int nf_conntrack_htable_size __read_mostly = 0;
 int nf_conntrack_max __read_mostly;
 struct list_head *nf_conntrack_hash __read_mostly;
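
The two arrays dropped here are the per-family registration tables: nf_ct_l3protos[] is indexed by layer-3 family, and nf_ct_protos[][] by family and then IP protocol number, as the lookup helpers removed further down show. A minimal indexing sketch, with AF_INET and IPPROTO_TCP chosen purely for illustration:

    /* sketch of how the dropped tables are indexed (constants for illustration) */
    struct nf_conntrack_l3proto  *l3 = nf_ct_l3protos[AF_INET];            /* per l3 family */
    struct nf_conntrack_protocol *l4 = nf_ct_protos[AF_INET][IPPROTO_TCP]; /* family, then l4 protocol */
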
@@ -85,73 +83,6 @@ static int nf_conntrack_vmalloc __read_mostly;
 
 static unsigned int nf_conntrack_next_id;
 
-#ifdef CONFIG_NF_CONNTRACK_EVENTS
-ATOMIC_NOTIFIER_HEAD(nf_conntrack_chain);
-ATOMIC_NOTIFIER_HEAD(nf_conntrack_expect_chain);
-
-DEFINE_PER_CPU(struct nf_conntrack_ecache, nf_conntrack_ecache);
-
-/* deliver cached events and clear cache entry - must be called with locally
- * disabled softirqs */
-static inline void
-__nf_ct_deliver_cached_events(struct nf_conntrack_ecache *ecache)
-{
-       DEBUGP("ecache: delivering events for %p\n", ecache->ct);
-       if (nf_ct_is_confirmed(ecache->ct) && !nf_ct_is_dying(ecache->ct)
-           && ecache->events)
-               atomic_notifier_call_chain(&nf_conntrack_chain, ecache->events,
-                                   ecache->ct);
-
-       ecache->events = 0;
-       nf_ct_put(ecache->ct);
-       ecache->ct = NULL;
-}
-
-/* Deliver all cached events for a particular conntrack. This is called
- * by code prior to async packet handling for freeing the skb */
-void nf_ct_deliver_cached_events(const struct nf_conn *ct)
-{
-       struct nf_conntrack_ecache *ecache;
-
-       local_bh_disable();
-       ecache = &__get_cpu_var(nf_conntrack_ecache);
-       if (ecache->ct == ct)
-               __nf_ct_deliver_cached_events(ecache);
-       local_bh_enable();
-}
-
-/* Deliver cached events for old pending events, if current conntrack != old */
-void __nf_ct_event_cache_init(struct nf_conn *ct)
-{
-       struct nf_conntrack_ecache *ecache;
-       
-       /* take care of delivering potentially old events */
-       ecache = &__get_cpu_var(nf_conntrack_ecache);
-       BUG_ON(ecache->ct == ct);
-       if (ecache->ct)
-               __nf_ct_deliver_cached_events(ecache);
-       /* initialize for this conntrack/packet */
-       ecache->ct = ct;
-       nf_conntrack_get(&ct->ct_general);
-}
-
-/* flush the event cache - touches other CPU's data and must not be called
- * while packets are still passing through the code */
-static void nf_ct_event_cache_flush(void)
-{
-       struct nf_conntrack_ecache *ecache;
-       int cpu;
-
-       for_each_possible_cpu(cpu) {
-               ecache = &per_cpu(nf_conntrack_ecache, cpu);
-               if (ecache->ct)
-                       nf_ct_put(ecache->ct);
-       }
-}
-#else
-static inline void nf_ct_event_cache_flush(void) {}
-#endif /* CONFIG_NF_CONNTRACK_EVENTS */
-
 DEFINE_PER_CPU(struct ip_conntrack_stat, nf_conntrack_stat);
 EXPORT_PER_CPU_SYMBOL(nf_conntrack_stat);
 
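
The block removed above is the per-CPU event cache: __nf_ct_event_cache_init() binds the cache to a conntrack, first delivering whatever a previously cached conntrack left behind; event bits then accumulate in ecache->events, and nf_ct_deliver_cached_events() pushes them through nf_conntrack_chain in a single notifier call. A rough caller-side sketch; the wrapper function and the IPCT_PROTOINFO bit are assumptions for illustration:

    /* hypothetical caller: cache an event for ct, deliver once the packet is done */
    static void example_cache_and_deliver(struct nf_conn *ct)
    {
            struct nf_conntrack_ecache *ecache;

            local_bh_disable();                   /* the cache is per-CPU data */
            ecache = &__get_cpu_var(nf_conntrack_ecache);
            if (ecache->ct != ct)
                    __nf_ct_event_cache_init(ct); /* flushes stale events first */
            ecache->events |= IPCT_PROTOINFO;     /* accumulate, don't notify yet */
            local_bh_enable();

            /* ... later, after the packet has been handled ... */
            nf_ct_deliver_cached_events(ct);      /* one notifier call for all bits */
    }
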
@@ -182,85 +113,6 @@ DEFINE_RWLOCK(nf_ct_cache_lock);
 /* This avoids calling kmem_cache_create() with same name simultaneously */
 static DEFINE_MUTEX(nf_ct_cache_mutex);
 
-extern struct nf_conntrack_protocol nf_conntrack_generic_protocol;
-struct nf_conntrack_protocol *
-__nf_ct_proto_find(u_int16_t l3proto, u_int8_t protocol)
-{
-       if (unlikely(l3proto >= AF_MAX || nf_ct_protos[l3proto] == NULL))
-               return &nf_conntrack_generic_protocol;
-
-       return nf_ct_protos[l3proto][protocol];
-}
-
-/* this is guaranteed to always return a valid protocol helper, since
- * it falls back to generic_protocol */
-struct nf_conntrack_protocol *
-nf_ct_proto_find_get(u_int16_t l3proto, u_int8_t protocol)
-{
-       struct nf_conntrack_protocol *p;
-
-       preempt_disable();
-       p = __nf_ct_proto_find(l3proto, protocol);
-       if (!try_module_get(p->me))
-               p = &nf_conntrack_generic_protocol;
-       preempt_enable();
-       
-       return p;
-}
-
-void nf_ct_proto_put(struct nf_conntrack_protocol *p)
-{
-       module_put(p->me);
-}
-
-struct nf_conntrack_l3proto *
-nf_ct_l3proto_find_get(u_int16_t l3proto)
-{
-       struct nf_conntrack_l3proto *p;
-
-       preempt_disable();
-       p = __nf_ct_l3proto_find(l3proto);
-       if (!try_module_get(p->me))
-               p = &nf_conntrack_generic_l3proto;
-       preempt_enable();
-
-       return p;
-}
-
-void nf_ct_l3proto_put(struct nf_conntrack_l3proto *p)
-{
-       module_put(p->me);
-}
-
-int
-nf_ct_l3proto_try_module_get(unsigned short l3proto)
-{
-       int ret;
-       struct nf_conntrack_l3proto *p;
-
-retry: p = nf_ct_l3proto_find_get(l3proto);
-       if (p == &nf_conntrack_generic_l3proto) {
-               ret = request_module("nf_conntrack-%d", l3proto);
-               if (!ret)
-                       goto retry;
-
-               return -EPROTOTYPE;
-       }
-
-       return 0;
-}
-
-void nf_ct_l3proto_module_put(unsigned short l3proto)
-{
-       struct nf_conntrack_l3proto *p;
-
-       preempt_disable();
-       p = __nf_ct_l3proto_find(l3proto);
-       preempt_enable();
-
-       module_put(p->me);
-}
-
 static int nf_conntrack_hash_rnd_initted;
 static unsigned int nf_conntrack_hash_rnd;
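
The helpers removed in this hunk pair each lookup with a module reference: nf_ct_proto_find_get() never returns NULL because it falls back to nf_conntrack_generic_protocol, and callers are expected to drop the reference with nf_ct_proto_put(). A usage sketch; the wrapper function is hypothetical:

    /* hypothetical caller: take and release a reference on the UDP tracker */
    static void example_proto_user(void)
    {
            struct nf_conntrack_protocol *proto;

            proto = nf_ct_proto_find_get(AF_INET, IPPROTO_UDP);
            /* never NULL: falls back to nf_conntrack_generic_protocol when no
             * specific tracker is registered or its module ref cannot be taken */
            nf_ct_proto_put(proto);       /* drop the module reference */
    }
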