/* Connection state tracking for netfilter.  This is separated from,
   but required by, the NAT layer; it can also be used by an iptables
   connection tracking module.
 */

/* (C) 1999-2001 Paul `Rusty' Russell
 * (C) 2002-2006 Netfilter Core Team <coreteam@netfilter.org>
 * (C) 2003,2004 USAGI/WIDE Project <http://www.linux-ipv6.org>
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 as
 * published by the Free Software Foundation.
 *
 * 23 Apr 2001: Harald Welte <laforge@gnumonks.org>
 *	- new API and handling of conntrack/nat helpers
 *	- now capable of multiple expectations for one master
 * 16 Jul 2002: Harald Welte <laforge@gnumonks.org>
 *	- add usage/reference counts to ip_conntrack_expect
 *	- export ip_conntrack[_expect]_{find_get,put} functions
 * 16 Dec 2003: Yasuyuki Kozakai @USAGI <yasuyuki.kozakai@toshiba.co.jp>
 *	- generalize L3 protocol dependent part.
 * 23 Mar 2004: Yasuyuki Kozakai @USAGI <yasuyuki.kozakai@toshiba.co.jp>
 *	- add support for various sizes of conntrack structures.
 * 26 Jan 2006: Harald Welte <laforge@netfilter.org>
 *	- restructure nf_conn (introduce nf_conn_help)
 *	- redesign 'features' the way they were originally intended
 * 26 Feb 2006: Pablo Neira Ayuso <pablo@eurodev.net>
 *	- add support for L3 protocol module load on demand.
 *
 * Derived from net/ipv4/netfilter/ip_conntrack_core.c
 */

#include <linux/config.h>
#include <linux/types.h>
#include <linux/netfilter.h>
#include <linux/module.h>
#include <linux/skbuff.h>
#include <linux/proc_fs.h>
#include <linux/vmalloc.h>
#include <linux/stddef.h>
#include <linux/slab.h>
#include <linux/random.h>
#include <linux/jhash.h>
#include <linux/err.h>
#include <linux/percpu.h>
#include <linux/moduleparam.h>
#include <linux/notifier.h>
#include <linux/kernel.h>
#include <linux/netdevice.h>
#include <linux/socket.h>

/* This rwlock protects the main hash table, protocol/helper/expectation
   registrations and conntrack timers. */
#define ASSERT_READ_LOCK(x)
#define ASSERT_WRITE_LOCK(x)

#include <net/netfilter/nf_conntrack.h>
#include <net/netfilter/nf_conntrack_l3proto.h>
#include <net/netfilter/nf_conntrack_protocol.h>
#include <net/netfilter/nf_conntrack_helper.h>
#include <net/netfilter/nf_conntrack_core.h>
#include <linux/netfilter_ipv4/listhelp.h>

#define NF_CONNTRACK_VERSION	"0.5.0"

#define DEBUGP(format, args...)

DEFINE_RWLOCK(nf_conntrack_lock);

/* nf_conntrack_standalone needs this */
atomic_t nf_conntrack_count = ATOMIC_INIT(0);
int nf_conntrack_max;

void (*nf_conntrack_destroyed)(struct nf_conn *conntrack) = NULL;
LIST_HEAD(nf_conntrack_expect_list);
struct nf_conntrack_protocol **nf_ct_protos[PF_MAX];
struct nf_conntrack_l3proto *nf_ct_l3protos[PF_MAX];
static LIST_HEAD(helpers);
unsigned int nf_conntrack_htable_size = 0;
struct list_head *nf_conntrack_hash;
static kmem_cache_t *nf_conntrack_expect_cachep;
struct nf_conn nf_conntrack_untracked;
unsigned int nf_ct_log_invalid;
static LIST_HEAD(unconfirmed);
static int nf_conntrack_vmalloc;

static unsigned int nf_conntrack_next_id;
static unsigned int nf_conntrack_expect_next_id;

#ifdef CONFIG_NF_CONNTRACK_EVENTS
ATOMIC_NOTIFIER_HEAD(nf_conntrack_chain);
ATOMIC_NOTIFIER_HEAD(nf_conntrack_expect_chain);

DEFINE_PER_CPU(struct nf_conntrack_ecache, nf_conntrack_ecache);

/* deliver cached events and clear cache entry - must be called with locally
 * disabled softirqs */
static inline void
__nf_ct_deliver_cached_events(struct nf_conntrack_ecache *ecache)
{
        DEBUGP("ecache: delivering events for %p\n", ecache->ct);
        if (nf_ct_is_confirmed(ecache->ct) && !nf_ct_is_dying(ecache->ct)
            && ecache->events)
                atomic_notifier_call_chain(&nf_conntrack_chain, ecache->events,
                                           ecache->ct);

        ecache->events = 0;
        nf_ct_put(ecache->ct);
        ecache->ct = NULL;
}

/* Deliver all cached events for a particular conntrack. This is called
 * by code prior to async packet handling or freeing the skb */
void nf_ct_deliver_cached_events(const struct nf_conn *ct)
{
        struct nf_conntrack_ecache *ecache;

        local_bh_disable();
        ecache = &__get_cpu_var(nf_conntrack_ecache);
        if (ecache->ct == ct)
                __nf_ct_deliver_cached_events(ecache);
        local_bh_enable();
}

/* Deliver cached events for old pending events, if current conntrack != old */
void __nf_ct_event_cache_init(struct nf_conn *ct)
{
        struct nf_conntrack_ecache *ecache;

        /* take care of delivering potentially old events */
        ecache = &__get_cpu_var(nf_conntrack_ecache);
        BUG_ON(ecache->ct == ct);
        if (ecache->ct)
                __nf_ct_deliver_cached_events(ecache);
        /* initialize for this conntrack/packet */
        ecache->ct = ct;
        nf_conntrack_get(&ct->ct_general);
}

/* flush the event cache - touches other CPU's data and must not be called
 * while packets are still passing through the code */
static void nf_ct_event_cache_flush(void)
{
        struct nf_conntrack_ecache *ecache;
        int cpu;

        for_each_possible_cpu(cpu) {
                ecache = &per_cpu(nf_conntrack_ecache, cpu);
                if (ecache->ct)
                        nf_ct_put(ecache->ct);
        }
}
#else
static inline void nf_ct_event_cache_flush(void) {}
#endif /* CONFIG_NF_CONNTRACK_EVENTS */

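/*
 * Example (illustrative sketch, not part of the original file): callers
 * accumulate event bits on the current CPU's cache entry while a packet
 * traverses the hooks, then flush them in one notifier call when done
 * with the skb:
 *
 *	nf_conntrack_event_cache(IPCT_STATUS, skb);
 *	...
 *	nf_ct_deliver_cached_events(ct);
 *
 * This batches several state changes per packet into a single
 * notification on nf_conntrack_chain.
 */
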
DEFINE_PER_CPU(struct ip_conntrack_stat, nf_conntrack_stat);
EXPORT_PER_CPU_SYMBOL(nf_conntrack_stat);

/*
 * This scheme offers various sizes of "struct nf_conn", dependent on
 * the features (helper, nat, ...) a connection needs.
 */

#define NF_CT_FEATURES_NAMELEN	256
static struct {
        /* name of slab cache. printed in /proc/slabinfo */
        char *name;

        /* size of slab cache */
        unsigned int size;

        /* slab cache pointer */
        kmem_cache_t *cachep;

        /* allocated slab cache + modules which use this slab cache */
        int use;

} nf_ct_cache[NF_CT_F_NUM];

/* protect members of nf_ct_cache except "use" */
DEFINE_RWLOCK(nf_ct_cache_lock);

/* This avoids calling kmem_cache_create() with same name simultaneously */
static DEFINE_MUTEX(nf_ct_cache_mutex);

extern struct nf_conntrack_protocol nf_conntrack_generic_protocol;

struct nf_conntrack_protocol *
__nf_ct_proto_find(u_int16_t l3proto, u_int8_t protocol)
{
        if (unlikely(l3proto >= AF_MAX || nf_ct_protos[l3proto] == NULL))
                return &nf_conntrack_generic_protocol;

        return nf_ct_protos[l3proto][protocol];
}

/* this is guaranteed to always return a valid protocol helper, since
 * it falls back to generic_protocol */
struct nf_conntrack_protocol *
nf_ct_proto_find_get(u_int16_t l3proto, u_int8_t protocol)
{
        struct nf_conntrack_protocol *p;

        preempt_disable();
        p = __nf_ct_proto_find(l3proto, protocol);
        if (!try_module_get(p->me))
                p = &nf_conntrack_generic_protocol;
        preempt_enable();

        return p;
}

void nf_ct_proto_put(struct nf_conntrack_protocol *p)
{
        module_put(p->me);
}

struct nf_conntrack_l3proto *
nf_ct_l3proto_find_get(u_int16_t l3proto)
{
        struct nf_conntrack_l3proto *p;

        preempt_disable();
        p = __nf_ct_l3proto_find(l3proto);
        if (!try_module_get(p->me))
                p = &nf_conntrack_generic_l3proto;
        preempt_enable();

        return p;
}

void nf_ct_l3proto_put(struct nf_conntrack_l3proto *p)
{
        module_put(p->me);
}

int
nf_ct_l3proto_try_module_get(unsigned short l3proto)
{
        int ret;
        struct nf_conntrack_l3proto *p;

retry:  p = nf_ct_l3proto_find_get(l3proto);
        if (p == &nf_conntrack_generic_l3proto) {
                ret = request_module("nf_conntrack-%d", l3proto);
                if (!ret)
                        goto retry;

                return -EPROTOTYPE;
        }

        return 0;
}

void nf_ct_l3proto_module_put(unsigned short l3proto)
{
        struct nf_conntrack_l3proto *p;

        preempt_disable();
        p = __nf_ct_l3proto_find(l3proto);
        module_put(p->me);
        preempt_enable();
}

static int nf_conntrack_hash_rnd_initted;
static unsigned int nf_conntrack_hash_rnd;

static u_int32_t __hash_conntrack(const struct nf_conntrack_tuple *tuple,
                                  unsigned int size, unsigned int rnd)
{
        unsigned int a, b;

        a = jhash((void *)tuple->src.u3.all, sizeof(tuple->src.u3.all),
                  ((tuple->src.l3num) << 16) | tuple->dst.protonum);
        b = jhash((void *)tuple->dst.u3.all, sizeof(tuple->dst.u3.all),
                  (tuple->src.u.all << 16) | tuple->dst.u.all);

        return jhash_2words(a, b, rnd) % size;
}

static inline u_int32_t hash_conntrack(const struct nf_conntrack_tuple *tuple)
{
        return __hash_conntrack(tuple, nf_conntrack_htable_size,
                                nf_conntrack_hash_rnd);
}

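/*
 * Example (illustrative, not from the original file): the original and
 * reply tuples of one connection normally land in different buckets, so
 * a confirmed conntrack occupies two hash chain entries:
 *
 *	unsigned int ho = hash_conntrack(&ct->tuplehash[IP_CT_DIR_ORIGINAL].tuple);
 *	unsigned int hr = hash_conntrack(&ct->tuplehash[IP_CT_DIR_REPLY].tuple);
 *
 * Both values are always < nf_conntrack_htable_size because of the
 * final "% size" above.
 */
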
int nf_conntrack_register_cache(u_int32_t features, const char *name,
                                size_t size)
{
        int ret = 0;
        char *cache_name;
        kmem_cache_t *cachep;

        DEBUGP("nf_conntrack_register_cache: features=0x%x, name=%s, size=%d\n",
               features, name, size);

        if (features < NF_CT_F_BASIC || features >= NF_CT_F_NUM) {
                DEBUGP("nf_conntrack_register_cache: invalid features: 0x%x\n",
                       features);
                return -EINVAL;
        }

        mutex_lock(&nf_ct_cache_mutex);

        write_lock_bh(&nf_ct_cache_lock);
        /* e.g: multiple helpers are loaded */
        if (nf_ct_cache[features].use > 0) {
                DEBUGP("nf_conntrack_register_cache: already registered.\n");
                if ((!strncmp(nf_ct_cache[features].name, name,
                              NF_CT_FEATURES_NAMELEN))
                    && nf_ct_cache[features].size == size) {
                        DEBUGP("nf_conntrack_register_cache: reusing.\n");
                        nf_ct_cache[features].use++;
                        ret = 0;
                } else
                        ret = -EBUSY;

                write_unlock_bh(&nf_ct_cache_lock);
                mutex_unlock(&nf_ct_cache_mutex);
                return ret;
        }
        write_unlock_bh(&nf_ct_cache_lock);

        /*
         * The memory space for the name of the slab cache must stay alive
         * until the cache is destroyed.
         */
        cache_name = kmalloc(sizeof(char)*NF_CT_FEATURES_NAMELEN, GFP_ATOMIC);
        if (cache_name == NULL) {
                DEBUGP("nf_conntrack_register_cache: can't alloc cache_name\n");
                ret = -ENOMEM;
                goto out_up_mutex;
        }

        if (strlcpy(cache_name, name, NF_CT_FEATURES_NAMELEN)
                                                >= NF_CT_FEATURES_NAMELEN) {
                printk("nf_conntrack_register_cache: name too long\n");
                ret = -EINVAL;
                goto out_free_name;
        }

        cachep = kmem_cache_create(cache_name, size, 0, 0,
                                   NULL, NULL);
        if (!cachep) {
                printk("nf_conntrack_register_cache: Can't create slab cache "
                       "for the features = 0x%x\n", features);
                ret = -ENOMEM;
                goto out_free_name;
        }

        write_lock_bh(&nf_ct_cache_lock);
        nf_ct_cache[features].use = 1;
        nf_ct_cache[features].size = size;
        nf_ct_cache[features].cachep = cachep;
        nf_ct_cache[features].name = cache_name;
        write_unlock_bh(&nf_ct_cache_lock);

        goto out_up_mutex;

out_free_name:
        kfree(cache_name);
out_up_mutex:
        mutex_unlock(&nf_ct_cache_mutex);
        return ret;
}

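/*
 * Example (sketch, mirroring the call made for the helper feature later
 * in this file): a feature that needs extra room in struct nf_conn
 * registers its own slab cache before allocating conntracks with that
 * feature bit:
 *
 *	ret = nf_conntrack_register_cache(NF_CT_F_HELP, "nf_conntrack:help",
 *					  sizeof(struct nf_conn)
 *					  + sizeof(struct nf_conn_help)
 *					  + __alignof__(struct nf_conn_help));
 *	...
 *	nf_conntrack_unregister_cache(NF_CT_F_HELP);
 *
 * Registering the same (features, name, size) triple twice just bumps
 * the "use" count; a mismatching size for an existing feature is refused.
 */
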
/* FIXME: In the current code, only nf_conntrack_cleanup() can call this function. */
void nf_conntrack_unregister_cache(u_int32_t features)
{
        kmem_cache_t *cachep;
        char *name;

        /*
         * This assures that kmem_cache_create() isn't called before destroying
         * the slab cache.
         */
        DEBUGP("nf_conntrack_unregister_cache: 0x%04x\n", features);
        mutex_lock(&nf_ct_cache_mutex);

        write_lock_bh(&nf_ct_cache_lock);
        if (--nf_ct_cache[features].use > 0) {
                write_unlock_bh(&nf_ct_cache_lock);
                mutex_unlock(&nf_ct_cache_mutex);
                return;
        }
        cachep = nf_ct_cache[features].cachep;
        name = nf_ct_cache[features].name;
        nf_ct_cache[features].cachep = NULL;
        nf_ct_cache[features].name = NULL;
        nf_ct_cache[features].size = 0;
        write_unlock_bh(&nf_ct_cache_lock);

        synchronize_net();

        kmem_cache_destroy(cachep);
        kfree(name);

        mutex_unlock(&nf_ct_cache_mutex);
}

int
nf_ct_get_tuple(const struct sk_buff *skb,
                unsigned int nhoff,
                unsigned int dataoff,
                u_int16_t l3num,
                u_int8_t protonum,
                struct nf_conntrack_tuple *tuple,
                const struct nf_conntrack_l3proto *l3proto,
                const struct nf_conntrack_protocol *protocol)
{
        NF_CT_TUPLE_U_BLANK(tuple);

        tuple->src.l3num = l3num;
        if (l3proto->pkt_to_tuple(skb, nhoff, tuple) == 0)
                return 0;

        tuple->dst.protonum = protonum;
        tuple->dst.dir = IP_CT_DIR_ORIGINAL;

        return protocol->pkt_to_tuple(skb, dataoff, tuple);
}

int
nf_ct_invert_tuple(struct nf_conntrack_tuple *inverse,
                   const struct nf_conntrack_tuple *orig,
                   const struct nf_conntrack_l3proto *l3proto,
                   const struct nf_conntrack_protocol *protocol)
{
        NF_CT_TUPLE_U_BLANK(inverse);

        inverse->src.l3num = orig->src.l3num;
        if (l3proto->invert_tuple(inverse, orig) == 0)
                return 0;

        inverse->dst.dir = !orig->dst.dir;

        inverse->dst.protonum = orig->dst.protonum;
        return protocol->invert_tuple(inverse, orig);
}

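/*
 * Example (illustrative): for a TCP tuple 10.0.0.1:1024 -> 10.0.0.2:21,
 * nf_ct_invert_tuple() produces 10.0.0.2:21 -> 10.0.0.1:1024 with the
 * direction flipped; this inverted tuple becomes the conntrack's
 * IP_CT_DIR_REPLY key, which is how reply packets find the connection.
 */
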
/* nf_conntrack_expect helper functions */
void nf_ct_unlink_expect(struct nf_conntrack_expect *exp)
{
        struct nf_conn_help *master_help = nfct_help(exp->master);

        NF_CT_ASSERT(master_help);
        ASSERT_WRITE_LOCK(&nf_conntrack_lock);
        NF_CT_ASSERT(!timer_pending(&exp->timeout));

        list_del(&exp->list);
        NF_CT_STAT_INC(expect_delete);
        master_help->expecting--;
        nf_conntrack_expect_put(exp);
}

static void expectation_timed_out(unsigned long ul_expect)
{
        struct nf_conntrack_expect *exp = (void *)ul_expect;

        write_lock_bh(&nf_conntrack_lock);
        nf_ct_unlink_expect(exp);
        write_unlock_bh(&nf_conntrack_lock);
        nf_conntrack_expect_put(exp);
}

struct nf_conntrack_expect *
__nf_conntrack_expect_find(const struct nf_conntrack_tuple *tuple)
{
        struct nf_conntrack_expect *i;

        list_for_each_entry(i, &nf_conntrack_expect_list, list) {
                if (nf_ct_tuple_mask_cmp(tuple, &i->tuple, &i->mask)) {
                        atomic_inc(&i->use);
                        return i;
                }
        }
        return NULL;
}

/* Just find an expectation corresponding to a tuple. */
struct nf_conntrack_expect *
nf_conntrack_expect_find(const struct nf_conntrack_tuple *tuple)
{
        struct nf_conntrack_expect *i;

        read_lock_bh(&nf_conntrack_lock);
        i = __nf_conntrack_expect_find(tuple);
        read_unlock_bh(&nf_conntrack_lock);

        return i;
}

/* If an expectation for this connection is found, it gets deleted from
 * the global list and is returned. */
static struct nf_conntrack_expect *
find_expectation(const struct nf_conntrack_tuple *tuple)
{
        struct nf_conntrack_expect *i;

        list_for_each_entry(i, &nf_conntrack_expect_list, list) {
                /* If master is not in hash table yet (ie. packet hasn't left
                   this machine yet), how can other end know about expected?
                   Hence these are not the droids you are looking for (if
                   master ct never got confirmed, we'd hold a reference to it
                   and weird things would happen to future packets). */
                if (nf_ct_tuple_mask_cmp(tuple, &i->tuple, &i->mask)
                    && nf_ct_is_confirmed(i->master)) {
                        if (i->flags & NF_CT_EXPECT_PERMANENT) {
                                atomic_inc(&i->use);
                                return i;
                        } else if (del_timer(&i->timeout)) {
                                nf_ct_unlink_expect(i);
                                return i;
                        }
                }
        }
        return NULL;
}

/* delete all expectations for this conntrack */
void nf_ct_remove_expectations(struct nf_conn *ct)
{
        struct nf_conntrack_expect *i, *tmp;
        struct nf_conn_help *help = nfct_help(ct);

        /* Optimization: most connections never expect any others. */
        if (!help || help->expecting == 0)
                return;

        list_for_each_entry_safe(i, tmp, &nf_conntrack_expect_list, list) {
                if (i->master == ct && del_timer(&i->timeout)) {
                        nf_ct_unlink_expect(i);
                        nf_conntrack_expect_put(i);
                }
        }
}

static void
clean_from_lists(struct nf_conn *ct)
{
        unsigned int ho, hr;

        DEBUGP("clean_from_lists(%p)\n", ct);
        ASSERT_WRITE_LOCK(&nf_conntrack_lock);

        ho = hash_conntrack(&ct->tuplehash[IP_CT_DIR_ORIGINAL].tuple);
        hr = hash_conntrack(&ct->tuplehash[IP_CT_DIR_REPLY].tuple);
        LIST_DELETE(&nf_conntrack_hash[ho], &ct->tuplehash[IP_CT_DIR_ORIGINAL]);
        LIST_DELETE(&nf_conntrack_hash[hr], &ct->tuplehash[IP_CT_DIR_REPLY]);

        /* Destroy all pending expectations */
        nf_ct_remove_expectations(ct);
}

static void
destroy_conntrack(struct nf_conntrack *nfct)
{
        struct nf_conn *ct = (struct nf_conn *)nfct;
        struct nf_conntrack_l3proto *l3proto;
        struct nf_conntrack_protocol *proto;

        DEBUGP("destroy_conntrack(%p)\n", ct);
        NF_CT_ASSERT(atomic_read(&nfct->use) == 0);
        NF_CT_ASSERT(!timer_pending(&ct->timeout));

        nf_conntrack_event(IPCT_DESTROY, ct);
        set_bit(IPS_DYING_BIT, &ct->status);

        /* To make sure we don't get any weird locking issues here:
         * destroy_conntrack() MUST NOT be called with a write lock
         * to nf_conntrack_lock!!! -HW */
        l3proto = __nf_ct_l3proto_find(ct->tuplehash[IP_CT_DIR_REPLY].tuple.src.l3num);
        if (l3proto && l3proto->destroy)
                l3proto->destroy(ct);

        proto = __nf_ct_proto_find(ct->tuplehash[IP_CT_DIR_REPLY].tuple.src.l3num,
                                   ct->tuplehash[IP_CT_DIR_REPLY].tuple.dst.protonum);
        if (proto && proto->destroy)
                proto->destroy(ct);

        if (nf_conntrack_destroyed)
                nf_conntrack_destroyed(ct);

        write_lock_bh(&nf_conntrack_lock);
        /* Expectations will have been removed in clean_from_lists,
         * except TFTP can create an expectation on the first packet,
         * before connection is in the list, so we need to clean here,
         * too. */
        nf_ct_remove_expectations(ct);

        /* We overload first tuple to link into unconfirmed list. */
        if (!nf_ct_is_confirmed(ct)) {
                BUG_ON(list_empty(&ct->tuplehash[IP_CT_DIR_ORIGINAL].list));
                list_del(&ct->tuplehash[IP_CT_DIR_ORIGINAL].list);
        }

        NF_CT_STAT_INC(delete);
        write_unlock_bh(&nf_conntrack_lock);

        if (ct->master)
                nf_ct_put(ct->master);

        DEBUGP("destroy_conntrack: returning ct=%p to slab\n", ct);
        nf_conntrack_free(ct);
}

static void death_by_timeout(unsigned long ul_conntrack)
{
        struct nf_conn *ct = (void *)ul_conntrack;

        write_lock_bh(&nf_conntrack_lock);
        /* Inside lock so preempt is disabled on module removal path.
         * Otherwise we can get spurious warnings. */
        NF_CT_STAT_INC(delete_list);
        clean_from_lists(ct);
        write_unlock_bh(&nf_conntrack_lock);
        nf_ct_put(ct);
}

static inline int
conntrack_tuple_cmp(const struct nf_conntrack_tuple_hash *i,
                    const struct nf_conntrack_tuple *tuple,
                    const struct nf_conn *ignored_conntrack)
{
        ASSERT_READ_LOCK(&nf_conntrack_lock);
        return nf_ct_tuplehash_to_ctrack(i) != ignored_conntrack
                && nf_ct_tuple_equal(tuple, &i->tuple);
}

struct nf_conntrack_tuple_hash *
__nf_conntrack_find(const struct nf_conntrack_tuple *tuple,
                    const struct nf_conn *ignored_conntrack)
{
        struct nf_conntrack_tuple_hash *h;
        unsigned int hash = hash_conntrack(tuple);

        ASSERT_READ_LOCK(&nf_conntrack_lock);
        list_for_each_entry(h, &nf_conntrack_hash[hash], list) {
                if (conntrack_tuple_cmp(h, tuple, ignored_conntrack)) {
                        NF_CT_STAT_INC(found);
                        return h;
                }
                NF_CT_STAT_INC(searched);
        }

        return NULL;
}

/* Find a connection corresponding to a tuple. */
struct nf_conntrack_tuple_hash *
nf_conntrack_find_get(const struct nf_conntrack_tuple *tuple,
                      const struct nf_conn *ignored_conntrack)
{
        struct nf_conntrack_tuple_hash *h;

        read_lock_bh(&nf_conntrack_lock);
        h = __nf_conntrack_find(tuple, ignored_conntrack);
        if (h)
                atomic_inc(&nf_ct_tuplehash_to_ctrack(h)->ct_general.use);
        read_unlock_bh(&nf_conntrack_lock);

        return h;
}

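/*
 * Example (sketch): a typical lookup builds a tuple from a packet and
 * then takes a reference that the caller must drop again with
 * nf_ct_put():
 *
 *	struct nf_conntrack_tuple tuple;
 *	struct nf_conntrack_tuple_hash *h;
 *
 *	if (nf_ct_get_tuple(skb, nhoff, dataoff, l3num, protonum,
 *			    &tuple, l3proto, proto)) {
 *		h = nf_conntrack_find_get(&tuple, NULL);
 *		if (h) {
 *			struct nf_conn *ct = nf_ct_tuplehash_to_ctrack(h);
 *			... use ct ...
 *			nf_ct_put(ct);
 *		}
 *	}
 */
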
static void __nf_conntrack_hash_insert(struct nf_conn *ct,
                                       unsigned int hash,
                                       unsigned int repl_hash)
{
        ct->id = ++nf_conntrack_next_id;
        list_prepend(&nf_conntrack_hash[hash],
                     &ct->tuplehash[IP_CT_DIR_ORIGINAL].list);
        list_prepend(&nf_conntrack_hash[repl_hash],
                     &ct->tuplehash[IP_CT_DIR_REPLY].list);
}

void nf_conntrack_hash_insert(struct nf_conn *ct)
{
        unsigned int hash, repl_hash;

        hash = hash_conntrack(&ct->tuplehash[IP_CT_DIR_ORIGINAL].tuple);
        repl_hash = hash_conntrack(&ct->tuplehash[IP_CT_DIR_REPLY].tuple);

        write_lock_bh(&nf_conntrack_lock);
        __nf_conntrack_hash_insert(ct, hash, repl_hash);
        write_unlock_bh(&nf_conntrack_lock);
}

/* Confirm a connection given skb; places it in hash table */
int
__nf_conntrack_confirm(struct sk_buff **pskb)
{
        unsigned int hash, repl_hash;
        struct nf_conn *ct;
        enum ip_conntrack_info ctinfo;

        ct = nf_ct_get(*pskb, &ctinfo);

        /* ipt_REJECT uses nf_conntrack_attach to attach related
           ICMP/TCP RST packets in other direction. Actual packet
           which created connection will be IP_CT_NEW or for an
           expected connection, IP_CT_RELATED. */
        if (CTINFO2DIR(ctinfo) != IP_CT_DIR_ORIGINAL)
                return NF_ACCEPT;

        hash = hash_conntrack(&ct->tuplehash[IP_CT_DIR_ORIGINAL].tuple);
        repl_hash = hash_conntrack(&ct->tuplehash[IP_CT_DIR_REPLY].tuple);

        /* We're not in hash table, and we refuse to set up related
           connections for unconfirmed conns. But packet copies and
           REJECT will give spurious warnings here. */
        /* NF_CT_ASSERT(atomic_read(&ct->ct_general.use) == 1); */

        /* No external references means no one else could have
           confirmed us. */
        NF_CT_ASSERT(!nf_ct_is_confirmed(ct));
        DEBUGP("Confirming conntrack %p\n", ct);

        write_lock_bh(&nf_conntrack_lock);

        /* See if there's one in the list already, including reverse:
           NAT could have grabbed it without realizing, since we're
           not in the hash. If there is, we lost race. */
        if (!LIST_FIND(&nf_conntrack_hash[hash],
                       conntrack_tuple_cmp,
                       struct nf_conntrack_tuple_hash *,
                       &ct->tuplehash[IP_CT_DIR_ORIGINAL].tuple, NULL)
            && !LIST_FIND(&nf_conntrack_hash[repl_hash],
                          conntrack_tuple_cmp,
                          struct nf_conntrack_tuple_hash *,
                          &ct->tuplehash[IP_CT_DIR_REPLY].tuple, NULL)) {
                struct nf_conn_help *help;

                /* Remove from unconfirmed list */
                list_del(&ct->tuplehash[IP_CT_DIR_ORIGINAL].list);

                __nf_conntrack_hash_insert(ct, hash, repl_hash);
                /* Timer relative to confirmation time, not original
                   setting time, otherwise we'd get timer wrap in
                   weird delay cases. */
                ct->timeout.expires += jiffies;
                add_timer(&ct->timeout);
                atomic_inc(&ct->ct_general.use);
                set_bit(IPS_CONFIRMED_BIT, &ct->status);
                NF_CT_STAT_INC(insert);
                write_unlock_bh(&nf_conntrack_lock);
                help = nfct_help(ct);
                if (help && help->helper)
                        nf_conntrack_event_cache(IPCT_HELPER, *pskb);
#ifdef CONFIG_NF_NAT_NEEDED
                if (test_bit(IPS_SRC_NAT_DONE_BIT, &ct->status) ||
                    test_bit(IPS_DST_NAT_DONE_BIT, &ct->status))
                        nf_conntrack_event_cache(IPCT_NATINFO, *pskb);
#endif
                nf_conntrack_event_cache(master_ct(ct) ?
                                         IPCT_RELATED : IPCT_NEW, *pskb);
                return NF_ACCEPT;
        }

        NF_CT_STAT_INC(insert_failed);
        write_unlock_bh(&nf_conntrack_lock);
        return NF_DROP;
}

/* Returns true if a connection corresponds to the tuple (required
   for NAT). */
int
nf_conntrack_tuple_taken(const struct nf_conntrack_tuple *tuple,
                         const struct nf_conn *ignored_conntrack)
{
        struct nf_conntrack_tuple_hash *h;

        read_lock_bh(&nf_conntrack_lock);
        h = __nf_conntrack_find(tuple, ignored_conntrack);
        read_unlock_bh(&nf_conntrack_lock);

        return h != NULL;
}

/* There's a small race here where we may free a just-assured
   connection. Too bad: we're in trouble anyway. */
static inline int unreplied(const struct nf_conntrack_tuple_hash *i)
{
        return !(test_bit(IPS_ASSURED_BIT,
                          &nf_ct_tuplehash_to_ctrack(i)->status));
}

static int early_drop(struct list_head *chain)
{
        /* Traverse backwards: gives us oldest, which is roughly LRU */
        struct nf_conntrack_tuple_hash *h;
        struct nf_conn *ct = NULL;
        int dropped = 0;

        read_lock_bh(&nf_conntrack_lock);
        h = LIST_FIND_B(chain, unreplied, struct nf_conntrack_tuple_hash *);
        if (h) {
                ct = nf_ct_tuplehash_to_ctrack(h);
                atomic_inc(&ct->ct_general.use);
        }
        read_unlock_bh(&nf_conntrack_lock);

        if (!ct)
                return dropped;

        if (del_timer(&ct->timeout)) {
                death_by_timeout((unsigned long)ct);
                dropped = 1;
                NF_CT_STAT_INC(early_drop);
        }
        nf_ct_put(ct);
        return dropped;
}

static inline int helper_cmp(const struct nf_conntrack_helper *i,
                             const struct nf_conntrack_tuple *rtuple)
{
        return nf_ct_tuple_mask_cmp(rtuple, &i->tuple, &i->mask);
}

static struct nf_conntrack_helper *
__nf_ct_helper_find(const struct nf_conntrack_tuple *tuple)
{
        return LIST_FIND(&helpers, helper_cmp,
                         struct nf_conntrack_helper *,
                         tuple);
}

struct nf_conntrack_helper *
nf_ct_helper_find_get(const struct nf_conntrack_tuple *tuple)
{
        struct nf_conntrack_helper *helper;

        /* need nf_conntrack_lock to assure that helper exists until
         * try_module_get() is called */
        read_lock_bh(&nf_conntrack_lock);

        helper = __nf_ct_helper_find(tuple);
        if (helper) {
                /* need to increase module usage count to assure helper will
                 * not go away while the caller is e.g. busy putting a
                 * conntrack in the hash that uses the helper */
                if (!try_module_get(helper->me))
                        helper = NULL;
        }

        read_unlock_bh(&nf_conntrack_lock);

        return helper;
}

void nf_ct_helper_put(struct nf_conntrack_helper *helper)
{
        module_put(helper->me);
}

static struct nf_conn *
__nf_conntrack_alloc(const struct nf_conntrack_tuple *orig,
                     const struct nf_conntrack_tuple *repl,
                     const struct nf_conntrack_l3proto *l3proto)
{
        struct nf_conn *conntrack = NULL;
        u_int32_t features = 0;
        struct nf_conntrack_helper *helper;

        if (unlikely(!nf_conntrack_hash_rnd_initted)) {
                get_random_bytes(&nf_conntrack_hash_rnd, 4);
                nf_conntrack_hash_rnd_initted = 1;
        }

        if (nf_conntrack_max
            && atomic_read(&nf_conntrack_count) >= nf_conntrack_max) {
                unsigned int hash = hash_conntrack(orig);
                /* Try dropping from this hash chain. */
                if (!early_drop(&nf_conntrack_hash[hash])) {
                        if (net_ratelimit())
                                printk(KERN_WARNING
                                       "nf_conntrack: table full, dropping"
                                       " packet.\n");
                        return ERR_PTR(-ENOMEM);
                }
        }

        /* find features needed by this conntrack. */
        features = l3proto->get_features(orig);

        /* FIXME: protect helper list per RCU */
        read_lock_bh(&nf_conntrack_lock);
        helper = __nf_ct_helper_find(repl);
        if (helper)
                features |= NF_CT_F_HELP;
        read_unlock_bh(&nf_conntrack_lock);

        DEBUGP("nf_conntrack_alloc: features=0x%x\n", features);

        read_lock_bh(&nf_ct_cache_lock);

        if (unlikely(!nf_ct_cache[features].use)) {
                DEBUGP("nf_conntrack_alloc: not supported features = 0x%x\n",
                       features);
                goto out;
        }

        conntrack = kmem_cache_alloc(nf_ct_cache[features].cachep, GFP_ATOMIC);
        if (conntrack == NULL) {
                DEBUGP("nf_conntrack_alloc: Can't alloc conntrack from cache\n");
                goto out;
        }

        memset(conntrack, 0, nf_ct_cache[features].size);
        conntrack->features = features;
        if (helper) {
                struct nf_conn_help *help = nfct_help(conntrack);
                NF_CT_ASSERT(help);
                help->helper = helper;
        }

        atomic_set(&conntrack->ct_general.use, 1);
        conntrack->ct_general.destroy = destroy_conntrack;
        conntrack->tuplehash[IP_CT_DIR_ORIGINAL].tuple = *orig;
        conntrack->tuplehash[IP_CT_DIR_REPLY].tuple = *repl;
        /* Don't set timer yet: wait for confirmation */
        init_timer(&conntrack->timeout);
        conntrack->timeout.data = (unsigned long)conntrack;
        conntrack->timeout.function = death_by_timeout;

        atomic_inc(&nf_conntrack_count);
out:
        read_unlock_bh(&nf_ct_cache_lock);
        return conntrack;
}

struct nf_conn *nf_conntrack_alloc(const struct nf_conntrack_tuple *orig,
                                   const struct nf_conntrack_tuple *repl)
{
        struct nf_conntrack_l3proto *l3proto;

        l3proto = __nf_ct_l3proto_find(orig->src.l3num);
        return __nf_conntrack_alloc(orig, repl, l3proto);
}

void nf_conntrack_free(struct nf_conn *conntrack)
{
        u_int32_t features = conntrack->features;
        NF_CT_ASSERT(features >= NF_CT_F_BASIC && features < NF_CT_F_NUM);
        DEBUGP("nf_conntrack_free: features = 0x%x, conntrack=%p\n", features,
               conntrack);
        kmem_cache_free(nf_ct_cache[features].cachep, conntrack);
        atomic_dec(&nf_conntrack_count);
}

/* Allocate a new conntrack: we return -ENOMEM if classification
   failed due to stress. Otherwise it really is unclassifiable. */
static struct nf_conntrack_tuple_hash *
init_conntrack(const struct nf_conntrack_tuple *tuple,
               struct nf_conntrack_l3proto *l3proto,
               struct nf_conntrack_protocol *protocol,
               struct sk_buff *skb,
               unsigned int dataoff)
{
        struct nf_conn *conntrack;
        struct nf_conntrack_tuple repl_tuple;
        struct nf_conntrack_expect *exp;

        if (!nf_ct_invert_tuple(&repl_tuple, tuple, l3proto, protocol)) {
                DEBUGP("Can't invert tuple.\n");
                return NULL;
        }

        conntrack = __nf_conntrack_alloc(tuple, &repl_tuple, l3proto);
        if (conntrack == NULL || IS_ERR(conntrack)) {
                DEBUGP("Can't allocate conntrack.\n");
                return (struct nf_conntrack_tuple_hash *)conntrack;
        }

        if (!protocol->new(conntrack, skb, dataoff)) {
                nf_conntrack_free(conntrack);
                DEBUGP("init conntrack: can't track with proto module\n");
                return NULL;
        }

        write_lock_bh(&nf_conntrack_lock);
        exp = find_expectation(tuple);

        if (exp) {
                DEBUGP("conntrack: expectation arrives ct=%p exp=%p\n",
                        conntrack, exp);
                /* Welcome, Mr. Bond. We've been expecting you... */
                __set_bit(IPS_EXPECTED_BIT, &conntrack->status);
                conntrack->master = exp->master;
#ifdef CONFIG_NF_CONNTRACK_MARK
                conntrack->mark = exp->master->mark;
#endif
#ifdef CONFIG_NF_CONNTRACK_SECMARK
                conntrack->secmark = exp->master->secmark;
#endif
                nf_conntrack_get(&conntrack->master->ct_general);
                NF_CT_STAT_INC(expect_new);
        } else
                NF_CT_STAT_INC(new);

        /* Overload tuple linked list to put us in unconfirmed list. */
        list_add(&conntrack->tuplehash[IP_CT_DIR_ORIGINAL].list, &unconfirmed);

        write_unlock_bh(&nf_conntrack_lock);

        if (exp) {
                if (exp->expectfn)
                        exp->expectfn(conntrack, exp);
                nf_conntrack_expect_put(exp);
        }

        return &conntrack->tuplehash[IP_CT_DIR_ORIGINAL];
}

/* On success, returns conntrack ptr, sets skb->nfct and ctinfo */
static inline struct nf_conn *
resolve_normal_ct(struct sk_buff *skb,
                  unsigned int dataoff,
                  u_int16_t l3num,
                  u_int8_t protonum,
                  struct nf_conntrack_l3proto *l3proto,
                  struct nf_conntrack_protocol *proto,
                  int *set_reply,
                  enum ip_conntrack_info *ctinfo)
{
        struct nf_conntrack_tuple tuple;
        struct nf_conntrack_tuple_hash *h;
        struct nf_conn *ct;

        if (!nf_ct_get_tuple(skb, (unsigned int)(skb->nh.raw - skb->data),
                             dataoff, l3num, protonum, &tuple, l3proto,
                             proto)) {
                DEBUGP("resolve_normal_ct: Can't get tuple\n");
                return NULL;
        }

        /* look for tuple match */
        h = nf_conntrack_find_get(&tuple, NULL);
        if (!h) {
                h = init_conntrack(&tuple, l3proto, proto, skb, dataoff);
                if (!h)
                        return NULL;
                if (IS_ERR(h))
                        return (void *)h;
        }
        ct = nf_ct_tuplehash_to_ctrack(h);

        /* It exists; we have (non-exclusive) reference. */
        if (NF_CT_DIRECTION(h) == IP_CT_DIR_REPLY) {
                *ctinfo = IP_CT_ESTABLISHED + IP_CT_IS_REPLY;
                /* Please set reply bit if this packet OK */
                *set_reply = 1;
        } else {
                /* Once we've had two way comms, always ESTABLISHED. */
                if (test_bit(IPS_SEEN_REPLY_BIT, &ct->status)) {
                        DEBUGP("nf_conntrack_in: normal packet for %p\n", ct);
                        *ctinfo = IP_CT_ESTABLISHED;
                } else if (test_bit(IPS_EXPECTED_BIT, &ct->status)) {
                        DEBUGP("nf_conntrack_in: related packet for %p\n", ct);
                        *ctinfo = IP_CT_RELATED;
                } else {
                        DEBUGP("nf_conntrack_in: new packet for %p\n", ct);
                        *ctinfo = IP_CT_NEW;
                }
                *set_reply = 0;
        }
        skb->nfct = &ct->ct_general;
        skb->nfctinfo = *ctinfo;
        return ct;
}

unsigned int
nf_conntrack_in(int pf, unsigned int hooknum, struct sk_buff **pskb)
{
        struct nf_conn *ct;
        enum ip_conntrack_info ctinfo;
        struct nf_conntrack_l3proto *l3proto;
        struct nf_conntrack_protocol *proto;
        unsigned int dataoff;
        u_int8_t protonum;
        int set_reply = 0;
        int ret;

        /* Previously seen (loopback or untracked)? Ignore. */
        if ((*pskb)->nfct) {
                NF_CT_STAT_INC(ignore);
                return NF_ACCEPT;
        }

        l3proto = __nf_ct_l3proto_find((u_int16_t)pf);
        if ((ret = l3proto->prepare(pskb, hooknum, &dataoff, &protonum)) <= 0) {
                DEBUGP("not prepared to track yet or error occurred\n");
                return -ret;
        }

        proto = __nf_ct_proto_find((u_int16_t)pf, protonum);

        /* It may be a special packet, error, unclean...
         * inverse of the return code tells to the netfilter
         * core what to do with the packet. */
        if (proto->error != NULL &&
            (ret = proto->error(*pskb, dataoff, &ctinfo, pf, hooknum)) <= 0) {
                NF_CT_STAT_INC(error);
                NF_CT_STAT_INC(invalid);
                return -ret;
        }

        ct = resolve_normal_ct(*pskb, dataoff, pf, protonum, l3proto, proto,
                               &set_reply, &ctinfo);
        if (!ct) {
                /* Not valid part of a connection */
                NF_CT_STAT_INC(invalid);
                return NF_ACCEPT;
        }

        if (IS_ERR(ct)) {
                /* Too stressed to deal. */
                NF_CT_STAT_INC(drop);
                return NF_DROP;
        }

        NF_CT_ASSERT((*pskb)->nfct);

        ret = proto->packet(ct, *pskb, dataoff, ctinfo, pf, hooknum);
        if (ret < 0) {
                /* Invalid: inverse of the return code tells
                 * the netfilter core what to do */
                DEBUGP("nf_conntrack_in: Can't track with proto module\n");
                nf_conntrack_put((*pskb)->nfct);
                (*pskb)->nfct = NULL;
                NF_CT_STAT_INC(invalid);
                return -ret;
        }

        if (set_reply && !test_and_set_bit(IPS_SEEN_REPLY_BIT, &ct->status))
                nf_conntrack_event_cache(IPCT_STATUS, *pskb);

        return ret;
}

int nf_ct_invert_tuplepr(struct nf_conntrack_tuple *inverse,
                         const struct nf_conntrack_tuple *orig)
{
        return nf_ct_invert_tuple(inverse, orig,
                                  __nf_ct_l3proto_find(orig->src.l3num),
                                  __nf_ct_proto_find(orig->src.l3num,
                                                     orig->dst.protonum));
}

/* Would two expected things clash? */
static inline int expect_clash(const struct nf_conntrack_expect *a,
                               const struct nf_conntrack_expect *b)
{
        /* Part covered by intersection of masks must be unequal,
           otherwise they clash */
        struct nf_conntrack_tuple intersect_mask;
        int count;

        intersect_mask.src.l3num = a->mask.src.l3num & b->mask.src.l3num;
        intersect_mask.src.u.all = a->mask.src.u.all & b->mask.src.u.all;
        intersect_mask.dst.u.all = a->mask.dst.u.all & b->mask.dst.u.all;
        intersect_mask.dst.protonum = a->mask.dst.protonum
                                        & b->mask.dst.protonum;

        for (count = 0; count < NF_CT_TUPLE_L3SIZE; count++){
                intersect_mask.src.u3.all[count] =
                        a->mask.src.u3.all[count] & b->mask.src.u3.all[count];
        }

        for (count = 0; count < NF_CT_TUPLE_L3SIZE; count++){
                intersect_mask.dst.u3.all[count] =
                        a->mask.dst.u3.all[count] & b->mask.dst.u3.all[count];
        }

        return nf_ct_tuple_mask_cmp(&a->tuple, &b->tuple, &intersect_mask);
}

static inline int expect_matches(const struct nf_conntrack_expect *a,
                                 const struct nf_conntrack_expect *b)
{
        return a->master == b->master
                && nf_ct_tuple_equal(&a->tuple, &b->tuple)
                && nf_ct_tuple_equal(&a->mask, &b->mask);
}

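/*
 * Worked example (illustrative): two expectations that both leave the
 * source port unmasked (src.u.all == 0) but pin the same destination
 * address and port clash, because every field covered by the
 * intersection of their masks compares equal. If one of them instead
 * pins a different destination port, the masked comparison fails and
 * the two expectations can coexist.
 */
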
/* Generally a bad idea to call this: could have matched already. */
void nf_conntrack_unexpect_related(struct nf_conntrack_expect *exp)
{
        struct nf_conntrack_expect *i;

        write_lock_bh(&nf_conntrack_lock);
        /* choose the oldest expectation to evict */
        list_for_each_entry_reverse(i, &nf_conntrack_expect_list, list) {
                if (expect_matches(i, exp) && del_timer(&i->timeout)) {
                        nf_ct_unlink_expect(i);
                        write_unlock_bh(&nf_conntrack_lock);
                        nf_conntrack_expect_put(i);
                        return;
                }
        }
        write_unlock_bh(&nf_conntrack_lock);
}

/* We don't increase the master conntrack refcount for non-fulfilled
 * conntracks. During the conntrack destruction, the expectations are
 * always killed before the conntrack itself */
struct nf_conntrack_expect *nf_conntrack_expect_alloc(struct nf_conn *me)
{
        struct nf_conntrack_expect *new;

        new = kmem_cache_alloc(nf_conntrack_expect_cachep, GFP_ATOMIC);
        if (!new) {
                DEBUGP("expect_related: OOM allocating expect\n");
                return NULL;
        }
        new->master = me;
        atomic_set(&new->use, 1);
        return new;
}

void nf_conntrack_expect_put(struct nf_conntrack_expect *exp)
{
        if (atomic_dec_and_test(&exp->use))
                kmem_cache_free(nf_conntrack_expect_cachep, exp);
}

static void nf_conntrack_expect_insert(struct nf_conntrack_expect *exp)
{
        struct nf_conn_help *master_help = nfct_help(exp->master);

        atomic_inc(&exp->use);
        master_help->expecting++;
        list_add(&exp->list, &nf_conntrack_expect_list);

        init_timer(&exp->timeout);
        exp->timeout.data = (unsigned long)exp;
        exp->timeout.function = expectation_timed_out;
        exp->timeout.expires = jiffies + master_help->helper->timeout * HZ;
        add_timer(&exp->timeout);

        exp->id = ++nf_conntrack_expect_next_id;
        atomic_inc(&exp->use);
        NF_CT_STAT_INC(expect_create);
}

/* Race with expectations being used means we could have none to find; OK. */
static void evict_oldest_expect(struct nf_conn *master)
{
        struct nf_conntrack_expect *i;

        list_for_each_entry_reverse(i, &nf_conntrack_expect_list, list) {
                if (i->master == master) {
                        if (del_timer(&i->timeout)) {
                                nf_ct_unlink_expect(i);
                                nf_conntrack_expect_put(i);
                        }
                        break;
                }
        }
}

static inline int refresh_timer(struct nf_conntrack_expect *i)
{
        struct nf_conn_help *master_help = nfct_help(i->master);

        if (!del_timer(&i->timeout))
                return 0;

        i->timeout.expires = jiffies + master_help->helper->timeout*HZ;
        add_timer(&i->timeout);
        return 1;
}

int nf_conntrack_expect_related(struct nf_conntrack_expect *expect)
{
        struct nf_conntrack_expect *i;
        struct nf_conn *master = expect->master;
        struct nf_conn_help *master_help = nfct_help(master);
        int ret;

        NF_CT_ASSERT(master_help);

        DEBUGP("nf_conntrack_expect_related %p\n", expect);
        DEBUGP("tuple: "); NF_CT_DUMP_TUPLE(&expect->tuple);
        DEBUGP("mask: "); NF_CT_DUMP_TUPLE(&expect->mask);

        write_lock_bh(&nf_conntrack_lock);
        list_for_each_entry(i, &nf_conntrack_expect_list, list) {
                if (expect_matches(i, expect)) {
                        /* Refresh timer: if it's dying, ignore.. */
                        if (refresh_timer(i)) {
                                ret = 0;
                                goto out;
                        }
                } else if (expect_clash(i, expect)) {
                        ret = -EBUSY;
                        goto out;
                }
        }

        /* Will be over limit? */
        if (master_help->helper->max_expected &&
            master_help->expecting >= master_help->helper->max_expected)
                evict_oldest_expect(master);

        nf_conntrack_expect_insert(expect);
        nf_conntrack_expect_event(IPEXP_NEW, expect);
        ret = 0;
out:
        write_unlock_bh(&nf_conntrack_lock);
        return ret;
}

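/*
 * Example (sketch, loosely modelled on how a helper such as the FTP one
 * uses this API; the tuple/mask setup is abbreviated): the helper
 * allocates an expectation, describes the expected data connection, and
 * registers it against the control connection's conntrack:
 *
 *	struct nf_conntrack_expect *exp = nf_conntrack_expect_alloc(ct);
 *	if (exp == NULL)
 *		return NF_DROP;
 *	exp->tuple = ...;	(expected data-connection tuple)
 *	exp->mask = ...;	(wildcards, e.g. the unknown source port)
 *	exp->expectfn = NULL;
 *	exp->flags = 0;
 *	ret = nf_conntrack_expect_related(exp);
 *	nf_conntrack_expect_put(exp);
 */
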
int nf_conntrack_helper_register(struct nf_conntrack_helper *me)
{
        int ret;
        BUG_ON(me->timeout == 0);

        ret = nf_conntrack_register_cache(NF_CT_F_HELP, "nf_conntrack:help",
                                          sizeof(struct nf_conn)
                                          + sizeof(struct nf_conn_help)
                                          + __alignof__(struct nf_conn_help));
        if (ret < 0) {
                printk(KERN_ERR "nf_conntrack_helper_register: Unable to create slab cache for conntracks\n");
                return ret;
        }
        write_lock_bh(&nf_conntrack_lock);
        list_prepend(&helpers, me);
        write_unlock_bh(&nf_conntrack_lock);

        return 0;
}

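/*
 * Example (sketch, for a hypothetical helper module; the exact
 * struct nf_conntrack_helper initialisation is abbreviated): a helper
 * registers itself at module init time and unregisters on exit:
 *
 *	static struct nf_conntrack_helper my_helper = {
 *		.name		= "my-proto",
 *		.me		= THIS_MODULE,
 *		.max_expected	= 1,
 *		.timeout	= 5 * 60,
 *		.tuple		= ...,	(match the control connection)
 *		.mask		= ...,
 *		.help		= my_help_fn,
 *	};
 *
 *	nf_conntrack_helper_register(&my_helper);
 *	...
 *	nf_conntrack_helper_unregister(&my_helper);
 */
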
struct nf_conntrack_helper *
__nf_conntrack_helper_find_byname(const char *name)
{
        struct nf_conntrack_helper *h;

        list_for_each_entry(h, &helpers, list) {
                if (!strcmp(h->name, name))
                        return h;
        }

        return NULL;
}

static inline int unhelp(struct nf_conntrack_tuple_hash *i,
                         const struct nf_conntrack_helper *me)
{
        struct nf_conn *ct = nf_ct_tuplehash_to_ctrack(i);
        struct nf_conn_help *help = nfct_help(ct);

        if (help && help->helper == me) {
                nf_conntrack_event(IPCT_HELPER, ct);
                help->helper = NULL;
        }
        return 0;
}

void nf_conntrack_helper_unregister(struct nf_conntrack_helper *me)
{
        unsigned int i;
        struct nf_conntrack_expect *exp, *tmp;

        /* Need write lock here, to delete helper. */
        write_lock_bh(&nf_conntrack_lock);
        LIST_DELETE(&helpers, me);

        /* Get rid of expectations */
        list_for_each_entry_safe(exp, tmp, &nf_conntrack_expect_list, list) {
                struct nf_conn_help *help = nfct_help(exp->master);
                if (help->helper == me && del_timer(&exp->timeout)) {
                        nf_ct_unlink_expect(exp);
                        nf_conntrack_expect_put(exp);
                }
        }

        /* Get rid of expecteds, set helpers to NULL. */
        LIST_FIND_W(&unconfirmed, unhelp, struct nf_conntrack_tuple_hash*, me);
        for (i = 0; i < nf_conntrack_htable_size; i++)
                LIST_FIND_W(&nf_conntrack_hash[i], unhelp,
                            struct nf_conntrack_tuple_hash *, me);
        write_unlock_bh(&nf_conntrack_lock);

        /* Someone could be still looking at the helper in a bh. */
        synchronize_net();
}

/* Refresh conntrack for this many jiffies and do accounting if do_acct is 1 */
void __nf_ct_refresh_acct(struct nf_conn *ct,
                          enum ip_conntrack_info ctinfo,
                          const struct sk_buff *skb,
                          unsigned long extra_jiffies,
                          int do_acct)
{
        int event = 0;

        NF_CT_ASSERT(ct->timeout.data == (unsigned long)ct);
        NF_CT_ASSERT(skb);

        write_lock_bh(&nf_conntrack_lock);

        /* Only update if this is not a fixed timeout */
        if (test_bit(IPS_FIXED_TIMEOUT_BIT, &ct->status)) {
                write_unlock_bh(&nf_conntrack_lock);
                return;
        }

        /* If not in hash table, timer will not be active yet */
        if (!nf_ct_is_confirmed(ct)) {
                ct->timeout.expires = extra_jiffies;
                event = IPCT_REFRESH;
        } else {
                /* Need del_timer for race avoidance (may already be dying). */
                if (del_timer(&ct->timeout)) {
                        ct->timeout.expires = jiffies + extra_jiffies;
                        add_timer(&ct->timeout);
                        event = IPCT_REFRESH;
                }
        }

#ifdef CONFIG_NF_CT_ACCT
        if (do_acct) {
                ct->counters[CTINFO2DIR(ctinfo)].packets++;
                ct->counters[CTINFO2DIR(ctinfo)].bytes +=
                        skb->len - (unsigned int)(skb->nh.raw - skb->data);
                if ((ct->counters[CTINFO2DIR(ctinfo)].packets & 0x80000000)
                    || (ct->counters[CTINFO2DIR(ctinfo)].bytes & 0x80000000))
                        event |= IPCT_COUNTER_FILLING;
        }
#endif

        write_unlock_bh(&nf_conntrack_lock);

        /* must be unlocked when calling event cache */
        if (event)
                nf_conntrack_event_cache(event, skb);
}

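/*
 * Example (sketch, assuming the wrapper names in nf_conntrack.h):
 * protocol trackers typically refresh the timeout on every packet via
 * a wrapper around this function, e.g.
 *
 *	nf_ct_refresh_acct(ct, ctinfo, skb, my_proto_timeout);
 *
 * where my_proto_timeout is that tracker's timeout in jiffies. For an
 * unconfirmed conntrack only the relative timeout is recorded; the
 * timer is armed later by __nf_conntrack_confirm(), which adds jiffies.
 */
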
#if defined(CONFIG_NF_CT_NETLINK) || \
    defined(CONFIG_NF_CT_NETLINK_MODULE)

#include <linux/netfilter/nfnetlink.h>
#include <linux/netfilter/nfnetlink_conntrack.h>
#include <linux/mutex.h>

/* Generic function for tcp/udp/sctp/dccp and alike. This needs to be
 * in ip_conntrack_core, since we don't want the protocols to autoload
 * or depend on ctnetlink */
int nf_ct_port_tuple_to_nfattr(struct sk_buff *skb,
                               const struct nf_conntrack_tuple *tuple)
{
        NFA_PUT(skb, CTA_PROTO_SRC_PORT, sizeof(u_int16_t),
                &tuple->src.u.tcp.port);
        NFA_PUT(skb, CTA_PROTO_DST_PORT, sizeof(u_int16_t),
                &tuple->dst.u.tcp.port);
        return 0;

nfattr_failure:
        return -1;
}

static const size_t cta_min_proto[CTA_PROTO_MAX] = {
        [CTA_PROTO_SRC_PORT-1]  = sizeof(u_int16_t),
        [CTA_PROTO_DST_PORT-1]  = sizeof(u_int16_t)
};

int nf_ct_port_nfattr_to_tuple(struct nfattr *tb[],
                               struct nf_conntrack_tuple *t)
{
        if (!tb[CTA_PROTO_SRC_PORT-1] || !tb[CTA_PROTO_DST_PORT-1])
                return -EINVAL;

        if (nfattr_bad_size(tb, CTA_PROTO_MAX, cta_min_proto))
                return -EINVAL;

        t->src.u.tcp.port =
                *(u_int16_t *)NFA_DATA(tb[CTA_PROTO_SRC_PORT-1]);
        t->dst.u.tcp.port =
                *(u_int16_t *)NFA_DATA(tb[CTA_PROTO_DST_PORT-1]);

        return 0;
}
#endif

/* Used by ipt_REJECT and ip6t_REJECT. */
void __nf_conntrack_attach(struct sk_buff *nskb, struct sk_buff *skb)
{
        struct nf_conn *ct;
        enum ip_conntrack_info ctinfo;

        /* This ICMP is in reverse direction to the packet which caused it */
        ct = nf_ct_get(skb, &ctinfo);
        if (CTINFO2DIR(ctinfo) == IP_CT_DIR_ORIGINAL)
                ctinfo = IP_CT_RELATED + IP_CT_IS_REPLY;
        else
                ctinfo = IP_CT_RELATED;

        /* Attach to new skbuff, and increment count */
        nskb->nfct = &ct->ct_general;
        nskb->nfctinfo = ctinfo;
        nf_conntrack_get(nskb->nfct);
}

static int
do_iter(const struct nf_conntrack_tuple_hash *i,
        int (*iter)(struct nf_conn *i, void *data),
        void *data)
{
        return iter(nf_ct_tuplehash_to_ctrack(i), data);
}

/* Bring out ya dead! */
static struct nf_conntrack_tuple_hash *
get_next_corpse(int (*iter)(struct nf_conn *i, void *data),
                void *data, unsigned int *bucket)
{
        struct nf_conntrack_tuple_hash *h = NULL;

        write_lock_bh(&nf_conntrack_lock);
        for (; *bucket < nf_conntrack_htable_size; (*bucket)++) {
                h = LIST_FIND_W(&nf_conntrack_hash[*bucket], do_iter,
                                struct nf_conntrack_tuple_hash *, iter, data);
                if (h)
                        break;
        }
        if (!h)
                h = LIST_FIND_W(&unconfirmed, do_iter,
                                struct nf_conntrack_tuple_hash *, iter, data);
        if (h)
                atomic_inc(&nf_ct_tuplehash_to_ctrack(h)->ct_general.use);
        write_unlock_bh(&nf_conntrack_lock);

        return h;
}

void
nf_ct_iterate_cleanup(int (*iter)(struct nf_conn *i, void *data), void *data)
{
        struct nf_conntrack_tuple_hash *h;
        unsigned int bucket = 0;

        while ((h = get_next_corpse(iter, data, &bucket)) != NULL) {
                struct nf_conn *ct = nf_ct_tuplehash_to_ctrack(h);
                /* Time to push up daisies... */
                if (del_timer(&ct->timeout))
                        death_by_timeout((unsigned long)ct);
                /* ... else the timer will get him soon. */

                nf_ct_put(ct);
        }
}

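/*
 * Example (sketch, with a hypothetical predicate): callers pass a
 * function that returns non-zero for every conntrack that should die.
 * kill_all() below removes everything; a user could instead match on a
 * private field, e.g. (with CONFIG_NF_CONNTRACK_MARK):
 *
 *	static int kill_by_mark(struct nf_conn *i, void *data)
 *	{
 *		return i->mark == *(u_int32_t *)data;
 *	}
 *
 *	u_int32_t mark = 42;
 *	nf_ct_iterate_cleanup(kill_by_mark, &mark);
 */
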
static int kill_all(struct nf_conn *i, void *data)
{
        return 1;
}

static void free_conntrack_hash(struct list_head *hash, int vmalloced, int size)
{
        if (vmalloced)
                vfree(hash);
        else
                free_pages((unsigned long)hash,
                           get_order(sizeof(struct list_head) * size));
}

void nf_conntrack_flush(void)
{
        nf_ct_iterate_cleanup(kill_all, NULL);
}

/* Mishearing the voices in his head, our hero wonders how he's
   supposed to kill the mall. */
void nf_conntrack_cleanup(void)
{
        int i;

        ip_ct_attach = NULL;

        /* This makes sure all current packets have passed through
           netfilter framework. Roll on, two-stage module
           delete... */
        synchronize_net();

        nf_ct_event_cache_flush();
 i_see_dead_people:
        nf_conntrack_flush();
        if (atomic_read(&nf_conntrack_count) != 0) {
                schedule();
                goto i_see_dead_people;
        }
        /* wait until all references to nf_conntrack_untracked are dropped */
        while (atomic_read(&nf_conntrack_untracked.ct_general.use) > 1)
                schedule();

        for (i = 0; i < NF_CT_F_NUM; i++) {
                if (nf_ct_cache[i].use == 0)
                        continue;

                NF_CT_ASSERT(nf_ct_cache[i].use == 1);
                nf_ct_cache[i].use = 1;
                nf_conntrack_unregister_cache(i);
        }
        kmem_cache_destroy(nf_conntrack_expect_cachep);
        free_conntrack_hash(nf_conntrack_hash, nf_conntrack_vmalloc,
                            nf_conntrack_htable_size);

        /* free l3proto protocol tables */
        for (i = 0; i < PF_MAX; i++)
                if (nf_ct_protos[i]) {
                        kfree(nf_ct_protos[i]);
                        nf_ct_protos[i] = NULL;
                }
}

static struct list_head *alloc_hashtable(int size, int *vmalloced)
{
        struct list_head *hash;
        unsigned int i;

        *vmalloced = 0;
        hash = (void*)__get_free_pages(GFP_KERNEL,
                                       get_order(sizeof(struct list_head)
                                                 * size));
        if (!hash) {
                *vmalloced = 1;
                printk(KERN_WARNING "nf_conntrack: falling back to vmalloc.\n");
                hash = vmalloc(sizeof(struct list_head) * size);
        }

        if (hash)
                for (i = 0; i < size; i++)
                        INIT_LIST_HEAD(&hash[i]);

        return hash;
}

int set_hashsize(const char *val, struct kernel_param *kp)
{
        int i, bucket, hashsize, vmalloced;
        int old_vmalloced, old_size;
        int rnd;
        struct list_head *hash, *old_hash;
        struct nf_conntrack_tuple_hash *h;

        /* On boot, we can set this without any fancy locking. */
        if (!nf_conntrack_htable_size)
                return param_set_uint(val, kp);

        hashsize = simple_strtol(val, NULL, 0);
        if (!hashsize)
                return -EINVAL;

        hash = alloc_hashtable(hashsize, &vmalloced);
        if (!hash)
                return -ENOMEM;

        /* We have to rehash for the new table anyway, so we also can
         * use a new random seed */
        get_random_bytes(&rnd, 4);

        write_lock_bh(&nf_conntrack_lock);
        for (i = 0; i < nf_conntrack_htable_size; i++) {
                while (!list_empty(&nf_conntrack_hash[i])) {
                        h = list_entry(nf_conntrack_hash[i].next,
                                       struct nf_conntrack_tuple_hash, list);
                        list_del(&h->list);
                        bucket = __hash_conntrack(&h->tuple, hashsize, rnd);
                        list_add_tail(&h->list, &hash[bucket]);
                }
        }
        old_size = nf_conntrack_htable_size;
        old_vmalloced = nf_conntrack_vmalloc;
        old_hash = nf_conntrack_hash;

        nf_conntrack_htable_size = hashsize;
        nf_conntrack_vmalloc = vmalloced;
        nf_conntrack_hash = hash;
        nf_conntrack_hash_rnd = rnd;
        write_unlock_bh(&nf_conntrack_lock);

        free_conntrack_hash(old_hash, old_vmalloced, old_size);
        return 0;
}

module_param_call(hashsize, set_hashsize, param_get_uint,
                  &nf_conntrack_htable_size, 0600);

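/*
 * Example (usage note): because of the module_param_call() above, the
 * hash size can be changed at runtime, e.g.
 *
 *	echo 16384 > /sys/module/nf_conntrack/parameters/hashsize
 *
 * set_hashsize() then allocates the new table, rehashes every entry
 * with a fresh random seed, and frees the old table.
 */
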
int __init nf_conntrack_init(void)
{
        unsigned int i;
        int ret;

        /* Idea from tcp.c: use 1/16384 of memory. On i386: 32MB
         * machine has 256 buckets. >= 1GB machines have 8192 buckets. */
        if (!nf_conntrack_htable_size) {
                nf_conntrack_htable_size
                        = (((num_physpages << PAGE_SHIFT) / 16384)
                           / sizeof(struct list_head));
                if (num_physpages > (1024 * 1024 * 1024 / PAGE_SIZE))
                        nf_conntrack_htable_size = 8192;
                if (nf_conntrack_htable_size < 16)
                        nf_conntrack_htable_size = 16;
        }
        nf_conntrack_max = 8 * nf_conntrack_htable_size;

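        /*
         * Worked example (illustrative): with 32MB of memory and 8-byte
         * list heads on i386, 33554432 / 16384 / 8 = 256 buckets, i.e.
         * nf_conntrack_max = 2048 tracked connections by default.
         */
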
        printk("nf_conntrack version %s (%u buckets, %d max)\n",
               NF_CONNTRACK_VERSION, nf_conntrack_htable_size,
               nf_conntrack_max);

        nf_conntrack_hash = alloc_hashtable(nf_conntrack_htable_size,
                                            &nf_conntrack_vmalloc);
        if (!nf_conntrack_hash) {
                printk(KERN_ERR "Unable to create nf_conntrack_hash\n");
                goto err_out;
        }

        ret = nf_conntrack_register_cache(NF_CT_F_BASIC, "nf_conntrack:basic",
                                          sizeof(struct nf_conn));
        if (ret < 0) {
                printk(KERN_ERR "Unable to create nf_conn slab cache\n");
                goto err_free_hash;
        }

        nf_conntrack_expect_cachep = kmem_cache_create("nf_conntrack_expect",
                                        sizeof(struct nf_conntrack_expect),
                                        0, 0, NULL, NULL);
        if (!nf_conntrack_expect_cachep) {
                printk(KERN_ERR "Unable to create nf_expect slab cache\n");
                goto err_free_conntrack_slab;
        }

        /* Don't NEED lock here, but good form anyway. */
        write_lock_bh(&nf_conntrack_lock);
        for (i = 0; i < PF_MAX; i++)
                nf_ct_l3protos[i] = &nf_conntrack_generic_l3proto;
        write_unlock_bh(&nf_conntrack_lock);

        /* For use by REJECT target */
        ip_ct_attach = __nf_conntrack_attach;

        /* Set up fake conntrack:
            - to never be deleted, not in any hashes */
        atomic_set(&nf_conntrack_untracked.ct_general.use, 1);
        /*  - and make it look like a confirmed connection */
        set_bit(IPS_CONFIRMED_BIT, &nf_conntrack_untracked.status);

        return ret;

err_free_conntrack_slab:
        nf_conntrack_unregister_cache(NF_CT_F_BASIC);
err_free_hash:
        free_conntrack_hash(nf_conntrack_hash, nf_conntrack_vmalloc,
                            nf_conntrack_htable_size);
err_out:
        return -ENOMEM;
}