#include <net/netfilter/nf_conntrack_tuple.h>
LIST_HEAD(nf_conntrack_expect_list);
+EXPORT_SYMBOL_GPL(nf_conntrack_expect_list);
+
kmem_cache_t *nf_conntrack_expect_cachep __read_mostly;
-DECLARE_PER_CPU(struct ip_conntrack_stat, nf_conntrack_stat);
static unsigned int nf_conntrack_expect_next_id;
/* nf_conntrack_expect helper functions */
master_help->expecting--;
nf_conntrack_expect_put(exp);
}
+EXPORT_SYMBOL_GPL(nf_ct_unlink_expect);
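+/* NB: nf_ct_unlink_expect() assumes the caller already holds
+ * nf_conntrack_lock for writing, as expectation_timed_out() below does. */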
static void expectation_timed_out(unsigned long ul_expect)
{
}
return NULL;
}
+EXPORT_SYMBOL_GPL(__nf_conntrack_expect_find);
/* Just find an expectation corresponding to a tuple. */
struct nf_conntrack_expect *
-nf_conntrack_expect_find(const struct nf_conntrack_tuple *tuple)
+nf_conntrack_expect_find_get(const struct nf_conntrack_tuple *tuple)
{
struct nf_conntrack_expect *i;
return i;
}
+EXPORT_SYMBOL_GPL(nf_conntrack_expect_find_get);
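+/* A typical lookup pairs the reference taken here with a matching put,
+ * e.g.:
+ *
+ *	exp = nf_conntrack_expect_find_get(&tuple);
+ *	if (exp != NULL) {
+ *		... use the expectation ...
+ *		nf_conntrack_expect_put(exp);
+ *	}
+ */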
/* If an expectation for this connection is found, it is deleted from the
 * global list and returned. */
}
}
}
+EXPORT_SYMBOL_GPL(nf_ct_remove_expectations);
/* Would two expected things clash? */
static inline int expect_clash(const struct nf_conntrack_expect *a,
}
write_unlock_bh(&nf_conntrack_lock);
}
+EXPORT_SYMBOL_GPL(nf_conntrack_unexpect_related);
/* We don't increase the master conntrack refcount for non-fulfilled
 * conntracks. During conntrack destruction, the expectations are
 * always killed before the conntrack itself. */
atomic_set(&new->use, 1);
return new;
}
+EXPORT_SYMBOL_GPL(nf_conntrack_expect_alloc);
+
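+/* Fill in the tuple and mask of an expectation: a NULL saddr, daddr, src
+ * or dst leaves that field as a wildcard (tuple and mask cleared), while
+ * a supplied value is matched exactly (mask set to all-ones). */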
+void nf_conntrack_expect_init(struct nf_conntrack_expect *exp, int family,
+ union nf_conntrack_address *saddr,
+ union nf_conntrack_address *daddr,
+ u_int8_t proto, __be16 *src, __be16 *dst)
+{
+ int len;
+
+ if (family == AF_INET)
+ len = 4;
+ else
+ len = 16;
+
+ exp->flags = 0;
+ exp->expectfn = NULL;
+ exp->helper = NULL;
+ exp->tuple.src.l3num = family;
+ exp->tuple.dst.protonum = proto;
+ exp->mask.src.l3num = 0xFFFF;
+ exp->mask.dst.protonum = 0xFF;
+
+ if (saddr) {
+ memcpy(&exp->tuple.src.u3, saddr, len);
+ if (sizeof(exp->tuple.src.u3) > len)
+ /* address needs to be cleared for nf_ct_tuple_equal */
+ memset((void *)&exp->tuple.src.u3 + len, 0x00,
+ sizeof(exp->tuple.src.u3) - len);
+ memset(&exp->mask.src.u3, 0xFF, len);
+ if (sizeof(exp->mask.src.u3) > len)
+ memset((void *)&exp->mask.src.u3 + len, 0x00,
+ sizeof(exp->mask.src.u3) - len);
+ } else {
+ memset(&exp->tuple.src.u3, 0x00, sizeof(exp->tuple.src.u3));
+ memset(&exp->mask.src.u3, 0x00, sizeof(exp->mask.src.u3));
+ }
+
+ if (daddr) {
+ memcpy(&exp->tuple.dst.u3, daddr, len);
+ if (sizeof(exp->tuple.dst.u3) > len)
+ /* address needs to be cleared for nf_ct_tuple_equal */
+ memset((void *)&exp->tuple.dst.u3 + len, 0x00,
+ sizeof(exp->tuple.dst.u3) - len);
+ memset(&exp->mask.dst.u3, 0xFF, len);
+ if (sizeof(exp->mask.dst.u3) > len)
+ memset((void *)&exp->mask.dst.u3 + len, 0x00,
+ sizeof(exp->mask.dst.u3) - len);
+ } else {
+ memset(&exp->tuple.dst.u3, 0x00, sizeof(exp->tuple.dst.u3));
+ memset(&exp->mask.dst.u3, 0x00, sizeof(exp->mask.dst.u3));
+ }
+
+ if (src) {
+ exp->tuple.src.u.all = (__force u16)*src;
+ exp->mask.src.u.all = 0xFFFF;
+ } else {
+ exp->tuple.src.u.all = 0;
+ exp->mask.src.u.all = 0;
+ }
+
+ if (dst) {
+ exp->tuple.dst.u.all = (__force u16)*dst;
+ exp->mask.dst.u.all = 0xFFFF;
+ } else {
+ exp->tuple.dst.u.all = 0;
+ exp->mask.dst.u.all = 0;
+ }
+}
+EXPORT_SYMBOL_GPL(nf_conntrack_expect_init);
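+/* Sketch of the intended calling sequence for a connection-tracking
+ * helper (names such as 'ct', 'dir', 'daddr' and 'port' are the helper's
+ * own, e.g. as in the FTP helper):
+ *
+ *	exp = nf_conntrack_expect_alloc(ct);
+ *	if (exp == NULL)
+ *		return NF_DROP;
+ *	nf_conntrack_expect_init(exp, family,
+ *				 &ct->tuplehash[!dir].tuple.src.u3, &daddr,
+ *				 IPPROTO_TCP, NULL, &port);
+ *	ret = nf_conntrack_expect_related(exp);
+ *	nf_conntrack_expect_put(exp);
+ */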
void nf_conntrack_expect_put(struct nf_conntrack_expect *exp)
{
if (atomic_dec_and_test(&exp->use))
kmem_cache_free(nf_conntrack_expect_cachep, exp);
}
+EXPORT_SYMBOL_GPL(nf_conntrack_expect_put);
static void nf_conntrack_expect_insert(struct nf_conntrack_expect *exp)
{
write_unlock_bh(&nf_conntrack_lock);
return ret;
}
+EXPORT_SYMBOL_GPL(nf_conntrack_expect_related);
#ifdef CONFIG_PROC_FS
static void *exp_seq_start(struct seq_file *s, loff_t *pos)