/*
 * Packet matching code.
 *
 * Copyright (C) 1999 Paul `Rusty' Russell & Michael J. Neuling
 * Copyright (C) 2000-2005 Netfilter Core Team <coreteam@netfilter.org>
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 as
 * published by the Free Software Foundation.
 */
#include <linux/cache.h>
#include <linux/capability.h>
#include <linux/skbuff.h>
#include <linux/kmod.h>
#include <linux/vmalloc.h>
#include <linux/netdevice.h>
#include <linux/module.h>
#include <linux/icmp.h>
#include <net/ip.h>
#include <net/compat.h>
#include <asm/uaccess.h>
#include <linux/mutex.h>
#include <linux/proc_fs.h>
#include <linux/err.h>
#include <linux/cpumask.h>

#include <linux/netfilter/x_tables.h>
#include <linux/netfilter_ipv4/ip_tables.h>
MODULE_LICENSE("GPL");
MODULE_AUTHOR("Netfilter Core Team <coreteam@netfilter.org>");
MODULE_DESCRIPTION("IPv4 packet filter");
/*#define DEBUG_IP_FIREWALL*/
/*#define DEBUG_ALLOW_ALL*/ /* Useful for remote debugging */
/*#define DEBUG_IP_FIREWALL_USER*/

#ifdef DEBUG_IP_FIREWALL
#define dprintf(format, args...) printk(format , ## args)
#else
#define dprintf(format, args...)
#endif

#ifdef DEBUG_IP_FIREWALL_USER
#define duprintf(format, args...) printk(format , ## args)
#else
#define duprintf(format, args...)
#endif

#ifdef CONFIG_NETFILTER_DEBUG
#define IP_NF_ASSERT(x)						\
do {								\
	if (!(x))						\
		printk("IP_NF_ASSERT: %s:%s:%u\n",		\
		       __FUNCTION__, __FILE__, __LINE__);	\
} while(0)
#else
#define IP_NF_ASSERT(x)
#endif
/* All the better to debug you with... */

/*
   We keep a set of rules for each CPU, so we can avoid write-locking
   them in the softirq when updating the counters and therefore
   only need to read-lock in the softirq; doing a write_lock_bh() in user
   context stops packets coming through and allows user context to read
   the counters or update the rules.

   Hence the start of any table is given by get_table() below.  */
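
/*
 * Illustrative sketch (not part of the original file): the pattern the
 * comment above describes, using the calls that appear later in this
 * file.  The packet path only ever read-locks and touches the entry
 * copy belonging to its own CPU; user context write-locks so it can
 * read every CPU's copy (or swap the table in) with no packets running
 * in parallel.
 *
 *	packet path (softirq):			user context (sockopt):
 *		read_lock_bh(&table->lock);		write_lock_bh(&table->lock);
 *		e = entries[smp_processor_id()];	get_counters(private, counters);
 *		ADD_COUNTER(e->counters, ...);		write_unlock_bh(&table->lock);
 *		read_unlock_bh(&table->lock);
 */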
/* Returns whether matches rule or not. */
static inline bool
ip_packet_match(const struct iphdr *ip,
		const char *indev,
		const char *outdev,
		const struct ipt_ip *ipinfo,
		int isfrag)
{
	size_t i;
	unsigned long ret;

#define FWINV(bool,invflg) ((bool) ^ !!(ipinfo->invflags & invflg))
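
	/*
	 * Worked example (illustration only, not from the original
	 * source): with IPT_INV_SRCIP set in ipinfo->invflags -- an
	 * "-s ! addr" rule -- a packet whose source *does* fall inside
	 * src/smsk makes the mismatch expression below 0, and
	 * FWINV(0, IPT_INV_SRCIP) == 0 ^ 1 == 1, so the packet fails
	 * this rule; a source outside the range gives 1 ^ 1 == 0 and
	 * the check falls through.  Without the flag, FWINV() leaves
	 * the condition untouched.
	 */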
	if (FWINV((ip->saddr&ipinfo->smsk.s_addr) != ipinfo->src.s_addr,
		  IPT_INV_SRCIP)
	    || FWINV((ip->daddr&ipinfo->dmsk.s_addr) != ipinfo->dst.s_addr,
		  IPT_INV_DSTIP)) {
		dprintf("Source or dest mismatch.\n");

		dprintf("SRC: %u.%u.%u.%u. Mask: %u.%u.%u.%u. Target: %u.%u.%u.%u.%s\n",
			NIPQUAD(ip->saddr),
			NIPQUAD(ipinfo->smsk.s_addr),
			NIPQUAD(ipinfo->src.s_addr),
			ipinfo->invflags & IPT_INV_SRCIP ? " (INV)" : "");
		dprintf("DST: %u.%u.%u.%u Mask: %u.%u.%u.%u Target: %u.%u.%u.%u.%s\n",
			NIPQUAD(ip->daddr),
			NIPQUAD(ipinfo->dmsk.s_addr),
			NIPQUAD(ipinfo->dst.s_addr),
			ipinfo->invflags & IPT_INV_DSTIP ? " (INV)" : "");
		return false;
	}
108 /* Look for ifname matches; this should unroll nicely. */
109 for (i = 0, ret = 0; i < IFNAMSIZ/sizeof(unsigned long); i++) {
110 ret |= (((const unsigned long *)indev)[i]
111 ^ ((const unsigned long *)ipinfo->iniface)[i])
112 & ((const unsigned long *)ipinfo->iniface_mask)[i];
115 if (FWINV(ret != 0, IPT_INV_VIA_IN)) {
116 dprintf("VIA in mismatch (%s vs %s).%s\n",
117 indev, ipinfo->iniface,
118 ipinfo->invflags&IPT_INV_VIA_IN ?" (INV)":"");
122 for (i = 0, ret = 0; i < IFNAMSIZ/sizeof(unsigned long); i++) {
123 ret |= (((const unsigned long *)outdev)[i]
124 ^ ((const unsigned long *)ipinfo->outiface)[i])
125 & ((const unsigned long *)ipinfo->outiface_mask)[i];
128 if (FWINV(ret != 0, IPT_INV_VIA_OUT)) {
129 dprintf("VIA out mismatch (%s vs %s).%s\n",
130 outdev, ipinfo->outiface,
131 ipinfo->invflags&IPT_INV_VIA_OUT ?" (INV)":"");
	/* Check specific protocol */
	if (ipinfo->proto
	    && FWINV(ip->protocol != ipinfo->proto, IPT_INV_PROTO)) {
		dprintf("Packet protocol %hi does not match %hi.%s\n",
			ip->protocol, ipinfo->proto,
			ipinfo->invflags&IPT_INV_PROTO ? " (INV)":"");
		return false;
	}
	/* If we have a fragment rule but the packet is not a fragment
	 * then we return zero */
	if (FWINV((ipinfo->flags&IPT_F_FRAG) && !isfrag, IPT_INV_FRAG)) {
		dprintf("Fragment rule but not fragment.%s\n",
			ipinfo->invflags & IPT_INV_FRAG ? " (INV)" : "");
		return false;
	}

	return true;
}
156 ip_checkentry(const struct ipt_ip *ip)
158 if (ip->flags & ~IPT_F_MASK) {
159 duprintf("Unknown flag bits set: %08X\n",
160 ip->flags & ~IPT_F_MASK);
163 if (ip->invflags & ~IPT_INV_MASK) {
164 duprintf("Unknown invflag bits set: %08X\n",
165 ip->invflags & ~IPT_INV_MASK);
172 ipt_error(struct sk_buff *skb,
173 const struct net_device *in,
174 const struct net_device *out,
175 unsigned int hooknum,
176 const struct xt_target *target,
177 const void *targinfo)
180 printk("ip_tables: error: `%s'\n", (char *)targinfo);
186 bool do_match(struct ipt_entry_match *m,
187 const struct sk_buff *skb,
188 const struct net_device *in,
189 const struct net_device *out,
193 /* Stop iteration if it doesn't match */
194 if (!m->u.kernel.match->match(skb, in, out, m->u.kernel.match, m->data,
195 offset, ip_hdrlen(skb), hotdrop))
201 static inline struct ipt_entry *
202 get_entry(void *base, unsigned int offset)
204 return (struct ipt_entry *)(base + offset);
/* All zeroes == unconditional rule. */
static inline int
unconditional(const struct ipt_ip *ip)
{
	unsigned int i;

	for (i = 0; i < sizeof(*ip)/sizeof(__u32); i++)
		if (((__u32 *)ip)[i])
			return 0;

	return 1;
}
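
/*
 * Example (not from the original source): a rule that specifies no
 * address, interface or protocol match at all -- such as the implicit
 * policy at the end of a built-in chain, or the RETURN at the tail of a
 * user-defined chain -- has every field of its ipt_ip zeroed, so
 * unconditional() returns 1.  get_chainname_rulenum() and
 * mark_source_chains() below rely on this to recognise chain tails.
 */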
220 #if defined(CONFIG_NETFILTER_XT_TARGET_TRACE) || \
221 defined(CONFIG_NETFILTER_XT_TARGET_TRACE_MODULE)
222 static const char *hooknames[] = {
223 [NF_INET_PRE_ROUTING] = "PREROUTING",
224 [NF_INET_LOCAL_IN] = "INPUT",
225 [NF_INET_FORWARD] = "FORWARD",
226 [NF_INET_LOCAL_OUT] = "OUTPUT",
227 [NF_INET_POST_ROUTING] = "POSTROUTING",
230 enum nf_ip_trace_comments {
231 NF_IP_TRACE_COMMENT_RULE,
232 NF_IP_TRACE_COMMENT_RETURN,
233 NF_IP_TRACE_COMMENT_POLICY,
236 static const char *comments[] = {
237 [NF_IP_TRACE_COMMENT_RULE] = "rule",
238 [NF_IP_TRACE_COMMENT_RETURN] = "return",
239 [NF_IP_TRACE_COMMENT_POLICY] = "policy",
242 static struct nf_loginfo trace_loginfo = {
243 .type = NF_LOG_TYPE_LOG,
247 .logflags = NF_LOG_MASK,
253 get_chainname_rulenum(struct ipt_entry *s, struct ipt_entry *e,
254 char *hookname, char **chainname,
255 char **comment, unsigned int *rulenum)
257 struct ipt_standard_target *t = (void *)ipt_get_target(s);
259 if (strcmp(t->target.u.kernel.target->name, IPT_ERROR_TARGET) == 0) {
260 /* Head of user chain: ERROR target with chainname */
261 *chainname = t->target.data;
266 if (s->target_offset == sizeof(struct ipt_entry)
267 && strcmp(t->target.u.kernel.target->name,
268 IPT_STANDARD_TARGET) == 0
270 && unconditional(&s->ip)) {
271 /* Tail of chains: STANDARD target (return/policy) */
272 *comment = *chainname == hookname
273 ? (char *)comments[NF_IP_TRACE_COMMENT_POLICY]
274 : (char *)comments[NF_IP_TRACE_COMMENT_RETURN];
283 static void trace_packet(struct sk_buff *skb,
285 const struct net_device *in,
286 const struct net_device *out,
288 struct xt_table_info *private,
292 struct ipt_entry *root;
293 char *hookname, *chainname, *comment;
294 unsigned int rulenum = 0;
296 table_base = (void *)private->entries[smp_processor_id()];
297 root = get_entry(table_base, private->hook_entry[hook]);
299 hookname = chainname = (char *)hooknames[hook];
300 comment = (char *)comments[NF_IP_TRACE_COMMENT_RULE];
302 IPT_ENTRY_ITERATE(root,
303 private->size - private->hook_entry[hook],
304 get_chainname_rulenum,
305 e, hookname, &chainname, &comment, &rulenum);
307 nf_log_packet(AF_INET, hook, skb, in, out, &trace_loginfo,
308 "TRACE: %s:%s:%s:%u ",
309 tablename, chainname, comment, rulenum);
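
	/*
	 * Example output (illustration only): for a packet with
	 * skb->nf_trace set that ends up hitting the FORWARD policy of
	 * the filter table, the line logged above would look roughly
	 * like
	 *
	 *	TRACE: filter:FORWARD:policy:2
	 *
	 * i.e. table, chain, whether a rule, return or policy matched,
	 * and the rule number within that chain.
	 */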
313 /* Returns one of the generic firewall policies, like NF_ACCEPT. */
315 ipt_do_table(struct sk_buff *skb,
317 const struct net_device *in,
318 const struct net_device *out,
319 struct xt_table *table)
321 static const char nulldevname[IFNAMSIZ] __attribute__((aligned(sizeof(long))));
325 bool hotdrop = false;
326 /* Initializing verdict to NF_DROP keeps gcc happy. */
327 unsigned int verdict = NF_DROP;
328 const char *indev, *outdev;
330 struct ipt_entry *e, *back;
331 struct xt_table_info *private;
335 datalen = skb->len - ip->ihl * 4;
336 indev = in ? in->name : nulldevname;
337 outdev = out ? out->name : nulldevname;
	/* We handle fragments by dealing with the first fragment as
	 * if it was a normal packet.  All other fragments are treated
	 * normally, except that they will NEVER match rules that ask
	 * things we don't know (ie. tcp syn flag or ports).  If the
	 * rule is also a fragment-specific rule, non-fragments won't
	 * match it. */
344 offset = ntohs(ip->frag_off) & IP_OFFSET;
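
	/*
	 * Example (not in the original source): an unfragmented packet,
	 * or the first fragment, carries a fragment offset of 0, so
	 * "offset" stays 0 and protocol matches run as usual.  A later
	 * fragment, say frag_off = htons(IP_MF | 185) (more-fragments
	 * set, data starting 185 * 8 bytes in), yields offset == 185;
	 * such packets can still match address and interface rules but
	 * never the port or flag matches run via do_match() below.
	 */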
346 read_lock_bh(&table->lock);
347 IP_NF_ASSERT(table->valid_hooks & (1 << hook));
348 private = table->private;
349 table_base = (void *)private->entries[smp_processor_id()];
350 e = get_entry(table_base, private->hook_entry[hook]);
352 /* For return from builtin chain */
353 back = get_entry(table_base, private->underflow[hook]);
358 if (ip_packet_match(ip, indev, outdev, &e->ip, offset)) {
359 struct ipt_entry_target *t;
361 if (IPT_MATCH_ITERATE(e, do_match,
363 offset, &hotdrop) != 0)
366 ADD_COUNTER(e->counters, ntohs(ip->tot_len), 1);
368 t = ipt_get_target(e);
369 IP_NF_ASSERT(t->u.kernel.target);
371 #if defined(CONFIG_NETFILTER_XT_TARGET_TRACE) || \
372 defined(CONFIG_NETFILTER_XT_TARGET_TRACE_MODULE)
373 /* The packet is traced: log it */
374 if (unlikely(skb->nf_trace))
375 trace_packet(skb, hook, in, out,
376 table->name, private, e);
378 /* Standard target? */
379 if (!t->u.kernel.target->target) {
382 v = ((struct ipt_standard_target *)t)->verdict;
384 /* Pop from stack? */
385 if (v != IPT_RETURN) {
386 verdict = (unsigned)(-v) - 1;
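				/*
				 * Sketch of the encoding (not in the
				 * original source): userspace stores a
				 * standard verdict as -(NF_verdict) - 1,
				 * so ACCEPT (NF_ACCEPT == 1) arrives as
				 * v == -2 and the line above recovers
				 * (unsigned)2 - 1 == NF_ACCEPT.  A
				 * positive v is not a verdict at all but
				 * a byte offset into the table: the jump
				 * handled further down.
				 */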
390 back = get_entry(table_base,
394 if (table_base + v != (void *)e + e->next_offset
395 && !(e->ip.flags & IPT_F_GOTO)) {
396 /* Save old back ptr in next entry */
397 struct ipt_entry *next
398 = (void *)e + e->next_offset;
400 = (void *)back - table_base;
401 /* set back pointer to next entry */
405 e = get_entry(table_base, v);
				/* Targets which reenter must return
				   abs. verdicts */
409 #ifdef CONFIG_NETFILTER_DEBUG
410 ((struct ipt_entry *)table_base)->comefrom
413 verdict = t->u.kernel.target->target(skb,
419 #ifdef CONFIG_NETFILTER_DEBUG
420 if (((struct ipt_entry *)table_base)->comefrom
422 && verdict == IPT_CONTINUE) {
423 printk("Target %s reentered!\n",
424 t->u.kernel.target->name);
427 ((struct ipt_entry *)table_base)->comefrom
430 /* Target might have changed stuff. */
432 datalen = skb->len - ip->ihl * 4;
434 if (verdict == IPT_CONTINUE)
435 e = (void *)e + e->next_offset;
443 e = (void *)e + e->next_offset;
447 read_unlock_bh(&table->lock);
449 #ifdef DEBUG_ALLOW_ALL
458 /* Figures out from what hook each rule can be called: returns 0 if
459 there are loops. Puts hook bitmask in comefrom. */
461 mark_source_chains(struct xt_table_info *newinfo,
462 unsigned int valid_hooks, void *entry0)
	/* No recursion; use packet counter to save back ptrs (reset
	   to 0 as we leave), and comefrom to save source hook bitmask */
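
	/*
	 * Example (illustration only, not in the original source):
	 * suppose a FORWARD rule jumps to a user-defined chain "foo".
	 * Starting from hook_entry[NF_INET_FORWARD], the walk below
	 * follows that jump, marks every entry it passes with the
	 * FORWARD hook bit in ->comefrom, and stashes a back pointer in
	 * ->counters.pcnt so the unconditional RETURN at the tail of
	 * "foo" can backtrack to the entry after the jump.  A jump that
	 * lands on an entry already marked as being on the current path
	 * (the 1 << NF_INET_NUMHOOKS bit) triggers the "loop hook"
	 * message and the ruleset is rejected.
	 */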
468 for (hook = 0; hook < NF_INET_NUMHOOKS; hook++) {
469 unsigned int pos = newinfo->hook_entry[hook];
471 = (struct ipt_entry *)(entry0 + pos);
473 if (!(valid_hooks & (1 << hook)))
476 /* Set initial back pointer. */
477 e->counters.pcnt = pos;
480 struct ipt_standard_target *t
481 = (void *)ipt_get_target(e);
482 int visited = e->comefrom & (1 << hook);
484 if (e->comefrom & (1 << NF_INET_NUMHOOKS)) {
485 printk("iptables: loop hook %u pos %u %08X.\n",
486 hook, pos, e->comefrom);
490 |= ((1 << hook) | (1 << NF_INET_NUMHOOKS));
492 /* Unconditional return/END. */
493 if ((e->target_offset == sizeof(struct ipt_entry)
494 && (strcmp(t->target.u.user.name,
495 IPT_STANDARD_TARGET) == 0)
497 && unconditional(&e->ip)) || visited) {
498 unsigned int oldpos, size;
500 if (t->verdict < -NF_MAX_VERDICT - 1) {
501 duprintf("mark_source_chains: bad "
502 "negative verdict (%i)\n",
			/* Return: backtrack through the last
			   big jumps. */
510 e->comefrom ^= (1<<NF_INET_NUMHOOKS);
511 #ifdef DEBUG_IP_FIREWALL_USER
513 & (1 << NF_INET_NUMHOOKS)) {
514 duprintf("Back unset "
521 pos = e->counters.pcnt;
522 e->counters.pcnt = 0;
524 /* We're at the start. */
528 e = (struct ipt_entry *)
530 } while (oldpos == pos + e->next_offset);
533 size = e->next_offset;
534 e = (struct ipt_entry *)
535 (entry0 + pos + size);
536 e->counters.pcnt = pos;
539 int newpos = t->verdict;
541 if (strcmp(t->target.u.user.name,
542 IPT_STANDARD_TARGET) == 0
544 if (newpos > newinfo->size -
545 sizeof(struct ipt_entry)) {
546 duprintf("mark_source_chains: "
547 "bad verdict (%i)\n",
				/* This is a jump; chase it. */
552 duprintf("Jump rule %u -> %u\n",
555 /* ... this is a fallthru */
556 newpos = pos + e->next_offset;
558 e = (struct ipt_entry *)
560 e->counters.pcnt = pos;
565 duprintf("Finished chain %u\n", hook);
571 cleanup_match(struct ipt_entry_match *m, unsigned int *i)
573 if (i && (*i)-- == 0)
576 if (m->u.kernel.match->destroy)
577 m->u.kernel.match->destroy(m->u.kernel.match, m->data);
578 module_put(m->u.kernel.match->me);
583 check_entry(struct ipt_entry *e, const char *name)
585 struct ipt_entry_target *t;
587 if (!ip_checkentry(&e->ip)) {
588 duprintf("ip_tables: ip check failed %p %s.\n", e, name);
592 if (e->target_offset + sizeof(struct ipt_entry_target) > e->next_offset)
595 t = ipt_get_target(e);
596 if (e->target_offset + t->u.target_size > e->next_offset)
602 static inline int check_match(struct ipt_entry_match *m, const char *name,
603 const struct ipt_ip *ip,
604 unsigned int hookmask, unsigned int *i)
606 struct xt_match *match;
609 match = m->u.kernel.match;
610 ret = xt_check_match(match, AF_INET, m->u.match_size - sizeof(*m),
611 name, hookmask, ip->proto,
612 ip->invflags & IPT_INV_PROTO);
613 if (!ret && m->u.kernel.match->checkentry
614 && !m->u.kernel.match->checkentry(name, ip, match, m->data,
616 duprintf("ip_tables: check failed for `%s'.\n",
617 m->u.kernel.match->name);
626 find_check_match(struct ipt_entry_match *m,
628 const struct ipt_ip *ip,
629 unsigned int hookmask,
632 struct xt_match *match;
635 match = try_then_request_module(xt_find_match(AF_INET, m->u.user.name,
637 "ipt_%s", m->u.user.name);
638 if (IS_ERR(match) || !match) {
639 duprintf("find_check_match: `%s' not found\n", m->u.user.name);
640 return match ? PTR_ERR(match) : -ENOENT;
642 m->u.kernel.match = match;
644 ret = check_match(m, name, ip, hookmask, i);
650 module_put(m->u.kernel.match->me);
654 static inline int check_target(struct ipt_entry *e, const char *name)
656 struct ipt_entry_target *t;
657 struct xt_target *target;
660 t = ipt_get_target(e);
661 target = t->u.kernel.target;
662 ret = xt_check_target(target, AF_INET, t->u.target_size - sizeof(*t),
663 name, e->comefrom, e->ip.proto,
664 e->ip.invflags & IPT_INV_PROTO);
665 if (!ret && t->u.kernel.target->checkentry
666 && !t->u.kernel.target->checkentry(name, e, target, t->data,
668 duprintf("ip_tables: check failed for `%s'.\n",
669 t->u.kernel.target->name);
676 find_check_entry(struct ipt_entry *e, const char *name, unsigned int size,
679 struct ipt_entry_target *t;
680 struct xt_target *target;
684 ret = check_entry(e, name);
689 ret = IPT_MATCH_ITERATE(e, find_check_match, name, &e->ip,
692 goto cleanup_matches;
694 t = ipt_get_target(e);
695 target = try_then_request_module(xt_find_target(AF_INET,
698 "ipt_%s", t->u.user.name);
699 if (IS_ERR(target) || !target) {
700 duprintf("find_check_entry: `%s' not found\n", t->u.user.name);
701 ret = target ? PTR_ERR(target) : -ENOENT;
702 goto cleanup_matches;
704 t->u.kernel.target = target;
706 ret = check_target(e, name);
713 module_put(t->u.kernel.target->me);
715 IPT_MATCH_ITERATE(e, cleanup_match, &j);
720 check_entry_size_and_hooks(struct ipt_entry *e,
721 struct xt_table_info *newinfo,
723 unsigned char *limit,
724 const unsigned int *hook_entries,
725 const unsigned int *underflows,
730 if ((unsigned long)e % __alignof__(struct ipt_entry) != 0
731 || (unsigned char *)e + sizeof(struct ipt_entry) >= limit) {
732 duprintf("Bad offset %p\n", e);
737 < sizeof(struct ipt_entry) + sizeof(struct ipt_entry_target)) {
738 duprintf("checking: element %p size %u\n",
743 /* Check hooks & underflows */
744 for (h = 0; h < NF_INET_NUMHOOKS; h++) {
745 if ((unsigned char *)e - base == hook_entries[h])
746 newinfo->hook_entry[h] = hook_entries[h];
747 if ((unsigned char *)e - base == underflows[h])
748 newinfo->underflow[h] = underflows[h];
751 /* FIXME: underflows must be unconditional, standard verdicts
752 < 0 (not IPT_RETURN). --RR */
754 /* Clear counters and comefrom */
755 e->counters = ((struct xt_counters) { 0, 0 });
763 cleanup_entry(struct ipt_entry *e, unsigned int *i)
765 struct ipt_entry_target *t;
767 if (i && (*i)-- == 0)
770 /* Cleanup all matches */
771 IPT_MATCH_ITERATE(e, cleanup_match, NULL);
772 t = ipt_get_target(e);
773 if (t->u.kernel.target->destroy)
774 t->u.kernel.target->destroy(t->u.kernel.target, t->data);
775 module_put(t->u.kernel.target->me);
779 /* Checks and translates the user-supplied table segment (held in
782 translate_table(const char *name,
783 unsigned int valid_hooks,
784 struct xt_table_info *newinfo,
788 const unsigned int *hook_entries,
789 const unsigned int *underflows)
794 newinfo->size = size;
795 newinfo->number = number;
797 /* Init all hooks to impossible value. */
798 for (i = 0; i < NF_INET_NUMHOOKS; i++) {
799 newinfo->hook_entry[i] = 0xFFFFFFFF;
800 newinfo->underflow[i] = 0xFFFFFFFF;
803 duprintf("translate_table: size %u\n", newinfo->size);
805 /* Walk through entries, checking offsets. */
806 ret = IPT_ENTRY_ITERATE(entry0, newinfo->size,
807 check_entry_size_and_hooks,
811 hook_entries, underflows, &i);
816 duprintf("translate_table: %u not %u entries\n",
821 /* Check hooks all assigned */
822 for (i = 0; i < NF_INET_NUMHOOKS; i++) {
823 /* Only hooks which are valid */
824 if (!(valid_hooks & (1 << i)))
826 if (newinfo->hook_entry[i] == 0xFFFFFFFF) {
827 duprintf("Invalid hook entry %u %u\n",
831 if (newinfo->underflow[i] == 0xFFFFFFFF) {
832 duprintf("Invalid underflow %u %u\n",
838 if (!mark_source_chains(newinfo, valid_hooks, entry0))
841 /* Finally, each sanity check must pass */
843 ret = IPT_ENTRY_ITERATE(entry0, newinfo->size,
844 find_check_entry, name, size, &i);
847 IPT_ENTRY_ITERATE(entry0, newinfo->size,
852 /* And one copy for every other CPU */
853 for_each_possible_cpu(i) {
854 if (newinfo->entries[i] && newinfo->entries[i] != entry0)
855 memcpy(newinfo->entries[i], entry0, newinfo->size);
863 add_entry_to_counter(const struct ipt_entry *e,
864 struct xt_counters total[],
867 ADD_COUNTER(total[*i], e->counters.bcnt, e->counters.pcnt);
874 set_entry_to_counter(const struct ipt_entry *e,
875 struct ipt_counters total[],
878 SET_COUNTER(total[*i], e->counters.bcnt, e->counters.pcnt);
885 get_counters(const struct xt_table_info *t,
886 struct xt_counters counters[])
	/* Instead of clearing (by a previous call to memset())
	 * the counters and using adds, we set the counters
	 * with data used by 'current' CPU.
	 * We don't care about preemption here.
	 */
897 curcpu = raw_smp_processor_id();
900 IPT_ENTRY_ITERATE(t->entries[curcpu],
902 set_entry_to_counter,
906 for_each_possible_cpu(cpu) {
910 IPT_ENTRY_ITERATE(t->entries[cpu],
912 add_entry_to_counter,
918 static inline struct xt_counters * alloc_counters(struct xt_table *table)
920 unsigned int countersize;
921 struct xt_counters *counters;
922 struct xt_table_info *private = table->private;
	/* We need atomic snapshot of counters: rest doesn't change
	   (other than comefrom, which userspace doesn't care
	   about). */
927 countersize = sizeof(struct xt_counters) * private->number;
928 counters = vmalloc_node(countersize, numa_node_id());
930 if (counters == NULL)
931 return ERR_PTR(-ENOMEM);
933 /* First, sum counters... */
934 write_lock_bh(&table->lock);
935 get_counters(private, counters);
936 write_unlock_bh(&table->lock);
942 copy_entries_to_user(unsigned int total_size,
943 struct xt_table *table,
944 void __user *userptr)
946 unsigned int off, num;
948 struct xt_counters *counters;
949 struct xt_table_info *private = table->private;
953 counters = alloc_counters(table);
954 if (IS_ERR(counters))
955 return PTR_ERR(counters);
	/* choose the copy that is on our node/cpu, ...
	 * This choice is lazy (because current thread is
	 * allowed to migrate to another cpu)
	 */
961 loc_cpu_entry = private->entries[raw_smp_processor_id()];
962 /* ... then copy entire thing ... */
963 if (copy_to_user(userptr, loc_cpu_entry, total_size) != 0) {
968 /* FIXME: use iterator macros --RR */
969 /* ... then go back and fix counters and names */
970 for (off = 0, num = 0; off < total_size; off += e->next_offset, num++){
972 struct ipt_entry_match *m;
973 struct ipt_entry_target *t;
975 e = (struct ipt_entry *)(loc_cpu_entry + off);
976 if (copy_to_user(userptr + off
977 + offsetof(struct ipt_entry, counters),
979 sizeof(counters[num])) != 0) {
984 for (i = sizeof(struct ipt_entry);
985 i < e->target_offset;
986 i += m->u.match_size) {
989 if (copy_to_user(userptr + off + i
990 + offsetof(struct ipt_entry_match,
992 m->u.kernel.match->name,
993 strlen(m->u.kernel.match->name)+1)
1000 t = ipt_get_target(e);
1001 if (copy_to_user(userptr + off + e->target_offset
1002 + offsetof(struct ipt_entry_target,
1004 t->u.kernel.target->name,
1005 strlen(t->u.kernel.target->name)+1) != 0) {
1016 #ifdef CONFIG_COMPAT
1017 struct compat_delta {
1018 struct compat_delta *next;
1019 unsigned int offset;
1023 static struct compat_delta *compat_offsets;
1025 static int compat_add_offset(unsigned int offset, short delta)
1027 struct compat_delta *tmp;
1029 tmp = kmalloc(sizeof(struct compat_delta), GFP_KERNEL);
1032 tmp->offset = offset;
1034 if (compat_offsets) {
1035 tmp->next = compat_offsets->next;
1036 compat_offsets->next = tmp;
1038 compat_offsets = tmp;
1044 static void compat_flush_offsets(void)
1046 struct compat_delta *tmp, *next;
1048 if (compat_offsets) {
1049 for (tmp = compat_offsets; tmp; tmp = next) {
1053 compat_offsets = NULL;
1057 static short compat_calc_jump(unsigned int offset)
1059 struct compat_delta *tmp;
1062 for (tmp = compat_offsets, delta = 0; tmp; tmp = tmp->next)
1063 if (tmp->offset < offset)
1064 delta += tmp->delta;
1068 static void compat_standard_from_user(void *dst, void *src)
1070 int v = *(compat_int_t *)src;
1073 v += compat_calc_jump(v);
1074 memcpy(dst, &v, sizeof(v));
1077 static int compat_standard_to_user(void __user *dst, void *src)
1079 compat_int_t cv = *(int *)src;
1082 cv -= compat_calc_jump(cv);
1083 return copy_to_user(dst, &cv, sizeof(cv)) ? -EFAULT : 0;
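
/*
 * Worked example (illustration only): each entry's 64-bit kernel
 * representation is bigger than its 32-bit compat form by a per-entry
 * delta (alignment padding in matches and targets).  compat_calc_jump()
 * sums the deltas of every entry that sits in front of a given offset,
 * so a compat jump target of, say, 200 bytes with two entries before it
 * that each grew by 8 bytes becomes 200 + 16 = 216 in the native layout
 * (compat_standard_from_user), and the same correction is subtracted
 * again when rules are copied back out (compat_standard_to_user).
 */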
1087 compat_calc_match(struct ipt_entry_match *m, int *size)
1089 *size += xt_compat_match_offset(m->u.kernel.match);
1093 static int compat_calc_entry(struct ipt_entry *e,
1094 const struct xt_table_info *info,
1095 void *base, struct xt_table_info *newinfo)
1097 struct ipt_entry_target *t;
1098 unsigned int entry_offset;
1102 entry_offset = (void *)e - base;
1103 IPT_MATCH_ITERATE(e, compat_calc_match, &off);
1104 t = ipt_get_target(e);
1105 off += xt_compat_target_offset(t->u.kernel.target);
1106 newinfo->size -= off;
1107 ret = compat_add_offset(entry_offset, off);
1111 for (i = 0; i < NF_INET_NUMHOOKS; i++) {
1112 if (info->hook_entry[i] &&
1113 (e < (struct ipt_entry *)(base + info->hook_entry[i])))
1114 newinfo->hook_entry[i] -= off;
1115 if (info->underflow[i] &&
1116 (e < (struct ipt_entry *)(base + info->underflow[i])))
1117 newinfo->underflow[i] -= off;
1122 static int compat_table_info(const struct xt_table_info *info,
1123 struct xt_table_info *newinfo)
1125 void *loc_cpu_entry;
1127 if (!newinfo || !info)
1130 /* we dont care about newinfo->entries[] */
1131 memcpy(newinfo, info, offsetof(struct xt_table_info, entries));
1132 newinfo->initial_entries = 0;
1133 loc_cpu_entry = info->entries[raw_smp_processor_id()];
1134 return IPT_ENTRY_ITERATE(loc_cpu_entry, info->size,
1135 compat_calc_entry, info, loc_cpu_entry,
1140 static int get_info(void __user *user, int *len, int compat)
1142 char name[IPT_TABLE_MAXNAMELEN];
1146 if (*len != sizeof(struct ipt_getinfo)) {
1147 duprintf("length %u != %u\n", *len,
1148 (unsigned int)sizeof(struct ipt_getinfo));
1152 if (copy_from_user(name, user, sizeof(name)) != 0)
1155 name[IPT_TABLE_MAXNAMELEN-1] = '\0';
1156 #ifdef CONFIG_COMPAT
1158 xt_compat_lock(AF_INET);
1160 t = try_then_request_module(xt_find_table_lock(AF_INET, name),
1161 "iptable_%s", name);
1162 if (t && !IS_ERR(t)) {
1163 struct ipt_getinfo info;
1164 struct xt_table_info *private = t->private;
1166 #ifdef CONFIG_COMPAT
1168 struct xt_table_info tmp;
1169 ret = compat_table_info(private, &tmp);
1170 compat_flush_offsets();
1174 info.valid_hooks = t->valid_hooks;
1175 memcpy(info.hook_entry, private->hook_entry,
1176 sizeof(info.hook_entry));
1177 memcpy(info.underflow, private->underflow,
1178 sizeof(info.underflow));
1179 info.num_entries = private->number;
1180 info.size = private->size;
1181 strcpy(info.name, name);
1183 if (copy_to_user(user, &info, *len) != 0)
1191 ret = t ? PTR_ERR(t) : -ENOENT;
1192 #ifdef CONFIG_COMPAT
1194 xt_compat_unlock(AF_INET);
1200 get_entries(struct ipt_get_entries __user *uptr, int *len)
1203 struct ipt_get_entries get;
1206 if (*len < sizeof(get)) {
1207 duprintf("get_entries: %u < %d\n", *len,
1208 (unsigned int)sizeof(get));
1211 if (copy_from_user(&get, uptr, sizeof(get)) != 0)
1213 if (*len != sizeof(struct ipt_get_entries) + get.size) {
1214 duprintf("get_entries: %u != %u\n", *len,
1215 (unsigned int)(sizeof(struct ipt_get_entries) +
1220 t = xt_find_table_lock(AF_INET, get.name);
1221 if (t && !IS_ERR(t)) {
1222 struct xt_table_info *private = t->private;
1223 duprintf("t->private->number = %u\n",
1225 if (get.size == private->size)
1226 ret = copy_entries_to_user(private->size,
1227 t, uptr->entrytable);
1229 duprintf("get_entries: I've got %u not %u!\n",
1237 ret = t ? PTR_ERR(t) : -ENOENT;
1243 __do_replace(const char *name, unsigned int valid_hooks,
1244 struct xt_table_info *newinfo, unsigned int num_counters,
1245 void __user *counters_ptr)
1249 struct xt_table_info *oldinfo;
1250 struct xt_counters *counters;
1251 void *loc_cpu_old_entry;
1254 counters = vmalloc(num_counters * sizeof(struct xt_counters));
1260 t = try_then_request_module(xt_find_table_lock(AF_INET, name),
1261 "iptable_%s", name);
1262 if (!t || IS_ERR(t)) {
1263 ret = t ? PTR_ERR(t) : -ENOENT;
1264 goto free_newinfo_counters_untrans;
1268 if (valid_hooks != t->valid_hooks) {
1269 duprintf("Valid hook crap: %08X vs %08X\n",
1270 valid_hooks, t->valid_hooks);
1275 oldinfo = xt_replace_table(t, num_counters, newinfo, &ret);
1279 /* Update module usage count based on number of rules */
1280 duprintf("do_replace: oldnum=%u, initnum=%u, newnum=%u\n",
1281 oldinfo->number, oldinfo->initial_entries, newinfo->number);
1282 if ((oldinfo->number > oldinfo->initial_entries) ||
1283 (newinfo->number <= oldinfo->initial_entries))
1285 if ((oldinfo->number > oldinfo->initial_entries) &&
1286 (newinfo->number <= oldinfo->initial_entries))
1289 /* Get the old counters. */
1290 get_counters(oldinfo, counters);
1291 /* Decrease module usage counts and free resource */
1292 loc_cpu_old_entry = oldinfo->entries[raw_smp_processor_id()];
1293 IPT_ENTRY_ITERATE(loc_cpu_old_entry, oldinfo->size, cleanup_entry,
1295 xt_free_table_info(oldinfo);
1296 if (copy_to_user(counters_ptr, counters,
1297 sizeof(struct xt_counters) * num_counters) != 0)
1306 free_newinfo_counters_untrans:
1313 do_replace(void __user *user, unsigned int len)
1316 struct ipt_replace tmp;
1317 struct xt_table_info *newinfo;
1318 void *loc_cpu_entry;
1320 if (copy_from_user(&tmp, user, sizeof(tmp)) != 0)
1323 /* Hack: Causes ipchains to give correct error msg --RR */
1324 if (len != sizeof(tmp) + tmp.size)
1325 return -ENOPROTOOPT;
1327 /* overflow check */
1328 if (tmp.num_counters >= INT_MAX / sizeof(struct xt_counters))
1331 newinfo = xt_alloc_table_info(tmp.size);
1335 /* choose the copy that is our node/cpu */
1336 loc_cpu_entry = newinfo->entries[raw_smp_processor_id()];
1337 if (copy_from_user(loc_cpu_entry, user + sizeof(tmp),
1343 ret = translate_table(tmp.name, tmp.valid_hooks,
1344 newinfo, loc_cpu_entry, tmp.size, tmp.num_entries,
1345 tmp.hook_entry, tmp.underflow);
1349 duprintf("ip_tables: Translated table\n");
1351 ret = __do_replace(tmp.name, tmp.valid_hooks, newinfo,
1352 tmp.num_counters, tmp.counters);
1354 goto free_newinfo_untrans;
1357 free_newinfo_untrans:
1358 IPT_ENTRY_ITERATE(loc_cpu_entry, newinfo->size, cleanup_entry,NULL);
1360 xt_free_table_info(newinfo);
1364 /* We're lazy, and add to the first CPU; overflow works its fey magic
1365 * and everything is OK. */
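
/*
 * Context (illustration, not from the original file): libiptc uses
 * IPT_SO_SET_ADD_COUNTERS after a table replace to fold the packet and
 * byte counts that the old rules accumulated in the meantime back into
 * the new rules.  The additions below land on whichever CPU's copy
 * happens to be current; that is fine because get_counters() sums every
 * CPU's copy, so the per-rule totals still come out right -- the "fey
 * magic" referred to above.
 */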
1367 add_counter_to_entry(struct ipt_entry *e,
1368 const struct xt_counters addme[],
1372 duprintf("add_counter: Entry %u %lu/%lu + %lu/%lu\n",
1374 (long unsigned int)e->counters.pcnt,
1375 (long unsigned int)e->counters.bcnt,
1376 (long unsigned int)addme[*i].pcnt,
1377 (long unsigned int)addme[*i].bcnt);
1380 ADD_COUNTER(e->counters, addme[*i].bcnt, addme[*i].pcnt);
1387 do_add_counters(void __user *user, unsigned int len, int compat)
1390 struct xt_counters_info tmp;
1391 struct xt_counters *paddc;
1392 unsigned int num_counters;
1397 struct xt_table_info *private;
1399 void *loc_cpu_entry;
1400 #ifdef CONFIG_COMPAT
1401 struct compat_xt_counters_info compat_tmp;
1405 size = sizeof(struct compat_xt_counters_info);
1410 size = sizeof(struct xt_counters_info);
1413 if (copy_from_user(ptmp, user, size) != 0)
1416 #ifdef CONFIG_COMPAT
1418 num_counters = compat_tmp.num_counters;
1419 name = compat_tmp.name;
1423 num_counters = tmp.num_counters;
1427 if (len != size + num_counters * sizeof(struct xt_counters))
1430 paddc = vmalloc_node(len - size, numa_node_id());
1434 if (copy_from_user(paddc, user + size, len - size) != 0) {
1439 t = xt_find_table_lock(AF_INET, name);
1440 if (!t || IS_ERR(t)) {
1441 ret = t ? PTR_ERR(t) : -ENOENT;
1445 write_lock_bh(&t->lock);
1446 private = t->private;
1447 if (private->number != num_counters) {
1449 goto unlock_up_free;
1453 /* Choose the copy that is on our node */
1454 loc_cpu_entry = private->entries[raw_smp_processor_id()];
1455 IPT_ENTRY_ITERATE(loc_cpu_entry,
1457 add_counter_to_entry,
1461 write_unlock_bh(&t->lock);
1470 #ifdef CONFIG_COMPAT
1471 struct compat_ipt_replace {
1472 char name[IPT_TABLE_MAXNAMELEN];
1476 u32 hook_entry[NF_INET_NUMHOOKS];
1477 u32 underflow[NF_INET_NUMHOOKS];
1479 compat_uptr_t counters; /* struct ipt_counters * */
1480 struct compat_ipt_entry entries[0];
1484 compat_copy_entry_to_user(struct ipt_entry *e, void __user **dstptr,
1485 compat_uint_t *size, struct xt_counters *counters,
1488 struct ipt_entry_target *t;
1489 struct compat_ipt_entry __user *ce;
1490 u_int16_t target_offset, next_offset;
1491 compat_uint_t origsize;
1496 ce = (struct compat_ipt_entry __user *)*dstptr;
1497 if (copy_to_user(ce, e, sizeof(struct ipt_entry)))
1500 if (copy_to_user(&ce->counters, &counters[*i], sizeof(counters[*i])))
1503 *dstptr += sizeof(struct compat_ipt_entry);
1504 ret = IPT_MATCH_ITERATE(e, xt_compat_match_to_user, dstptr, size);
1505 target_offset = e->target_offset - (origsize - *size);
1508 t = ipt_get_target(e);
1509 ret = xt_compat_target_to_user(t, dstptr, size);
1513 next_offset = e->next_offset - (origsize - *size);
1514 if (put_user(target_offset, &ce->target_offset))
1516 if (put_user(next_offset, &ce->next_offset))
1526 compat_find_calc_match(struct ipt_entry_match *m,
1528 const struct ipt_ip *ip,
1529 unsigned int hookmask,
1532 struct xt_match *match;
1534 match = try_then_request_module(xt_find_match(AF_INET, m->u.user.name,
1535 m->u.user.revision),
1536 "ipt_%s", m->u.user.name);
1537 if (IS_ERR(match) || !match) {
1538 duprintf("compat_check_calc_match: `%s' not found\n",
1540 return match ? PTR_ERR(match) : -ENOENT;
1542 m->u.kernel.match = match;
1543 *size += xt_compat_match_offset(match);
1550 compat_release_match(struct ipt_entry_match *m, unsigned int *i)
1552 if (i && (*i)-- == 0)
1555 module_put(m->u.kernel.match->me);
1560 compat_release_entry(struct ipt_entry *e, unsigned int *i)
1562 struct ipt_entry_target *t;
1564 if (i && (*i)-- == 0)
1567 /* Cleanup all matches */
1568 IPT_MATCH_ITERATE(e, compat_release_match, NULL);
1569 t = ipt_get_target(e);
1570 module_put(t->u.kernel.target->me);
1575 check_compat_entry_size_and_hooks(struct ipt_entry *e,
1576 struct xt_table_info *newinfo,
1578 unsigned char *base,
1579 unsigned char *limit,
1580 unsigned int *hook_entries,
1581 unsigned int *underflows,
1585 struct ipt_entry_target *t;
1586 struct xt_target *target;
1587 unsigned int entry_offset;
1590 duprintf("check_compat_entry_size_and_hooks %p\n", e);
1591 if ((unsigned long)e % __alignof__(struct compat_ipt_entry) != 0
1592 || (unsigned char *)e + sizeof(struct compat_ipt_entry) >= limit) {
1593 duprintf("Bad offset %p, limit = %p\n", e, limit);
1597 if (e->next_offset < sizeof(struct compat_ipt_entry) +
1598 sizeof(struct compat_xt_entry_target)) {
1599 duprintf("checking: element %p size %u\n",
1604 ret = check_entry(e, name);
1609 entry_offset = (void *)e - (void *)base;
1611 ret = IPT_MATCH_ITERATE(e, compat_find_calc_match, name, &e->ip,
1612 e->comefrom, &off, &j);
1614 goto release_matches;
1616 t = ipt_get_target(e);
1617 target = try_then_request_module(xt_find_target(AF_INET,
1619 t->u.user.revision),
1620 "ipt_%s", t->u.user.name);
1621 if (IS_ERR(target) || !target) {
1622 duprintf("check_compat_entry_size_and_hooks: `%s' not found\n",
1624 ret = target ? PTR_ERR(target) : -ENOENT;
1625 goto release_matches;
1627 t->u.kernel.target = target;
1629 off += xt_compat_target_offset(target);
1631 ret = compat_add_offset(entry_offset, off);
1635 /* Check hooks & underflows */
1636 for (h = 0; h < NF_INET_NUMHOOKS; h++) {
1637 if ((unsigned char *)e - base == hook_entries[h])
1638 newinfo->hook_entry[h] = hook_entries[h];
1639 if ((unsigned char *)e - base == underflows[h])
1640 newinfo->underflow[h] = underflows[h];
1643 /* Clear counters and comefrom */
1644 e->counters = ((struct ipt_counters) { 0, 0 });
1651 module_put(t->u.kernel.target->me);
1653 IPT_MATCH_ITERATE(e, compat_release_match, &j);
1658 compat_copy_entry_from_user(struct ipt_entry *e, void **dstptr,
1659 unsigned int *size, const char *name,
1660 struct xt_table_info *newinfo, unsigned char *base)
1662 struct ipt_entry_target *t;
1663 struct xt_target *target;
1664 struct ipt_entry *de;
1665 unsigned int origsize;
1670 de = (struct ipt_entry *)*dstptr;
1671 memcpy(de, e, sizeof(struct ipt_entry));
1673 *dstptr += sizeof(struct compat_ipt_entry);
1674 ret = IPT_MATCH_ITERATE(e, xt_compat_match_from_user, dstptr, size);
1677 de->target_offset = e->target_offset - (origsize - *size);
1678 t = ipt_get_target(e);
1679 target = t->u.kernel.target;
1680 xt_compat_target_from_user(t, dstptr, size);
1682 de->next_offset = e->next_offset - (origsize - *size);
1683 for (h = 0; h < NF_INET_NUMHOOKS; h++) {
1684 if ((unsigned char *)de - base < newinfo->hook_entry[h])
1685 newinfo->hook_entry[h] -= origsize - *size;
1686 if ((unsigned char *)de - base < newinfo->underflow[h])
1687 newinfo->underflow[h] -= origsize - *size;
1692 static inline int compat_check_entry(struct ipt_entry *e, const char *name,
1698 ret = IPT_MATCH_ITERATE(e, check_match, name, &e->ip, e->comefrom, &j);
1700 goto cleanup_matches;
1702 ret = check_target(e, name);
1704 goto cleanup_matches;
1710 IPT_MATCH_ITERATE(e, cleanup_match, &j);
1715 translate_compat_table(const char *name,
1716 unsigned int valid_hooks,
1717 struct xt_table_info **pinfo,
1719 unsigned int total_size,
1720 unsigned int number,
1721 unsigned int *hook_entries,
1722 unsigned int *underflows)
1725 struct xt_table_info *newinfo, *info;
1726 void *pos, *entry0, *entry1;
1733 info->number = number;
1735 /* Init all hooks to impossible value. */
1736 for (i = 0; i < NF_INET_NUMHOOKS; i++) {
1737 info->hook_entry[i] = 0xFFFFFFFF;
1738 info->underflow[i] = 0xFFFFFFFF;
1741 duprintf("translate_compat_table: size %u\n", info->size);
1743 xt_compat_lock(AF_INET);
1744 /* Walk through entries, checking offsets. */
1745 ret = IPT_ENTRY_ITERATE(entry0, total_size,
1746 check_compat_entry_size_and_hooks,
1747 info, &size, entry0,
1748 entry0 + total_size,
1749 hook_entries, underflows, &j, name);
1755 duprintf("translate_compat_table: %u not %u entries\n",
1760 /* Check hooks all assigned */
1761 for (i = 0; i < NF_INET_NUMHOOKS; i++) {
1762 /* Only hooks which are valid */
1763 if (!(valid_hooks & (1 << i)))
1765 if (info->hook_entry[i] == 0xFFFFFFFF) {
1766 duprintf("Invalid hook entry %u %u\n",
1767 i, hook_entries[i]);
1770 if (info->underflow[i] == 0xFFFFFFFF) {
1771 duprintf("Invalid underflow %u %u\n",
1778 newinfo = xt_alloc_table_info(size);
1782 newinfo->number = number;
1783 for (i = 0; i < NF_INET_NUMHOOKS; i++) {
1784 newinfo->hook_entry[i] = info->hook_entry[i];
1785 newinfo->underflow[i] = info->underflow[i];
1787 entry1 = newinfo->entries[raw_smp_processor_id()];
1790 ret = IPT_ENTRY_ITERATE(entry0, total_size,
1791 compat_copy_entry_from_user, &pos, &size,
1792 name, newinfo, entry1);
1793 compat_flush_offsets();
1794 xt_compat_unlock(AF_INET);
1799 if (!mark_source_chains(newinfo, valid_hooks, entry1))
1803 ret = IPT_ENTRY_ITERATE(entry1, newinfo->size, compat_check_entry,
1807 IPT_ENTRY_ITERATE_CONTINUE(entry1, newinfo->size, i,
1808 compat_release_entry, &j);
1809 IPT_ENTRY_ITERATE(entry1, newinfo->size, cleanup_entry, &i);
1810 xt_free_table_info(newinfo);
1814 /* And one copy for every other CPU */
1815 for_each_possible_cpu(i)
1816 if (newinfo->entries[i] && newinfo->entries[i] != entry1)
1817 memcpy(newinfo->entries[i], entry1, newinfo->size);
1821 xt_free_table_info(info);
1825 xt_free_table_info(newinfo);
1827 IPT_ENTRY_ITERATE(entry0, total_size, compat_release_entry, &j);
1830 compat_flush_offsets();
1831 xt_compat_unlock(AF_INET);
1836 compat_do_replace(void __user *user, unsigned int len)
1839 struct compat_ipt_replace tmp;
1840 struct xt_table_info *newinfo;
1841 void *loc_cpu_entry;
1843 if (copy_from_user(&tmp, user, sizeof(tmp)) != 0)
1846 /* Hack: Causes ipchains to give correct error msg --RR */
1847 if (len != sizeof(tmp) + tmp.size)
1848 return -ENOPROTOOPT;
1850 /* overflow check */
1851 if (tmp.size >= INT_MAX / num_possible_cpus())
1853 if (tmp.num_counters >= INT_MAX / sizeof(struct xt_counters))
1856 newinfo = xt_alloc_table_info(tmp.size);
1860 /* choose the copy that is our node/cpu */
1861 loc_cpu_entry = newinfo->entries[raw_smp_processor_id()];
1862 if (copy_from_user(loc_cpu_entry, user + sizeof(tmp),
1868 ret = translate_compat_table(tmp.name, tmp.valid_hooks,
1869 &newinfo, &loc_cpu_entry, tmp.size,
1870 tmp.num_entries, tmp.hook_entry,
1875 duprintf("compat_do_replace: Translated table\n");
1877 ret = __do_replace(tmp.name, tmp.valid_hooks, newinfo,
1878 tmp.num_counters, compat_ptr(tmp.counters));
1880 goto free_newinfo_untrans;
1883 free_newinfo_untrans:
1884 IPT_ENTRY_ITERATE(loc_cpu_entry, newinfo->size, cleanup_entry, NULL);
1886 xt_free_table_info(newinfo);
1891 compat_do_ipt_set_ctl(struct sock *sk, int cmd, void __user *user,
1896 if (!capable(CAP_NET_ADMIN))
1900 case IPT_SO_SET_REPLACE:
1901 ret = compat_do_replace(user, len);
1904 case IPT_SO_SET_ADD_COUNTERS:
1905 ret = do_add_counters(user, len, 1);
1909 duprintf("do_ipt_set_ctl: unknown request %i\n", cmd);
1916 struct compat_ipt_get_entries {
1917 char name[IPT_TABLE_MAXNAMELEN];
1919 struct compat_ipt_entry entrytable[0];
1923 compat_copy_entries_to_user(unsigned int total_size, struct xt_table *table,
1924 void __user *userptr)
1926 struct xt_counters *counters;
1927 struct xt_table_info *private = table->private;
1931 void *loc_cpu_entry;
1934 counters = alloc_counters(table);
1935 if (IS_ERR(counters))
1936 return PTR_ERR(counters);
	/* choose the copy that is on our node/cpu, ...
	 * This choice is lazy (because current thread is
	 * allowed to migrate to another cpu)
	 */
1942 loc_cpu_entry = private->entries[raw_smp_processor_id()];
1945 ret = IPT_ENTRY_ITERATE(loc_cpu_entry, total_size,
1946 compat_copy_entry_to_user,
1947 &pos, &size, counters, &i);
1954 compat_get_entries(struct compat_ipt_get_entries __user *uptr, int *len)
1957 struct compat_ipt_get_entries get;
1960 if (*len < sizeof(get)) {
1961 duprintf("compat_get_entries: %u < %u\n",
1962 *len, (unsigned int)sizeof(get));
1966 if (copy_from_user(&get, uptr, sizeof(get)) != 0)
1969 if (*len != sizeof(struct compat_ipt_get_entries) + get.size) {
1970 duprintf("compat_get_entries: %u != %u\n", *len,
1971 (unsigned int)(sizeof(struct compat_ipt_get_entries) +
1976 xt_compat_lock(AF_INET);
1977 t = xt_find_table_lock(AF_INET, get.name);
1978 if (t && !IS_ERR(t)) {
1979 struct xt_table_info *private = t->private;
1980 struct xt_table_info info;
1981 duprintf("t->private->number = %u\n",
1983 ret = compat_table_info(private, &info);
1984 if (!ret && get.size == info.size) {
1985 ret = compat_copy_entries_to_user(private->size,
1986 t, uptr->entrytable);
1988 duprintf("compat_get_entries: I've got %u not %u!\n",
1993 compat_flush_offsets();
1997 ret = t ? PTR_ERR(t) : -ENOENT;
1999 xt_compat_unlock(AF_INET);
2003 static int do_ipt_get_ctl(struct sock *, int, void __user *, int *);
2006 compat_do_ipt_get_ctl(struct sock *sk, int cmd, void __user *user, int *len)
2010 if (!capable(CAP_NET_ADMIN))
2014 case IPT_SO_GET_INFO:
2015 ret = get_info(user, len, 1);
2017 case IPT_SO_GET_ENTRIES:
2018 ret = compat_get_entries(user, len);
2021 ret = do_ipt_get_ctl(sk, cmd, user, len);
2028 do_ipt_set_ctl(struct sock *sk, int cmd, void __user *user, unsigned int len)
2032 if (!capable(CAP_NET_ADMIN))
2036 case IPT_SO_SET_REPLACE:
2037 ret = do_replace(user, len);
2040 case IPT_SO_SET_ADD_COUNTERS:
2041 ret = do_add_counters(user, len, 0);
2045 duprintf("do_ipt_set_ctl: unknown request %i\n", cmd);
2053 do_ipt_get_ctl(struct sock *sk, int cmd, void __user *user, int *len)
2057 if (!capable(CAP_NET_ADMIN))
2061 case IPT_SO_GET_INFO:
2062 ret = get_info(user, len, 0);
2065 case IPT_SO_GET_ENTRIES:
2066 ret = get_entries(user, len);
2069 case IPT_SO_GET_REVISION_MATCH:
2070 case IPT_SO_GET_REVISION_TARGET: {
2071 struct ipt_get_revision rev;
2074 if (*len != sizeof(rev)) {
2078 if (copy_from_user(&rev, user, sizeof(rev)) != 0) {
2083 if (cmd == IPT_SO_GET_REVISION_TARGET)
2088 try_then_request_module(xt_find_revision(AF_INET, rev.name,
2091 "ipt_%s", rev.name);
2096 duprintf("do_ipt_get_ctl: unknown request %i\n", cmd);
2103 int ipt_register_table(struct xt_table *table, const struct ipt_replace *repl)
2106 struct xt_table_info *newinfo;
2107 struct xt_table_info bootstrap
2108 = { 0, 0, 0, { 0 }, { 0 }, { } };
2109 void *loc_cpu_entry;
2111 newinfo = xt_alloc_table_info(repl->size);
	/* choose the copy on our node/cpu,
	 * but don't care about preemption
	 */
2118 loc_cpu_entry = newinfo->entries[raw_smp_processor_id()];
2119 memcpy(loc_cpu_entry, repl->entries, repl->size);
2121 ret = translate_table(table->name, table->valid_hooks,
2122 newinfo, loc_cpu_entry, repl->size,
2127 xt_free_table_info(newinfo);
2131 ret = xt_register_table(table, &bootstrap, newinfo);
2133 xt_free_table_info(newinfo);
2140 void ipt_unregister_table(struct xt_table *table)
2142 struct xt_table_info *private;
2143 void *loc_cpu_entry;
2145 private = xt_unregister_table(table);
2147 /* Decrease module usage counts and free resources */
2148 loc_cpu_entry = private->entries[raw_smp_processor_id()];
2149 IPT_ENTRY_ITERATE(loc_cpu_entry, private->size, cleanup_entry, NULL);
2150 xt_free_table_info(private);
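
/*
 * Usage sketch (illustration only; the names here are hypothetical, not
 * from this file): a table module such as iptable_filter supplies a
 * struct xt_table plus an initial struct ipt_replace describing its
 * built-in chains, then registers and unregisters it like this:
 *
 *	static struct xt_table my_table = {
 *		.name		= "mytable",
 *		.valid_hooks	= 1 << NF_INET_LOCAL_IN,
 *		.me		= THIS_MODULE,
 *		.af		= AF_INET,
 *	};
 *
 *	ret = ipt_register_table(&my_table, &my_initial_replace);
 *	...
 *	ipt_unregister_table(&my_table);	(on module exit)
 */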
/* Returns 1 if the type and code are matched by the range, 0 otherwise */
static inline bool
icmp_type_code_match(u_int8_t test_type, u_int8_t min_code, u_int8_t max_code,
		     u_int8_t type, u_int8_t code,
		     bool invert)
{
	return ((test_type == 0xFF) ||
		(type == test_type && code >= min_code && code <= max_code))
		^ invert;
}
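
/*
 * Example (not from the original source): "-p icmp --icmp-type 8"
 * becomes test_type == 8 with min_code == 0 and max_code == 0xFF, so an
 * echo request (type 8, any code) matches while an echo reply (type 0)
 * does not.  test_type == 0xFF means "any type" (what iptables uses
 * when no --icmp-type is given), and invert flips the final answer for
 * "! --icmp-type" rules.
 */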
2164 icmp_match(const struct sk_buff *skb,
2165 const struct net_device *in,
2166 const struct net_device *out,
2167 const struct xt_match *match,
2168 const void *matchinfo,
2170 unsigned int protoff,
2173 struct icmphdr _icmph, *ic;
2174 const struct ipt_icmp *icmpinfo = matchinfo;
2176 /* Must not be a fragment. */
2180 ic = skb_header_pointer(skb, protoff, sizeof(_icmph), &_icmph);
		/* We've been asked to examine this packet, and we
		 * can't.  Hence, no choice but to drop.
		 */
2185 duprintf("Dropping evil ICMP tinygram.\n");
2190 return icmp_type_code_match(icmpinfo->type,
2194 !!(icmpinfo->invflags&IPT_ICMP_INV));
2197 /* Called when user tries to insert an entry of this type. */
2199 icmp_checkentry(const char *tablename,
2201 const struct xt_match *match,
2203 unsigned int hook_mask)
2205 const struct ipt_icmp *icmpinfo = matchinfo;
2207 /* Must specify no unknown invflags */
2208 return !(icmpinfo->invflags & ~IPT_ICMP_INV);
2211 /* The built-in targets: standard (NULL) and error. */
2212 static struct xt_target ipt_standard_target __read_mostly = {
2213 .name = IPT_STANDARD_TARGET,
2214 .targetsize = sizeof(int),
2216 #ifdef CONFIG_COMPAT
2217 .compatsize = sizeof(compat_int_t),
2218 .compat_from_user = compat_standard_from_user,
2219 .compat_to_user = compat_standard_to_user,
2223 static struct xt_target ipt_error_target __read_mostly = {
2224 .name = IPT_ERROR_TARGET,
2225 .target = ipt_error,
2226 .targetsize = IPT_FUNCTION_MAXNAMELEN,
2230 static struct nf_sockopt_ops ipt_sockopts = {
2232 .set_optmin = IPT_BASE_CTL,
2233 .set_optmax = IPT_SO_SET_MAX+1,
2234 .set = do_ipt_set_ctl,
2235 #ifdef CONFIG_COMPAT
2236 .compat_set = compat_do_ipt_set_ctl,
2238 .get_optmin = IPT_BASE_CTL,
2239 .get_optmax = IPT_SO_GET_MAX+1,
2240 .get = do_ipt_get_ctl,
2241 #ifdef CONFIG_COMPAT
2242 .compat_get = compat_do_ipt_get_ctl,
2244 .owner = THIS_MODULE,
2247 static struct xt_match icmp_matchstruct __read_mostly = {
2249 .match = icmp_match,
2250 .matchsize = sizeof(struct ipt_icmp),
2251 .proto = IPPROTO_ICMP,
2253 .checkentry = icmp_checkentry,
2256 static int __init ip_tables_init(void)
2260 ret = xt_proto_init(AF_INET);
	/* No one else will be downing sem now, so we won't sleep */
2265 ret = xt_register_target(&ipt_standard_target);
2268 ret = xt_register_target(&ipt_error_target);
2271 ret = xt_register_match(&icmp_matchstruct);
2275 /* Register setsockopt */
2276 ret = nf_register_sockopt(&ipt_sockopts);
2280 printk(KERN_INFO "ip_tables: (C) 2000-2006 Netfilter Core Team\n");
2284 xt_unregister_match(&icmp_matchstruct);
2286 xt_unregister_target(&ipt_error_target);
2288 xt_unregister_target(&ipt_standard_target);
2290 xt_proto_fini(AF_INET);
2295 static void __exit ip_tables_fini(void)
2297 nf_unregister_sockopt(&ipt_sockopts);
2299 xt_unregister_match(&icmp_matchstruct);
2300 xt_unregister_target(&ipt_error_target);
2301 xt_unregister_target(&ipt_standard_target);
2303 xt_proto_fini(AF_INET);
2306 EXPORT_SYMBOL(ipt_register_table);
2307 EXPORT_SYMBOL(ipt_unregister_table);
2308 EXPORT_SYMBOL(ipt_do_table);
2309 module_init(ip_tables_init);
2310 module_exit(ip_tables_fini);