2 * Packet matching code.
4 * Copyright (C) 1999 Paul `Rusty' Russell & Michael J. Neuling
5 * Copyright (C) 2000-2005 Netfilter Core Team <coreteam@netfilter.org>
7 * This program is free software; you can redistribute it and/or modify
8 * it under the terms of the GNU General Public License version 2 as
9 * published by the Free Software Foundation.
11 #include <linux/cache.h>
12 #include <linux/capability.h>
13 #include <linux/skbuff.h>
14 #include <linux/kmod.h>
15 #include <linux/vmalloc.h>
16 #include <linux/netdevice.h>
17 #include <linux/module.h>
18 #include <linux/icmp.h>
20 #include <net/compat.h>
21 #include <asm/uaccess.h>
22 #include <linux/mutex.h>
23 #include <linux/proc_fs.h>
24 #include <linux/err.h>
25 #include <linux/cpumask.h>
27 #include <linux/netfilter/x_tables.h>
28 #include <linux/netfilter_ipv4/ip_tables.h>
29 #include <net/netfilter/nf_log.h>
/* Module metadata for the IPv4 netfilter packet-filter core (iptables). */
31 MODULE_LICENSE("GPL");
32 MODULE_AUTHOR("Netfilter Core Team <coreteam@netfilter.org>");
33 MODULE_DESCRIPTION("IPv4 packet filter");
/* Compile-time debug switches: uncomment to enable verbose firewall
 * debugging (dprintf), userspace-interface debugging (duprintf), or
 * accept-everything mode (DEBUG_ALLOW_ALL).
 * NOTE(review): the #else/#endif lines of these conditionals are missing
 * from this extract — each macro has a printk variant and an empty variant. */
35 /*#define DEBUG_IP_FIREWALL*/
36 /*#define DEBUG_ALLOW_ALL*/ /* Useful for remote debugging */
37 /*#define DEBUG_IP_FIREWALL_USER*/
39 #ifdef DEBUG_IP_FIREWALL
40 #define dprintf(format, args...) printk(format , ## args)
/* empty no-op variant when DEBUG_IP_FIREWALL is off */
42 #define dprintf(format, args...)
45 #ifdef DEBUG_IP_FIREWALL_USER
46 #define duprintf(format, args...) printk(format , ## args)
/* empty no-op variant when DEBUG_IP_FIREWALL_USER is off */
48 #define duprintf(format, args...)
51 #ifdef CONFIG_NETFILTER_DEBUG
/* Assertion helper: logs function/file/line when the condition fails. */
52 #define IP_NF_ASSERT(x) \
55 printk("IP_NF_ASSERT: %s:%s:%u\n", \
56 __FUNCTION__, __FILE__, __LINE__); \
/* no-op variant when CONFIG_NETFILTER_DEBUG is off */
59 #define IP_NF_ASSERT(x)
63 /* All the better to debug you with... */
69 We keep a set of rules for each CPU, so we can avoid write-locking
70 them in the softirq when updating the counters and therefore
71 only need to read-lock in the softirq; doing a write_lock_bh() in user
72 context stops packets coming through and allows user context to read
73 the counters or update the rules.
75 Hence the start of any table is given by get_table() below. */
77 /* Returns whether matches rule or not. */
/* ip_packet_match - test one packet's IP header against one rule's
 * struct ipt_ip selector (addresses, interfaces, protocol, fragment flag).
 * Each check is run through FWINV() so a set IPT_INV_* bit inverts the
 * sense of the comparison.
 * NOTE(review): signature/body lines are missing from this extract; the
 * full version also takes indev/outdev names and an isfrag/offset flag. */
79 ip_packet_match(const struct iphdr *ip,
82 const struct ipt_ip *ipinfo,
/* XOR the raw comparison with the rule's inversion bit. */
88 #define FWINV(bool, invflg) ((bool) ^ !!(ipinfo->invflags & (invflg)))
/* Source/destination address check: mask the packet address with the
 * rule's netmask and compare against the rule's (pre-masked) address. */
90 if (FWINV((ip->saddr&ipinfo->smsk.s_addr) != ipinfo->src.s_addr,
92 || FWINV((ip->daddr&ipinfo->dmsk.s_addr) != ipinfo->dst.s_addr,
94 dprintf("Source or dest mismatch.\n");
96 dprintf("SRC: %u.%u.%u.%u. Mask: %u.%u.%u.%u. Target: %u.%u.%u.%u.%s\n",
98 NIPQUAD(ipinfo->smsk.s_addr),
99 NIPQUAD(ipinfo->src.s_addr),
100 ipinfo->invflags & IPT_INV_SRCIP ? " (INV)" : "");
101 dprintf("DST: %u.%u.%u.%u Mask: %u.%u.%u.%u Target: %u.%u.%u.%u.%s\n",
103 NIPQUAD(ipinfo->dmsk.s_addr),
104 NIPQUAD(ipinfo->dst.s_addr),
105 ipinfo->invflags & IPT_INV_DSTIP ? " (INV)" : "");
109 /* Look for ifname matches; this should unroll nicely. */
/* Compare interface names one unsigned long at a time under the rule's
 * per-byte mask; ret accumulates any mismatching bits. */
110 for (i = 0, ret = 0; i < IFNAMSIZ/sizeof(unsigned long); i++) {
111 ret |= (((const unsigned long *)indev)[i]
112 ^ ((const unsigned long *)ipinfo->iniface)[i])
113 & ((const unsigned long *)ipinfo->iniface_mask)[i];
116 if (FWINV(ret != 0, IPT_INV_VIA_IN)) {
117 dprintf("VIA in mismatch (%s vs %s).%s\n",
118 indev, ipinfo->iniface,
119 ipinfo->invflags&IPT_INV_VIA_IN ?" (INV)":"");
/* Same masked comparison for the outgoing interface name. */
123 for (i = 0, ret = 0; i < IFNAMSIZ/sizeof(unsigned long); i++) {
124 ret |= (((const unsigned long *)outdev)[i]
125 ^ ((const unsigned long *)ipinfo->outiface)[i])
126 & ((const unsigned long *)ipinfo->outiface_mask)[i];
129 if (FWINV(ret != 0, IPT_INV_VIA_OUT)) {
130 dprintf("VIA out mismatch (%s vs %s).%s\n",
131 outdev, ipinfo->outiface,
132 ipinfo->invflags&IPT_INV_VIA_OUT ?" (INV)":"");
136 /* Check specific protocol */
138 && FWINV(ip->protocol != ipinfo->proto, IPT_INV_PROTO)) {
139 dprintf("Packet protocol %hi does not match %hi.%s\n",
140 ip->protocol, ipinfo->proto,
141 ipinfo->invflags&IPT_INV_PROTO ? " (INV)":"");
145 /* If we have a fragment rule but the packet is not a fragment
146 * then we return zero */
147 if (FWINV((ipinfo->flags&IPT_F_FRAG) && !isfrag, IPT_INV_FRAG)) {
148 dprintf("Fragment rule but not fragment.%s\n",
149 ipinfo->invflags & IPT_INV_FRAG ? " (INV)" : "");
/* ip_checkentry - sanity-check a rule's IP selector from userspace:
 * reject any flag or inversion-flag bits outside the known masks.
 * NOTE(review): return statements are missing from this extract. */
157 ip_checkentry(const struct ipt_ip *ip)
159 if (ip->flags & ~IPT_F_MASK) {
160 duprintf("Unknown flag bits set: %08X\n",
161 ip->flags & ~IPT_F_MASK);
164 if (ip->invflags & ~IPT_INV_MASK) {
165 duprintf("Unknown invflag bits set: %08X\n",
166 ip->invflags & ~IPT_INV_MASK);
/* ipt_error - target handler for the built-in ERROR target; such an entry
 * should never be hit at runtime, so it just logs the embedded message
 * (targinfo is the error string written by userspace). */
173 ipt_error(struct sk_buff *skb,
174 const struct net_device *in,
175 const struct net_device *out,
176 unsigned int hooknum,
177 const struct xt_target *target,
178 const void *targinfo)
181 printk("ip_tables: error: `%s'\n", (char *)targinfo);
/* do_match - run one match extension against the packet; used as the
 * IPT_MATCH_ITERATE callback from ipt_do_table. A nonzero return stops
 * the iteration (i.e. the rule does not match). */
187 bool do_match(struct ipt_entry_match *m,
188 const struct sk_buff *skb,
189 const struct net_device *in,
190 const struct net_device *out,
194 /* Stop iteration if it doesn't match */
195 if (!m->u.kernel.match->match(skb, in, out, m->u.kernel.match, m->data,
196 offset, ip_hdrlen(skb), hotdrop))
/* get_entry - translate a byte offset within a table blob into an
 * ipt_entry pointer. No bounds checking here; offsets are validated
 * when the table is loaded. */
202 static inline struct ipt_entry *
203 get_entry(void *base, unsigned int offset)
205 return (struct ipt_entry *)(base + offset);
208 /* All zeroes == unconditional rule. */
/* unconditional - true iff the ipt_ip selector is entirely zero, i.e.
 * the rule matches every packet (used to recognise chain policies and
 * unconditional returns). Scans the struct as an array of __u32 words. */
210 unconditional(const struct ipt_ip *ip)
214 for (i = 0; i < sizeof(*ip)/sizeof(__u32); i++)
215 if (((__u32 *)ip)[i])
/* Static lookup tables used only by packet tracing (TRACE target):
 * human-readable hook names, rule-kind comments, and the nf_log
 * parameters used for the trace output. */
222 #if defined(CONFIG_NETFILTER_XT_TARGET_TRACE) || \
223 defined(CONFIG_NETFILTER_XT_TARGET_TRACE_MODULE)
224 static const char *hooknames[] = {
225 [NF_INET_PRE_ROUTING] = "PREROUTING",
226 [NF_INET_LOCAL_IN] = "INPUT",
227 [NF_INET_FORWARD] = "FORWARD",
228 [NF_INET_LOCAL_OUT] = "OUTPUT",
229 [NF_INET_POST_ROUTING] = "POSTROUTING",
/* Index constants for the comments[] table below. */
232 enum nf_ip_trace_comments {
233 NF_IP_TRACE_COMMENT_RULE,
234 NF_IP_TRACE_COMMENT_RETURN,
235 NF_IP_TRACE_COMMENT_POLICY,
238 static const char *comments[] = {
239 [NF_IP_TRACE_COMMENT_RULE] = "rule",
240 [NF_IP_TRACE_COMMENT_RETURN] = "return",
241 [NF_IP_TRACE_COMMENT_POLICY] = "policy",
/* Logging parameters for trace output via nf_log_packet(). */
244 static struct nf_loginfo trace_loginfo = {
245 .type = NF_LOG_TYPE_LOG,
249 .logflags = NF_LOG_MASK,
/* get_chainname_rulenum - IPT_ENTRY_ITERATE callback used by
 * trace_packet(): walks entries from the hook start toward the matched
 * entry, tracking the current user-chain name (from ERROR-target chain
 * heads) and the rule number within it, and classifying the matched
 * entry as a rule / return / policy. */
255 get_chainname_rulenum(struct ipt_entry *s, struct ipt_entry *e,
256 char *hookname, char **chainname,
257 char **comment, unsigned int *rulenum)
259 struct ipt_standard_target *t = (void *)ipt_get_target(s);
261 if (strcmp(t->target.u.kernel.target->name, IPT_ERROR_TARGET) == 0) {
262 /* Head of user chain: ERROR target with chainname */
263 *chainname = t->target.data;
/* An unconditional STANDARD target with no matches marks a chain tail:
 * a policy if we're still in the base chain, otherwise a return. */
268 if (s->target_offset == sizeof(struct ipt_entry)
269 && strcmp(t->target.u.kernel.target->name,
270 IPT_STANDARD_TARGET) == 0
272 && unconditional(&s->ip)) {
273 /* Tail of chains: STANDARD target (return/policy) */
274 *comment = *chainname == hookname
275 ? (char *)comments[NF_IP_TRACE_COMMENT_POLICY]
276 : (char *)comments[NF_IP_TRACE_COMMENT_RETURN];
/* trace_packet - emit a "TRACE: table:chain:comment:rulenum" log line for
 * a packet marked with skb->nf_trace. Walks this CPU's copy of the table
 * from the hook entry point to locate the matched entry's chain/rule. */
285 static void trace_packet(struct sk_buff *skb,
287 const struct net_device *in,
288 const struct net_device *out,
290 struct xt_table_info *private,
294 struct ipt_entry *root;
295 char *hookname, *chainname, *comment;
296 unsigned int rulenum = 0;
/* Per-CPU copy of the ruleset for the current processor. */
298 table_base = (void *)private->entries[smp_processor_id()];
299 root = get_entry(table_base, private->hook_entry[hook]);
/* Defaults: base-chain name, generic "rule" comment. */
301 hookname = chainname = (char *)hooknames[hook];
302 comment = (char *)comments[NF_IP_TRACE_COMMENT_RULE];
304 IPT_ENTRY_ITERATE(root,
305 private->size - private->hook_entry[hook],
306 get_chainname_rulenum,
307 e, hookname, &chainname, &comment, &rulenum);
309 nf_log_packet(AF_INET, hook, skb, in, out, &trace_loginfo,
310 "TRACE: %s:%s:%s:%u ",
311 tablename, chainname, comment, rulenum);
315 /* Returns one of the generic firewall policies, like NF_ACCEPT. */
/* ipt_do_table - the iptables fast path: traverse one table's ruleset for
 * a packet at a given hook and return a netfilter verdict. Runs under the
 * table read lock on this CPU's private copy of the rules; user chains
 * are implemented with a back pointer stashed in the next entry.
 * NOTE(review): many interior lines (loop head, verdict bookkeeping,
 * function tail) are missing from this extract. */
317 ipt_do_table(struct sk_buff *skb,
319 const struct net_device *in,
320 const struct net_device *out,
321 struct xt_table *table)
/* All-zero device name used when in/out is NULL, aligned so the
 * unsigned-long interface comparison in ip_packet_match is safe. */
323 static const char nulldevname[IFNAMSIZ] __attribute__((aligned(sizeof(long))));
327 bool hotdrop = false;
328 /* Initializing verdict to NF_DROP keeps gcc happy. */
329 unsigned int verdict = NF_DROP;
330 const char *indev, *outdev;
332 struct ipt_entry *e, *back;
333 struct xt_table_info *private;
337 datalen = skb->len - ip->ihl * 4;
338 indev = in ? in->name : nulldevname;
339 outdev = out ? out->name : nulldevname;
340 /* We handle fragments by dealing with the first fragment as
341 * if it was a normal packet. All other fragments are treated
342 * normally, except that they will NEVER match rules that ask
343 * things we don't know, ie. tcp syn flag or ports). If the
344 * rule is also a fragment-specific rule, non-fragments won't
346 offset = ntohs(ip->frag_off) & IP_OFFSET;
348 read_lock_bh(&table->lock);
349 IP_NF_ASSERT(table->valid_hooks & (1 << hook));
350 private = table->private;
351 table_base = (void *)private->entries[smp_processor_id()];
352 e = get_entry(table_base, private->hook_entry[hook]);
354 /* For return from builtin chain */
355 back = get_entry(table_base, private->underflow[hook]);
/* Per-entry loop: IP header match first, then match extensions. */
360 if (ip_packet_match(ip, indev, outdev, &e->ip, offset)) {
361 struct ipt_entry_target *t;
363 if (IPT_MATCH_ITERATE(e, do_match,
365 offset, &hotdrop) != 0)
/* Rule matched: account bytes (IP total length) and one packet. */
368 ADD_COUNTER(e->counters, ntohs(ip->tot_len), 1);
370 t = ipt_get_target(e);
371 IP_NF_ASSERT(t->u.kernel.target);
373 #if defined(CONFIG_NETFILTER_XT_TARGET_TRACE) || \
374 defined(CONFIG_NETFILTER_XT_TARGET_TRACE_MODULE)
375 /* The packet is traced: log it */
376 if (unlikely(skb->nf_trace))
377 trace_packet(skb, hook, in, out,
378 table->name, private, e);
380 /* Standard target? */
/* NULL target function == built-in standard target whose verdict is
 * an absolute jump offset (v >= 0) or an encoded verdict (v < 0). */
381 if (!t->u.kernel.target->target) {
384 v = ((struct ipt_standard_target *)t)->verdict;
386 /* Pop from stack? */
387 if (v != IPT_RETURN) {
388 verdict = (unsigned)(-v) - 1;
392 back = get_entry(table_base,
/* Jump into a user chain (unless it's a GOTO or a fallthrough):
 * remember where to return by saving `back` in the next entry. */
396 if (table_base + v != (void *)e + e->next_offset
397 && !(e->ip.flags & IPT_F_GOTO)) {
398 /* Save old back ptr in next entry */
399 struct ipt_entry *next
400 = (void *)e + e->next_offset;
402 = (void *)back - table_base;
403 /* set back pointer to next entry */
407 e = get_entry(table_base, v);
409 /* Targets which reenter must return
411 #ifdef CONFIG_NETFILTER_DEBUG
412 ((struct ipt_entry *)table_base)->comefrom
415 verdict = t->u.kernel.target->target(skb,
421 #ifdef CONFIG_NETFILTER_DEBUG
/* Debug check: a target that re-entered the traversal must not
 * also return IPT_CONTINUE. */
422 if (((struct ipt_entry *)table_base)->comefrom
424 && verdict == IPT_CONTINUE) {
425 printk("Target %s reentered!\n",
426 t->u.kernel.target->name);
429 ((struct ipt_entry *)table_base)->comefrom
432 /* Target might have changed stuff. */
434 datalen = skb->len - ip->ihl * 4;
436 if (verdict == IPT_CONTINUE)
437 e = (void *)e + e->next_offset;
/* No match: advance to the next entry. */
445 e = (void *)e + e->next_offset;
449 read_unlock_bh(&table->lock);
451 #ifdef DEBUG_ALLOW_ALL
460 /* Figures out from what hook each rule can be called: returns 0 if
461 there are loops. Puts hook bitmask in comefrom. */
/* mark_source_chains - depth-first walk of the ruleset from every valid
 * hook entry point, recording in each entry's comefrom which hooks can
 * reach it and detecting chain loops. Recursion is avoided by reusing
 * counters.pcnt as a back-pointer stack (restored to 0 on the way out);
 * bit NF_INET_NUMHOOKS in comefrom marks "currently on the DFS path". */
463 mark_source_chains(struct xt_table_info *newinfo,
464 unsigned int valid_hooks, void *entry0)
468 /* No recursion; use packet counter to save back ptrs (reset
469 to 0 as we leave), and comefrom to save source hook bitmask */
470 for (hook = 0; hook < NF_INET_NUMHOOKS; hook++) {
471 unsigned int pos = newinfo->hook_entry[hook];
472 struct ipt_entry *e = (struct ipt_entry *)(entry0 + pos);
474 if (!(valid_hooks & (1 << hook)))
477 /* Set initial back pointer. */
478 e->counters.pcnt = pos;
481 struct ipt_standard_target *t
482 = (void *)ipt_get_target(e);
483 int visited = e->comefrom & (1 << hook);
/* Re-visiting an entry that is still on the active path => loop. */
485 if (e->comefrom & (1 << NF_INET_NUMHOOKS)) {
486 printk("iptables: loop hook %u pos %u %08X.\n",
487 hook, pos, e->comefrom);
490 e->comefrom |= ((1 << hook) | (1 << NF_INET_NUMHOOKS));
492 /* Unconditional return/END. */
493 if ((e->target_offset == sizeof(struct ipt_entry)
494 && (strcmp(t->target.u.user.name,
495 IPT_STANDARD_TARGET) == 0)
497 && unconditional(&e->ip)) || visited) {
498 unsigned int oldpos, size;
/* Verdicts below -NF_MAX_VERDICT-1 cannot be valid encodings. */
500 if (t->verdict < -NF_MAX_VERDICT - 1) {
501 duprintf("mark_source_chains: bad "
502 "negative verdict (%i)\n",
507 /* Return: backtrack through the last
510 e->comefrom ^= (1<<NF_INET_NUMHOOKS);
511 #ifdef DEBUG_IP_FIREWALL_USER
513 & (1 << NF_INET_NUMHOOKS)) {
514 duprintf("Back unset "
/* Pop the saved back pointer and clear it as we unwind. */
521 pos = e->counters.pcnt;
522 e->counters.pcnt = 0;
524 /* We're at the start. */
528 e = (struct ipt_entry *)
530 } while (oldpos == pos + e->next_offset);
/* Move on to the entry after the one we returned to. */
533 size = e->next_offset;
534 e = (struct ipt_entry *)
535 (entry0 + pos + size);
536 e->counters.pcnt = pos;
/* Conditional entry: either a jump (standard target with
 * non-negative verdict) or a fallthrough to the next entry. */
539 int newpos = t->verdict;
541 if (strcmp(t->target.u.user.name,
542 IPT_STANDARD_TARGET) == 0
544 if (newpos > newinfo->size -
545 sizeof(struct ipt_entry)) {
546 duprintf("mark_source_chains: "
547 "bad verdict (%i)\n",
551 /* This a jump; chase it. */
552 duprintf("Jump rule %u -> %u\n",
555 /* ... this is a fallthru */
556 newpos = pos + e->next_offset;
558 e = (struct ipt_entry *)
560 e->counters.pcnt = pos;
565 duprintf("Finished chain %u\n", hook);
/* cleanup_match - release one match extension: call its destroy hook (if
 * any) and drop the module reference. When i is non-NULL, it limits how
 * many matches are cleaned (used to unwind partially-checked entries). */
571 cleanup_match(struct ipt_entry_match *m, unsigned int *i)
573 if (i && (*i)-- == 0)
576 if (m->u.kernel.match->destroy)
577 m->u.kernel.match->destroy(m->u.kernel.match, m->data);
578 module_put(m->u.kernel.match->me);
/* check_entry - basic structural validation of one userspace-supplied
 * entry: valid IP selector, target header fits before next_offset, and
 * the declared target size stays inside the entry. */
583 check_entry(struct ipt_entry *e, const char *name)
585 struct ipt_entry_target *t;
587 if (!ip_checkentry(&e->ip)) {
588 duprintf("ip_tables: ip check failed %p %s.\n", e, name);
/* Target record must fit entirely between target_offset and next_offset. */
592 if (e->target_offset + sizeof(struct ipt_entry_target) >
596 t = ipt_get_target(e);
597 if (e->target_offset + t->u.target_size > e->next_offset)
/* check_match - validate one already-resolved match extension via the
 * generic xt_check_match() (size/proto/hook constraints) and then the
 * extension's own checkentry hook, if it provides one. */
603 static inline int check_match(struct ipt_entry_match *m, const char *name,
604 const struct ipt_ip *ip,
605 unsigned int hookmask, unsigned int *i)
607 struct xt_match *match;
610 match = m->u.kernel.match;
611 ret = xt_check_match(match, AF_INET, m->u.match_size - sizeof(*m),
612 name, hookmask, ip->proto,
613 ip->invflags & IPT_INV_PROTO);
614 if (!ret && m->u.kernel.match->checkentry
615 && !m->u.kernel.match->checkentry(name, ip, match, m->data,
617 duprintf("ip_tables: check failed for `%s'.\n",
618 m->u.kernel.match->name);
/* find_check_match - resolve a match extension by name (auto-loading the
 * ipt_<name> module if needed), store it in the entry, then run
 * check_match(); on check failure the module reference is dropped. */
627 find_check_match(struct ipt_entry_match *m,
629 const struct ipt_ip *ip,
630 unsigned int hookmask,
633 struct xt_match *match;
636 match = try_then_request_module(xt_find_match(AF_INET, m->u.user.name,
638 "ipt_%s", m->u.user.name);
639 if (IS_ERR(match) || !match) {
640 duprintf("find_check_match: `%s' not found\n", m->u.user.name);
641 return match ? PTR_ERR(match) : -ENOENT;
643 m->u.kernel.match = match;
645 ret = check_match(m, name, ip, hookmask, i);
/* error path: release the module reference taken by the lookup */
651 module_put(m->u.kernel.match->me);
/* check_target - validate an entry's already-resolved target via the
 * generic xt_check_target() and then the target's own checkentry hook,
 * if it provides one. */
655 static inline int check_target(struct ipt_entry *e, const char *name)
657 struct ipt_entry_target *t;
658 struct xt_target *target;
661 t = ipt_get_target(e);
662 target = t->u.kernel.target;
663 ret = xt_check_target(target, AF_INET, t->u.target_size - sizeof(*t),
664 name, e->comefrom, e->ip.proto,
665 e->ip.invflags & IPT_INV_PROTO);
666 if (!ret && t->u.kernel.target->checkentry
667 && !t->u.kernel.target->checkentry(name, e, target, t->data,
669 duprintf("ip_tables: check failed for `%s'.\n",
670 t->u.kernel.target->name);
/* find_check_entry - fully validate one entry: structural checks, then
 * resolve+check every match extension, then resolve+check the target.
 * On any failure, previously-resolved matches (counted in j) and the
 * target module reference are released before returning the error. */
677 find_check_entry(struct ipt_entry *e, const char *name, unsigned int size,
680 struct ipt_entry_target *t;
681 struct xt_target *target;
685 ret = check_entry(e, name);
690 ret = IPT_MATCH_ITERATE(e, find_check_match, name, &e->ip,
693 goto cleanup_matches;
695 t = ipt_get_target(e);
696 target = try_then_request_module(xt_find_target(AF_INET,
699 "ipt_%s", t->u.user.name);
700 if (IS_ERR(target) || !target) {
701 duprintf("find_check_entry: `%s' not found\n", t->u.user.name);
702 ret = target ? PTR_ERR(target) : -ENOENT;
703 goto cleanup_matches;
705 t->u.kernel.target = target;
707 ret = check_target(e, name);
/* error unwind: drop target module, then the j matches checked so far */
714 module_put(t->u.kernel.target->me);
716 IPT_MATCH_ITERATE(e, cleanup_match, &j);
/* check_entry_size_and_hooks - first-pass validation while walking the
 * userspace blob: entry alignment and bounds, minimum size, recording of
 * hook entry/underflow offsets that land exactly on this entry, and
 * zeroing of the kernel-owned counters/comefrom fields. */
721 check_entry_size_and_hooks(struct ipt_entry *e,
722 struct xt_table_info *newinfo,
724 unsigned char *limit,
725 const unsigned int *hook_entries,
726 const unsigned int *underflows,
/* Entry must be properly aligned and leave room for at least a header. */
731 if ((unsigned long)e % __alignof__(struct ipt_entry) != 0
732 || (unsigned char *)e + sizeof(struct ipt_entry) >= limit) {
733 duprintf("Bad offset %p\n", e);
738 < sizeof(struct ipt_entry) + sizeof(struct ipt_entry_target)) {
739 duprintf("checking: element %p size %u\n",
744 /* Check hooks & underflows */
745 for (h = 0; h < NF_INET_NUMHOOKS; h++) {
746 if ((unsigned char *)e - base == hook_entries[h])
747 newinfo->hook_entry[h] = hook_entries[h];
748 if ((unsigned char *)e - base == underflows[h])
749 newinfo->underflow[h] = underflows[h];
752 /* FIXME: underflows must be unconditional, standard verdicts
753 < 0 (not IPT_RETURN). --RR */
755 /* Clear counters and comefrom */
756 e->counters = ((struct xt_counters) { 0, 0 });
/* cleanup_entry - tear down one fully-initialized entry: release all its
 * matches, run the target's destroy hook, and drop the target module
 * reference. i (if non-NULL) limits how many entries are cleaned. */
764 cleanup_entry(struct ipt_entry *e, unsigned int *i)
766 struct ipt_entry_target *t;
768 if (i && (*i)-- == 0)
771 /* Cleanup all matches */
772 IPT_MATCH_ITERATE(e, cleanup_match, NULL);
773 t = ipt_get_target(e);
774 if (t->u.kernel.target->destroy)
775 t->u.kernel.target->destroy(t->u.kernel.target, t->data);
776 module_put(t->u.kernel.target->me);
780 /* Checks and translates the user-supplied table segment (held in
/* translate_table - validate a complete userspace ruleset and turn it
 * into the kernel's working form: walk entries checking sizes and
 * recording hook offsets, verify all valid hooks got entry/underflow
 * points, run loop detection (mark_source_chains), run per-entry
 * extension checks, and finally replicate the blob to every CPU. */
783 translate_table(const char *name,
784 unsigned int valid_hooks,
785 struct xt_table_info *newinfo,
789 const unsigned int *hook_entries,
790 const unsigned int *underflows)
795 newinfo->size = size;
796 newinfo->number = number;
798 /* Init all hooks to impossible value. */
799 for (i = 0; i < NF_INET_NUMHOOKS; i++) {
800 newinfo->hook_entry[i] = 0xFFFFFFFF;
801 newinfo->underflow[i] = 0xFFFFFFFF;
804 duprintf("translate_table: size %u\n", newinfo->size);
806 /* Walk through entries, checking offsets. */
807 ret = IPT_ENTRY_ITERATE(entry0, newinfo->size,
808 check_entry_size_and_hooks,
812 hook_entries, underflows, &i);
/* i must now equal the userspace-declared entry count */
817 duprintf("translate_table: %u not %u entries\n",
822 /* Check hooks all assigned */
823 for (i = 0; i < NF_INET_NUMHOOKS; i++) {
824 /* Only hooks which are valid */
825 if (!(valid_hooks & (1 << i)))
827 if (newinfo->hook_entry[i] == 0xFFFFFFFF) {
828 duprintf("Invalid hook entry %u %u\n",
832 if (newinfo->underflow[i] == 0xFFFFFFFF) {
833 duprintf("Invalid underflow %u %u\n",
839 if (!mark_source_chains(newinfo, valid_hooks, entry0))
842 /* Finally, each sanity check must pass */
844 ret = IPT_ENTRY_ITERATE(entry0, newinfo->size,
845 find_check_entry, name, size, &i);
/* on failure, unwind the i entries that were successfully checked */
848 IPT_ENTRY_ITERATE(entry0, newinfo->size,
853 /* And one copy for every other CPU */
854 for_each_possible_cpu(i) {
855 if (newinfo->entries[i] && newinfo->entries[i] != entry0)
856 memcpy(newinfo->entries[i], entry0, newinfo->size);
/* add_entry_to_counter - accumulate one entry's byte/packet counters into
 * the totals array at index *i (iterator callback for get_counters). */
864 add_entry_to_counter(const struct ipt_entry *e,
865 struct xt_counters total[],
868 ADD_COUNTER(total[*i], e->counters.bcnt, e->counters.pcnt);
/* set_entry_to_counter - like add_entry_to_counter but overwrites the
 * slot instead of adding; used for the first CPU so no prior memset of
 * the totals array is needed. */
875 set_entry_to_counter(const struct ipt_entry *e,
876 struct ipt_counters total[],
879 SET_COUNTER(total[*i], e->counters.bcnt, e->counters.pcnt);
/* get_counters - sum per-CPU rule counters into a single array: the
 * current CPU's copy seeds the totals (SET), every other CPU's copy is
 * then added (ADD). Caller provides the required synchronization. */
886 get_counters(const struct xt_table_info *t,
887 struct xt_counters counters[])
893 /* Instead of clearing (by a previous call to memset())
894 * the counters and using adds, we set the counters
895 * with data used by 'current' CPU
896 * We dont care about preemption here.
898 curcpu = raw_smp_processor_id();
901 IPT_ENTRY_ITERATE(t->entries[curcpu],
903 set_entry_to_counter,
/* remaining CPUs are accumulated on top (curcpu is skipped) */
907 for_each_possible_cpu(cpu) {
911 IPT_ENTRY_ITERATE(t->entries[cpu],
913 add_entry_to_counter,
/* alloc_counters - vmalloc a counters array sized for the table and fill
 * it with an atomic snapshot (taken under the table write lock so the
 * per-CPU counters cannot change mid-sum). Returns ERR_PTR on OOM;
 * caller owns and must vfree the result. */
919 static inline struct xt_counters * alloc_counters(struct xt_table *table)
921 unsigned int countersize;
922 struct xt_counters *counters;
923 struct xt_table_info *private = table->private;
925 /* We need atomic snapshot of counters: rest doesn't change
926 (other than comefrom, which userspace doesn't care
928 countersize = sizeof(struct xt_counters) * private->number;
929 counters = vmalloc_node(countersize, numa_node_id());
931 if (counters == NULL)
932 return ERR_PTR(-ENOMEM);
934 /* First, sum counters... */
935 write_lock_bh(&table->lock);
936 get_counters(private, counters);
937 write_unlock_bh(&table->lock);
/* copy_entries_to_user - export the ruleset to userspace: bulk-copy this
 * CPU's entry blob, then patch each copied entry in place with the summed
 * counters and with the user-visible match/target names (the kernel blob
 * stores resolved pointers in those unions instead). */
943 copy_entries_to_user(unsigned int total_size,
944 struct xt_table *table,
945 void __user *userptr)
947 unsigned int off, num;
949 struct xt_counters *counters;
950 struct xt_table_info *private = table->private;
954 counters = alloc_counters(table);
955 if (IS_ERR(counters))
956 return PTR_ERR(counters);
958 /* choose the copy that is on our node/cpu, ...
959 * This choice is lazy (because current thread is
960 * allowed to migrate to another cpu)
962 loc_cpu_entry = private->entries[raw_smp_processor_id()];
963 if (copy_to_user(userptr, loc_cpu_entry, total_size) != 0) {
968 /* FIXME: use iterator macros --RR */
969 /* ... then go back and fix counters and names */
970 for (off = 0, num = 0; off < total_size; off += e->next_offset, num++){
972 struct ipt_entry_match *m;
973 struct ipt_entry_target *t;
975 e = (struct ipt_entry *)(loc_cpu_entry + off);
/* overwrite the copied entry's counters with the snapshot */
976 if (copy_to_user(userptr + off
977 + offsetof(struct ipt_entry, counters),
979 sizeof(counters[num])) != 0) {
/* walk the matches between the entry header and the target */
984 for (i = sizeof(struct ipt_entry);
985 i < e->target_offset;
986 i += m->u.match_size) {
989 if (copy_to_user(userptr + off + i
990 + offsetof(struct ipt_entry_match,
992 m->u.kernel.match->name,
993 strlen(m->u.kernel.match->name)+1)
/* finally, patch in the target's user-visible name */
1000 t = ipt_get_target(e);
1001 if (copy_to_user(userptr + off + e->target_offset
1002 + offsetof(struct ipt_entry_target,
1004 t->u.kernel.target->name,
1005 strlen(t->u.kernel.target->name)+1) != 0) {
1016 #ifdef CONFIG_COMPAT
/* compat_standard_from_user - convert a 32-bit standard-target verdict to
 * native form, adjusting positive (jump-offset) verdicts by the
 * accumulated compat layout delta. */
1017 static void compat_standard_from_user(void *dst, void *src)
1019 int v = *(compat_int_t *)src;
1022 v += xt_compat_calc_jump(AF_INET, v);
1023 memcpy(dst, &v, sizeof(v));
/* compat_standard_to_user - inverse of compat_standard_from_user: shrink
 * a native verdict back to 32-bit layout offsets and copy to userspace. */
1026 static int compat_standard_to_user(void __user *dst, void *src)
1028 compat_int_t cv = *(int *)src;
1031 cv -= xt_compat_calc_jump(AF_INET, cv);
1032 return copy_to_user(dst, &cv, sizeof(cv)) ? -EFAULT : 0;
/* compat_calc_match - iterator callback: accumulate the native-vs-compat
 * size difference contributed by one match extension. */
1036 compat_calc_match(struct ipt_entry_match *m, int *size)
1038 *size += xt_compat_match_offset(m->u.kernel.match);
/* compat_calc_entry - compute how much smaller one entry is in 32-bit
 * compat layout (entry header delta + every match delta + target delta),
 * register that delta with the xt compat-offset table, and shrink the
 * reported size plus any hook/underflow offsets that lie past this entry. */
1042 static int compat_calc_entry(struct ipt_entry *e,
1043 const struct xt_table_info *info,
1044 void *base, struct xt_table_info *newinfo)
1046 struct ipt_entry_target *t;
1047 unsigned int entry_offset;
1050 off = sizeof(struct ipt_entry) - sizeof(struct compat_ipt_entry);
1051 entry_offset = (void *)e - base;
1052 IPT_MATCH_ITERATE(e, compat_calc_match, &off);
1053 t = ipt_get_target(e);
1054 off += xt_compat_target_offset(t->u.kernel.target);
1055 newinfo->size -= off;
1056 ret = xt_compat_add_offset(AF_INET, entry_offset, off);
/* shift every hook/underflow offset located after this entry */
1060 for (i = 0; i < NF_INET_NUMHOOKS; i++) {
1061 if (info->hook_entry[i] &&
1062 (e < (struct ipt_entry *)(base + info->hook_entry[i])))
1063 newinfo->hook_entry[i] -= off;
1064 if (info->underflow[i] &&
1065 (e < (struct ipt_entry *)(base + info->underflow[i])))
1066 newinfo->underflow[i] -= off;
/* compat_table_info - build an xt_table_info describing the table as a
 * 32-bit process would see it: copy the header fields, then walk this
 * CPU's entries with compat_calc_entry to shrink sizes and offsets. */
1071 static int compat_table_info(const struct xt_table_info *info,
1072 struct xt_table_info *newinfo)
1074 void *loc_cpu_entry;
1076 if (!newinfo || !info)
1079 /* we dont care about newinfo->entries[] */
1080 memcpy(newinfo, info, offsetof(struct xt_table_info, entries));
1081 newinfo->initial_entries = 0;
1082 loc_cpu_entry = info->entries[raw_smp_processor_id()];
1083 return IPT_ENTRY_ITERATE(loc_cpu_entry, info->size,
1084 compat_calc_entry, info, loc_cpu_entry,
/* get_info - IPT_SO_GET_INFO handler: look up the named table (loading
 * iptable_<name> if needed) and return its hook offsets, entry count and
 * size. When called from a 32-bit process (compat != 0) the sizes are
 * recomputed with compat_table_info under the xt compat lock. */
1089 static int get_info(void __user *user, int *len, int compat)
1091 char name[IPT_TABLE_MAXNAMELEN];
1095 if (*len != sizeof(struct ipt_getinfo)) {
1096 duprintf("length %u != %zu\n", *len,
1097 sizeof(struct ipt_getinfo));
1101 if (copy_from_user(name, user, sizeof(name)) != 0)
/* force NUL termination of the userspace-supplied table name */
1104 name[IPT_TABLE_MAXNAMELEN-1] = '\0';
1105 #ifdef CONFIG_COMPAT
1107 xt_compat_lock(AF_INET);
1109 t = try_then_request_module(xt_find_table_lock(AF_INET, name),
1110 "iptable_%s", name);
1111 if (t && !IS_ERR(t)) {
1112 struct ipt_getinfo info;
1113 struct xt_table_info *private = t->private;
1115 #ifdef CONFIG_COMPAT
/* compat caller: report sizes as the 32-bit layout sees them */
1117 struct xt_table_info tmp;
1118 ret = compat_table_info(private, &tmp);
1119 xt_compat_flush_offsets(AF_INET);
1123 info.valid_hooks = t->valid_hooks;
1124 memcpy(info.hook_entry, private->hook_entry,
1125 sizeof(info.hook_entry));
1126 memcpy(info.underflow, private->underflow,
1127 sizeof(info.underflow));
1128 info.num_entries = private->number;
1129 info.size = private->size;
1130 strcpy(info.name, name);
1132 if (copy_to_user(user, &info, *len) != 0)
1140 ret = t ? PTR_ERR(t) : -ENOENT;
1141 #ifdef CONFIG_COMPAT
1143 xt_compat_unlock(AF_INET);
/* get_entries - IPT_SO_GET_ENTRIES handler: validate the requested size
 * against the live table and copy the full ruleset (entries + counters +
 * extension names) back to userspace via copy_entries_to_user. */
1149 get_entries(struct ipt_get_entries __user *uptr, int *len)
1152 struct ipt_get_entries get;
1155 if (*len < sizeof(get)) {
1156 duprintf("get_entries: %u < %zu\n", *len, sizeof(get));
1159 if (copy_from_user(&get, uptr, sizeof(get)) != 0)
/* caller-declared buffer must match header + declared blob size */
1161 if (*len != sizeof(struct ipt_get_entries) + get.size) {
1162 duprintf("get_entries: %u != %zu\n",
1163 *len, sizeof(get) + get.size);
1167 t = xt_find_table_lock(AF_INET, get.name);
1168 if (t && !IS_ERR(t)) {
1169 struct xt_table_info *private = t->private;
1170 duprintf("t->private->number = %u\n", private->number);
1171 if (get.size == private->size)
1172 ret = copy_entries_to_user(private->size,
1173 t, uptr->entrytable);
1175 duprintf("get_entries: I've got %u not %u!\n",
1176 private->size, get.size);
1182 ret = t ? PTR_ERR(t) : -ENOENT;
/* __do_replace - common tail of native and compat SO_SET_REPLACE: swap a
 * validated new ruleset into the named table, adjust module refcounts by
 * rule-count deltas, snapshot the old rules' counters to userspace, and
 * tear down the old table. */
1188 __do_replace(const char *name, unsigned int valid_hooks,
1189 struct xt_table_info *newinfo, unsigned int num_counters,
1190 void __user *counters_ptr)
1194 struct xt_table_info *oldinfo;
1195 struct xt_counters *counters;
1196 void *loc_cpu_old_entry;
/* buffer for the old counters handed back to userspace */
1199 counters = vmalloc(num_counters * sizeof(struct xt_counters));
1205 t = try_then_request_module(xt_find_table_lock(AF_INET, name),
1206 "iptable_%s", name);
1207 if (!t || IS_ERR(t)) {
1208 ret = t ? PTR_ERR(t) : -ENOENT;
1209 goto free_newinfo_counters_untrans;
/* the replacement must cover exactly the same hooks as the table */
1213 if (valid_hooks != t->valid_hooks) {
1214 duprintf("Valid hook crap: %08X vs %08X\n",
1215 valid_hooks, t->valid_hooks);
1220 oldinfo = xt_replace_table(t, num_counters, newinfo, &ret);
1224 /* Update module usage count based on number of rules */
1225 duprintf("do_replace: oldnum=%u, initnum=%u, newnum=%u\n",
1226 oldinfo->number, oldinfo->initial_entries, newinfo->number);
1227 if ((oldinfo->number > oldinfo->initial_entries) ||
1228 (newinfo->number <= oldinfo->initial_entries))
1230 if ((oldinfo->number > oldinfo->initial_entries) &&
1231 (newinfo->number <= oldinfo->initial_entries))
1234 /* Get the old counters. */
1235 get_counters(oldinfo, counters);
1236 /* Decrease module usage counts and free resource */
1237 loc_cpu_old_entry = oldinfo->entries[raw_smp_processor_id()];
1238 IPT_ENTRY_ITERATE(loc_cpu_old_entry, oldinfo->size, cleanup_entry,
1240 xt_free_table_info(oldinfo);
1241 if (copy_to_user(counters_ptr, counters,
1242 sizeof(struct xt_counters) * num_counters) != 0)
1251 free_newinfo_counters_untrans:
/* do_replace - native IPT_SO_SET_REPLACE handler: copy the ipt_replace
 * header and rule blob from userspace, run translate_table to validate
 * it, then hand off to __do_replace; cleanup_entry unwinds the parsed
 * entries on failure. */
1258 do_replace(void __user *user, unsigned int len)
1261 struct ipt_replace tmp;
1262 struct xt_table_info *newinfo;
1263 void *loc_cpu_entry;
1265 if (copy_from_user(&tmp, user, sizeof(tmp)) != 0)
1268 /* overflow check */
1269 if (tmp.num_counters >= INT_MAX / sizeof(struct xt_counters))
1272 newinfo = xt_alloc_table_info(tmp.size);
1276 /* choose the copy that is on our node/cpu */
1277 loc_cpu_entry = newinfo->entries[raw_smp_processor_id()];
1278 if (copy_from_user(loc_cpu_entry, user + sizeof(tmp),
1284 ret = translate_table(tmp.name, tmp.valid_hooks,
1285 newinfo, loc_cpu_entry, tmp.size, tmp.num_entries,
1286 tmp.hook_entry, tmp.underflow);
1290 duprintf("ip_tables: Translated table\n");
1292 ret = __do_replace(tmp.name, tmp.valid_hooks, newinfo,
1293 tmp.num_counters, tmp.counters);
1295 goto free_newinfo_untrans;
1298 free_newinfo_untrans:
1299 IPT_ENTRY_ITERATE(loc_cpu_entry, newinfo->size, cleanup_entry, NULL);
1301 xt_free_table_info(newinfo);
1305 /* We're lazy, and add to the first CPU; overflow works its fey magic
1306 * and everything is OK. */
/* add_counter_to_entry - iterator callback for do_add_counters: add the
 * userspace-supplied byte/packet deltas at index *i to this entry. */
1308 add_counter_to_entry(struct ipt_entry *e,
1309 const struct xt_counters addme[],
1313 duprintf("add_counter: Entry %u %lu/%lu + %lu/%lu\n",
1315 (long unsigned int)e->counters.pcnt,
1316 (long unsigned int)e->counters.bcnt,
1317 (long unsigned int)addme[*i].pcnt,
1318 (long unsigned int)addme[*i].bcnt);
1321 ADD_COUNTER(e->counters, addme[*i].bcnt, addme[*i].pcnt);
/* do_add_counters - IPT_SO_SET_ADD_COUNTERS handler: read an
 * xt_counters_info header (native or 32-bit compat layout), validate the
 * declared counter count against both the message length and the live
 * table, then add the deltas to this CPU's rule counters under the table
 * write lock. */
1328 do_add_counters(void __user *user, unsigned int len, int compat)
1331 struct xt_counters_info tmp;
1332 struct xt_counters *paddc;
1333 unsigned int num_counters;
1338 struct xt_table_info *private;
1340 void *loc_cpu_entry;
1341 #ifdef CONFIG_COMPAT
1342 struct compat_xt_counters_info compat_tmp;
/* header size differs between compat and native callers */
1346 size = sizeof(struct compat_xt_counters_info);
1351 size = sizeof(struct xt_counters_info);
1354 if (copy_from_user(ptmp, user, size) != 0)
1357 #ifdef CONFIG_COMPAT
1359 num_counters = compat_tmp.num_counters;
1360 name = compat_tmp.name;
1364 num_counters = tmp.num_counters;
/* total length must be header + exactly num_counters records */
1368 if (len != size + num_counters * sizeof(struct xt_counters))
1371 paddc = vmalloc_node(len - size, numa_node_id());
1375 if (copy_from_user(paddc, user + size, len - size) != 0) {
1380 t = xt_find_table_lock(AF_INET, name);
1381 if (!t || IS_ERR(t)) {
1382 ret = t ? PTR_ERR(t) : -ENOENT;
1386 write_lock_bh(&t->lock);
1387 private = t->private;
1388 if (private->number != num_counters) {
1390 goto unlock_up_free;
1394 /* Choose the copy that is on our node */
1395 loc_cpu_entry = private->entries[raw_smp_processor_id()];
1396 IPT_ENTRY_ITERATE(loc_cpu_entry,
1398 add_counter_to_entry,
1402 write_unlock_bh(&t->lock);
1411 #ifdef CONFIG_COMPAT
/* 32-bit layout of struct ipt_replace, as received from a compat
 * process: pointers become compat_uptr_t and the trailing entries use
 * the packed compat_ipt_entry layout. */
1412 struct compat_ipt_replace {
1413 char name[IPT_TABLE_MAXNAMELEN];
1417 u32 hook_entry[NF_INET_NUMHOOKS];
1418 u32 underflow[NF_INET_NUMHOOKS];
1420 compat_uptr_t counters; /* struct ipt_counters * */
1421 struct compat_ipt_entry entries[0];
/* compat_copy_entry_to_user - serialize one native entry into the 32-bit
 * layout at *dstptr: copy the header, overwrite its counters with the
 * snapshot, convert each match and the target via the xt compat helpers,
 * then fix up the entry's target/next offsets for the shrunken layout. */
1425 compat_copy_entry_to_user(struct ipt_entry *e, void __user **dstptr,
1426 compat_uint_t *size, struct xt_counters *counters,
1429 struct ipt_entry_target *t;
1430 struct compat_ipt_entry __user *ce;
1431 u_int16_t target_offset, next_offset;
1432 compat_uint_t origsize;
1437 ce = (struct compat_ipt_entry __user *)*dstptr;
1438 if (copy_to_user(ce, e, sizeof(struct ipt_entry)))
1441 if (copy_to_user(&ce->counters, &counters[*i], sizeof(counters[*i])))
1444 *dstptr += sizeof(struct compat_ipt_entry);
1445 *size -= sizeof(struct ipt_entry) - sizeof(struct compat_ipt_entry);
1447 ret = IPT_MATCH_ITERATE(e, xt_compat_match_to_user, dstptr, size);
/* offsets shrink by however much the layout has compacted so far */
1448 target_offset = e->target_offset - (origsize - *size);
1451 t = ipt_get_target(e);
1452 ret = xt_compat_target_to_user(t, dstptr, size);
1456 next_offset = e->next_offset - (origsize - *size);
1457 if (put_user(target_offset, &ce->target_offset))
1459 if (put_user(next_offset, &ce->next_offset))
/* compat_find_calc_match - compat counterpart of find_check_match's
 * lookup phase: resolve the match module by name/revision, record it in
 * the entry, and add its native-vs-compat size delta to *size. */
1469 compat_find_calc_match(struct ipt_entry_match *m,
1471 const struct ipt_ip *ip,
1472 unsigned int hookmask,
1475 struct xt_match *match;
1477 match = try_then_request_module(xt_find_match(AF_INET, m->u.user.name,
1478 m->u.user.revision),
1479 "ipt_%s", m->u.user.name);
1480 if (IS_ERR(match) || !match) {
1481 duprintf("compat_check_calc_match: `%s' not found\n",
1483 return match ? PTR_ERR(match) : -ENOENT;
1485 m->u.kernel.match = match;
1486 *size += xt_compat_match_offset(match);
/* compat_release_match - drop the module reference taken by
 * compat_find_calc_match; i (if non-NULL) limits how many are released. */
1493 compat_release_match(struct ipt_entry_match *m, unsigned int *i)
1495 if (i && (*i)-- == 0)
1498 module_put(m->u.kernel.match->me);
/* compat_release_entry - release all module references held by one
 * compat-layout entry (every match, then the target). */
1503 compat_release_entry(struct compat_ipt_entry *e, unsigned int *i)
1505 struct ipt_entry_target *t;
1507 if (i && (*i)-- == 0)
1510 /* Cleanup all matches */
1511 COMPAT_IPT_MATCH_ITERATE(e, compat_release_match, NULL);
1512 t = compat_ipt_get_target(e);
1513 module_put(t->u.kernel.target->me);
/* check_compat_entry_size_and_hooks - compat counterpart of
 * check_entry_size_and_hooks plus the extension-resolution pass: validate
 * alignment/bounds of a compat-layout entry, resolve its matches and
 * target (accumulating the layout size delta in off and registering it
 * with xt_compat_add_offset), record hook/underflow offsets, and zero the
 * counters. Resolved modules are released on the error paths. */
1518 check_compat_entry_size_and_hooks(struct compat_ipt_entry *e,
1519 struct xt_table_info *newinfo,
1521 unsigned char *base,
1522 unsigned char *limit,
1523 unsigned int *hook_entries,
1524 unsigned int *underflows,
1528 struct ipt_entry_target *t;
1529 struct xt_target *target;
1530 unsigned int entry_offset;
1533 duprintf("check_compat_entry_size_and_hooks %p\n", e);
1534 if ((unsigned long)e % __alignof__(struct compat_ipt_entry) != 0
1535 || (unsigned char *)e + sizeof(struct compat_ipt_entry) >= limit) {
1536 duprintf("Bad offset %p, limit = %p\n", e, limit);
1540 if (e->next_offset < sizeof(struct compat_ipt_entry) +
1541 sizeof(struct compat_xt_entry_target)) {
1542 duprintf("checking: element %p size %u\n",
1547 /* For purposes of check_entry casting the compat entry is fine */
1548 ret = check_entry((struct ipt_entry *)e, name);
1552 off = sizeof(struct ipt_entry) - sizeof(struct compat_ipt_entry);
1553 entry_offset = (void *)e - (void *)base;
1555 ret = COMPAT_IPT_MATCH_ITERATE(e, compat_find_calc_match, name,
1556 &e->ip, e->comefrom, &off, &j);
1558 goto release_matches;
1560 t = compat_ipt_get_target(e);
1561 target = try_then_request_module(xt_find_target(AF_INET,
1563 t->u.user.revision),
1564 "ipt_%s", t->u.user.name);
1565 if (IS_ERR(target) || !target) {
1566 duprintf("check_compat_entry_size_and_hooks: `%s' not found\n",
1568 ret = target ? PTR_ERR(target) : -ENOENT;
1569 goto release_matches;
1571 t->u.kernel.target = target;
1573 off += xt_compat_target_offset(target);
1575 ret = xt_compat_add_offset(AF_INET, entry_offset, off);
1579 /* Check hooks & underflows */
1580 for (h = 0; h < NF_INET_NUMHOOKS; h++) {
1581 if ((unsigned char *)e - base == hook_entries[h])
1582 newinfo->hook_entry[h] = hook_entries[h];
1583 if ((unsigned char *)e - base == underflows[h])
1584 newinfo->underflow[h] = underflows[h];
1587 /* Clear counters and comefrom */
1588 memset(&e->counters, 0, sizeof(e->counters));
/* error unwind: drop the target module, then the j resolved matches */
1595 module_put(t->u.kernel.target->me);
1597 IPT_MATCH_ITERATE(e, compat_release_match, &j);
/*
 * Convert one checked compat entry to native layout at *dstptr.
 * Copies the header and counters, expands each match and the target via
 * the xt_compat_*_from_user helpers (which advance *dstptr and adjust
 * *size), rewrites target_offset/next_offset for the new layout, and
 * shifts any hook entry/underflow offsets that lie beyond this entry by
 * the accumulated size growth (origsize - *size).
 * NOTE(review): the assignment of origsize and some return/brace lines
 * are elided in this view.
 */
1602 compat_copy_entry_from_user(struct compat_ipt_entry *e, void **dstptr,
1603 unsigned int *size, const char *name,
1604 struct xt_table_info *newinfo, unsigned char *base)
1606 struct ipt_entry_target *t;
1607 struct xt_target *target;
1608 struct ipt_entry *de;
1609 unsigned int origsize;
1614 de = (struct ipt_entry *)*dstptr;
1615 memcpy(de, e, sizeof(struct ipt_entry));
1616 memcpy(&de->counters, &e->counters, sizeof(e->counters));
1618 *dstptr += sizeof(struct ipt_entry);
1619 *size += sizeof(struct ipt_entry) - sizeof(struct compat_ipt_entry);
1621 ret = COMPAT_IPT_MATCH_ITERATE(e, xt_compat_match_from_user,
	/* offsets shrink by however much the entry grew so far */
1625 de->target_offset = e->target_offset - (origsize - *size);
1626 t = compat_ipt_get_target(e);
1627 target = t->u.kernel.target;
1628 xt_compat_target_from_user(t, dstptr, size);
1630 de->next_offset = e->next_offset - (origsize - *size);
	/* fix up hook offsets that point past this (now larger) entry */
1631 for (h = 0; h < NF_INET_NUMHOOKS; h++) {
1632 if ((unsigned char *)de - base < newinfo->hook_entry[h])
1633 newinfo->hook_entry[h] -= origsize - *size;
1634 if ((unsigned char *)de - base < newinfo->underflow[h])
1635 newinfo->underflow[h] -= origsize - *size;
/*
 * Final per-entry validation after compat->native translation: run the
 * native check_match over all matches, then check_target.  On failure,
 * matches checked so far (counted in j, elided here) are cleaned up via
 * cleanup_match under the cleanup_matches label.
 */
1640 static inline int compat_check_entry(struct ipt_entry *e, const char *name,
1646 ret = IPT_MATCH_ITERATE(e, check_match, name, &e->ip,
1649 goto cleanup_matches;
1651 ret = check_target(e, name);
1653 goto cleanup_matches;
	/* unwind: release only the j matches that passed check_match */
1659 IPT_MATCH_ITERATE(e, cleanup_match, &j);
/*
 * Translate a whole compat (32-bit userland on 64-bit kernel) ruleset
 * into native layout.  Phase 1 (under xt_compat_lock): walk the compat
 * blob checking every entry and computing the native size.  Phase 2:
 * allocate the native table, copy/expand each entry, then validate the
 * result with mark_source_chains() and compat_check_entry(), and
 * replicate the translated entries to every possible CPU.
 * On any failure the partially-translated state is unwound: per-entry
 * module refs dropped, offset records flushed, table infos freed.
 * NOTE(review): allocation of @info, several error checks and returns
 * are elided in this view; unwind labels are inferred from the visible
 * cleanup calls — confirm against the full source.
 */
1664 translate_compat_table(const char *name,
1665 unsigned int valid_hooks,
1666 struct xt_table_info **pinfo,
1668 unsigned int total_size,
1669 unsigned int number,
1670 unsigned int *hook_entries,
1671 unsigned int *underflows)
1674 struct xt_table_info *newinfo, *info;
1675 void *pos, *entry0, *entry1;
1682 info->number = number;
1684 /* Init all hooks to impossible value. */
1685 for (i = 0; i < NF_INET_NUMHOOKS; i++) {
1686 info->hook_entry[i] = 0xFFFFFFFF;
1687 info->underflow[i] = 0xFFFFFFFF;
1690 duprintf("translate_compat_table: size %u\n", info->size);
	/* lock protects the per-AF compat offset table built below */
1692 xt_compat_lock(AF_INET);
1693 /* Walk through entries, checking offsets. */
1694 ret = COMPAT_IPT_ENTRY_ITERATE(entry0, total_size,
1695 check_compat_entry_size_and_hooks,
1696 info, &size, entry0,
1697 entry0 + total_size,
1698 hook_entries, underflows, &j, name);
1704 duprintf("translate_compat_table: %u not %u entries\n",
1709 /* Check hooks all assigned */
1710 for (i = 0; i < NF_INET_NUMHOOKS; i++) {
1711 /* Only hooks which are valid */
1712 if (!(valid_hooks & (1 << i)))
1714 if (info->hook_entry[i] == 0xFFFFFFFF) {
1715 duprintf("Invalid hook entry %u %u\n",
1716 i, hook_entries[i]);
1719 if (info->underflow[i] == 0xFFFFFFFF) {
1720 duprintf("Invalid underflow %u %u\n",
	/* size now reflects the (larger) native layout */
1727 newinfo = xt_alloc_table_info(size);
1731 newinfo->number = number;
1732 for (i = 0; i < NF_INET_NUMHOOKS; i++) {
1733 newinfo->hook_entry[i] = info->hook_entry[i];
1734 newinfo->underflow[i] = info->underflow[i];
1736 entry1 = newinfo->entries[raw_smp_processor_id()];
1739 ret = COMPAT_IPT_ENTRY_ITERATE(entry0, total_size,
1740 compat_copy_entry_from_user,
1741 &pos, &size, name, newinfo, entry1);
	/* offset records are only needed during translation */
1742 xt_compat_flush_offsets(AF_INET);
1743 xt_compat_unlock(AF_INET);
1748 if (!mark_source_chains(newinfo, valid_hooks, entry1))
1752 ret = IPT_ENTRY_ITERATE(entry1, newinfo->size, compat_check_entry,
	/* on check failure: release refs on the rest of the compat blob,
	 * clean the i entries already validated, free the new table */
1756 COMPAT_IPT_ENTRY_ITERATE_CONTINUE(entry0, newinfo->size, i,
1757 compat_release_entry, &j);
1758 IPT_ENTRY_ITERATE(entry1, newinfo->size, cleanup_entry, &i);
1759 xt_free_table_info(newinfo);
1763 /* And one copy for every other CPU */
1764 for_each_possible_cpu(i)
1765 if (newinfo->entries[i] && newinfo->entries[i] != entry1)
1766 memcpy(newinfo->entries[i], entry1, newinfo->size);
1770 xt_free_table_info(info);
	/* error paths below: undo allocations and compat bookkeeping */
1774 xt_free_table_info(newinfo);
1776 COMPAT_IPT_ENTRY_ITERATE(entry0, total_size, compat_release_entry, &j);
1779 xt_compat_flush_offsets(AF_INET);
1780 xt_compat_unlock(AF_INET);
/*
 * IPT_SO_SET_REPLACE handler for compat userspace: copy in the compat
 * replace header and rule blob, sanity-check sizes against overflow,
 * translate to native layout, and install via __do_replace().
 * If installation fails after successful translation, the translated
 * entries are cleaned up and the table info freed.
 */
1785 compat_do_replace(void __user *user, unsigned int len)
1788 struct compat_ipt_replace tmp;
1789 struct xt_table_info *newinfo;
1790 void *loc_cpu_entry;
1792 if (copy_from_user(&tmp, user, sizeof(tmp)) != 0)
1795 /* overflow check */
1796 if (tmp.size >= INT_MAX / num_possible_cpus())
1798 if (tmp.num_counters >= INT_MAX / sizeof(struct xt_counters))
1801 newinfo = xt_alloc_table_info(tmp.size);
1805 /* choose the copy that is on our node/cpu */
1806 loc_cpu_entry = newinfo->entries[raw_smp_processor_id()];
1807 if (copy_from_user(loc_cpu_entry, user + sizeof(tmp),
1813 ret = translate_compat_table(tmp.name, tmp.valid_hooks,
1814 &newinfo, &loc_cpu_entry, tmp.size,
1815 tmp.num_entries, tmp.hook_entry,
1820 duprintf("compat_do_replace: Translated table\n");
	/* counters pointer is a compat_uptr_t; widen it for the kernel */
1822 ret = __do_replace(tmp.name, tmp.valid_hooks, newinfo,
1823 tmp.num_counters, compat_ptr(tmp.counters));
1825 goto free_newinfo_untrans;
1828 free_newinfo_untrans:
1829 IPT_ENTRY_ITERATE(loc_cpu_entry, newinfo->size, cleanup_entry, NULL);
1831 xt_free_table_info(newinfo);
/*
 * Compat setsockopt dispatcher: requires CAP_NET_ADMIN, then routes
 * IPT_SO_SET_REPLACE to the compat replace path and ADD_COUNTERS to
 * do_add_counters() in compat mode (third argument 1).
 */
1836 compat_do_ipt_set_ctl(struct sock *sk, int cmd, void __user *user,
1841 if (!capable(CAP_NET_ADMIN))
1845 case IPT_SO_SET_REPLACE:
1846 ret = compat_do_replace(user, len);
1849 case IPT_SO_SET_ADD_COUNTERS:
1850 ret = do_add_counters(user, len, 1);
1854 duprintf("do_ipt_set_ctl: unknown request %i\n", cmd);
/*
 * Compat layout of the IPT_SO_GET_ENTRIES request/reply: table name
 * followed by a flexible array of compat entries.
 * NOTE(review): at least one field (the size member used by
 * compat_get_entries() as get.size) is elided in this view.
 */
1861 struct compat_ipt_get_entries {
1862 char name[IPT_TABLE_MAXNAMELEN];
1864 struct compat_ipt_entry entrytable[0];
/*
 * Copy a table's entries to compat userspace: snapshot counters with
 * alloc_counters(), then walk the local-CPU copy of the entries and
 * shrink each to compat layout via compat_copy_entry_to_user.
 */
1868 compat_copy_entries_to_user(unsigned int total_size, struct xt_table *table,
1869 void __user *userptr)
1871 struct xt_counters *counters;
1872 struct xt_table_info *private = table->private;
1876 void *loc_cpu_entry;
1879 counters = alloc_counters(table);
1880 if (IS_ERR(counters))
1881 return PTR_ERR(counters);
1883 /* choose the copy that is on our node/cpu, ...
1884 * This choice is lazy (because current thread is
1885 * allowed to migrate to another cpu)
1887 loc_cpu_entry = private->entries[raw_smp_processor_id()];
1890 ret = IPT_ENTRY_ITERATE(loc_cpu_entry, total_size,
1891 compat_copy_entry_to_user,
1892 &pos, &size, counters, &i);
/*
 * IPT_SO_GET_ENTRIES handler for compat userspace: validate that *len
 * matches header + declared blob size, look up the table, verify the
 * caller's size against the compat-translated table size, then copy
 * the entries out in compat layout.  Runs under xt_compat_lock since
 * compat_table_info() populates per-entry offset records that are
 * flushed before unlock.
 */
1899 compat_get_entries(struct compat_ipt_get_entries __user *uptr, int *len)
1902 struct compat_ipt_get_entries get;
1905 if (*len < sizeof(get)) {
1906 duprintf("compat_get_entries: %u < %zu\n", *len, sizeof(get));
1910 if (copy_from_user(&get, uptr, sizeof(get)) != 0)
1913 if (*len != sizeof(struct compat_ipt_get_entries) + get.size) {
1914 duprintf("compat_get_entries: %u != %zu\n",
1915 *len, sizeof(get) + get.size);
1919 xt_compat_lock(AF_INET);
1920 t = xt_find_table_lock(AF_INET, get.name);
1921 if (t && !IS_ERR(t)) {
1922 struct xt_table_info *private = t->private;
1923 struct xt_table_info info;
1924 duprintf("t->private->number = %u\n", private->number);
	/* info.size is the table size as seen by compat userspace */
1925 ret = compat_table_info(private, &info);
1926 if (!ret && get.size == info.size) {
1927 ret = compat_copy_entries_to_user(private->size,
1928 t, uptr->entrytable);
1930 duprintf("compat_get_entries: I've got %u not %u!\n",
1931 private->size, get.size);
1934 xt_compat_flush_offsets(AF_INET);
1938 ret = t ? PTR_ERR(t) : -ENOENT;
1940 xt_compat_unlock(AF_INET);
1944 static int do_ipt_get_ctl(struct sock *, int, void __user *, int *);
/*
 * Compat getsockopt dispatcher: requires CAP_NET_ADMIN; GET_INFO and
 * GET_ENTRIES take compat-aware paths, everything else falls through
 * to the native do_ipt_get_ctl() (forward-declared above).
 */
1947 compat_do_ipt_get_ctl(struct sock *sk, int cmd, void __user *user, int *len)
1951 if (!capable(CAP_NET_ADMIN))
1955 case IPT_SO_GET_INFO:
1956 ret = get_info(user, len, 1);
1958 case IPT_SO_GET_ENTRIES:
1959 ret = compat_get_entries(user, len);
1962 ret = do_ipt_get_ctl(sk, cmd, user, len);
/*
 * Native setsockopt dispatcher: requires CAP_NET_ADMIN, routes
 * IPT_SO_SET_REPLACE to do_replace() and ADD_COUNTERS to
 * do_add_counters() in native mode (third argument 0).
 */
1969 do_ipt_set_ctl(struct sock *sk, int cmd, void __user *user, unsigned int len)
1973 if (!capable(CAP_NET_ADMIN))
1977 case IPT_SO_SET_REPLACE:
1978 ret = do_replace(user, len);
1981 case IPT_SO_SET_ADD_COUNTERS:
1982 ret = do_add_counters(user, len, 0);
1986 duprintf("do_ipt_set_ctl: unknown request %i\n", cmd);
/*
 * Native getsockopt dispatcher: requires CAP_NET_ADMIN.  Handles
 * GET_INFO, GET_ENTRIES, and revision queries for matches/targets
 * (demand-loading "ipt_<name>" if the extension is absent).
 * NOTE(review): the error-return statements inside the revision case
 * are elided in this view.
 */
1994 do_ipt_get_ctl(struct sock *sk, int cmd, void __user *user, int *len)
1998 if (!capable(CAP_NET_ADMIN))
2002 case IPT_SO_GET_INFO:
2003 ret = get_info(user, len, 0);
2006 case IPT_SO_GET_ENTRIES:
2007 ret = get_entries(user, len);
2010 case IPT_SO_GET_REVISION_MATCH:
2011 case IPT_SO_GET_REVISION_TARGET: {
2012 struct ipt_get_revision rev;
2015 if (*len != sizeof(rev)) {
2019 if (copy_from_user(&rev, user, sizeof(rev)) != 0) {
2024 if (cmd == IPT_SO_GET_REVISION_TARGET)
2029 try_then_request_module(xt_find_revision(AF_INET, rev.name,
2032 "ipt_%s", rev.name);
2037 duprintf("do_ipt_get_ctl: unknown request %i\n", cmd);
/*
 * Register a new iptables table from an initial replace blob: allocate
 * a table info, copy the template entries into the local-CPU slot,
 * translate/validate them, then hand the result to xt_register_table()
 * with a zeroed bootstrap info.  Frees newinfo on either translation
 * or registration failure.
 */
2044 int ipt_register_table(struct xt_table *table, const struct ipt_replace *repl)
2047 struct xt_table_info *newinfo;
2048 struct xt_table_info bootstrap
2049 = { 0, 0, 0, { 0 }, { 0 }, { } };
2050 void *loc_cpu_entry;
2052 newinfo = xt_alloc_table_info(repl->size);
2056 /* choose the copy on our node/cpu, but dont care about preemption */
2057 loc_cpu_entry = newinfo->entries[raw_smp_processor_id()];
2058 memcpy(loc_cpu_entry, repl->entries, repl->size);
2060 ret = translate_table(table->name, table->valid_hooks,
2061 newinfo, loc_cpu_entry, repl->size,
2066 xt_free_table_info(newinfo);
2070 ret = xt_register_table(table, &bootstrap, newinfo);
2072 xt_free_table_info(newinfo);
/*
 * Unregister a table and release everything it owned: drop per-entry
 * match/target module references via cleanup_entry, then free the
 * xt_table_info returned by xt_unregister_table().
 */
2079 void ipt_unregister_table(struct xt_table *table)
2081 struct xt_table_info *private;
2082 void *loc_cpu_entry;
2084 private = xt_unregister_table(table);
2086 /* Decrease module usage counts and free resources */
2087 loc_cpu_entry = private->entries[raw_smp_processor_id()];
2088 IPT_ENTRY_ITERATE(loc_cpu_entry, private->size, cleanup_entry, NULL);
2089 xt_free_table_info(private);
2092 /* Returns 1 if the type and code is matched by the range, 0 otherwise */
2094 icmp_type_code_match(u_int8_t test_type, u_int8_t min_code, u_int8_t max_code,
2095 u_int8_t type, u_int8_t code,
	/* 0xFF is the userspace wildcard: match any ICMP type/code */
2098 return ((test_type == 0xFF) ||
2099 (type == test_type && code >= min_code && code <= max_code))
/*
 * xt_match ->match hook for the built-in "icmp" match: reads the ICMP
 * header at @protoff and tests type/code against the rule's range,
 * honouring the IPT_ICMP_INV inversion flag.  Truncated ICMP packets
 * cannot be examined and are flagged for drop (see the duprintf path).
 * NOTE(review): the fragment check and hotdrop handling referenced by
 * the comments are partially elided in this view.
 */
2104 icmp_match(const struct sk_buff *skb,
2105 const struct net_device *in,
2106 const struct net_device *out,
2107 const struct xt_match *match,
2108 const void *matchinfo,
2110 unsigned int protoff,
2113 struct icmphdr _icmph, *ic;
2114 const struct ipt_icmp *icmpinfo = matchinfo;
2116 /* Must not be a fragment. */
2120 ic = skb_header_pointer(skb, protoff, sizeof(_icmph), &_icmph);
2122 /* We've been asked to examine this packet, and we
2123 * can't. Hence, no choice but to drop.
2125 duprintf("Dropping evil ICMP tinygram.\n");
2130 return icmp_type_code_match(icmpinfo->type,
2134 !!(icmpinfo->invflags&IPT_ICMP_INV));
2137 /* Called when user tries to insert an entry of this type. */
2139 icmp_checkentry(const char *tablename,
2141 const struct xt_match *match,
2143 unsigned int hook_mask)
2145 const struct ipt_icmp *icmpinfo = matchinfo;
2147 /* Must specify no unknown invflags */
	/* IPT_ICMP_INV is the only inversion bit this match understands */
2148 return !(icmpinfo->invflags & ~IPT_ICMP_INV);
2151 /* The built-in targets: standard (NULL) and error. */
/*
 * Standard target: its targinfo is a plain int verdict, with compat
 * conversion handled by the generic compat_standard_* helpers.
 */
2152 static struct xt_target ipt_standard_target __read_mostly = {
2153 .name = IPT_STANDARD_TARGET,
2154 .targetsize = sizeof(int),
2156 #ifdef CONFIG_COMPAT
2157 .compatsize = sizeof(compat_int_t),
2158 .compat_from_user = compat_standard_from_user,
2159 .compat_to_user = compat_standard_to_user,
/*
 * ERROR target: placed at chain ends by userspace; its targinfo is the
 * error name string (IPT_FUNCTION_MAXNAMELEN bytes).
 */
2163 static struct xt_target ipt_error_target __read_mostly = {
2164 .name = IPT_ERROR_TARGET,
2165 .target = ipt_error,
2166 .targetsize = IPT_FUNCTION_MAXNAMELEN,
/*
 * Socket-option registration: wires the get/set dispatchers (and their
 * compat variants when CONFIG_COMPAT) into the IPT_BASE_CTL..MAX range.
 */
2170 static struct nf_sockopt_ops ipt_sockopts = {
2172 .set_optmin = IPT_BASE_CTL,
2173 .set_optmax = IPT_SO_SET_MAX+1,
2174 .set = do_ipt_set_ctl,
2175 #ifdef CONFIG_COMPAT
2176 .compat_set = compat_do_ipt_set_ctl,
2178 .get_optmin = IPT_BASE_CTL,
2179 .get_optmax = IPT_SO_GET_MAX+1,
2180 .get = do_ipt_get_ctl,
2181 #ifdef CONFIG_COMPAT
2182 .compat_get = compat_do_ipt_get_ctl,
2184 .owner = THIS_MODULE,
/*
 * Built-in ICMP match registration: ties icmp_match/icmp_checkentry to
 * IPPROTO_ICMP with struct ipt_icmp as the per-rule match data.
 */
2187 static struct xt_match icmp_matchstruct __read_mostly = {
2189 .match = icmp_match,
2190 .matchsize = sizeof(struct ipt_icmp),
2191 .checkentry = icmp_checkentry,
2192 .proto = IPPROTO_ICMP,
/*
 * Module init: register the AF_INET xtables core, the two built-in
 * targets, the ICMP match, and the sockopt interface — in that order.
 * The tail lines below are the error-unwind sequence, torn down in
 * reverse registration order (labels elided in this view).
 */
2196 static int __init ip_tables_init(void)
2200 ret = xt_proto_init(AF_INET);
2204 /* Noone else will be downing sem now, so we won't sleep */
2205 ret = xt_register_target(&ipt_standard_target);
2208 ret = xt_register_target(&ipt_error_target);
2211 ret = xt_register_match(&icmp_matchstruct);
2215 /* Register setsockopt */
2216 ret = nf_register_sockopt(&ipt_sockopts);
2220 printk(KERN_INFO "ip_tables: (C) 2000-2006 Netfilter Core Team\n");
	/* error unwind, reverse order of the registrations above */
2224 xt_unregister_match(&icmp_matchstruct);
2226 xt_unregister_target(&ipt_error_target);
2228 xt_unregister_target(&ipt_standard_target);
2230 xt_proto_fini(AF_INET);
/*
 * Module exit: undo everything ip_tables_init() registered, in reverse
 * order (sockopts, match, targets, AF_INET proto state).
 */
2235 static void __exit ip_tables_fini(void)
2237 nf_unregister_sockopt(&ipt_sockopts);
2239 xt_unregister_match(&icmp_matchstruct);
2240 xt_unregister_target(&ipt_error_target);
2241 xt_unregister_target(&ipt_standard_target);
2243 xt_proto_fini(AF_INET);
2246 EXPORT_SYMBOL(ipt_register_table);
2247 EXPORT_SYMBOL(ipt_unregister_table);
2248 EXPORT_SYMBOL(ipt_do_table);
2249 module_init(ip_tables_init);
2250 module_exit(ip_tables_fini);