/*
 * This is a module which is used for queueing IPv4 packets and
 * communicating with userspace via netlink.
 *
 * (C) 2000-2002 James Morris <jmorris@intercode.com.au>
 * (C) 2003-2005 Netfilter Core Team <coreteam@netfilter.org>
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 as
 * published by the Free Software Foundation.
 */
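/*
 * Userspace view (illustrative sketch only, not part of this module):
 * packets selected by an iptables QUEUE rule are handed to ip_queue,
 * which forwards them over a NETLINK_FIREWALL socket and holds them
 * until the peer returns a verdict.  The snippet below shows a minimal
 * read/verdict loop using libipq; the exact libipq calls and error
 * handling should be checked against the libipq documentation.
 *
 *	#include <linux/netfilter.h>
 *	#include <libipq.h>
 *
 *	int main(void)
 *	{
 *		unsigned char buf[4096];
 *		struct ipq_handle *h = ipq_create_handle(0, PF_INET);
 *
 *		if (!h)
 *			return 1;
 *		// Ask the kernel to copy packet metadata plus payload.
 *		ipq_set_mode(h, IPQ_COPY_PACKET, sizeof(buf));
 *		for (;;) {
 *			if (ipq_read(h, buf, sizeof(buf), 0) <= 0)
 *				break;
 *			if (ipq_message_type(buf) == IPQM_PACKET) {
 *				ipq_packet_msg_t *m = ipq_get_packet(buf);
 *				// Accept everything; a real filter would
 *				// inspect m->payload before deciding.
 *				ipq_set_verdict(h, m->packet_id, NF_ACCEPT,
 *						0, NULL);
 *			}
 *		}
 *		ipq_destroy_handle(h);
 *		return 0;
 *	}
 */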
#include <linux/module.h>
#include <linux/skbuff.h>
#include <linux/init.h>
#include <linux/ip.h>
#include <linux/notifier.h>
#include <linux/netdevice.h>
#include <linux/netfilter.h>
#include <linux/netfilter_ipv4/ip_queue.h>
#include <linux/netfilter_ipv4/ip_tables.h>
#include <linux/netlink.h>
#include <linux/spinlock.h>
#include <linux/sysctl.h>
#include <linux/proc_fs.h>
#include <linux/seq_file.h>
#include <linux/security.h>
#include <linux/mutex.h>
#include <net/net_namespace.h>
#include <net/sock.h>
#include <net/route.h>
#define IPQ_QMAX_DEFAULT 1024
#define IPQ_PROC_FS_NAME "ip_queue"
#define NET_IPQ_QMAX 2088
#define NET_IPQ_QMAX_NAME "ip_queue_maxlen"
struct ipq_queue_entry {
	struct list_head list;
	struct nf_info *info;
	struct sk_buff *skb;
};

typedef int (*ipq_cmpfn)(struct ipq_queue_entry *, unsigned long);
static unsigned char copy_mode __read_mostly = IPQ_COPY_NONE;
static unsigned int queue_maxlen __read_mostly = IPQ_QMAX_DEFAULT;
static DEFINE_RWLOCK(queue_lock);
static int peer_pid __read_mostly;
static unsigned int copy_range __read_mostly;
static unsigned int queue_total;
static unsigned int queue_dropped = 0;
static unsigned int queue_user_dropped = 0;
static struct sock *ipqnl __read_mostly;
static LIST_HEAD(queue_list);
static DEFINE_MUTEX(ipqnl_mutex);
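/*
 * Reinject a queued packet into the stack with the given verdict and
 * free its queue entry.
 */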
static void
ipq_issue_verdict(struct ipq_queue_entry *entry, int verdict)
{
	/* The TCP input path (and probably other parts of the stack) assumes
	 * it is called from softirq context rather than from syscall context,
	 * which is where ipq_issue_verdict runs; e.g. it can deadlock on
	 * locks also taken from timer softirqs.  Emulate softirq context
	 * with local_bh_disable(). */
	local_bh_disable();
	nf_reinject(entry->skb, entry->info, verdict);
	local_bh_enable();

	kfree(entry);
}
static inline void
__ipq_enqueue_entry(struct ipq_queue_entry *entry)
{
	list_add(&entry->list, &queue_list);
	queue_total++;
}
/*
 * Find and return a queued entry matched by cmpfn, or return the last
 * entry if cmpfn is NULL.
 */
static inline struct ipq_queue_entry *
__ipq_find_entry(ipq_cmpfn cmpfn, unsigned long data)
{
	struct list_head *p;

	list_for_each_prev(p, &queue_list) {
		struct ipq_queue_entry *entry = (struct ipq_queue_entry *)p;

		if (!cmpfn || cmpfn(entry, data))
			return entry;
	}
	return NULL;
}
static inline void
__ipq_dequeue_entry(struct ipq_queue_entry *entry)
{
	list_del(&entry->list);
	queue_total--;
}
static inline struct ipq_queue_entry *
__ipq_find_dequeue_entry(ipq_cmpfn cmpfn, unsigned long data)
{
	struct ipq_queue_entry *entry;

	entry = __ipq_find_entry(cmpfn, data);
	if (entry == NULL)
		return NULL;

	__ipq_dequeue_entry(entry);
	return entry;
}
static inline void
__ipq_flush(int verdict)
{
	struct ipq_queue_entry *entry;

	while ((entry = __ipq_find_dequeue_entry(NULL, 0)))
		ipq_issue_verdict(entry, verdict);
}
static inline int
__ipq_set_mode(unsigned char mode, unsigned int range)
{
	int status = 0;

	switch (mode) {
	case IPQ_COPY_NONE:
	case IPQ_COPY_META:
		copy_mode = mode;
		copy_range = 0;
		break;

	case IPQ_COPY_PACKET:
		copy_mode = mode;
		copy_range = range;
		if (copy_range > 0xFFFF)
			copy_range = 0xFFFF;
		break;

	default:
		status = -EINVAL;
	}
	return status;
}
static inline void
__ipq_reset(void)
{
	peer_pid = 0;
	net_disable_timestamp();
	__ipq_set_mode(IPQ_COPY_NONE, 0);
	__ipq_flush(NF_DROP);
}
static struct ipq_queue_entry *
ipq_find_dequeue_entry(ipq_cmpfn cmpfn, unsigned long data)
{
	struct ipq_queue_entry *entry;

	write_lock_bh(&queue_lock);
	entry = __ipq_find_dequeue_entry(cmpfn, data);
	write_unlock_bh(&queue_lock);
	return entry;
}
static void
ipq_flush(int verdict)
{
	write_lock_bh(&queue_lock);
	__ipq_flush(verdict);
	write_unlock_bh(&queue_lock);
}
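/*
 * Build an IPQM_PACKET netlink message for a queued packet: packet
 * metadata always, plus up to copy_range bytes of payload when the
 * copy mode is IPQ_COPY_PACKET.
 */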
static struct sk_buff *
ipq_build_packet_message(struct ipq_queue_entry *entry, int *errp)
{
	sk_buff_data_t old_tail;
	size_t size = 0;
	size_t data_len = 0;
	struct sk_buff *skb;
	struct ipq_packet_msg *pmsg;
	struct nlmsghdr *nlh;
	struct timeval tv;

	read_lock_bh(&queue_lock);

	switch (copy_mode) {
	case IPQ_COPY_META:
	case IPQ_COPY_NONE:
		size = NLMSG_SPACE(sizeof(*pmsg));
		break;

	case IPQ_COPY_PACKET:
		if ((entry->skb->ip_summed == CHECKSUM_PARTIAL ||
		     entry->skb->ip_summed == CHECKSUM_COMPLETE) &&
		    (*errp = skb_checksum_help(entry->skb))) {
			read_unlock_bh(&queue_lock);
			return NULL;
		}
		if (copy_range == 0 || copy_range > entry->skb->len)
			data_len = entry->skb->len;
		else
			data_len = copy_range;

		size = NLMSG_SPACE(sizeof(*pmsg) + data_len);
		break;

	default:
		*errp = -EINVAL;
		read_unlock_bh(&queue_lock);
		return NULL;
	}

	read_unlock_bh(&queue_lock);
	skb = alloc_skb(size, GFP_ATOMIC);
	if (!skb)
		goto nlmsg_failure;

	old_tail = skb->tail;
	nlh = NLMSG_PUT(skb, 0, 0, IPQM_PACKET, size - sizeof(*nlh));
	pmsg = NLMSG_DATA(nlh);
	memset(pmsg, 0, sizeof(*pmsg));

	pmsg->packet_id      = (unsigned long)entry;
	pmsg->data_len       = data_len;
	tv = ktime_to_timeval(entry->skb->tstamp);
	pmsg->timestamp_sec  = tv.tv_sec;
	pmsg->timestamp_usec = tv.tv_usec;
	pmsg->mark           = entry->skb->mark;
	pmsg->hook           = entry->info->hook;
	pmsg->hw_protocol    = entry->skb->protocol;

	if (entry->info->indev)
		strcpy(pmsg->indev_name, entry->info->indev->name);
	else
		pmsg->indev_name[0] = '\0';

	if (entry->info->outdev)
		strcpy(pmsg->outdev_name, entry->info->outdev->name);
	else
		pmsg->outdev_name[0] = '\0';

	if (entry->info->indev && entry->skb->dev) {
		pmsg->hw_type = entry->skb->dev->type;
		pmsg->hw_addrlen = dev_parse_header(entry->skb,
						    pmsg->hw_addr);
	}

	if (data_len)
		if (skb_copy_bits(entry->skb, 0, pmsg->payload, data_len))
			BUG();

	nlh->nlmsg_len = skb->tail - old_tail;
	return skb;

nlmsg_failure:
	if (skb)
		kfree_skb(skb);
	*errp = -EINVAL;
	printk(KERN_ERR "ip_queue: error creating packet message\n");
	return NULL;
}
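/*
 * Queue handler callback: build a netlink message for the packet,
 * unicast it to the userspace peer and keep the entry queued until a
 * verdict arrives.  Registered below as the PF_INET queue handler.
 */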
static int
ipq_enqueue_packet(struct sk_buff *skb, struct nf_info *info,
		   unsigned int queuenum, void *data)
{
	int status = -EINVAL;
	struct sk_buff *nskb;
	struct ipq_queue_entry *entry;

	if (copy_mode == IPQ_COPY_NONE)
		return -EAGAIN;

	entry = kmalloc(sizeof(*entry), GFP_ATOMIC);
	if (entry == NULL) {
		printk(KERN_ERR "ip_queue: OOM in ipq_enqueue_packet()\n");
		return -ENOMEM;
	}

	entry->info = info;
	entry->skb = skb;

	nskb = ipq_build_packet_message(entry, &status);
	if (nskb == NULL)
		goto err_out_free;

	write_lock_bh(&queue_lock);

	if (!peer_pid)
		goto err_out_free_nskb;

	if (queue_total >= queue_maxlen) {
		queue_dropped++;
		status = -ENOSPC;
		if (net_ratelimit())
			printk(KERN_WARNING "ip_queue: full at %d entries, "
			       "dropping packet(s). Dropped: %d\n", queue_total,
			       queue_dropped);
		goto err_out_free_nskb;
	}

	/* netlink_unicast will either free the nskb or attach it to a socket */
	status = netlink_unicast(ipqnl, nskb, peer_pid, MSG_DONTWAIT);
	if (status < 0) {
		queue_user_dropped++;
		goto err_out_unlock;
	}

	__ipq_enqueue_entry(entry);

	write_unlock_bh(&queue_lock);
	return status;

err_out_free_nskb:
	kfree_skb(nskb);

err_out_unlock:
	write_unlock_bh(&queue_lock);

err_out_free:
	kfree(entry);
	return status;
}
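/*
 * Replace the queued packet's data with the payload userspace supplied
 * alongside its verdict, resizing the skb as needed.
 */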
static int
ipq_mangle_ipv4(ipq_verdict_msg_t *v, struct ipq_queue_entry *e)
{
	int diff;
	int err;
	struct iphdr *user_iph = (struct iphdr *)v->payload;

	if (v->data_len < sizeof(*user_iph))
		return 0;
	diff = v->data_len - e->skb->len;
	if (diff < 0) {
		if (pskb_trim(e->skb, v->data_len))
			return -ENOMEM;
	} else if (diff > 0) {
		if (v->data_len > 0xFFFF)
			return -EINVAL;
		if (diff > skb_tailroom(e->skb)) {
			err = pskb_expand_head(e->skb, 0,
					       diff - skb_tailroom(e->skb),
					       GFP_ATOMIC);
			if (err) {
				printk(KERN_WARNING "ip_queue: error "
				       "in mangle, dropping packet: %d\n", -err);
				return err;
			}
		}
		skb_put(e->skb, diff);
	}
	if (!skb_make_writable(e->skb, v->data_len))
		return -ENOMEM;
	skb_copy_to_linear_data(e->skb, v->payload, v->data_len);
	e->skb->ip_summed = CHECKSUM_NONE;

	return 0;
}
static inline int
id_cmp(struct ipq_queue_entry *e, unsigned long id)
{
	return (id == (unsigned long)e);
}
static int
ipq_set_verdict(struct ipq_verdict_msg *vmsg, unsigned int len)
{
	struct ipq_queue_entry *entry;

	if (vmsg->value > NF_MAX_VERDICT)
		return -EINVAL;

	entry = ipq_find_dequeue_entry(id_cmp, vmsg->id);
	if (entry == NULL)
		return -ENOENT;
	else {
		int verdict = vmsg->value;

		if (vmsg->data_len && vmsg->data_len == len)
			if (ipq_mangle_ipv4(vmsg, entry) < 0)
				verdict = NF_DROP;

		ipq_issue_verdict(entry, verdict);
		return 0;
	}
}
static int
ipq_set_mode(unsigned char mode, unsigned int range)
{
	int status;

	write_lock_bh(&queue_lock);
	status = __ipq_set_mode(mode, range);
	write_unlock_bh(&queue_lock);
	return status;
}
static int
ipq_receive_peer(struct ipq_peer_msg *pmsg,
		 unsigned char type, unsigned int len)
{
	int status = 0;

	if (len < sizeof(*pmsg))
		return -EINVAL;

	switch (type) {
	case IPQM_MODE:
		status = ipq_set_mode(pmsg->msg.mode.value,
				      pmsg->msg.mode.range);
		break;

	case IPQM_VERDICT:
		if (pmsg->msg.verdict.value > NF_MAX_VERDICT)
			status = -EINVAL;
		else
			status = ipq_set_verdict(&pmsg->msg.verdict,
						 len - sizeof(*pmsg));
		break;

	default:
		status = -EINVAL;
	}
	return status;
}
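/*
 * Match a queued entry against an interface index, including bridge
 * physical devices when bridge netfilter is enabled.  Used to flush
 * entries that reference a device which is going away.
 */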
static int
dev_cmp(struct ipq_queue_entry *entry, unsigned long ifindex)
{
	if (entry->info->indev)
		if (entry->info->indev->ifindex == ifindex)
			return 1;
	if (entry->info->outdev)
		if (entry->info->outdev->ifindex == ifindex)
			return 1;
#ifdef CONFIG_BRIDGE_NETFILTER
	if (entry->skb->nf_bridge) {
		if (entry->skb->nf_bridge->physindev &&
		    entry->skb->nf_bridge->physindev->ifindex == ifindex)
			return 1;
		if (entry->skb->nf_bridge->physoutdev &&
		    entry->skb->nf_bridge->physoutdev->ifindex == ifindex)
			return 1;
	}
#endif
	return 0;
}
static void
ipq_dev_drop(int ifindex)
{
	struct ipq_queue_entry *entry;

	while ((entry = ipq_find_dequeue_entry(dev_cmp, ifindex)) != NULL)
		ipq_issue_verdict(entry, NF_DROP);
}
#define RCV_SKB_FAIL(err) do { netlink_ack(skb, nlh, (err)); return; } while (0)
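/*
 * Validate and dispatch one netlink request from the userspace peer:
 * sanity-check the header, enforce CAP_NET_ADMIN, bind to the first
 * peer PID seen, then pass the payload to ipq_receive_peer().
 */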
static inline void
__ipq_rcv_skb(struct sk_buff *skb)
{
	int status, type, pid, flags, nlmsglen, skblen;
	struct nlmsghdr *nlh;

	skblen = skb->len;
	if (skblen < sizeof(*nlh))
		return;

	nlh = nlmsg_hdr(skb);
	nlmsglen = nlh->nlmsg_len;
	if (nlmsglen < sizeof(*nlh) || skblen < nlmsglen)
		return;

	pid = nlh->nlmsg_pid;
	flags = nlh->nlmsg_flags;

	if (pid <= 0 || !(flags & NLM_F_REQUEST) || flags & NLM_F_MULTI)
		RCV_SKB_FAIL(-EINVAL);

	if (flags & MSG_TRUNC)
		RCV_SKB_FAIL(-ECOMM);

	type = nlh->nlmsg_type;
	if (type < NLMSG_NOOP || type >= IPQM_MAX)
		RCV_SKB_FAIL(-EINVAL);

	if (type <= IPQM_BASE)
		return;

	if (security_netlink_recv(skb, CAP_NET_ADMIN))
		RCV_SKB_FAIL(-EPERM);

	write_lock_bh(&queue_lock);

	if (peer_pid) {
		if (peer_pid != pid) {
			write_unlock_bh(&queue_lock);
			RCV_SKB_FAIL(-EBUSY);
		}
	} else {
		net_enable_timestamp();
		peer_pid = pid;
	}

	write_unlock_bh(&queue_lock);

	status = ipq_receive_peer(NLMSG_DATA(nlh), type,
				  nlmsglen - NLMSG_LENGTH(0));
	if (status < 0)
		RCV_SKB_FAIL(status);

	if (flags & NLM_F_ACK)
		netlink_ack(skb, nlh, 0);
}
static void
ipq_rcv_skb(struct sk_buff *skb)
{
	mutex_lock(&ipqnl_mutex);
	__ipq_rcv_skb(skb);
	mutex_unlock(&ipqnl_mutex);
}
static int
ipq_rcv_dev_event(struct notifier_block *this,
		  unsigned long event, void *ptr)
{
	struct net_device *dev = ptr;

	if (dev->nd_net != &init_net)
		return NOTIFY_DONE;

	/* Drop any packets associated with the downed device */
	if (event == NETDEV_DOWN)
		ipq_dev_drop(dev->ifindex);
	return NOTIFY_DONE;
}
static struct notifier_block ipq_dev_notifier = {
	.notifier_call	= ipq_rcv_dev_event,
};
static int
ipq_rcv_nl_event(struct notifier_block *this,
		 unsigned long event, void *ptr)
{
	struct netlink_notify *n = ptr;

	if (event == NETLINK_URELEASE &&
	    n->protocol == NETLINK_FIREWALL && n->pid) {
		write_lock_bh(&queue_lock);
		if ((n->net == &init_net) && (n->pid == peer_pid))
			__ipq_reset();
		write_unlock_bh(&queue_lock);
	}
	return NOTIFY_DONE;
}
static struct notifier_block ipq_nl_notifier = {
	.notifier_call	= ipq_rcv_nl_event,
};
static struct ctl_table_header *ipq_sysctl_header;
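/* Exposes queue_maxlen as the net.ipv4.ip_queue_maxlen sysctl. */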
static ctl_table ipq_table[] = {
	{
		.ctl_name	= NET_IPQ_QMAX,
		.procname	= NET_IPQ_QMAX_NAME,
		.data		= &queue_maxlen,
		.maxlen		= sizeof(queue_maxlen),
		.mode		= 0644,
		.proc_handler	= proc_dointvec
	},
	{ .ctl_name = 0 }
};

static ctl_table ipq_dir_table[] = {
	{
		.ctl_name	= NET_IPV4,
		.procname	= "ipv4",
		.mode		= 0555,
		.child		= ipq_table
	},
	{ .ctl_name = 0 }
};

static ctl_table ipq_root_table[] = {
	{
		.ctl_name	= CTL_NET,
		.procname	= "net",
		.mode		= 0555,
		.child		= ipq_dir_table
	},
	{ .ctl_name = 0 }
};
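/* Status output for the /proc/net/ip_queue entry created in init. */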
static int ip_queue_show(struct seq_file *m, void *v)
{
	read_lock_bh(&queue_lock);

	seq_printf(m,
		   "Queue length      : %u\n"
		   "Queue max. length : %u\n"
		   "Queue dropped     : %u\n"
		   "Netlink dropped   : %u\n",
		   queue_total,
		   queue_maxlen,
		   queue_dropped,
		   queue_user_dropped);

	read_unlock_bh(&queue_lock);
	return 0;
}
static int ip_queue_open(struct inode *inode, struct file *file)
{
	return single_open(file, ip_queue_show, NULL);
}
static const struct file_operations ip_queue_proc_fops = {
	.open		= ip_queue_open,
	.read		= seq_read,
	.llseek		= seq_lseek,
	.release	= single_release,
	.owner		= THIS_MODULE,
};
static struct nf_queue_handler nfqh = {
	.name	= "ip_queue",
	.outfn	= &ipq_enqueue_packet,
};
static int __init ip_queue_init(void)
{
	int status = -ENOMEM;
	struct proc_dir_entry *proc;

	netlink_register_notifier(&ipq_nl_notifier);
	ipqnl = netlink_kernel_create(&init_net, NETLINK_FIREWALL, 0,
				      ipq_rcv_skb, NULL, THIS_MODULE);
	if (ipqnl == NULL) {
		printk(KERN_ERR "ip_queue: failed to create netlink socket\n");
		goto cleanup_netlink_notifier;
	}

	proc = create_proc_entry(IPQ_PROC_FS_NAME, 0, init_net.proc_net);
	if (proc) {
		proc->owner = THIS_MODULE;
		proc->proc_fops = &ip_queue_proc_fops;
	} else {
		printk(KERN_ERR "ip_queue: failed to create proc entry\n");
		goto cleanup_ipqnl;
	}

	register_netdevice_notifier(&ipq_dev_notifier);
	ipq_sysctl_header = register_sysctl_table(ipq_root_table);

	status = nf_register_queue_handler(PF_INET, &nfqh);
	if (status < 0) {
		printk(KERN_ERR "ip_queue: failed to register queue handler\n");
		goto cleanup_sysctl;
	}
	return status;

cleanup_sysctl:
	unregister_sysctl_table(ipq_sysctl_header);
	unregister_netdevice_notifier(&ipq_dev_notifier);
	proc_net_remove(&init_net, IPQ_PROC_FS_NAME);

cleanup_ipqnl:
	sock_release(ipqnl->sk_socket);
	mutex_lock(&ipqnl_mutex);
	mutex_unlock(&ipqnl_mutex);

cleanup_netlink_notifier:
	netlink_unregister_notifier(&ipq_nl_notifier);
	return status;
}
static void __exit ip_queue_fini(void)
{
	nf_unregister_queue_handlers(&nfqh);
	synchronize_net();
	ipq_flush(NF_DROP);

	unregister_sysctl_table(ipq_sysctl_header);
	unregister_netdevice_notifier(&ipq_dev_notifier);
	proc_net_remove(&init_net, IPQ_PROC_FS_NAME);

	sock_release(ipqnl->sk_socket);
	mutex_lock(&ipqnl_mutex);
	mutex_unlock(&ipqnl_mutex);

	netlink_unregister_notifier(&ipq_nl_notifier);
}
MODULE_DESCRIPTION("IPv4 packet queue handler");
MODULE_AUTHOR("James Morris <jmorris@intercode.com.au>");
MODULE_LICENSE("GPL");

module_init(ip_queue_init);
module_exit(ip_queue_fini);