/*
 * This is a module which is used for queueing IPv4 packets and
 * communicating with userspace via netlink.
 *
 * (C) 2000-2002 James Morris <jmorris@intercode.com.au>
 * (C) 2003-2005 Netfilter Core Team <coreteam@netfilter.org>
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 as
 * published by the Free Software Foundation.
 */
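
/*
 * Userspace usage sketch (not part of this module): peers normally talk
 * to ip_queue through libipq.  Assuming libipq is installed and BUFSIZE
 * is defined large enough for the metadata header plus one packet, an
 * accept-everything loop looks roughly like this (error handling
 * omitted):
 *
 *	#include <libipq.h>
 *	#include <linux/netfilter.h>
 *
 *	struct ipq_handle *h = ipq_create_handle(0, PF_INET);
 *	unsigned char buf[BUFSIZE];
 *
 *	ipq_set_mode(h, IPQ_COPY_PACKET, BUFSIZE);
 *	while (ipq_read(h, buf, BUFSIZE, 0) > 0) {
 *		if (ipq_message_type(buf) == IPQM_PACKET) {
 *			ipq_packet_msg_t *m = ipq_get_packet(buf);
 *			ipq_set_verdict(h, m->packet_id, NF_ACCEPT, 0, NULL);
 *		}
 *	}
 *	ipq_destroy_handle(h);
 */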

#include <linux/module.h>
#include <linux/skbuff.h>
#include <linux/init.h>
#include <linux/ip.h>
#include <linux/notifier.h>
#include <linux/netdevice.h>
#include <linux/netfilter.h>
#include <linux/netfilter_ipv4/ip_queue.h>
#include <linux/netfilter_ipv4/ip_tables.h>
#include <linux/netlink.h>
#include <linux/spinlock.h>
#include <linux/sysctl.h>
#include <linux/proc_fs.h>
#include <linux/seq_file.h>
#include <linux/security.h>
#include <linux/mutex.h>
#include <net/net_namespace.h>
#include <net/sock.h>
#include <net/route.h>
#include <net/netfilter/nf_queue.h>

#define IPQ_QMAX_DEFAULT 1024
#define IPQ_PROC_FS_NAME "ip_queue"
#define NET_IPQ_QMAX 2088
#define NET_IPQ_QMAX_NAME "ip_queue_maxlen"

typedef int (*ipq_cmpfn)(struct nf_queue_entry *, unsigned long);

static unsigned char copy_mode __read_mostly = IPQ_COPY_NONE;
static unsigned int queue_maxlen __read_mostly = IPQ_QMAX_DEFAULT;
static DEFINE_RWLOCK(queue_lock);
static int peer_pid __read_mostly;
static unsigned int copy_range __read_mostly;
static unsigned int queue_total;
static unsigned int queue_dropped = 0;
static unsigned int queue_user_dropped = 0;
static struct sock *ipqnl __read_mostly;
static LIST_HEAD(queue_list);
static DEFINE_MUTEX(ipqnl_mutex);
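
/*
 * queue_lock protects queue_list, queue_total, the drop counters,
 * peer_pid and the copy mode/range settings; ipqnl_mutex serializes
 * processing of messages arriving on the ipqnl netlink socket.
 */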

static void
ipq_issue_verdict(struct nf_queue_entry *entry, int verdict)
{
	/* The TCP input path (and probably other bits) assumes it is
	 * called from softirq context, not from process context the way
	 * ipq_issue_verdict is; the TCP input path can deadlock with
	 * locks taken from timer softirq, for example.  We therefore
	 * emulate softirq context with local_bh_disable(). */
	local_bh_disable();
	nf_reinject(entry, verdict);
	local_bh_enable();
}

static inline void
__ipq_enqueue_entry(struct nf_queue_entry *entry)
{
	list_add_tail(&entry->list, &queue_list);
	queue_total++;
}

static inline int
__ipq_set_mode(unsigned char mode, unsigned int range)
{
	int status = 0;

	switch (mode) {
	case IPQ_COPY_NONE:
	case IPQ_COPY_META:
		copy_mode = mode;
		copy_range = 0;
		break;

	case IPQ_COPY_PACKET:
		copy_mode = mode;
		copy_range = range;
		if (copy_range > 0xFFFF)
			copy_range = 0xFFFF;
		break;

	default:
		status = -EINVAL;
	}
	return status;
}

static void __ipq_flush(ipq_cmpfn cmpfn, unsigned long data);

static inline void
__ipq_reset(void)
{
	peer_pid = 0;
	net_disable_timestamp();
	__ipq_set_mode(IPQ_COPY_NONE, 0);
	__ipq_flush(NULL, 0);
}
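
/*
 * Packet ids handed to userspace are simply the kernel addresses of
 * the queued nf_queue_entry structures, so a verdict's id is matched
 * by comparing it against each entry's address.
 */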

static struct nf_queue_entry *
ipq_find_dequeue_entry(unsigned long id)
{
	struct nf_queue_entry *entry = NULL, *i;

	write_lock_bh(&queue_lock);

	list_for_each_entry(i, &queue_list, list) {
		if ((unsigned long)i == id) {
			entry = i;
			break;
		}
	}

	if (entry) {
		list_del(&entry->list);
		queue_total--;
	}

	write_unlock_bh(&queue_lock);
	return entry;
}

static void
__ipq_flush(ipq_cmpfn cmpfn, unsigned long data)
{
	struct nf_queue_entry *entry, *next;

	list_for_each_entry_safe(entry, next, &queue_list, list) {
		if (!cmpfn || cmpfn(entry, data)) {
			list_del(&entry->list);
			queue_total--;
			ipq_issue_verdict(entry, NF_DROP);
		}
	}
}

static void
ipq_flush(ipq_cmpfn cmpfn, unsigned long data)
{
	write_lock_bh(&queue_lock);
	__ipq_flush(cmpfn, data);
	write_unlock_bh(&queue_lock);
}
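
/*
 * Build an IPQM_PACKET netlink message for the peer: an nlmsghdr
 * followed by struct ipq_packet_msg (packet metadata), followed in
 * IPQ_COPY_PACKET mode by up to copy_range bytes of packet payload.
 */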

static struct sk_buff *
ipq_build_packet_message(struct nf_queue_entry *entry, int *errp)
{
	sk_buff_data_t old_tail;
	size_t size = 0;
	size_t data_len = 0;
	struct sk_buff *skb;
	struct ipq_packet_msg *pmsg;
	struct nlmsghdr *nlh;
	struct timeval tv;

	read_lock_bh(&queue_lock);

	switch (copy_mode) {
	case IPQ_COPY_META:
	case IPQ_COPY_NONE:
		size = NLMSG_SPACE(sizeof(*pmsg));
		break;

	case IPQ_COPY_PACKET:
		if ((entry->skb->ip_summed == CHECKSUM_PARTIAL ||
		     entry->skb->ip_summed == CHECKSUM_COMPLETE) &&
		    (*errp = skb_checksum_help(entry->skb))) {
			read_unlock_bh(&queue_lock);
			return NULL;
		}
		if (copy_range == 0 || copy_range > entry->skb->len)
			data_len = entry->skb->len;
		else
			data_len = copy_range;

		size = NLMSG_SPACE(sizeof(*pmsg) + data_len);
		break;

	default:
		*errp = -EINVAL;
		read_unlock_bh(&queue_lock);
		return NULL;
	}

	read_unlock_bh(&queue_lock);

	skb = alloc_skb(size, GFP_ATOMIC);
	if (!skb)
		goto nlmsg_failure;

	old_tail = skb->tail;
	nlh = NLMSG_PUT(skb, 0, 0, IPQM_PACKET, size - sizeof(*nlh));
	pmsg = NLMSG_DATA(nlh);
	memset(pmsg, 0, sizeof(*pmsg));

	/* The entry's kernel address doubles as the packet id that
	 * userspace must echo back in its verdict message. */
	pmsg->packet_id       = (unsigned long )entry;
	pmsg->data_len        = data_len;
	tv = ktime_to_timeval(entry->skb->tstamp);
	pmsg->timestamp_sec   = tv.tv_sec;
	pmsg->timestamp_usec  = tv.tv_usec;
	pmsg->mark            = entry->skb->mark;
	pmsg->hook            = entry->hook;
	pmsg->hw_protocol     = entry->skb->protocol;

	if (entry->indev)
		strcpy(pmsg->indev_name, entry->indev->name);
	else
		pmsg->indev_name[0] = '\0';

	if (entry->outdev)
		strcpy(pmsg->outdev_name, entry->outdev->name);
	else
		pmsg->outdev_name[0] = '\0';

	if (entry->indev && entry->skb->dev) {
		pmsg->hw_type = entry->skb->dev->type;
		pmsg->hw_addrlen = dev_parse_header(entry->skb,
						    pmsg->hw_addr);
	}

	if (data_len)
		if (skb_copy_bits(entry->skb, 0, pmsg->payload, data_len))
			BUG();

	nlh->nlmsg_len = skb->tail - old_tail;
	return skb;

nlmsg_failure:
	kfree_skb(skb);
	*errp = -EINVAL;
	printk(KERN_ERR "ip_queue: error creating packet message\n");
	return NULL;
}

static int
ipq_enqueue_packet(struct nf_queue_entry *entry, unsigned int queuenum)
{
	int status = -EINVAL;
	struct sk_buff *nskb;

	if (copy_mode == IPQ_COPY_NONE)
		return -EAGAIN;

	nskb = ipq_build_packet_message(entry, &status);
	if (nskb == NULL)
		return status;

	write_lock_bh(&queue_lock);

	if (!peer_pid)
		goto err_out_free_nskb;

	if (queue_total >= queue_maxlen) {
		queue_dropped++;
		status = -ENOSPC;
		if (net_ratelimit())
			printk(KERN_WARNING "ip_queue: full at %d entries, "
			       "dropping packet(s). Dropped: %d\n", queue_total,
			       queue_dropped);
		goto err_out_free_nskb;
	}

	/* netlink_unicast will either free the nskb or attach it to a socket */
	status = netlink_unicast(ipqnl, nskb, peer_pid, MSG_DONTWAIT);
	if (status < 0) {
		queue_user_dropped++;
		goto err_out_unlock;
	}

	__ipq_enqueue_entry(entry);

	write_unlock_bh(&queue_lock);
	return status;

err_out_free_nskb:
	kfree_skb(nskb);

err_out_unlock:
	write_unlock_bh(&queue_lock);
	return status;
}
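
/*
 * Replace the queued skb's contents with the packet data that
 * userspace returned in its verdict message, trimming or growing the
 * skb as needed.  The rewrite invalidates any checksum, hence
 * ip_summed is reset to CHECKSUM_NONE below.
 */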

static int
ipq_mangle_ipv4(ipq_verdict_msg_t *v, struct nf_queue_entry *e)
{
	int diff;
	int err;
	struct iphdr *user_iph = (struct iphdr *)v->payload;

	if (v->data_len < sizeof(*user_iph))
		return 0;
	diff = v->data_len - e->skb->len;
	if (diff < 0) {
		if (pskb_trim(e->skb, v->data_len))
			return -ENOMEM;
	} else if (diff > 0) {
		if (v->data_len > 0xFFFF)
			return -EINVAL;
		if (diff > skb_tailroom(e->skb)) {
			err = pskb_expand_head(e->skb, 0,
					       diff - skb_tailroom(e->skb),
					       GFP_ATOMIC);
			if (err) {
				printk(KERN_WARNING "ip_queue: error "
				       "in mangle, dropping packet: %d\n", -err);
				return err;
			}
		}
		skb_put(e->skb, diff);
	}
	if (!skb_make_writable(e->skb, v->data_len))
		return -ENOMEM;
	skb_copy_to_linear_data(e->skb, v->payload, v->data_len);
	e->skb->ip_summed = CHECKSUM_NONE;

	return 0;
}

static int
ipq_set_verdict(struct ipq_verdict_msg *vmsg, unsigned int len)
{
	struct nf_queue_entry *entry;

	if (vmsg->value > NF_MAX_VERDICT)
		return -EINVAL;

	entry = ipq_find_dequeue_entry(vmsg->id);
	if (entry == NULL)
		return -ENOENT;
	else {
		int verdict = vmsg->value;

		if (vmsg->data_len && vmsg->data_len == len)
			if (ipq_mangle_ipv4(vmsg, entry) < 0)
				verdict = NF_DROP;

		ipq_issue_verdict(entry, verdict);
		return 0;
	}
}

static int
ipq_set_mode(unsigned char mode, unsigned int range)
{
	int status;

	write_lock_bh(&queue_lock);
	status = __ipq_set_mode(mode, range);
	write_unlock_bh(&queue_lock);
	return status;
}

static int
ipq_receive_peer(struct ipq_peer_msg *pmsg,
		 unsigned char type, unsigned int len)
{
	int status = 0;

	if (len < sizeof(*pmsg))
		return -EINVAL;

	switch (type) {
	case IPQM_MODE:
		status = ipq_set_mode(pmsg->msg.mode.value,
				      pmsg->msg.mode.range);
		break;

	case IPQM_VERDICT:
		if (pmsg->msg.verdict.value > NF_MAX_VERDICT)
			status = -EINVAL;
		else
			status = ipq_set_verdict(&pmsg->msg.verdict,
						 len - sizeof(*pmsg));
		break;

	default:
		status = -EINVAL;
	}
	return status;
}

static int
dev_cmp(struct nf_queue_entry *entry, unsigned long ifindex)
{
	if (entry->indev)
		if (entry->indev->ifindex == ifindex)
			return 1;
	if (entry->outdev)
		if (entry->outdev->ifindex == ifindex)
			return 1;
#ifdef CONFIG_BRIDGE_NETFILTER
	if (entry->skb->nf_bridge) {
		if (entry->skb->nf_bridge->physindev &&
		    entry->skb->nf_bridge->physindev->ifindex == ifindex)
			return 1;
		if (entry->skb->nf_bridge->physoutdev &&
		    entry->skb->nf_bridge->physoutdev->ifindex == ifindex)
			return 1;
	}
#endif
	return 0;
}

static void
ipq_dev_drop(int ifindex)
{
	ipq_flush(dev_cmp, ifindex);
}

#define RCV_SKB_FAIL(err) do { netlink_ack(skb, nlh, (err)); return; } while (0)
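
/*
 * Validate and dispatch one message from userspace: sanity-check the
 * netlink header, require an NLM_F_REQUEST from a nonzero pid, check
 * CAP_NET_ADMIN, then bind to (or verify) the registered peer pid
 * before handing the payload to ipq_receive_peer().
 */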

static inline void
__ipq_rcv_skb(struct sk_buff *skb)
{
	int status, type, pid, flags, nlmsglen, skblen;
	struct nlmsghdr *nlh;

	skblen = skb->len;
	if (skblen < sizeof(*nlh))
		return;

	nlh = nlmsg_hdr(skb);
	nlmsglen = nlh->nlmsg_len;
	if (nlmsglen < sizeof(*nlh) || skblen < nlmsglen)
		return;

	pid = nlh->nlmsg_pid;
	flags = nlh->nlmsg_flags;

	if (pid <= 0 || !(flags & NLM_F_REQUEST) || flags & NLM_F_MULTI)
		RCV_SKB_FAIL(-EINVAL);

	if (flags & MSG_TRUNC)
		RCV_SKB_FAIL(-ECOMM);

	type = nlh->nlmsg_type;
	if (type < NLMSG_NOOP || type >= IPQM_MAX)
		RCV_SKB_FAIL(-EINVAL);

	if (type <= IPQM_BASE)
		return;

	if (security_netlink_recv(skb, CAP_NET_ADMIN))
		RCV_SKB_FAIL(-EPERM);

	write_lock_bh(&queue_lock);

	if (peer_pid) {
		if (peer_pid != pid) {
			write_unlock_bh(&queue_lock);
			RCV_SKB_FAIL(-EBUSY);
		}
	} else {
		net_enable_timestamp();
		peer_pid = pid;
	}

	write_unlock_bh(&queue_lock);

	status = ipq_receive_peer(NLMSG_DATA(nlh), type,
				  nlmsglen - NLMSG_LENGTH(0));
	if (status < 0)
		RCV_SKB_FAIL(status);

	if (flags & NLM_F_ACK)
		netlink_ack(skb, nlh, 0);
}

static void
ipq_rcv_skb(struct sk_buff *skb)
{
	mutex_lock(&ipqnl_mutex);
	__ipq_rcv_skb(skb);
	mutex_unlock(&ipqnl_mutex);
}

static int
ipq_rcv_dev_event(struct notifier_block *this,
		  unsigned long event, void *ptr)
{
	struct net_device *dev = ptr;

	if (dev->nd_net != &init_net)
		return NOTIFY_DONE;

	/* Drop any packets associated with the downed device */
	if (event == NETDEV_DOWN)
		ipq_dev_drop(dev->ifindex);
	return NOTIFY_DONE;
}

static struct notifier_block ipq_dev_notifier = {
	.notifier_call	= ipq_rcv_dev_event,
};

static int
ipq_rcv_nl_event(struct notifier_block *this,
		 unsigned long event, void *ptr)
{
	struct netlink_notify *n = ptr;

	if (event == NETLINK_URELEASE &&
	    n->protocol == NETLINK_FIREWALL && n->pid) {
		write_lock_bh(&queue_lock);
		if ((n->net == &init_net) && (n->pid == peer_pid))
			__ipq_reset();
		write_unlock_bh(&queue_lock);
	}
	return NOTIFY_DONE;
}

static struct notifier_block ipq_nl_notifier = {
	.notifier_call	= ipq_rcv_nl_event,
};
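
/* Exposes net.ipv4.ip_queue_maxlen, i.e. /proc/sys/net/ipv4/ip_queue_maxlen. */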

static struct ctl_table_header *ipq_sysctl_header;

static ctl_table ipq_table[] = {
	{
		.ctl_name	= NET_IPQ_QMAX,
		.procname	= NET_IPQ_QMAX_NAME,
		.data		= &queue_maxlen,
		.maxlen		= sizeof(queue_maxlen),
		.mode		= 0644,
		.proc_handler	= proc_dointvec
	},
	{ .ctl_name = 0 }
};

static ctl_table ipq_dir_table[] = {
	{
		.ctl_name	= NET_IPV4,
		.procname	= "ipv4",
		.mode		= 0555,
		.child		= ipq_table
	},
	{ .ctl_name = 0 }
};

static ctl_table ipq_root_table[] = {
	{
		.ctl_name	= CTL_NET,
		.procname	= "net",
		.mode		= 0555,
		.child		= ipq_dir_table
	},
	{ .ctl_name = 0 }
};
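
/* Backs the read-only status file /proc/net/ip_queue. */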

static int ip_queue_show(struct seq_file *m, void *v)
{
	read_lock_bh(&queue_lock);

	seq_printf(m,
		   "Peer PID          : %d\n"
		   "Copy mode         : %hu\n"
		   "Copy range        : %u\n"
		   "Queue length      : %u\n"
		   "Queue max. length : %u\n"
		   "Queue dropped     : %u\n"
		   "Netlink dropped   : %u\n",
		   peer_pid,
		   copy_mode,
		   copy_range,
		   queue_total,
		   queue_maxlen,
		   queue_dropped,
		   queue_user_dropped);

	read_unlock_bh(&queue_lock);
	return 0;
}

static int ip_queue_open(struct inode *inode, struct file *file)
{
	return single_open(file, ip_queue_show, NULL);
}

static const struct file_operations ip_queue_proc_fops = {
	.open		= ip_queue_open,
	.read		= seq_read,
	.llseek		= seq_lseek,
	.release	= single_release,
	.owner		= THIS_MODULE,
};

static const struct nf_queue_handler nfqh = {
	.name	= "ip_queue",
	.outfn	= &ipq_enqueue_packet,
};

static int __init ip_queue_init(void)
{
	int status = -ENOMEM;
	struct proc_dir_entry *proc;

	netlink_register_notifier(&ipq_nl_notifier);
	ipqnl = netlink_kernel_create(&init_net, NETLINK_FIREWALL, 0,
				      ipq_rcv_skb, NULL, THIS_MODULE);
	if (ipqnl == NULL) {
		printk(KERN_ERR "ip_queue: failed to create netlink socket\n");
		goto cleanup_netlink_notifier;
	}

	proc = create_proc_entry(IPQ_PROC_FS_NAME, 0, init_net.proc_net);
	if (proc) {
		proc->owner = THIS_MODULE;
		proc->proc_fops = &ip_queue_proc_fops;
	} else {
		printk(KERN_ERR "ip_queue: failed to create proc entry\n");
		goto cleanup_ipqnl;
	}

	register_netdevice_notifier(&ipq_dev_notifier);
	ipq_sysctl_header = register_sysctl_table(ipq_root_table);

	status = nf_register_queue_handler(PF_INET, &nfqh);
	if (status < 0) {
		printk(KERN_ERR "ip_queue: failed to register queue handler\n");
		goto cleanup_sysctl;
	}
	return status;

cleanup_sysctl:
	unregister_sysctl_table(ipq_sysctl_header);
	unregister_netdevice_notifier(&ipq_dev_notifier);
	proc_net_remove(&init_net, IPQ_PROC_FS_NAME);

cleanup_ipqnl:
	sock_release(ipqnl->sk_socket);
	mutex_lock(&ipqnl_mutex);
	mutex_unlock(&ipqnl_mutex);

cleanup_netlink_notifier:
	netlink_unregister_notifier(&ipq_nl_notifier);
	return status;
}

static void __exit ip_queue_fini(void)
{
	nf_unregister_queue_handlers(&nfqh);
	synchronize_net();
	ipq_flush(NULL, 0);

	unregister_sysctl_table(ipq_sysctl_header);
	unregister_netdevice_notifier(&ipq_dev_notifier);
	proc_net_remove(&init_net, IPQ_PROC_FS_NAME);

	sock_release(ipqnl->sk_socket);
	mutex_lock(&ipqnl_mutex);
	mutex_unlock(&ipqnl_mutex);

	netlink_unregister_notifier(&ipq_nl_notifier);
}

MODULE_DESCRIPTION("IPv4 packet queue handler");
MODULE_AUTHOR("James Morris <jmorris@intercode.com.au>");
MODULE_LICENSE("GPL");

module_init(ip_queue_init);
module_exit(ip_queue_fini);