2 * Linux NET3: IP/IP protocol decoder.
4 * Version: $Id: ipip.c,v 1.50 2001/10/02 02:22:36 davem Exp $
7 * Sam Lantinga (slouken@cs.ucdavis.edu) 02/01/95
10 * Alan Cox : Merged and made usable non modular (it's so tiny it's silly as
11 * a module taking up 2 pages).
12 * Alan Cox : Fixed bug with 1.3.18 and IPIP not working (now needs to set skb->h.iph)
13 * to keep ip_forward happy.
14 * Alan Cox : More fixes for 1.3.21, and firewall fix. Maybe this will work soon 8).
15 * Kai Schulte : Fixed #defines for IP_FIREWALL->FIREWALL
16 * David Woodhouse : Perform some basic ICMP handling.
17 * IPIP Routing without decapsulation.
18 * Carlos Picoto : GRE over IP support
19 * Alexey Kuznetsov: Reworked. Really, now it is truncated version of ipv4/ip_gre.c.
20 * I do not want to merge them together.
22 * This program is free software; you can redistribute it and/or
23 * modify it under the terms of the GNU General Public License
24 * as published by the Free Software Foundation; either version
25 * 2 of the License, or (at your option) any later version.
29 /* tunnel.c: an IP tunnel driver
31 The purpose of this driver is to provide an IP tunnel through
32 which you can tunnel network traffic transparently across subnets.
34 This was written by looking at Nick Holloway's dummy driver
35 Thanks for the great code!
37 -Sam Lantinga (slouken@cs.ucdavis.edu) 02/01/95
40 Cleaned up the code a little and added some pre-1.3.0 tweaks.
41 dev->hard_header/hard_header_len changed to use no headers.
42 Comments/bracketing tweaked.
43 Made the tunnels use dev->name not tunnel: when error reporting.
46 -Alan Cox (Alan.Cox@linux.org) 21 March 95
49 Changed to tunnel to destination gateway in addition to the
50 tunnel's pointopoint address
51 Almost completely rewritten
52 Note: There is currently no firewall or ICMP handling done.
54 -Sam Lantinga (slouken@cs.ucdavis.edu) 02/13/96
58 /* Things I wish I had known when writing the tunnel driver:
60 When the tunnel_xmit() function is called, the skb contains the
61 packet to be sent (plus a great deal of extra info), and dev
62 contains the tunnel device that _we_ are.
64 When we are passed a packet, we are expected to fill in the
65 source address with our source IP address.
67 What is the proper way to allocate, copy and free a buffer?
68 After you allocate it, it is a "0 length" chunk of memory
69 starting at zero. If you want to add headers to the buffer
70 later, you'll have to call "skb_reserve(skb, amount)" with
71 the amount of memory you want reserved. Then, you call
72 "skb_put(skb, amount)" with the amount of space you want in
73 the buffer. skb_put() returns a pointer to the top (#0) of
74 that buffer. skb->len is set to the amount of space you have
75 "allocated" with skb_put(). You can then write up to skb->len
76 bytes to that buffer. If you need more, you can call skb_put()
77 again with the additional amount of space you need. You can
78 find out how much more space you can allocate by calling
80 Now, to add header space, call "skb_push(skb, header_len)".
81 This creates space at the beginning of the buffer and returns
82 a pointer to this new space. If later you need to strip a
83 header from a buffer, call "skb_pull(skb, header_len)".
84 skb_headroom() will return how much space is left at the top
85 of the buffer (before the main data). Remember, this headroom
86 space must be reserved before the skb_put() function is called.
90 This version of net/ipv4/ipip.c is cloned of net/ipv4/ip_gre.c
92 For comments look at net/ipv4/ip_gre.c --ANK
96 #include <linux/capability.h>
97 #include <linux/module.h>
98 #include <linux/types.h>
99 #include <linux/kernel.h>
100 #include <asm/uaccess.h>
101 #include <linux/skbuff.h>
102 #include <linux/netdevice.h>
103 #include <linux/in.h>
104 #include <linux/tcp.h>
105 #include <linux/udp.h>
106 #include <linux/if_arp.h>
107 #include <linux/mroute.h>
108 #include <linux/init.h>
109 #include <linux/netfilter_ipv4.h>
110 #include <linux/if_ether.h>
112 #include <net/sock.h>
114 #include <net/icmp.h>
115 #include <net/ipip.h>
116 #include <net/inet_ecn.h>
117 #include <net/xfrm.h>
118 #include <net/net_namespace.h>
119 #include <net/netns/generic.h>
/*
 * Hash an IPv4 address into one of the 16 tunnel hash buckets by XOR-folding
 * the low byte's two nibbles.  The argument is parenthesized so the macro
 * also expands correctly for expression arguments (e.g. HASH(a ^ b));
 * identical behavior for the simple lvalue arguments used in this file.
 */
122 #define HASH(addr) (((__force u32)(addr)^((__force u32)(addr)>>4))&0xF)
/* Slot index assigned by the generic per-net-namespace storage. */
124 static int ipip_net_id;
/*
 * Fallback ("tunl0") device.  NOTE(review): elsewhere this is accessed as
 * ipn->fb_tunnel_dev, so this looks like a struct ipip_net member whose
 * enclosing declaration lines are not visible here -- confirm.
 */
126 struct net_device *fb_tunnel_dev;
129 static int ipip_fb_tunnel_init(struct net_device *dev);
130 static int ipip_tunnel_init(struct net_device *dev);
131 static void ipip_tunnel_setup(struct net_device *dev);
/*
 * Tunnel hash tables, keyed by which outer endpoints are configured:
 * remote+local, remote only, local only, and the single wildcard slot.
 */
133 static struct ip_tunnel *tunnels_r_l[HASH_SIZE];
134 static struct ip_tunnel *tunnels_r[HASH_SIZE];
135 static struct ip_tunnel *tunnels_l[HASH_SIZE];
136 static struct ip_tunnel *tunnels_wc[1];
/* Indexed by "prio": 0 = wildcard, 1 = local, 2 = remote, 3 = both. */
137 static struct ip_tunnel **tunnels[4] = { tunnels_wc, tunnels_l, tunnels_r, tunnels_r_l };
/* Protects the hash tables above: writers take write_lock_bh, readers read_lock. */
139 static DEFINE_RWLOCK(ipip_lock);
/*
 * Find the tunnel matching an outer (remote, local) address pair.
 * Probe order: exact remote+local match, remote-only, local-only, then
 * the wildcard fallback tunnel; only IFF_UP devices match.  Caller must
 * hold ipip_lock for reading.
 * NOTE(review): @net is unused while the global hash tables are searched;
 * looks like a transitional state of the per-netns conversion -- confirm.
 */
141 static struct ip_tunnel * ipip_tunnel_lookup(struct net *net,
142 __be32 remote, __be32 local)
144 unsigned h0 = HASH(remote);
145 unsigned h1 = HASH(local);
/* Fully specified tunnels first. */
148 for (t = tunnels_r_l[h0^h1]; t; t = t->next) {
149 if (local == t->parms.iph.saddr &&
150 remote == t->parms.iph.daddr && (t->dev->flags&IFF_UP))
/* Remote endpoint only. */
153 for (t = tunnels_r[h0]; t; t = t->next) {
154 if (remote == t->parms.iph.daddr && (t->dev->flags&IFF_UP))
/* Local endpoint only. */
157 for (t = tunnels_l[h1]; t; t = t->next) {
158 if (local == t->parms.iph.saddr && (t->dev->flags&IFF_UP))
/* Last resort: the wildcard fallback tunnel. */
161 if ((t = tunnels_wc[0]) != NULL && (t->dev->flags&IFF_UP))
/*
 * Return the address of the hash-chain head for a tunnel described by
 * @parms.  The table ("prio") and hash slot are derived from which of
 * the outer saddr/daddr are set (computation lines elided in this view).
 */
166 static struct ip_tunnel **__ipip_bucket(struct ipip_net *ipn,
167 struct ip_tunnel_parm *parms)
169 __be32 remote = parms->iph.daddr;
170 __be32 local = parms->iph.saddr;
182 return &tunnels[prio][h];
/* Convenience wrapper: bucket for an existing tunnel's parameters. */
185 static inline struct ip_tunnel **ipip_bucket(struct ipip_net *ipn,
188 return __ipip_bucket(ipn, &t->parms);
/*
 * Remove tunnel @t from its hash chain.  Walks the chain to find the
 * link pointing at @t and splices it out under ipip_lock (write, bh).
 */
191 static void ipip_tunnel_unlink(struct ipip_net *ipn, struct ip_tunnel *t)
193 struct ip_tunnel **tp;
195 for (tp = ipip_bucket(ipn, t); *tp; tp = &(*tp)->next) {
197 write_lock_bh(&ipip_lock);
199 write_unlock_bh(&ipip_lock);
/* Insert tunnel @t at the head of its hash chain under ipip_lock (write, bh). */
205 static void ipip_tunnel_link(struct ipip_net *ipn, struct ip_tunnel *t)
207 struct ip_tunnel **tp = ipip_bucket(ipn, t);
210 write_lock_bh(&ipip_lock);
212 write_unlock_bh(&ipip_lock);
/*
 * Look up a tunnel by its exact (saddr, daddr) parameters; when @create
 * is set and no match exists, allocate and register a new tunnel device
 * (named from parms->name, or "tunl%d" auto-numbered) and link it into
 * the hash table.  Returns the tunnel or NULL.
 */
215 static struct ip_tunnel * ipip_tunnel_locate(struct net *net,
216 struct ip_tunnel_parm *parms, int create)
218 __be32 remote = parms->iph.daddr;
219 __be32 local = parms->iph.saddr;
220 struct ip_tunnel *t, **tp, *nt;
221 struct net_device *dev;
223 struct ipip_net *ipn = net_generic(net, ipip_net_id);
/* Existing tunnel with the same endpoints? */
225 for (tp = __ipip_bucket(ipn, parms); (t = *tp) != NULL; tp = &t->next) {
226 if (local == t->parms.iph.saddr && remote == t->parms.iph.daddr)
/* Not found: build a new device.  Empty name falls back to "tunl%d". */
233 strlcpy(name, parms->name, IFNAMSIZ);
235 sprintf(name, "tunl%%d");
237 dev = alloc_netdev(sizeof(*t), name, ipip_tunnel_setup);
/* Resolve a "%d"-style template to a free name before registering. */
241 if (strchr(name, '%')) {
242 if (dev_alloc_name(dev, name) < 0)
246 nt = netdev_priv(dev);
247 dev->init = ipip_tunnel_init;
250 if (register_netdevice(dev) < 0)
254 ipip_tunnel_link(ipn, nt);
/*
 * Device teardown hook: the fallback device just clears the wildcard
 * slot; any other tunnel is unlinked from its hash chain.
 */
262 static void ipip_tunnel_uninit(struct net_device *dev)
264 struct net *net = dev_net(dev);
265 struct ipip_net *ipn = net_generic(net, ipip_net_id);
267 if (dev == ipn->fb_tunnel_dev) {
268 write_lock_bh(&ipip_lock);
269 tunnels_wc[0] = NULL;
270 write_unlock_bh(&ipip_lock);
272 ipip_tunnel_unlink(ipn, netdev_priv(dev));
/*
 * ICMP error handler for encapsulated packets.  Two strategies:
 * the default (#ifndef I_WISH_WORLD_WERE_PERFECT) path only rate-limits
 * and records the error against the matching tunnel, because real-world
 * routers return just 8 bytes of payload; the alternate path tries to
 * reconstruct a fake inner skb and relay a translated ICMP to the
 * original sender.
 */
276 static int ipip_err(struct sk_buff *skb, u32 info)
278 #ifndef I_WISH_WORLD_WERE_PERFECT
280 /* It is not :-( All the routers (except for Linux) return only
281 8 bytes of packet payload. It means, that precise relaying of
282 ICMP in the real Internet is absolutely infeasible.
/* iph here is the outer (tunnel) header quoted inside the ICMP error. */
284 struct iphdr *iph = (struct iphdr*)skb->data;
285 const int type = icmp_hdr(skb)->type;
286 const int code = icmp_hdr(skb)->code;
292 case ICMP_PARAMETERPROB:
295 case ICMP_DEST_UNREACH:
298 case ICMP_PORT_UNREACH:
299 /* Impossible event. */
301 case ICMP_FRAG_NEEDED:
302 /* Soft state for pmtu is maintained by IP core. */
305 /* All others are translated to HOST_UNREACH.
306 rfc2003 contains "deep thoughts" about NET_UNREACH,
307 I believe they are just ether pollution. --ANK
312 case ICMP_TIME_EXCEEDED:
313 if (code != ICMP_EXC_TTL)
/* Note reversed addresses: the error quotes a packet WE sent. */
320 read_lock(&ipip_lock);
321 t = ipip_tunnel_lookup(dev_net(skb->dev), iph->daddr, iph->saddr);
322 if (t == NULL || t->parms.iph.daddr == 0)
326 if (t->parms.iph.ttl == 0 && type == ICMP_TIME_EXCEEDED)
/* Rate-limit error bookkeeping.
   NOTE(review): raw jiffies subtraction; time_before() would be
   wrap-safe -- confirm against tree conventions. */
329 if (jiffies - t->err_time < IPTUNNEL_ERR_TIMEO)
333 t->err_time = jiffies;
335 read_unlock(&ipip_lock);
/* ---- alternate, "perfect world" relay path below ---- */
338 struct iphdr *iph = (struct iphdr*)dp;
339 int hlen = iph->ihl<<2;
341 const int type = icmp_hdr(skb)->type;
342 const int code = icmp_hdr(skb)->code;
347 struct sk_buff *skb2;
/* Need at least the full outer header plus an inner IP header. */
351 if (len < hlen + sizeof(struct iphdr))
353 eiph = (struct iphdr*)(dp + hlen);
358 case ICMP_PARAMETERPROB:
359 n = ntohl(icmp_hdr(skb)->un.gateway) >> 24;
363 /* So... This guy found something strange INSIDE encapsulated
364 packet. Well, he is fool, but what can we do ?
/* Re-point the problem offset past the stripped outer header. */
366 rel_type = ICMP_PARAMETERPROB;
367 rel_info = htonl((n - hlen) << 24);
370 case ICMP_DEST_UNREACH:
373 case ICMP_PORT_UNREACH:
374 /* Impossible event. */
376 case ICMP_FRAG_NEEDED:
377 /* And it is the only really necessary thing :-) */
378 n = ntohs(icmp_hdr(skb)->un.frag.mtu);
382 /* BSD 4.2 MORE DOES NOT EXIST IN NATURE. */
383 if (n > ntohs(eiph->tot_len))
388 /* All others are translated to HOST_UNREACH.
389 rfc2003 contains "deep thoughts" about NET_UNREACH,
390 I believe, it is just ether pollution. --ANK
392 rel_type = ICMP_DEST_UNREACH;
393 rel_code = ICMP_HOST_UNREACH;
397 case ICMP_TIME_EXCEEDED:
398 if (code != ICMP_EXC_TTL)
403 /* Prepare fake skb to feed it to icmp_send */
404 skb2 = skb_clone(skb, GFP_ATOMIC);
407 dst_release(skb2->dst);
/* Make the inner header the network header of the clone. */
409 skb_pull(skb2, skb->data - (u8*)eiph);
410 skb_reset_network_header(skb2);
412 /* Try to guess incoming interface */
413 memset(&fl, 0, sizeof(fl));
414 fl.fl4_daddr = eiph->saddr;
415 fl.fl4_tos = RT_TOS(eiph->tos);
416 fl.proto = IPPROTO_IPIP;
417 if (ip_route_output_key(&init_net, &rt, &key)) {
421 skb2->dev = rt->u.dst.dev;
423 /* route "incoming" packet */
424 if (rt->rt_flags&RTCF_LOCAL) {
427 fl.fl4_daddr = eiph->daddr;
428 fl.fl4_src = eiph->saddr;
429 fl.fl4_tos = eiph->tos;
/* Only accept routes that go back out through a tunnel device. */
430 if (ip_route_output_key(&init_net, &rt, &fl) ||
431 rt->u.dst.dev->type != ARPHRD_TUNNEL) {
438 if (ip_route_input(skb2, eiph->daddr, eiph->saddr, eiph->tos, skb2->dev) ||
439 skb2->dst->dev->type != ARPHRD_TUNNEL) {
445 /* change mtu on this route */
446 if (type == ICMP_DEST_UNREACH && code == ICMP_FRAG_NEEDED) {
447 if (n > dst_mtu(skb2->dst)) {
451 skb2->dst->ops->update_pmtu(skb2->dst, n);
452 } else if (type == ICMP_TIME_EXCEEDED) {
453 struct ip_tunnel *t = netdev_priv(skb2->dev);
454 if (t->parms.iph.ttl) {
455 rel_type = ICMP_DEST_UNREACH;
456 rel_code = ICMP_HOST_UNREACH;
/* Relay the translated error towards the inner source. */
460 icmp_send(skb2, rel_type, rel_code, rel_info);
/*
 * RFC 3168 ECN decapsulation: if the outer header carried Congestion
 * Experienced, propagate CE into the inner header.
 */
466 static inline void ipip_ecn_decapsulate(const struct iphdr *outer_iph,
469 struct iphdr *inner_iph = ip_hdr(skb);
471 if (INET_ECN_is_ce(outer_iph->tos))
472 IP_ECN_set_ce(inner_iph);
/*
 * Receive path: look up the tunnel for the outer (saddr, daddr) pair,
 * run the IPsec policy check, strip the outer header (the old network
 * header becomes the mac header), account rx stats, retarget the skb at
 * the tunnel device and propagate ECN.
 */
475 static int ipip_rcv(struct sk_buff *skb)
477 struct ip_tunnel *tunnel;
478 const struct iphdr *iph = ip_hdr(skb);
480 read_lock(&ipip_lock);
481 if ((tunnel = ipip_tunnel_lookup(dev_net(skb->dev),
482 iph->saddr, iph->daddr)) != NULL) {
483 if (!xfrm4_policy_check(NULL, XFRM_POLICY_IN, skb)) {
484 read_unlock(&ipip_lock);
/* Decapsulate: inner IP header becomes the network header. */
491 skb->mac_header = skb->network_header;
492 skb_reset_network_header(skb);
493 skb->protocol = htons(ETH_P_IP);
494 skb->pkt_type = PACKET_HOST;
496 tunnel->stat.rx_packets++;
497 tunnel->stat.rx_bytes += skb->len;
498 skb->dev = tunnel->dev;
499 dst_release(skb->dst);
502 ipip_ecn_decapsulate(iph, skb);
504 read_unlock(&ipip_lock);
/* No matching tunnel: fall through (unlock and reject). */
507 read_unlock(&ipip_lock);
513 * This function assumes it is being called from dev_queue_xmit()
514 * and that skb is filled properly by that function.
/*
 * Transmit path: resolve a route to the tunnel endpoint, enforce PMTU
 * (emitting FRAG_NEEDED for DF'd oversized packets), ensure headroom,
 * then push and fill the outer IPIP header.  A recursion counter guards
 * against a tunnel routed through itself.
 */
517 static int ipip_tunnel_xmit(struct sk_buff *skb, struct net_device *dev)
519 struct ip_tunnel *tunnel = netdev_priv(dev);
520 struct net_device_stats *stats = &tunnel->stat;
521 struct iphdr *tiph = &tunnel->parms.iph;
522 u8 tos = tunnel->parms.iph.tos;
523 __be16 df = tiph->frag_off;
524 struct rtable *rt; /* Route to the other host */
525 struct net_device *tdev; /* Device to other host */
526 struct iphdr *old_iph = ip_hdr(skb);
527 struct iphdr *iph; /* Our new IP header */
528 unsigned int max_headroom; /* The extra header space needed */
529 __be32 dst = tiph->daddr;
/* Re-entered via our own output path: drop as a collision. */
532 if (tunnel->recursion++) {
533 tunnel->stat.collisions++;
/* Only IPv4 payloads are encapsulated by this driver. */
537 if (skb->protocol != htons(ETH_P_IP))
/* NBMA-style tunnel (no fixed daddr): take the destination from the
   packet's own route / gateway. */
545 if ((rt = skb->rtable) == NULL) {
546 tunnel->stat.tx_fifo_errors++;
549 if ((dst = rt->rt_gateway) == 0)
554 struct flowi fl = { .oif = tunnel->parms.link,
557 .saddr = tiph->saddr,
558 .tos = RT_TOS(tos) } },
559 .proto = IPPROTO_IPIP };
560 if (ip_route_output_key(&init_net, &rt, &fl)) {
561 tunnel->stat.tx_carrier_errors++;
565 tdev = rt->u.dst.dev;
/* Route points back at ourselves: refuse (collision). */
569 tunnel->stat.collisions++;
/* Effective MTU: outer path MTU minus encapsulation overhead. */
574 mtu = dst_mtu(&rt->u.dst) - sizeof(struct iphdr);
576 mtu = skb->dst ? dst_mtu(skb->dst) : dev->mtu;
579 tunnel->stat.collisions++;
584 skb->dst->ops->update_pmtu(skb->dst, mtu);
/* Honour the inner DF bit in the outer header. */
586 df |= (old_iph->frag_off&htons(IP_DF));
588 if ((old_iph->frag_off&htons(IP_DF)) && mtu < ntohs(old_iph->tot_len)) {
589 icmp_send(skb, ICMP_DEST_UNREACH, ICMP_FRAG_NEEDED, htonl(mtu));
/* While a recent tunnel error is pending, signal link failure. */
594 if (tunnel->err_count > 0) {
595 if (jiffies - tunnel->err_time < IPTUNNEL_ERR_TIMEO) {
597 dst_link_failure(skb);
599 tunnel->err_count = 0;
603 * Okay, now see if we can stuff it in the buffer as-is.
605 max_headroom = (LL_RESERVED_SPACE(tdev)+sizeof(struct iphdr));
607 if (skb_headroom(skb) < max_headroom || skb_shared(skb) ||
608 (skb_cloned(skb) && !skb_clone_writable(skb, 0))) {
609 struct sk_buff *new_skb = skb_realloc_headroom(skb, max_headroom);
618 skb_set_owner_w(new_skb, skb->sk);
621 old_iph = ip_hdr(skb);
/* Inner header becomes transport header; push the outer header. */
624 skb->transport_header = skb->network_header;
625 skb_push(skb, sizeof(struct iphdr));
626 skb_reset_network_header(skb);
627 memset(&(IPCB(skb)->opt), 0, sizeof(IPCB(skb)->opt));
628 IPCB(skb)->flags &= ~(IPSKB_XFRM_TUNNEL_SIZE | IPSKB_XFRM_TRANSFORMED |
630 dst_release(skb->dst);
631 skb->dst = &rt->u.dst;
634 * Push down and install the IPIP header.
639 iph->ihl = sizeof(struct iphdr)>>2;
641 iph->protocol = IPPROTO_IPIP;
642 iph->tos = INET_ECN_encapsulate(tos, old_iph->tos);
643 iph->daddr = rt->rt_dst;
644 iph->saddr = rt->rt_src;
/* TTL 0 in tunnel config means "inherit the inner TTL". */
646 if ((iph->ttl = tiph->ttl) == 0)
647 iph->ttl = old_iph->ttl;
656 dst_link_failure(skb);
/*
 * Derive the tunnel device's link-layer properties (hard_header_len,
 * mtu, iflink, POINTOPOINT flag) from the underlying device the tunnel
 * route resolves to, falling back to parms.link by ifindex.
 */
664 static void ipip_tunnel_bind_dev(struct net_device *dev)
666 struct net_device *tdev = NULL;
667 struct ip_tunnel *tunnel;
670 tunnel = netdev_priv(dev);
671 iph = &tunnel->parms.iph;
674 struct flowi fl = { .oif = tunnel->parms.link,
676 { .daddr = iph->daddr,
678 .tos = RT_TOS(iph->tos) } },
679 .proto = IPPROTO_IPIP };
681 if (!ip_route_output_key(&init_net, &rt, &fl)) {
682 tdev = rt->u.dst.dev;
685 dev->flags |= IFF_POINTOPOINT;
/* No route: fall back to the explicitly configured link ifindex. */
688 if (!tdev && tunnel->parms.link)
689 tdev = __dev_get_by_index(&init_net, tunnel->parms.link);
/* Account for the extra outer IP header in header length and MTU. */
692 dev->hard_header_len = tdev->hard_header_len + sizeof(struct iphdr);
693 dev->mtu = tdev->mtu - sizeof(struct iphdr);
695 dev->iflink = tunnel->parms.link;
/*
 * ioctl handler implementing SIOCGETTUNNEL (read parms), SIOCADDTUNNEL/
 * SIOCCHGTUNNEL (create or reconfigure, CAP_NET_ADMIN required) and
 * SIOCDELTUNNEL (destroy, CAP_NET_ADMIN; the fallback device itself may
 * not be deleted).  Parameters are exchanged via ifr->ifr_ifru.ifru_data.
 */
699 ipip_tunnel_ioctl (struct net_device *dev, struct ifreq *ifr, int cmd)
702 struct ip_tunnel_parm p;
704 struct net *net = dev_net(dev);
705 struct ipip_net *ipn = net_generic(net, ipip_net_id);
/* GET on the fallback device looks up by user-supplied parms;
   on a real tunnel it just reports that tunnel's own parms. */
710 if (dev == ipn->fb_tunnel_dev) {
711 if (copy_from_user(&p, ifr->ifr_ifru.ifru_data, sizeof(p))) {
715 t = ipip_tunnel_locate(net, &p, 0);
718 t = netdev_priv(dev);
719 memcpy(&p, &t->parms, sizeof(p));
720 if (copy_to_user(ifr->ifr_ifru.ifru_data, &p, sizeof(p)))
727 if (!capable(CAP_NET_ADMIN))
731 if (copy_from_user(&p, ifr->ifr_ifru.ifru_data, sizeof(p)))
/* Sanity-check user parms: IPv4, IPIP protocol, no options,
   no frag bits other than DF. */
735 if (p.iph.version != 4 || p.iph.protocol != IPPROTO_IPIP ||
736 p.iph.ihl != 5 || (p.iph.frag_off&htons(~IP_DF)))
739 p.iph.frag_off |= htons(IP_DF);
741 t = ipip_tunnel_locate(net, &p, cmd == SIOCADDTUNNEL);
743 if (dev != ipn->fb_tunnel_dev && cmd == SIOCCHGTUNNEL) {
/* A change must not flip the device between p-t-p and not. */
750 if (((dev->flags&IFF_POINTOPOINT) && !p.iph.daddr) ||
751 (!(dev->flags&IFF_POINTOPOINT) && p.iph.daddr)) {
/* Rehash the tunnel under its new endpoints. */
755 t = netdev_priv(dev);
756 ipip_tunnel_unlink(ipn, t);
757 t->parms.iph.saddr = p.iph.saddr;
758 t->parms.iph.daddr = p.iph.daddr;
759 memcpy(dev->dev_addr, &p.iph.saddr, 4);
760 memcpy(dev->broadcast, &p.iph.daddr, 4);
761 ipip_tunnel_link(ipn, t);
762 netdev_state_change(dev);
768 if (cmd == SIOCCHGTUNNEL) {
769 t->parms.iph.ttl = p.iph.ttl;
770 t->parms.iph.tos = p.iph.tos;
771 t->parms.iph.frag_off = p.iph.frag_off;
772 if (t->parms.link != p.link) {
773 t->parms.link = p.link;
774 ipip_tunnel_bind_dev(dev);
775 netdev_state_change(dev);
778 if (copy_to_user(ifr->ifr_ifru.ifru_data, &t->parms, sizeof(p)))
781 err = (cmd == SIOCADDTUNNEL ? -ENOBUFS : -ENOENT);
786 if (!capable(CAP_NET_ADMIN))
/* DEL via the fallback device deletes by parms; never the
   fallback device itself. */
789 if (dev == ipn->fb_tunnel_dev) {
791 if (copy_from_user(&p, ifr->ifr_ifru.ifru_data, sizeof(p)))
794 if ((t = ipip_tunnel_locate(net, &p, 0)) == NULL)
797 if (t->dev == ipn->fb_tunnel_dev)
801 unregister_netdevice(dev);
/* Stats hook: tunnel statistics live in the per-device private area. */
813 static struct net_device_stats *ipip_tunnel_get_stats(struct net_device *dev)
815 return &(((struct ip_tunnel*)netdev_priv(dev))->stat);
/*
 * MTU hook: reject values below the IPv4 minimum (68) or above the
 * maximum payload once the extra outer IP header is accounted for.
 */
818 static int ipip_tunnel_change_mtu(struct net_device *dev, int new_mtu)
820 if (new_mtu < 68 || new_mtu > 0xFFF8 - sizeof(struct iphdr))
/*
 * alloc_netdev setup callback: install the tunnel's net_device ops and
 * defaults (ARPHRD_TUNNEL type, NOARP, MTU reduced by the outer header).
 */
826 static void ipip_tunnel_setup(struct net_device *dev)
828 dev->uninit = ipip_tunnel_uninit;
829 dev->hard_start_xmit = ipip_tunnel_xmit;
830 dev->get_stats = ipip_tunnel_get_stats;
831 dev->do_ioctl = ipip_tunnel_ioctl;
832 dev->change_mtu = ipip_tunnel_change_mtu;
833 dev->destructor = free_netdev;
835 dev->type = ARPHRD_TUNNEL;
836 dev->hard_header_len = LL_MAX_HEADER + sizeof(struct iphdr);
837 dev->mtu = ETH_DATA_LEN - sizeof(struct iphdr);
838 dev->flags = IFF_NOARP;
/*
 * Per-tunnel init: record the device name in parms, expose the outer
 * endpoints as dev_addr/broadcast, and bind link-layer properties to
 * the underlying device.
 */
843 static int ipip_tunnel_init(struct net_device *dev)
845 struct ip_tunnel *tunnel;
847 tunnel = netdev_priv(dev);
850 strcpy(tunnel->parms.name, dev->name);
852 memcpy(dev->dev_addr, &tunnel->parms.iph.saddr, 4);
853 memcpy(dev->broadcast, &tunnel->parms.iph.daddr, 4);
855 ipip_tunnel_bind_dev(dev);
/*
 * Init for the fallback "tunl0" device: seed its header template with
 * IPPROTO_IPIP and register it in the wildcard slot.
 */
860 static int ipip_fb_tunnel_init(struct net_device *dev)
862 struct ip_tunnel *tunnel = netdev_priv(dev);
863 struct iphdr *iph = &tunnel->parms.iph;
866 strcpy(tunnel->parms.name, dev->name);
869 iph->protocol = IPPROTO_IPIP;
873 tunnels_wc[0] = tunnel;
/* xfrm tunnel registration: err_handler relays ICMP errors (ipip_err). */
877 static struct xfrm_tunnel ipip_handler = {
879 .err_handler = ipip_err,
/* Banner printed once at module load. */
883 static char banner[] __initdata =
884 KERN_INFO "IPv4 over IPv4 tunneling driver\n";
/*
 * Per-namespace init: allocate the ipip_net state, attach it to the
 * generic netns storage, then create and register the fallback device
 * (freed on registration failure).
 */
886 static int ipip_init_net(struct net *net)
889 struct ipip_net *ipn;
892 ipn = kmalloc(sizeof(struct ipip_net), GFP_KERNEL);
896 err = net_assign_generic(net, ipip_net_id, ipn);
900 ipn->fb_tunnel_dev = alloc_netdev(sizeof(struct ip_tunnel),
903 if (!ipn->fb_tunnel_dev) {
908 ipn->fb_tunnel_dev->init = ipip_fb_tunnel_init;
909 dev_net_set(ipn->fb_tunnel_dev, net);
911 if ((err = register_netdev(ipn->fb_tunnel_dev)))
/* Error unwind: release the half-constructed device. */
917 free_netdev(ipn->fb_tunnel_dev);
/* Per-namespace teardown: unregister this namespace's fallback device. */
926 static void ipip_exit_net(struct net *net)
928 struct ipip_net *ipn;
930 ipn = net_generic(net, ipip_net_id);
932 unregister_netdevice(ipn->fb_tunnel_dev);
/* Hooks invoked on network-namespace creation/destruction. */
937 static struct pernet_operations ipip_net_ops = {
938 .init = ipip_init_net,
939 .exit = ipip_exit_net,
/*
 * Module init: claim the IPIP xfrm tunnel handler, then register the
 * per-netns operations; the handler is deregistered if that fails.
 */
942 static int __init ipip_init(void)
948 if (xfrm4_tunnel_register(&ipip_handler, AF_INET)) {
949 printk(KERN_INFO "ipip init: can't register tunnel\n");
953 err = register_pernet_gen_device(&ipip_net_id, &ipip_net_ops);
955 xfrm4_tunnel_deregister(&ipip_handler, AF_INET);
/*
 * Unregister every remaining tunnel device in the non-wildcard tables
 * (prio 1..3); popping the chain head each iteration is safe because
 * unregistration unlinks the device from the table.
 */
960 static void __exit ipip_destroy_tunnels(void)
964 for (prio = 1; prio < 4; prio++) {
966 for (h = 0; h < HASH_SIZE; h++) {
968 while ((t = tunnels[prio][h]) != NULL)
969 unregister_netdevice(t->dev);
/*
 * Module exit: detach the xfrm handler, destroy remaining tunnels,
 * then unwind the per-netns registration.
 */
974 static void __exit ipip_fini(void)
976 if (xfrm4_tunnel_deregister(&ipip_handler, AF_INET))
977 printk(KERN_INFO "ipip close: can't deregister tunnel\n");
980 ipip_destroy_tunnels();
983 unregister_pernet_gen_device(ipip_net_id, &ipip_net_ops);
/* Module entry/exit points and license declaration. */
986 module_init(ipip_init);
987 module_exit(ipip_fini);
988 MODULE_LICENSE("GPL");