2 * INET An implementation of the TCP/IP protocol suite for the LINUX
3 * operating system. INET is implemented using the BSD Socket
4 * interface as the means of communication with the user level.
6 * IPv4 Forwarding Information Base: FIB frontend.
8 * Version: $Id: fib_frontend.c,v 1.26 2001/10/31 21:55:54 davem Exp $
10 * Authors: Alexey Kuznetsov, <kuznet@ms2.inr.ac.ru>
12 * This program is free software; you can redistribute it and/or
13 * modify it under the terms of the GNU General Public License
14 * as published by the Free Software Foundation; either version
15 * 2 of the License, or (at your option) any later version.
18 #include <linux/module.h>
19 #include <asm/uaccess.h>
20 #include <asm/system.h>
21 #include <linux/bitops.h>
22 #include <linux/capability.h>
23 #include <linux/types.h>
24 #include <linux/kernel.h>
26 #include <linux/string.h>
27 #include <linux/socket.h>
28 #include <linux/sockios.h>
29 #include <linux/errno.h>
31 #include <linux/inet.h>
32 #include <linux/inetdevice.h>
33 #include <linux/netdevice.h>
34 #include <linux/if_addr.h>
35 #include <linux/if_arp.h>
36 #include <linux/skbuff.h>
37 #include <linux/init.h>
38 #include <linux/list.h>
41 #include <net/protocol.h>
42 #include <net/route.h>
47 #include <net/ip_fib.h>
48 #include <net/rtnetlink.h>
50 #ifndef CONFIG_IP_MULTIPLE_TABLES
/*
 * fib4_rules_init - per-namespace table setup for the non-policy-routing
 * build (!CONFIG_IP_MULTIPLE_TABLES): create the LOCAL and MAIN hash
 * tables and hook them into net->ipv4.fib_table_hash under RCU.
 * NOTE(review): the error-return paths for failed allocations are elided
 * in this extract — confirm against the full file.
 */
52 static int __net_init fib4_rules_init(struct net *net)
54 struct fib_table *local_table, *main_table;
56 local_table = fib_hash_table(RT_TABLE_LOCAL);
57 if (local_table == NULL)
60 main_table = fib_hash_table(RT_TABLE_MAIN);
61 if (main_table == NULL)
/* Publish both tables at their fixed hash slots; RCU-safe insertion so
 * concurrent readers of fib_table_hash see a consistent list. */
64 hlist_add_head_rcu(&local_table->tb_hlist,
65 &net->ipv4.fib_table_hash[TABLE_LOCAL_INDEX]);
66 hlist_add_head_rcu(&main_table->tb_hlist,
67 &net->ipv4.fib_table_hash[TABLE_MAIN_INDEX]);
/*
 * fib_new_table - return the FIB table with the given id, creating and
 * registering it in the per-net hash if it does not exist yet.
 * Looks up first via fib_get_table(); allocation/registration only runs
 * when the lookup misses (the early-return line is elided in this extract).
 */
76 struct fib_table *fib_new_table(struct net *net, u32 id)
83 tb = fib_get_table(net, id);
87 tb = fib_hash_table(id);
/* Hash slot is id masked to the table-hash size (power of two). */
90 h = id & (FIB_TABLE_HASHSZ - 1);
91 hlist_add_head_rcu(&tb->tb_hlist, &net->ipv4.fib_table_hash[h]);
/*
 * fib_get_table - look up an existing FIB table by id in the per-net
 * hash. Walks the hash chain under RCU and matches on tb_id; returns
 * NULL when absent (return statements elided in this extract).
 */
95 struct fib_table *fib_get_table(struct net *net, u32 id)
98 struct hlist_node *node;
99 struct hlist_head *head;
104 h = id & (FIB_TABLE_HASHSZ - 1);
107 head = &net->ipv4.fib_table_hash[h];
/* RCU iteration: safe against concurrent fib_new_table() insertions. */
108 hlist_for_each_entry_rcu(tb, node, head, tb_hlist) {
109 if (tb->tb_id == id) {
117 #endif /* CONFIG_IP_MULTIPLE_TABLES */
/*
 * fib_flush - walk every FIB table in the namespace and ask each to
 * flush its stale entries via the tb_flush() method, accumulating the
 * count of flushed routes ("flushed" declaration is elided here; it is
 * presumably used to decide whether to flush the route cache — confirm
 * against the full file).
 */
119 static void fib_flush(struct net *net)
122 struct fib_table *tb;
123 struct hlist_node *node;
124 struct hlist_head *head;
127 for (h = 0; h < FIB_TABLE_HASHSZ; h++) {
128 head = &net->ipv4.fib_table_hash[h];
129 hlist_for_each_entry(tb, node, head, tb_hlist)
130 flushed += tb->tb_flush(tb);
138 * Find the first device with a given source address.
/*
 * ip_dev_find - find the device owning a local address by doing a
 * LOCAL-table lookup on @addr as a destination. Returns NULL if the
 * lookup fails or the result is not RTN_LOCAL. Only init_net is
 * consulted. (Reference-count handling on the returned device is
 * elided in this extract — TODO confirm a dev_hold() exists.)
 */
141 struct net_device * ip_dev_find(__be32 addr)
143 struct flowi fl = { .nl_u = { .ip4_u = { .daddr = addr } } };
144 struct fib_result res;
145 struct net_device *dev = NULL;
146 struct fib_table *local_table;
148 #ifdef CONFIG_IP_MULTIPLE_TABLES
/* Bypass policy rules: query the LOCAL table directly. */
152 local_table = fib_get_table(&init_net, RT_TABLE_LOCAL);
153 if (!local_table || local_table->tb_lookup(local_table, &fl, &res))
155 if (res.type != RTN_LOCAL)
157 dev = FIB_RES_DEV(res);
167 * Find address type as if only "dev" was present in the system. If
168 * on_dev is NULL then all interfaces are taken into consideration.
/*
 * __inet_dev_addr_type - classify @addr (RTN_LOCAL / RTN_BROADCAST /
 * RTN_MULTICAST / RTN_UNICAST) as if only @dev were present; @dev == NULL
 * considers all interfaces. Special-cases zeronet/limited-broadcast and
 * multicast before consulting the LOCAL table.
 */
170 static inline unsigned __inet_dev_addr_type(struct net *net,
171 const struct net_device *dev,
174 struct flowi fl = { .nl_u = { .ip4_u = { .daddr = addr } } };
175 struct fib_result res;
/* Default if nothing below matches. */
176 unsigned ret = RTN_BROADCAST;
177 struct fib_table *local_table;
179 if (ipv4_is_zeronet(addr) || ipv4_is_lbcast(addr))
180 return RTN_BROADCAST;
181 if (ipv4_is_multicast(addr))
182 return RTN_MULTICAST;
184 #ifdef CONFIG_IP_MULTIPLE_TABLES
188 local_table = fib_get_table(net, RT_TABLE_LOCAL);
/* A successful LOCAL-table lookup refines the result; the device filter
 * only accepts it when the route's device matches (or no filter given). */
191 if (!local_table->tb_lookup(local_table, &fl, &res)) {
192 if (!dev || dev == res.fi->fib_dev)
/* inet_addr_type - address classification over all interfaces. */
200 unsigned int inet_addr_type(struct net *net, __be32 addr)
202 return __inet_dev_addr_type(net, NULL, addr);
/* inet_dev_addr_type - address classification restricted to one device. */
205 unsigned int inet_dev_addr_type(struct net *net, const struct net_device *dev,
208 return __inet_dev_addr_type(net, dev, addr);
211 /* Given (packet source, input interface) and optional (dst, oif, tos):
212 - (main) check, that source is valid i.e. not broadcast or our local
214 - figure out what "logical" interface this packet arrived
215 and calculate "specific destination" address.
216 - check, that packet arrived from expected physical interface.
/*
 * fib_validate_source - reverse-path check for an incoming packet:
 * verify that (src, iif) is plausible, compute the "specific
 * destination" address into *spec_dst and a traffic-class tag into
 * *itag. Many intermediate lines (flow key fields, error labels,
 * rpf retry logic) are elided in this extract — comments below only
 * describe what is visible.
 */
219 int fib_validate_source(__be32 src, __be32 dst, u8 tos, int oif,
220 struct net_device *dev, __be32 *spec_dst, u32 *itag)
222 struct in_device *in_dev;
223 struct flowi fl = { .nl_u = { .ip4_u =
228 struct fib_result res;
/* Snapshot per-device state under RCU: whether it has any address,
 * and whether reverse-path filtering is enabled. */
235 in_dev = __in_dev_get_rcu(dev);
237 no_addr = in_dev->ifa_list == NULL;
238 rpf = IN_DEV_RPFILTER(in_dev);
/* Forward lookup on the reversed flow: src must resolve to a unicast
 * route for the check to proceed. */
246 if (fib_lookup(net, &fl, &res))
248 if (res.type != RTN_UNICAST)
250 *spec_dst = FIB_RES_PREFSRC(res);
251 fib_combine_itag(itag, &res);
252 #ifdef CONFIG_IP_ROUTE_MULTIPATH
/* With multipath, any of several nexthop devices may legitimately
 * carry the reply; without it the device must match exactly. */
253 if (FIB_RES_DEV(res) == dev || res.fi->fib_nhs > 1)
255 if (FIB_RES_DEV(res) == dev)
258 ret = FIB_RES_NH(res).nh_scope >= RT_SCOPE_HOST;
/* Retry constrained to the input interface. */
267 fl.oif = dev->ifindex;
270 if (fib_lookup(net, &fl, &res) == 0) {
271 if (res.type == RTN_UNICAST) {
272 *spec_dst = FIB_RES_PREFSRC(res);
273 ret = FIB_RES_NH(res).nh_scope >= RT_SCOPE_HOST;
/* Fallback spec_dst when no route constrains it. */
282 *spec_dst = inet_select_addr(dev, 0, RT_SCOPE_UNIVERSE);
/* sk_extract_addr - pull the IPv4 address out of a generic sockaddr
 * (caller guarantees it is a sockaddr_in). */
292 static inline __be32 sk_extract_addr(struct sockaddr *addr)
294 return ((struct sockaddr_in *) addr)->sin_addr.s_addr;
/*
 * put_rtax - append one u32 route attribute (RTAX_*) to the metrics
 * buffer @mx at offset @len; returns the new length. Caller must have
 * sized @mx for all attributes it will add.
 */
297 static int put_rtax(struct nlattr *mx, int len, int type, u32 value)
301 nla = (struct nlattr *) ((char *) mx + len);
302 nla->nla_type = type;
303 nla->nla_len = nla_attr_size(4);
304 *(u32 *) nla_data(nla) = value;
306 return len + nla_total_size(4);
/*
 * rtentry_to_fib_config - translate a legacy SIOCADDRT/SIOCDELRT
 * struct rtentry from userspace into the internal fib_config used by
 * the netlink path. Validates families and the netmask, resolves the
 * optional device name, gateway and per-route metrics. Several lines
 * (error returns, RTF_HOST plen handling, kzalloc failure path) are
 * elided in this extract.
 */
309 static int rtentry_to_fib_config(struct net *net, int cmd, struct rtentry *rt,
310 struct fib_config *cfg)
315 memset(cfg, 0, sizeof(*cfg));
316 cfg->fc_nlinfo.nl_net = net;
318 if (rt->rt_dst.sa_family != AF_INET)
319 return -EAFNOSUPPORT;
322 * Check mask for validity:
323 * a) it must be contiguous.
324 * b) destination must have all host bits clear.
325 * c) if application forgot to set correct family (AF_INET),
326 * reject request unless it is absolutely clear i.e.
327 * both family and mask are zero.
330 addr = sk_extract_addr(&rt->rt_dst);
331 if (!(rt->rt_flags & RTF_HOST)) {
332 __be32 mask = sk_extract_addr(&rt->rt_genmask);
/* Tolerate an unset genmask family only when both family and mask
 * are zero (rule (c) above). */
334 if (rt->rt_genmask.sa_family != AF_INET) {
335 if (mask || rt->rt_genmask.sa_family)
336 return -EAFNOSUPPORT;
339 if (bad_mask(mask, addr))
342 plen = inet_mask_len(mask);
345 cfg->fc_dst_len = plen;
348 if (cmd != SIOCDELRT) {
349 cfg->fc_nlflags = NLM_F_CREATE;
350 cfg->fc_protocol = RTPROT_BOOT;
/* Legacy metric is 1-based; internal priority is 0-based. */
354 cfg->fc_priority = rt->rt_metric - 1;
356 if (rt->rt_flags & RTF_REJECT) {
357 cfg->fc_scope = RT_SCOPE_HOST;
358 cfg->fc_type = RTN_UNREACHABLE;
362 cfg->fc_scope = RT_SCOPE_NOWHERE;
363 cfg->fc_type = RTN_UNICAST;
367 struct net_device *dev;
368 char devname[IFNAMSIZ];
370 if (copy_from_user(devname, rt->rt_dev, IFNAMSIZ-1))
/* Force NUL-termination; userspace need not terminate. */
373 devname[IFNAMSIZ-1] = 0;
374 colon = strchr(devname, ':');
377 dev = __dev_get_by_name(net, devname);
380 cfg->fc_oif = dev->ifindex;
/* "ethX:N" alias form: find the matching label to pick fc_prefsrc. */
382 struct in_ifaddr *ifa;
383 struct in_device *in_dev = __in_dev_get_rtnl(dev);
387 for (ifa = in_dev->ifa_list; ifa; ifa = ifa->ifa_next)
388 if (strcmp(ifa->ifa_label, devname) == 0)
392 cfg->fc_prefsrc = ifa->ifa_local;
396 addr = sk_extract_addr(&rt->rt_gateway);
397 if (rt->rt_gateway.sa_family == AF_INET && addr) {
/* A real gateway must itself be reachable as unicast. */
399 if (rt->rt_flags & RTF_GATEWAY &&
400 inet_addr_type(net, addr) == RTN_UNICAST)
401 cfg->fc_scope = RT_SCOPE_UNIVERSE;
404 if (cmd == SIOCDELRT)
407 if (rt->rt_flags & RTF_GATEWAY && !cfg->fc_gw)
410 if (cfg->fc_scope == RT_SCOPE_NOWHERE)
411 cfg->fc_scope = RT_SCOPE_LINK;
413 if (rt->rt_flags & (RTF_MTU | RTF_WINDOW | RTF_IRTT)) {
/* At most three u32 metrics below, hence the 3x sizing. */
417 mx = kzalloc(3 * nla_total_size(4), GFP_KERNEL);
421 if (rt->rt_flags & RTF_MTU)
/* Legacy rt_mtu carries the full MTU; advmss excludes 40 bytes of
 * IP+TCP header. */
422 len = put_rtax(mx, len, RTAX_ADVMSS, rt->rt_mtu - 40);
424 if (rt->rt_flags & RTF_WINDOW)
425 len = put_rtax(mx, len, RTAX_WINDOW, rt->rt_window);
427 if (rt->rt_flags & RTF_IRTT)
/* rt_irtt is in legacy units; shifted left 3 for internal RTT units
 * (TODO confirm scaling against the full file). */
428 len = put_rtax(mx, len, RTAX_RTT, rt->rt_irtt << 3);
431 cfg->fc_mx_len = len;
438 * Handle IP routing ioctl calls. These are used to manipulate the routing tables
/*
 * ip_rt_ioctl - entry point for the legacy routing ioctls. SIOCADDRT /
 * SIOCDELRT require CAP_NET_ADMIN, copy the rtentry from userspace,
 * convert it with rtentry_to_fib_config() and dispatch to the proper
 * table's insert/delete method. Locking and the default-case return
 * are elided in this extract.
 */
441 int ip_rt_ioctl(struct net *net, unsigned int cmd, void __user *arg)
443 struct fib_config cfg;
448 case SIOCADDRT: /* Add a route */
449 case SIOCDELRT: /* Delete a route */
450 if (!capable(CAP_NET_ADMIN))
453 if (copy_from_user(&rt, arg, sizeof(rt)))
457 err = rtentry_to_fib_config(net, cmd, &rt, &cfg);
459 struct fib_table *tb;
/* Delete only targets an existing table; add may create one. */
461 if (cmd == SIOCDELRT) {
462 tb = fib_get_table(net, cfg.fc_table);
464 err = tb->tb_delete(tb, &cfg);
468 tb = fib_new_table(net, cfg.fc_table);
470 err = tb->tb_insert(tb, &cfg);
475 /* metrics buffer (cfg.fc_mx) allocated by rtentry_to_fib_config() */
/*
 * rtm_ipv4_policy - netlink attribute validation policy for IPv4 route
 * messages: fixed-width u32 for addresses/ids, nested for metrics, and
 * a minimum length for the multipath nexthop array.
 */
484 const struct nla_policy rtm_ipv4_policy[RTA_MAX+1] = {
485 [RTA_DST] = { .type = NLA_U32 },
486 [RTA_SRC] = { .type = NLA_U32 },
487 [RTA_IIF] = { .type = NLA_U32 },
488 [RTA_OIF] = { .type = NLA_U32 },
489 [RTA_GATEWAY] = { .type = NLA_U32 },
490 [RTA_PRIORITY] = { .type = NLA_U32 },
491 [RTA_PREFSRC] = { .type = NLA_U32 },
492 [RTA_METRICS] = { .type = NLA_NESTED },
493 [RTA_MULTIPATH] = { .len = sizeof(struct rtnexthop) },
494 [RTA_PROTOINFO] = { .type = NLA_U32 },
495 [RTA_FLOW] = { .type = NLA_U32 },
/*
 * rtm_to_fib_config - translate an RTM_{NEW,DEL}ROUTE netlink message
 * into a fib_config. Validates against rtm_ipv4_policy, copies the
 * fixed rtmsg header fields, then walks the attributes. The case
 * labels of the attribute switch are elided in this extract; each
 * nla_get_* line corresponds to the matching RTA_* attribute.
 */
498 static int rtm_to_fib_config(struct net *net, struct sk_buff *skb,
499 struct nlmsghdr *nlh, struct fib_config *cfg)
505 err = nlmsg_validate(nlh, sizeof(*rtm), RTA_MAX, rtm_ipv4_policy);
509 memset(cfg, 0, sizeof(*cfg));
511 rtm = nlmsg_data(nlh);
512 cfg->fc_dst_len = rtm->rtm_dst_len;
513 cfg->fc_tos = rtm->rtm_tos;
514 cfg->fc_table = rtm->rtm_table;
515 cfg->fc_protocol = rtm->rtm_protocol;
516 cfg->fc_scope = rtm->rtm_scope;
517 cfg->fc_type = rtm->rtm_type;
518 cfg->fc_flags = rtm->rtm_flags;
519 cfg->fc_nlflags = nlh->nlmsg_flags;
/* Record requester identity so notifications can echo it. */
521 cfg->fc_nlinfo.pid = NETLINK_CB(skb).pid;
522 cfg->fc_nlinfo.nlh = nlh;
523 cfg->fc_nlinfo.nl_net = net;
525 if (cfg->fc_type > RTN_MAX) {
530 nlmsg_for_each_attr(attr, nlh, sizeof(struct rtmsg), remaining) {
531 switch (nla_type(attr)) {
533 cfg->fc_dst = nla_get_be32(attr);
536 cfg->fc_oif = nla_get_u32(attr);
539 cfg->fc_gw = nla_get_be32(attr);
542 cfg->fc_priority = nla_get_u32(attr);
545 cfg->fc_prefsrc = nla_get_be32(attr);
/* Metrics and multipath are kept as raw pointers into the message
 * payload — valid only while the skb is alive. */
548 cfg->fc_mx = nla_data(attr);
549 cfg->fc_mx_len = nla_len(attr);
552 cfg->fc_mp = nla_data(attr);
553 cfg->fc_mp_len = nla_len(attr);
556 cfg->fc_flow = nla_get_u32(attr);
/* RTA_TABLE overrides the rtm_table byte (allows ids > 255). */
559 cfg->fc_table = nla_get_u32(attr);
/*
 * inet_rtm_delroute - RTM_DELROUTE handler: parse the message into a
 * fib_config and delete from the existing table (error paths elided
 * in this extract).
 */
569 static int inet_rtm_delroute(struct sk_buff *skb, struct nlmsghdr* nlh, void *arg)
571 struct net *net = skb->sk->sk_net;
572 struct fib_config cfg;
573 struct fib_table *tb;
576 err = rtm_to_fib_config(net, skb, nlh, &cfg);
/* Delete never creates the table. */
580 tb = fib_get_table(net, cfg.fc_table);
586 err = tb->tb_delete(tb, &cfg);
/*
 * inet_rtm_newroute - RTM_NEWROUTE handler: parse and insert, creating
 * the target table if needed.
 */
591 static int inet_rtm_newroute(struct sk_buff *skb, struct nlmsghdr* nlh, void *arg)
593 struct net *net = skb->sk->sk_net;
594 struct fib_config cfg;
595 struct fib_table *tb;
598 err = rtm_to_fib_config(net, skb, nlh, &cfg);
602 tb = fib_new_table(net, cfg.fc_table);
608 err = tb->tb_insert(tb, &cfg);
/*
 * inet_dump_fib - RTM_GETROUTE dump callback: iterate every table in
 * the per-net hash and emit its routes via tb_dump(), resuming from
 * the position saved in cb->args across netlink dump continuations.
 * Requests flagged RTM_F_CLONED are redirected to the route cache dump.
 */
613 static int inet_dump_fib(struct sk_buff *skb, struct netlink_callback *cb)
615 struct net *net = skb->sk->sk_net;
617 unsigned int e = 0, s_e;
618 struct fib_table *tb;
619 struct hlist_node *node;
620 struct hlist_head *head;
623 if (nlmsg_len(cb->nlh) >= sizeof(struct rtmsg) &&
624 ((struct rtmsg *) nlmsg_data(cb->nlh))->rtm_flags & RTM_F_CLONED)
625 return ip_rt_dump(skb, cb);
/* s_h/s_e restore the (hash bucket, entry) resume point. */
630 for (h = s_h; h < FIB_TABLE_HASHSZ; h++, s_e = 0) {
632 head = &net->ipv4.fib_table_hash[h];
633 hlist_for_each_entry(tb, node, head, tb_hlist) {
/* Clear per-table cursor state (args[2..]) when starting a new table,
 * keeping args[0..1] which hold the bucket/entry position. */
637 memset(&cb->args[2], 0, sizeof(cb->args) -
638 2 * sizeof(cb->args[0]));
639 if (tb->tb_dump(tb, skb, cb) < 0)
653 /* Prepare and feed intra-kernel routing request.
654 Really, it should be netlink message, but :-( netlink
655 can be not configured, so that we feed it directly
656 to fib engine. It is legal, because all events occur
657 only when netlink is already locked.
/*
 * fib_magic - feed an intra-kernel routing request directly into the
 * FIB engine (bypassing netlink) for routes implied by interface
 * addresses: RTN_UNICAST routes go to MAIN, everything else to LOCAL.
 * Errors from tb_insert/tb_delete are deliberately ignored — these are
 * best-effort bookkeeping updates.
 */
660 static void fib_magic(int cmd, int type, __be32 dst, int dst_len, struct in_ifaddr *ifa)
662 struct net *net = ifa->ifa_dev->dev->nd_net;
663 struct fib_table *tb;
664 struct fib_config cfg = {
665 .fc_protocol = RTPROT_KERNEL,
668 .fc_dst_len = dst_len,
669 .fc_prefsrc = ifa->ifa_local,
670 .fc_oif = ifa->ifa_dev->dev->ifindex,
671 .fc_nlflags = NLM_F_CREATE | NLM_F_APPEND,
677 if (type == RTN_UNICAST)
678 tb = fib_new_table(net, RT_TABLE_MAIN);
680 tb = fib_new_table(net, RT_TABLE_LOCAL);
685 cfg.fc_table = tb->tb_id;
/* Local routes terminate on the host; everything else is link scope. */
687 if (type != RTN_LOCAL)
688 cfg.fc_scope = RT_SCOPE_LINK;
690 cfg.fc_scope = RT_SCOPE_HOST;
692 if (cmd == RTM_NEWROUTE)
693 tb->tb_insert(tb, &cfg);
695 tb->tb_delete(tb, &cfg);
/*
 * fib_add_ifaddr - install the FIB entries implied by a newly added
 * interface address: the /32 local route, explicit and network
 * broadcasts, and the connected-subnet route. Secondary addresses
 * reuse the matching primary for prefsrc.
 */
698 void fib_add_ifaddr(struct in_ifaddr *ifa)
700 struct in_device *in_dev = ifa->ifa_dev;
701 struct net_device *dev = in_dev->dev;
702 struct in_ifaddr *prim = ifa;
703 __be32 mask = ifa->ifa_mask;
704 __be32 addr = ifa->ifa_local;
705 __be32 prefix = ifa->ifa_address&mask;
/* A secondary must have a primary covering the same prefix. */
707 if (ifa->ifa_flags&IFA_F_SECONDARY) {
708 prim = inet_ifa_byprefix(in_dev, prefix, mask);
710 printk(KERN_WARNING "fib_add_ifaddr: bug: prim == NULL\n");
/* The address itself is always a /32 local route. */
715 fib_magic(RTM_NEWROUTE, RTN_LOCAL, addr, 32, prim);
717 if (!(dev->flags&IFF_UP))
720 /* Add broadcast address, if it is explicitly assigned. */
721 if (ifa->ifa_broadcast && ifa->ifa_broadcast != htonl(0xFFFFFFFF))
722 fib_magic(RTM_NEWROUTE, RTN_BROADCAST, ifa->ifa_broadcast, 32, prim);
/* Connected-subnet route: only for primaries with a real prefix;
 * loopback subnets are local rather than unicast. */
724 if (!ipv4_is_zeronet(prefix) && !(ifa->ifa_flags&IFA_F_SECONDARY) &&
725 (prefix != addr || ifa->ifa_prefixlen < 32)) {
726 fib_magic(RTM_NEWROUTE, dev->flags&IFF_LOOPBACK ? RTN_LOCAL :
727 RTN_UNICAST, prefix, ifa->ifa_prefixlen, prim);
729 /* Add network specific broadcasts, when it takes a sense */
730 if (ifa->ifa_prefixlen < 31) {
731 fib_magic(RTM_NEWROUTE, RTN_BROADCAST, prefix, 32, prim);
732 fib_magic(RTM_NEWROUTE, RTN_BROADCAST, prefix|~mask, 32, prim);
/*
 * fib_del_ifaddr - remove FIB entries for a deleted interface address.
 * Unlike add, deletion must first scan the remaining address list so
 * that routes still justified by another address are kept (the ok /
 * LOCAL_OK bookkeeping between the scan and the deletes is partially
 * elided in this extract).
 */
737 static void fib_del_ifaddr(struct in_ifaddr *ifa)
739 struct in_device *in_dev = ifa->ifa_dev;
740 struct net_device *dev = in_dev->dev;
741 struct in_ifaddr *ifa1;
742 struct in_ifaddr *prim = ifa;
743 __be32 brd = ifa->ifa_address|~ifa->ifa_mask;
744 __be32 any = ifa->ifa_address&ifa->ifa_mask;
/* Primaries take their connected-subnet route with them. */
751 if (!(ifa->ifa_flags&IFA_F_SECONDARY))
752 fib_magic(RTM_DELROUTE, dev->flags&IFF_LOOPBACK ? RTN_LOCAL :
753 RTN_UNICAST, any, ifa->ifa_prefixlen, prim);
755 prim = inet_ifa_byprefix(in_dev, any, ifa->ifa_mask);
757 printk(KERN_WARNING "fib_del_ifaddr: bug: prim == NULL\n");
762 /* Deletion is more complicated than add.
763 We should take care of not to delete too much :-)
765 Scan address list to be sure that addresses are really gone.
768 for (ifa1 = in_dev->ifa_list; ifa1; ifa1 = ifa1->ifa_next) {
769 if (ifa->ifa_local == ifa1->ifa_local)
771 if (ifa->ifa_broadcast == ifa1->ifa_broadcast)
773 if (brd == ifa1->ifa_broadcast)
775 if (any == ifa1->ifa_broadcast)
/* Delete only the routes no surviving address still justifies. */
780 fib_magic(RTM_DELROUTE, RTN_BROADCAST, ifa->ifa_broadcast, 32, prim);
782 fib_magic(RTM_DELROUTE, RTN_BROADCAST, brd, 32, prim);
784 fib_magic(RTM_DELROUTE, RTN_BROADCAST, any, 32, prim);
785 if (!(ok&LOCAL_OK)) {
786 fib_magic(RTM_DELROUTE, RTN_LOCAL, ifa->ifa_local, 32, prim);
788 /* Check, that this local address finally disappeared. */
789 if (inet_addr_type(dev->nd_net, ifa->ifa_local) != RTN_LOCAL) {
790 /* And the last, but not the least thing.
791 We must flush stray FIB entries.
793 First of all, we scan fib_info list searching
794 for stray nexthop entries, then ignite fib_flush.
796 if (fib_sync_down(ifa->ifa_local, NULL, 0))
797 fib_flush(dev->nd_net);
/*
 * nl_fib_lookup - perform one FIB lookup on behalf of a NETLINK_FIB_LOOKUP
 * request, filling the result fields of @frn in place. The locking
 * around the lookup and the tb == NULL handling are elided in this
 * extract.
 */
806 static void nl_fib_lookup(struct fib_result_nl *frn, struct fib_table *tb )
809 struct fib_result res;
/* Build the flow key from the caller-supplied mark/addr/scope. */
810 struct flowi fl = { .mark = frn->fl_mark,
811 .nl_u = { .ip4_u = { .daddr = frn->fl_addr,
813 .scope = frn->fl_scope } } };
815 #ifdef CONFIG_IP_MULTIPLE_TABLES
823 frn->tb_id = tb->tb_id;
824 frn->err = tb->tb_lookup(tb, &fl, &res);
/* On success, copy the interesting result fields back to userspace's
 * request structure. */
827 frn->prefixlen = res.prefixlen;
828 frn->nh_sel = res.nh_sel;
829 frn->type = res.type;
830 frn->scope = res.scope;
/*
 * nl_fib_input - receive handler for the NETLINK_FIB_LOOKUP socket:
 * validate message length, clone the skb so the reply can reuse it,
 * run the lookup and unicast the result back to the requesting pid.
 */
837 static void nl_fib_input(struct sk_buff *skb)
840 struct fib_result_nl *frn;
841 struct nlmsghdr *nlh;
842 struct fib_table *tb;
845 net = skb->sk->sk_net;
846 nlh = nlmsg_hdr(skb);
/* Reject truncated or undersized messages before touching payload. */
847 if (skb->len < NLMSG_SPACE(0) || skb->len < nlh->nlmsg_len ||
848 nlh->nlmsg_len < NLMSG_LENGTH(sizeof(*frn)))
/* Clone so the answer can be written into the same buffer and sent
 * back (original skb freed by the caller — confirm in full file). */
851 skb = skb_clone(skb, GFP_KERNEL);
854 nlh = nlmsg_hdr(skb);
856 frn = (struct fib_result_nl *) NLMSG_DATA(nlh);
857 tb = fib_get_table(net, frn->tb_id_in);
859 nl_fib_lookup(frn, tb);
861 pid = NETLINK_CB(skb).pid; /* pid of sending process */
862 NETLINK_CB(skb).pid = 0; /* from kernel */
863 NETLINK_CB(skb).dst_group = 0; /* unicast */
864 netlink_unicast(net->ipv4.fibnl, skb, pid, MSG_DONTWAIT);
/*
 * nl_fib_lookup_init - create the per-namespace NETLINK_FIB_LOOKUP
 * kernel socket and remember it in net->ipv4.fibnl.
 */
867 static int nl_fib_lookup_init(struct net *net)
870 sk = netlink_kernel_create(net, NETLINK_FIB_LOOKUP, 0,
871 nl_fib_input, NULL, THIS_MODULE);
873 return -EAFNOSUPPORT;
874 net->ipv4.fibnl = sk;
/* nl_fib_lookup_exit - tear down the lookup socket created above. */
878 static void nl_fib_lookup_exit(struct net *net)
880 netlink_kernel_release(net->ipv4.fibnl);
881 net->ipv4.fibnl = NULL;
/*
 * fib_disable_ip - mark routes through @dev dead (fib_sync_down) and,
 * if any were affected, flush the namespace's tables.
 */
884 static void fib_disable_ip(struct net_device *dev, int force)
886 if (fib_sync_down(0, dev, force))
887 fib_flush(dev->nd_net)
/*
 * fib_inetaddr_event - inetaddr notifier: keep the FIB in sync with
 * address add/delete events (the NETDEV_UP/DOWN case labels are elided
 * in this extract).
 */
892 static int fib_inetaddr_event(struct notifier_block *this, unsigned long event, void *ptr)
894 struct in_ifaddr *ifa = (struct in_ifaddr*)ptr;
899 #ifdef CONFIG_IP_ROUTE_MULTIPATH
/* Re-enable any multipath nexthops that become usable again. */
900 fib_sync_up(ifa->ifa_dev->dev);
906 if (ifa->ifa_dev->ifa_list == NULL) {
907 /* Last address was deleted from this interface.
/* Force-disable: no address left means no IPv4 routing via this dev. */
910 fib_disable_ip(ifa->ifa_dev->dev, 1);
/*
 * fib_netdev_event - netdevice notifier: unregister tears everything
 * down; down events disable routes; MTU/other changes fall through
 * (intervening case labels elided in this extract).
 */
919 static int fib_netdev_event(struct notifier_block *this, unsigned long event, void *ptr)
921 struct net_device *dev = ptr;
922 struct in_device *in_dev = __in_dev_get_rtnl(dev);
924 if (event == NETDEV_UNREGISTER) {
925 fib_disable_ip(dev, 2);
936 } endfor_ifa(in_dev);
937 #ifdef CONFIG_IP_ROUTE_MULTIPATH
943 fib_disable_ip(dev, 0);
945 case NETDEV_CHANGEMTU:
/* Notifier registrations: hooked up in ip_fib_init(). */
953 static struct notifier_block fib_inetaddr_notifier = {
954 .notifier_call =fib_inetaddr_event,
957 static struct notifier_block fib_netdev_notifier = {
958 .notifier_call =fib_netdev_event,
/*
 * ip_fib_net_init - allocate and initialise the per-namespace table
 * hash, then create the default tables (and rules, when policy routing
 * is compiled in) via fib4_rules_init().
 */
961 static int __net_init ip_fib_net_init(struct net *net)
965 net->ipv4.fib_table_hash = kzalloc(
966 sizeof(struct hlist_head)*FIB_TABLE_HASHSZ, GFP_KERNEL);
967 if (net->ipv4.fib_table_hash == NULL)
970 for (i = 0; i < FIB_TABLE_HASHSZ; i++)
971 INIT_HLIST_HEAD(&net->ipv4.fib_table_hash[i]);
973 return fib4_rules_init(net);
/*
 * ip_fib_net_exit - reverse of ip_fib_net_init: drop rules, free every
 * table in the hash (the per-table free call inside the loop is elided
 * in this extract), then free the hash array itself.
 */
976 static void __net_exit ip_fib_net_exit(struct net *net)
980 #ifdef CONFIG_IP_MULTIPLE_TABLES
981 fib4_rules_exit(net);
984 for (i = 0; i < FIB_TABLE_HASHSZ; i++) {
985 struct fib_table *tb;
986 struct hlist_head *head;
987 struct hlist_node *node, *tmp;
989 head = &net->ipv4.fib_table_hash[i];
/* _safe variant: entries are removed/freed while iterating. */
990 hlist_for_each_entry_safe(tb, node, tmp, head, tb_hlist) {
996 kfree(net->ipv4.fib_table_hash);
/*
 * fib_net_init - pernet init: core tables, lookup socket, then procfs;
 * unwinds in reverse on failure (goto labels elided in this extract).
 */
999 static int __net_init fib_net_init(struct net *net)
1003 error = ip_fib_net_init(net);
1006 error = nl_fib_lookup_init(net);
1009 error = fib_proc_init(net);
/* Error unwind: undo in reverse order of setup. */
1016 nl_fib_lookup_exit(net);
1018 ip_fib_net_exit(net);
/* fib_net_exit - pernet teardown, mirroring fib_net_init. */
1022 static void __net_exit fib_net_exit(struct net *net)
1025 nl_fib_lookup_exit(net);
1026 ip_fib_net_exit(net);
1029 static struct pernet_operations fib_net_ops = {
1030 .init = fib_net_init,
1031 .exit = fib_net_exit,
/*
 * ip_fib_init - boot-time FIB setup: register the rtnetlink route
 * handlers, the pernet operations, and the device/address notifiers
 * that keep the FIB synchronised with interface state.
 */
1034 void __init ip_fib_init(void)
1036 rtnl_register(PF_INET, RTM_NEWROUTE, inet_rtm_newroute, NULL);
1037 rtnl_register(PF_INET, RTM_DELROUTE, inet_rtm_delroute, NULL);
1038 rtnl_register(PF_INET, RTM_GETROUTE, NULL, inet_dump_fib);
1040 register_pernet_subsys(&fib_net_ops);
1041 register_netdevice_notifier(&fib_netdev_notifier);
1042 register_inetaddr_notifier(&fib_inetaddr_notifier);
1047 EXPORT_SYMBOL(inet_addr_type);
1048 EXPORT_SYMBOL(inet_dev_addr_type);
1049 EXPORT_SYMBOL(ip_dev_find);