X-Git-Url: https://err.no/cgi-bin/gitweb.cgi?a=blobdiff_plain;f=net%2Fipv4%2Froute.c;h=7b5e8e1d94be2eb19764880c99597854f34657f3;hb=6f58d79598c184dc4f18ceddfaa5fa7483bce0a5;hp=a7651c64bb48c62b84841462bbda9d383fcbed54;hpb=da0e28cb68a7e22b47c6ae1a5b12cb538c13c69f;p=linux-2.6 diff --git a/net/ipv4/route.c b/net/ipv4/route.c index a7651c64bb..7b5e8e1d94 100644 --- a/net/ipv4/route.c +++ b/net/ipv4/route.c @@ -117,8 +117,6 @@ #define RT_GC_TIMEOUT (300*HZ) -static int ip_rt_min_delay = 2 * HZ; -static int ip_rt_max_delay = 10 * HZ; static int ip_rt_max_size; static int ip_rt_gc_timeout = RT_GC_TIMEOUT; static int ip_rt_gc_interval = 60 * HZ; @@ -133,12 +131,9 @@ static int ip_rt_mtu_expires = 10 * 60 * HZ; static int ip_rt_min_pmtu = 512 + 20 + 20; static int ip_rt_min_advmss = 256; static int ip_rt_secret_interval = 10 * 60 * HZ; -static int ip_rt_flush_expected; -static unsigned long rt_deadline; #define RTprint(a...) printk(KERN_DEBUG a) -static struct timer_list rt_flush_timer; static void rt_worker_func(struct work_struct *work); static DECLARE_DELAYED_WORK(expires_work, rt_worker_func); static struct timer_list rt_secret_timer; @@ -169,6 +164,7 @@ static struct dst_ops ipv4_dst_ops = { .update_pmtu = ip_rt_update_pmtu, .local_out = ip_local_out, .entry_size = sizeof(struct rtable), + .entries = ATOMIC_INIT(0), }; #define ECN_OR_COST(class) TC_PRIO_##class @@ -259,19 +255,16 @@ static inline void rt_hash_lock_init(void) static struct rt_hash_bucket *rt_hash_table; static unsigned rt_hash_mask; static unsigned int rt_hash_log; -static unsigned int rt_hash_rnd; +static atomic_t rt_genid; static DEFINE_PER_CPU(struct rt_cache_stat, rt_cache_stat); #define RT_CACHE_STAT_INC(field) \ (__raw_get_cpu_var(rt_cache_stat).field++) -static int rt_intern_hash(unsigned hash, struct rtable *rth, - struct rtable **res); - static unsigned int rt_hash_code(u32 daddr, u32 saddr) { - return (jhash_2words(daddr, saddr, rt_hash_rnd) - & rt_hash_mask); + return jhash_2words(daddr, saddr, atomic_read(&rt_genid)) + & rt_hash_mask; } #define rt_hash(daddr, saddr, idx) \ @@ -281,27 +274,28 @@ static unsigned int rt_hash_code(u32 daddr, u32 saddr) #ifdef CONFIG_PROC_FS struct rt_cache_iter_state { int bucket; + int genid; }; -static struct rtable *rt_cache_get_first(struct seq_file *seq) +static struct rtable *rt_cache_get_first(struct rt_cache_iter_state *st) { struct rtable *r = NULL; - struct rt_cache_iter_state *st = seq->private; for (st->bucket = rt_hash_mask; st->bucket >= 0; --st->bucket) { rcu_read_lock_bh(); - r = rt_hash_table[st->bucket].chain; - if (r) - break; + r = rcu_dereference(rt_hash_table[st->bucket].chain); + while (r) { + if (r->rt_genid == st->genid) + return r; + r = rcu_dereference(r->u.dst.rt_next); + } rcu_read_unlock_bh(); } - return rcu_dereference(r); + return r; } -static struct rtable *rt_cache_get_next(struct seq_file *seq, struct rtable *r) +static struct rtable *rt_cache_get_next(struct rt_cache_iter_state *st, struct rtable *r) { - struct rt_cache_iter_state *st = seq->private; - r = r->u.dst.rt_next; while (!r) { rcu_read_unlock_bh(); @@ -313,29 +307,38 @@ static struct rtable *rt_cache_get_next(struct seq_file *seq, struct rtable *r) return rcu_dereference(r); } -static struct rtable *rt_cache_get_idx(struct seq_file *seq, loff_t pos) +static struct rtable *rt_cache_get_idx(struct rt_cache_iter_state *st, loff_t pos) { - struct rtable *r = rt_cache_get_first(seq); + struct rtable *r = rt_cache_get_first(st); if (r) - while (pos && (r = rt_cache_get_next(seq, r))) + while (pos && 
(r = rt_cache_get_next(st, r))) {
+			if (r->rt_genid != st->genid)
+				continue;
 			--pos;
+		}
 	return pos ? NULL : r;
 }
 
 static void *rt_cache_seq_start(struct seq_file *seq, loff_t *pos)
 {
-	return *pos ? rt_cache_get_idx(seq, *pos - 1) : SEQ_START_TOKEN;
+	struct rt_cache_iter_state *st = seq->private;
+
+	if (*pos)
+		return rt_cache_get_idx(st, *pos - 1);
+	st->genid = atomic_read(&rt_genid);
+	return SEQ_START_TOKEN;
 }
 
 static void *rt_cache_seq_next(struct seq_file *seq, void *v, loff_t *pos)
 {
-	struct rtable *r = NULL;
+	struct rtable *r;
+	struct rt_cache_iter_state *st = seq->private;
 
 	if (v == SEQ_START_TOKEN)
-		r = rt_cache_get_first(seq);
+		r = rt_cache_get_first(st);
 	else
-		r = rt_cache_get_next(seq, v);
+		r = rt_cache_get_next(st, v);
 	++*pos;
 	return r;
 }
@@ -539,12 +542,11 @@ static __init int ip_rt_proc_init(struct net *net)
 	if (!pde)
 		goto err1;
 
-	pde = create_proc_entry("rt_cache", S_IRUGO, net->proc_net_stat);
+	pde = proc_create("rt_cache", S_IRUGO,
+			  net->proc_net_stat, &rt_cpu_seq_fops);
 	if (!pde)
 		goto err2;
 
-	pde->proc_fops = &rt_cpu_seq_fops;
-
 #ifdef CONFIG_NET_CLS_ROUTE
 	pde = create_proc_read_entry("rt_acct", 0, net->proc_net,
 			ip_rt_acct_read, NULL);
@@ -648,6 +650,11 @@ static inline int compare_keys(struct flowi *fl1, struct flowi *fl2)
 		(fl1->iif ^ fl2->iif)) == 0;
 }
 
+static inline int compare_netns(struct rtable *rt1, struct rtable *rt2)
+{
+	return rt1->u.dst.dev->nd_net == rt2->u.dst.dev->nd_net;
+}
+
 /*
  * Perform a full scan of hash table and free all entries.
  * Can be called by a softirq or a process.
@@ -703,6 +710,11 @@ static void rt_check_expire(void)
 			continue;
 		spin_lock_bh(rt_hash_lock_addr(i));
 		while ((rth = *rthp) != NULL) {
+			if (rth->rt_genid != atomic_read(&rt_genid)) {
+				*rthp = rth->u.dst.rt_next;
+				rt_free(rth);
+				continue;
+			}
 			if (rth->u.dst.expires) {
 				/* Entry is expired even if it is in use */
 				if (time_before_eq(jiffies, rth->u.dst.expires)) {
@@ -727,83 +739,45 @@ static void rt_check_expire(void)
 
 /*
  * rt_worker_func() is run in process context.
- * If a whole flush was scheduled, it is done.
- * Else, we call rt_check_expire() to scan part of the hash table
+ * We call rt_check_expire() to scan part of the hash table.
  */
 static void rt_worker_func(struct work_struct *work)
 {
-	if (ip_rt_flush_expected) {
-		ip_rt_flush_expected = 0;
-		rt_do_flush(1);
-	} else
-		rt_check_expire();
+	rt_check_expire();
 	schedule_delayed_work(&expires_work, ip_rt_gc_interval);
 }
 
-/* This can run from both BH and non-BH contexts, the latter
- * in the case of a forced flush event.
+/*
+ * Perturbation of rt_genid by a small quantity [1..256].
+ * Using 8 bits of shuffling ensures we can call rt_cache_invalidate()
+ * many times (2^24) without reusing a recent rt_genid.
+ * Jenkins hash is strong enough that small changes of rt_genid are OK.
*/ -static void rt_run_flush(unsigned long process_context) +static void rt_cache_invalidate(void) { - rt_deadline = 0; + unsigned char shuffle; - get_random_bytes(&rt_hash_rnd, 4); - - rt_do_flush(process_context); + get_random_bytes(&shuffle, sizeof(shuffle)); + atomic_add(shuffle + 1U, &rt_genid); } -static DEFINE_SPINLOCK(rt_flush_lock); - +/* + * delay < 0 : invalidate cache (fast : entries will be deleted later) + * delay >= 0 : invalidate & flush cache (can be long) + */ void rt_cache_flush(int delay) { - unsigned long now = jiffies; - int user_mode = !in_softirq(); - - if (delay < 0) - delay = ip_rt_min_delay; - - spin_lock_bh(&rt_flush_lock); - - if (del_timer(&rt_flush_timer) && delay > 0 && rt_deadline) { - long tmo = (long)(rt_deadline - now); - - /* If flush timer is already running - and flush request is not immediate (delay > 0): - - if deadline is not achieved, prolongate timer to "delay", - otherwise fire it at deadline time. - */ - - if (user_mode && tmo < ip_rt_max_delay-ip_rt_min_delay) - tmo = 0; - - if (delay > tmo) - delay = tmo; - } - - if (delay <= 0) { - spin_unlock_bh(&rt_flush_lock); - rt_run_flush(user_mode); - return; - } - - if (rt_deadline == 0) - rt_deadline = now + ip_rt_max_delay; - - mod_timer(&rt_flush_timer, now+delay); - spin_unlock_bh(&rt_flush_lock); + rt_cache_invalidate(); + if (delay >= 0) + rt_do_flush(!in_softirq()); } /* - * We change rt_hash_rnd and ask next rt_worker_func() invocation - * to perform a flush in process context + * We change rt_genid and let gc do the cleanup */ static void rt_secret_rebuild(unsigned long dummy) { - get_random_bytes(&rt_hash_rnd, 4); - ip_rt_flush_expected = 1; - cancel_delayed_work(&expires_work); - schedule_delayed_work(&expires_work, HZ/10); + rt_cache_invalidate(); mod_timer(&rt_secret_timer, jiffies + ip_rt_secret_interval); } @@ -880,7 +854,8 @@ static int rt_garbage_collect(struct dst_ops *ops) rthp = &rt_hash_table[k].chain; spin_lock_bh(rt_hash_lock_addr(k)); while ((rth = *rthp) != NULL) { - if (!rt_may_expire(rth, tmo, expire)) { + if (rth->rt_genid == atomic_read(&rt_genid) && + !rt_may_expire(rth, tmo, expire)) { tmo >>= 1; rthp = &rth->u.dst.rt_next; continue; @@ -961,7 +936,12 @@ restart: spin_lock_bh(rt_hash_lock_addr(hash)); while ((rth = *rthp) != NULL) { - if (compare_keys(&rth->fl, &rt->fl)) { + if (rth->rt_genid != atomic_read(&rt_genid)) { + *rthp = rth->u.dst.rt_next; + rt_free(rth); + continue; + } + if (compare_keys(&rth->fl, &rt->fl) && compare_netns(rth, rt)) { /* Put it first */ *rthp = rth->u.dst.rt_next; /* @@ -1126,17 +1106,19 @@ void __ip_select_ident(struct iphdr *iph, struct dst_entry *dst, int more) static void rt_del(unsigned hash, struct rtable *rt) { - struct rtable **rthp; + struct rtable **rthp, *aux; + rthp = &rt_hash_table[hash].chain; spin_lock_bh(rt_hash_lock_addr(hash)); ip_rt_put(rt); - for (rthp = &rt_hash_table[hash].chain; *rthp; - rthp = &(*rthp)->u.dst.rt_next) - if (*rthp == rt) { - *rthp = rt->u.dst.rt_next; - rt_free(rt); - break; + while ((aux = *rthp) != NULL) { + if (aux == rt || (aux->rt_genid != atomic_read(&rt_genid))) { + *rthp = aux->u.dst.rt_next; + rt_free(aux); + continue; } + rthp = &aux->u.dst.rt_next; + } spin_unlock_bh(rt_hash_lock_addr(hash)); } @@ -1181,7 +1163,8 @@ void ip_rt_redirect(__be32 old_gw, __be32 daddr, __be32 new_gw, if (rth->fl.fl4_dst != daddr || rth->fl.fl4_src != skeys[i] || rth->fl.oif != ikeys[k] || - rth->fl.iif != 0) { + rth->fl.iif != 0 || + rth->rt_genid != atomic_read(&rt_genid)) { rthp = &rth->u.dst.rt_next; 
continue; } @@ -1219,7 +1202,7 @@ void ip_rt_redirect(__be32 old_gw, __be32 daddr, __be32 new_gw, rt->u.dst.neighbour = NULL; rt->u.dst.hh = NULL; rt->u.dst.xfrm = NULL; - + rt->rt_genid = atomic_read(&rt_genid); rt->rt_flags |= RTCF_REDIRECTED; /* Gateway is different ... */ @@ -1415,7 +1398,8 @@ static __inline__ unsigned short guess_mtu(unsigned short old_mtu) return 68; } -unsigned short ip_rt_frag_needed(struct iphdr *iph, unsigned short new_mtu) +unsigned short ip_rt_frag_needed(struct net *net, struct iphdr *iph, + unsigned short new_mtu) { int i; unsigned short old_mtu = ntohs(iph->tot_len); @@ -1438,7 +1422,9 @@ unsigned short ip_rt_frag_needed(struct iphdr *iph, unsigned short new_mtu) rth->rt_dst == daddr && rth->rt_src == iph->saddr && rth->fl.iif == 0 && - !(dst_metric_locked(&rth->u.dst, RTAX_MTU))) { + !(dst_metric_locked(&rth->u.dst, RTAX_MTU)) && + rth->u.dst.dev->nd_net == net && + rth->rt_genid == atomic_read(&rt_genid)) { unsigned short mtu = new_mtu; if (new_mtu < 68 || new_mtu >= old_mtu) { @@ -1559,7 +1545,7 @@ void ip_rt_get_source(u8 *addr, struct rtable *rt) if (rt->fl.iif == 0) src = rt->rt_src; - else if (fib_lookup(&init_net, &rt->fl, &res) == 0) { + else if (fib_lookup(rt->u.dst.dev->nd_net, &rt->fl, &res) == 0) { src = FIB_RES_PREFSRC(res); fib_res_put(&res); } else @@ -1673,8 +1659,9 @@ static int ip_route_input_mc(struct sk_buff *skb, __be32 daddr, __be32 saddr, rth->fl.oif = 0; rth->rt_gateway = daddr; rth->rt_spec_dst= spec_dst; - rth->rt_type = RTN_MULTICAST; + rth->rt_genid = atomic_read(&rt_genid); rth->rt_flags = RTCF_MULTICAST; + rth->rt_type = RTN_MULTICAST; if (our) { rth->u.dst.input= ip_local_deliver; rth->rt_flags |= RTCF_LOCAL; @@ -1813,6 +1800,7 @@ static inline int __mkroute_input(struct sk_buff *skb, rth->u.dst.input = ip_forward; rth->u.dst.output = ip_output; + rth->rt_genid = atomic_read(&rt_genid); rt_set_nexthop(rth, res, itag); @@ -1881,6 +1869,7 @@ static int ip_route_input_slow(struct sk_buff *skb, __be32 daddr, __be32 saddr, __be32 spec_dst; int err = -EINVAL; int free_res = 0; + struct net * net = dev->nd_net; /* IP on this device is disabled. */ @@ -1911,7 +1900,7 @@ static int ip_route_input_slow(struct sk_buff *skb, __be32 daddr, __be32 saddr, /* * Now we are ready to route packet. 
*/ - if ((err = fib_lookup(&init_net, &fl, &res)) != 0) { + if ((err = fib_lookup(net, &fl, &res)) != 0) { if (!IN_DEV_FORWARD(in_dev)) goto e_hostunreach; goto no_route; @@ -1926,7 +1915,7 @@ static int ip_route_input_slow(struct sk_buff *skb, __be32 daddr, __be32 saddr, if (res.type == RTN_LOCAL) { int result; result = fib_validate_source(saddr, daddr, tos, - init_net.loopback_dev->ifindex, + net->loopback_dev->ifindex, dev, &spec_dst, &itag); if (result < 0) goto martian_source; @@ -1972,6 +1961,7 @@ local_input: goto e_nobufs; rth->u.dst.output= ip_rt_bug; + rth->rt_genid = atomic_read(&rt_genid); atomic_set(&rth->u.dst.__refcnt, 1); rth->u.dst.flags= DST_HOST; @@ -1988,7 +1978,7 @@ local_input: #endif rth->rt_iif = rth->fl.iif = dev->ifindex; - rth->u.dst.dev = init_net.loopback_dev; + rth->u.dst.dev = net->loopback_dev; dev_hold(rth->u.dst.dev); rth->idev = in_dev_get(rth->u.dst.dev); rth->rt_gateway = daddr; @@ -2048,7 +2038,9 @@ int ip_route_input(struct sk_buff *skb, __be32 daddr, __be32 saddr, struct rtable * rth; unsigned hash; int iif = dev->ifindex; + struct net *net; + net = dev->nd_net; tos &= IPTOS_RT_MASK; hash = rt_hash(daddr, saddr, iif); @@ -2060,7 +2052,9 @@ int ip_route_input(struct sk_buff *skb, __be32 daddr, __be32 saddr, rth->fl.iif == iif && rth->fl.oif == 0 && rth->fl.mark == skb->mark && - rth->fl.fl4_tos == tos) { + rth->fl.fl4_tos == tos && + rth->u.dst.dev->nd_net == net && + rth->rt_genid == atomic_read(&rt_genid)) { dst_use(&rth->u.dst, jiffies); RT_CACHE_STAT_INC(in_hit); rcu_read_unlock(); @@ -2188,6 +2182,7 @@ static inline int __mkroute_output(struct rtable **result, rth->rt_spec_dst= fl->fl4_src; rth->u.dst.output=ip_output; + rth->rt_genid = atomic_read(&rt_genid); RT_CACHE_STAT_INC(out_slow_tot); @@ -2247,7 +2242,8 @@ static inline int ip_mkroute_output(struct rtable **rp, * Major route resolver routine. */ -static int ip_route_output_slow(struct rtable **rp, const struct flowi *oldflp) +static int ip_route_output_slow(struct net *net, struct rtable **rp, + const struct flowi *oldflp) { u32 tos = RT_FL_TOS(oldflp); struct flowi fl = { .nl_u = { .ip4_u = @@ -2259,7 +2255,7 @@ static int ip_route_output_slow(struct rtable **rp, const struct flowi *oldflp) RT_SCOPE_UNIVERSE), } }, .mark = oldflp->mark, - .iif = init_net.loopback_dev->ifindex, + .iif = net->loopback_dev->ifindex, .oif = oldflp->oif }; struct fib_result res; unsigned flags = 0; @@ -2281,14 +2277,14 @@ static int ip_route_output_slow(struct rtable **rp, const struct flowi *oldflp) goto out; /* It is equivalent to inet_addr_type(saddr) == RTN_LOCAL */ - dev_out = ip_dev_find(oldflp->fl4_src); + dev_out = ip_dev_find(net, oldflp->fl4_src); if (dev_out == NULL) goto out; /* I removed check for oif == dev_out->oif here. It was wrong for two reasons: - 1. ip_dev_find(saddr) can return wrong iface, if saddr is - assigned to multiple interfaces. + 1. ip_dev_find(net, saddr) can return wrong iface, if saddr + is assigned to multiple interfaces. 2. Moreover, we are allowed to send packets with saddr of another iface. 
--ANK */ @@ -2321,7 +2317,7 @@ static int ip_route_output_slow(struct rtable **rp, const struct flowi *oldflp) if (oldflp->oif) { - dev_out = dev_get_by_index(&init_net, oldflp->oif); + dev_out = dev_get_by_index(net, oldflp->oif); err = -ENODEV; if (dev_out == NULL) goto out; @@ -2355,15 +2351,15 @@ static int ip_route_output_slow(struct rtable **rp, const struct flowi *oldflp) fl.fl4_dst = fl.fl4_src = htonl(INADDR_LOOPBACK); if (dev_out) dev_put(dev_out); - dev_out = init_net.loopback_dev; + dev_out = net->loopback_dev; dev_hold(dev_out); - fl.oif = init_net.loopback_dev->ifindex; + fl.oif = net->loopback_dev->ifindex; res.type = RTN_LOCAL; flags |= RTCF_LOCAL; goto make_route; } - if (fib_lookup(&init_net, &fl, &res)) { + if (fib_lookup(net, &fl, &res)) { res.fi = NULL; if (oldflp->oif) { /* Apparently, routing tables are wrong. Assume, @@ -2402,7 +2398,7 @@ static int ip_route_output_slow(struct rtable **rp, const struct flowi *oldflp) fl.fl4_src = fl.fl4_dst; if (dev_out) dev_put(dev_out); - dev_out = init_net.loopback_dev; + dev_out = net->loopback_dev; dev_hold(dev_out); fl.oif = dev_out->ifindex; if (res.fi) @@ -2418,7 +2414,7 @@ static int ip_route_output_slow(struct rtable **rp, const struct flowi *oldflp) else #endif if (!res.prefixlen && res.type == RTN_UNICAST && !fl.oif) - fib_select_default(&fl, &res); + fib_select_default(net, &fl, &res); if (!fl.fl4_src) fl.fl4_src = FIB_RES_PREFSRC(res); @@ -2441,7 +2437,8 @@ make_route: out: return err; } -int __ip_route_output_key(struct rtable **rp, const struct flowi *flp) +int __ip_route_output_key(struct net *net, struct rtable **rp, + const struct flowi *flp) { unsigned hash; struct rtable *rth; @@ -2457,7 +2454,9 @@ int __ip_route_output_key(struct rtable **rp, const struct flowi *flp) rth->fl.oif == flp->oif && rth->fl.mark == flp->mark && !((rth->fl.fl4_tos ^ flp->fl4_tos) & - (IPTOS_RT_MASK | RTO_ONLINK))) { + (IPTOS_RT_MASK | RTO_ONLINK)) && + rth->u.dst.dev->nd_net == net && + rth->rt_genid == atomic_read(&rt_genid)) { dst_use(&rth->u.dst, jiffies); RT_CACHE_STAT_INC(out_hit); rcu_read_unlock_bh(); @@ -2468,7 +2467,7 @@ int __ip_route_output_key(struct rtable **rp, const struct flowi *flp) } rcu_read_unlock_bh(); - return ip_route_output_slow(rp, flp); + return ip_route_output_slow(net, rp, flp); } EXPORT_SYMBOL_GPL(__ip_route_output_key); @@ -2484,6 +2483,7 @@ static struct dst_ops ipv4_dst_blackhole_ops = { .check = ipv4_dst_check, .update_pmtu = ipv4_rt_blackhole_update_pmtu, .entry_size = sizeof(struct rtable), + .entries = ATOMIC_INIT(0), }; @@ -2511,6 +2511,7 @@ static int ipv4_dst_blackhole(struct rtable **rp, struct flowi *flp, struct sock rt->idev = ort->idev; if (rt->idev) in_dev_hold(rt->idev); + rt->rt_genid = atomic_read(&rt_genid); rt->rt_flags = ort->rt_flags; rt->rt_type = ort->rt_type; rt->rt_dst = ort->rt_dst; @@ -2530,11 +2531,12 @@ static int ipv4_dst_blackhole(struct rtable **rp, struct flowi *flp, struct sock return (rt ? 
0 : -ENOMEM); } -int ip_route_output_flow(struct rtable **rp, struct flowi *flp, struct sock *sk, int flags) +int ip_route_output_flow(struct net *net, struct rtable **rp, struct flowi *flp, + struct sock *sk, int flags) { int err; - if ((err = __ip_route_output_key(rp, flp)) != 0) + if ((err = __ip_route_output_key(net, rp, flp)) != 0) return err; if (flp->proto) { @@ -2555,9 +2557,9 @@ int ip_route_output_flow(struct rtable **rp, struct flowi *flp, struct sock *sk, EXPORT_SYMBOL_GPL(ip_route_output_flow); -int ip_route_output_key(struct rtable **rp, struct flowi *flp) +int ip_route_output_key(struct net *net, struct rtable **rp, struct flowi *flp) { - return ip_route_output_flow(rp, flp, NULL, 0); + return ip_route_output_flow(net, rp, flp, NULL, 0); } static int rt_fill_info(struct sk_buff *skb, u32 pid, u32 seq, int event, @@ -2724,7 +2726,7 @@ static int inet_rtm_getroute(struct sk_buff *in_skb, struct nlmsghdr* nlh, void }, .oif = tb[RTA_OIF] ? nla_get_u32(tb[RTA_OIF]) : 0, }; - err = ip_route_output_key(&rt, &fl); + err = ip_route_output_key(&init_net, &rt, &fl); } if (err) @@ -2764,6 +2766,8 @@ int ip_rt_dump(struct sk_buff *skb, struct netlink_callback *cb) rt = rcu_dereference(rt->u.dst.rt_next), idx++) { if (idx < s_idx) continue; + if (rt->rt_genid != atomic_read(&rt_genid)) + continue; skb->dst = dst_clone(&rt->u.dst); if (rt_fill_info(skb, NETLINK_CB(cb->skb).pid, cb->nlh->nlmsg_seq, RTM_NEWROUTE, @@ -2832,24 +2836,6 @@ ctl_table ipv4_route_table[] = { .proc_handler = &ipv4_sysctl_rtcache_flush, .strategy = &ipv4_sysctl_rtcache_flush_strategy, }, - { - .ctl_name = NET_IPV4_ROUTE_MIN_DELAY, - .procname = "min_delay", - .data = &ip_rt_min_delay, - .maxlen = sizeof(int), - .mode = 0644, - .proc_handler = &proc_dointvec_jiffies, - .strategy = &sysctl_jiffies, - }, - { - .ctl_name = NET_IPV4_ROUTE_MAX_DELAY, - .procname = "max_delay", - .data = &ip_rt_max_delay, - .maxlen = sizeof(int), - .mode = 0644, - .proc_handler = &proc_dointvec_jiffies, - .strategy = &sysctl_jiffies, - }, { .ctl_name = NET_IPV4_ROUTE_GC_THRESH, .procname = "gc_thresh", @@ -3008,8 +2994,8 @@ int __init ip_rt_init(void) { int rc = 0; - rt_hash_rnd = (int) ((num_physpages ^ (num_physpages>>8)) ^ - (jiffies ^ (jiffies >> 7))); + atomic_set(&rt_genid, (int) ((num_physpages ^ (num_physpages>>8)) ^ + (jiffies ^ (jiffies >> 7)))); #ifdef CONFIG_NET_CLS_ROUTE ip_rt_acct = __alloc_percpu(256 * sizeof(struct ip_rt_acct)); @@ -3042,7 +3028,6 @@ int __init ip_rt_init(void) devinet_init(); ip_fib_init(); - setup_timer(&rt_flush_timer, rt_run_flush, 0); setup_timer(&rt_secret_timer, rt_secret_rebuild, 0); /* All the timers, started at system startup tend
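
---

For illustration only: the core of this patch replaces the rt_flush_timer
machinery with a generation counter, and that scheme reduces to a small
userspace sketch. Everything below is a simplified model, not kernel code --
the entry type, the XOR hash (standing in for jhash_2words seeded with
rt_genid) and the single-threaded chain walk are assumptions; only the role
of rt_genid mirrors the patch.

#include <stdatomic.h>
#include <stdlib.h>

struct entry {
	struct entry	*next;
	int		genid;	/* generation the entry was created in */
	unsigned int	key;	/* stands in for the flow key */
};

static atomic_int rt_genid;	/* bumped to invalidate the whole cache */

/* O(1) flush: perturb the generation by a random step in [1..256] so a
 * recently used genid does not recur for roughly 2^24 invalidations. */
static void rt_cache_invalidate(void)
{
	atomic_fetch_add(&rt_genid, (rand() & 0xff) + 1);
}

/* The genid also seeds the hash, so lookups made after a flush usually
 * land on chains where the stale entries no longer sit anyway. */
static unsigned int rt_hash(unsigned int key, unsigned int mask)
{
	return (key ^ (unsigned int)atomic_load(&rt_genid)) & mask;
}

/* Readers treat entries from an older generation as garbage and reap
 * them lazily, as rt_check_expire() and rt_intern_hash() do here. */
static struct entry *cache_lookup(struct entry **chain, unsigned int key)
{
	int genid = atomic_load(&rt_genid);
	struct entry *e;

	while ((e = *chain) != NULL) {
		if (e->genid != genid) {	/* stale: unlink and free */
			*chain = e->next;
			free(e);
			continue;
		}
		if (e->key == key)
			return e;
		chain = &e->next;
	}
	return NULL;
}

int main(void)
{
	struct entry *buckets[8] = { NULL };
	unsigned int h = rt_hash(42, 7);
	struct entry *e = calloc(1, sizeof(*e));

	if (!e)
		return 1;
	e->genid = atomic_load(&rt_genid);
	e->key = 42;
	buckets[h] = e;

	rt_cache_invalidate();
	/* revisiting the same chain unlinks and frees the stale entry */
	return cache_lookup(&buckets[h], 42) == NULL ? 0 : 1;
}

The net effect is what the new rt_cache_flush() implements: invalidation is
a single atomic add (plus a synchronous rt_do_flush() only when delay >= 0),
stale entries are removed piecemeal by whoever walks a chain next, and the
min_delay/max_delay sysctls, rt_deadline and rt_flush_timer become
unnecessary.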