diff --git a/net/ipv4/route.c b/net/ipv4/route.c
index daf82f8d3c4a6a45e92819c1fe41167754dc890a..8c0b14e3beecc761e08c684cea964720fd2b2b97 100644
--- a/net/ipv4/route.c
+++ b/net/ipv4/route.c
@@ -54,7 +54,7 @@
  *             Marc Boucher    :       routing by fwmark
  *     Robert Olsson           :       Added rt_cache statistics
  *     Arnaldo C. Melo         :       Convert proc stuff to seq_file
- *     Eric Dumazet            :       hashed spinlocks
+ *     Eric Dumazet            :       hashed spinlocks and rt_check_expire() fixes.
  *
  *             This program is free software; you can redistribute it and/or
  *             modify it under the terms of the GNU General Public License
@@ -71,6 +71,7 @@
 #include <linux/kernel.h>
 #include <linux/sched.h>
 #include <linux/mm.h>
+#include <linux/bootmem.h>
 #include <linux/string.h>
 #include <linux/socket.h>
 #include <linux/sockios.h>
@@ -239,7 +240,9 @@ static unsigned                     rt_hash_mask;
 static int                     rt_hash_log;
 static unsigned int            rt_hash_rnd;
 
-struct rt_cache_stat *rt_cache_stat;
+static struct rt_cache_stat *rt_cache_stat;
+#define RT_CACHE_STAT_INC(field)                                         \
+               (per_cpu_ptr(rt_cache_stat, raw_smp_processor_id())->field++)
 
 static int rt_intern_hash(unsigned hash, struct rtable *rth,
                                struct rtable **res);
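
The RT_CACHE_STAT_INC() macro added above bumps a field of a per-CPU counter without locking: each CPU only ever touches its own copy, and readers sum all copies. A minimal sketch of that pattern, assuming the 2.6-era percpu helpers from <linux/percpu.h>; example_stats and the use of the in_slow_tot field are illustrative only, not part of this patch:

/* Sketch only: lock-free per-CPU counters in the style of RT_CACHE_STAT_INC().
 * Writers touch their own CPU's copy; a reader sums all possible CPUs,
 * the way the /proc rt_cache statistics code does. */
static struct rt_cache_stat *example_stats;

static int __init example_stats_init(void)
{
	example_stats = alloc_percpu(struct rt_cache_stat);
	return example_stats ? 0 : -ENOMEM;
}

static void example_stats_bump(void)
{
	/* same pattern as RT_CACHE_STAT_INC(field); slight races under
	 * preemption are acceptable for statistics */
	per_cpu_ptr(example_stats, raw_smp_processor_id())->in_slow_tot++;
}

static unsigned int example_stats_read(void)
{
	unsigned int cpu, total = 0;

	for (cpu = 0; cpu < NR_CPUS; cpu++) {
		if (!cpu_possible(cpu))
			continue;
		total += per_cpu_ptr(example_stats, cpu)->in_slow_tot;
	}
	return total;
}
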
@@ -605,18 +608,25 @@ static struct rtable **rt_remove_balanced_route(struct rtable **chain_head,
 /* This runs via a timer and thus is always in BH context. */
 static void rt_check_expire(unsigned long dummy)
 {
-       static int rover;
-       int i = rover, t;
+       static unsigned int rover;
+       unsigned int i = rover, goal;
        struct rtable *rth, **rthp;
        unsigned long now = jiffies;
-
-       for (t = ip_rt_gc_interval << rt_hash_log; t >= 0;
-            t -= ip_rt_gc_timeout) {
+       u64 mult;
+
+       mult = ((u64)ip_rt_gc_interval) << rt_hash_log;
+       if (ip_rt_gc_timeout > 1)
+               do_div(mult, ip_rt_gc_timeout);
+       goal = (unsigned int)mult;
+       if (goal > rt_hash_mask) goal = rt_hash_mask + 1;
+       for (; goal > 0; goal--) {
                unsigned long tmo = ip_rt_gc_timeout;
 
                i = (i + 1) & rt_hash_mask;
                rthp = &rt_hash_table[i].chain;
 
+               if (*rthp == 0)
+                       continue;
                spin_lock(rt_hash_lock_addr(i));
                while ((rth = *rthp) != NULL) {
                        if (rth->u.dst.expires) {
@@ -657,7 +667,7 @@ static void rt_check_expire(unsigned long dummy)
                        break;
        }
        rover = i;
-       mod_timer(&rt_periodic_timer, now + ip_rt_gc_interval);
+       mod_timer(&rt_periodic_timer, jiffies + ip_rt_gc_interval);
 }
 
 /* This can run from both BH and non-BH contexts, the latter
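
With the change above, rt_check_expire() no longer walks the whole table on every timer tick: goal caps the number of buckets scanned per run at roughly ip_rt_gc_interval / ip_rt_gc_timeout of the (1 << rt_hash_log) buckets, and the 64-bit do_div() keeps interval << rt_hash_log from overflowing. A rough worked example, a sketch only, assuming the usual defaults of ip_rt_gc_interval = 60*HZ, ip_rt_gc_timeout = 300*HZ and a 2^17-bucket table:

/* Sketch, not from the patch: the same arithmetic with example numbers.
 * Needs do_div() from <asm/div64.h>. */
static unsigned int example_scan_goal(void)
{
	u64 mult = (u64)(60 * HZ) << 17;	/* ip_rt_gc_interval << rt_hash_log */

	do_div(mult, 300 * HZ);			/* / ip_rt_gc_timeout; HZ cancels */
	return (unsigned int)mult;		/* (1 << 17) / 5 == 26214: about a
						 * fifth of the table per run */
}

So the whole cache is still revisited about once per gc timeout, just spread over several timer runs.
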
@@ -1677,7 +1687,7 @@ static void ip_handle_martian_source(struct net_device *dev,
                printk(KERN_WARNING "martian source %u.%u.%u.%u from "
                        "%u.%u.%u.%u, on dev %s\n",
                        NIPQUAD(daddr), NIPQUAD(saddr), dev->name);
-               if (dev->hard_header_len) {
+               if (dev->hard_header_len && skb->mac.raw) {
                        int i;
                        unsigned char *p = skb->mac.raw;
                        printk(KERN_WARNING "ll header: ");
@@ -2592,6 +2602,8 @@ int __ip_route_output_key(struct rtable **rp, const struct flowi *flp)
        return ip_route_output_slow(rp, flp);
 }
 
+EXPORT_SYMBOL_GPL(__ip_route_output_key);
+
 int ip_route_output_flow(struct rtable **rp, struct flowi *flp, struct sock *sk, int flags)
 {
        int err;
@@ -2610,6 +2622,8 @@ int ip_route_output_flow(struct rtable **rp, struct flowi *flp, struct sock *sk,
        return 0;
 }
 
+EXPORT_SYMBOL_GPL(ip_route_output_flow);
+
 int ip_route_output_key(struct rtable **rp, struct flowi *flp)
 {
        return ip_route_output_flow(rp, flp, NULL, 0);
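
The EXPORT_SYMBOL_GPL() additions above make the output-route lookups callable from GPL modules. For reference, a minimal sketch of a caller, assuming the 2.6-era struct flowi layout (nl_u.ip4_u) and that the caller drops the reference with ip_rt_put(); example_route_lookup() is hypothetical:

/* Sketch only: resolving an output route for a given source/destination. */
static struct rtable *example_route_lookup(__u32 daddr, __u32 saddr, __u8 tos)
{
	struct flowi fl = { .nl_u = { .ip4_u = { .daddr = daddr,
						 .saddr = saddr,
						 .tos   = tos } } };
	struct rtable *rt;

	if (ip_route_output_key(&rt, &fl))	/* non-zero means no route */
		return NULL;
	/* ... use rt->u.dst ... */
	return rt;				/* caller must ip_rt_put(rt) */
}
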
@@ -3103,12 +3117,14 @@ __setup("rhash_entries=", set_rhash_entries);
 
 int __init ip_rt_init(void)
 {
-       int order, goal, rc = 0;
+       int rc = 0;
 
        rt_hash_rnd = (int) ((num_physpages ^ (num_physpages>>8)) ^
                             (jiffies ^ (jiffies >> 7)));
 
 #ifdef CONFIG_NET_CLS_ROUTE
+       {
+       int order;
        for (order = 0;
             (PAGE_SIZE << order) < 256 * sizeof(struct ip_rt_acct) * NR_CPUS; order++)
                /* NOTHING */;
@@ -3116,6 +3132,7 @@ int __init ip_rt_init(void)
        if (!ip_rt_acct)
                panic("IP: failed to allocate ip_rt_acct\n");
        memset(ip_rt_acct, 0, PAGE_SIZE << order);
+       }
 #endif
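
The block above, with order now scoped locally since the hash-table sizing below no longer reuses it, picks the smallest page order that holds 256 accounting slots per possible CPU. A rough worked equivalent, assuming 4 KB pages, NR_CPUS = 32 and a 16-byte struct ip_rt_acct (four u32 counters); get_order() gives the same answer as the open-coded loop:

/* Sketch, not from the patch: 256 * 16 * 32 = 128 KB, i.e. order 5 with
 * 4 KB pages, which is where the loop above stops. */
static int example_rt_acct_order(void)
{
	return get_order(256 * sizeof(struct ip_rt_acct) * 32 /* NR_CPUS */);
}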
 
        ipv4_dst_ops.kmem_cachep = kmem_cache_create("ip_dst_cache",
@@ -3126,32 +3143,17 @@ int __init ip_rt_init(void)
        if (!ipv4_dst_ops.kmem_cachep)
                panic("IP: failed to allocate ip_dst_cache\n");
 
-       goal = num_physpages >> (26 - PAGE_SHIFT);
-       if (rhash_entries)
-               goal = (rhash_entries * sizeof(struct rt_hash_bucket)) >> PAGE_SHIFT;
-       for (order = 0; (1UL << order) < goal; order++)
-               /* NOTHING */;
-
-       do {
-               rt_hash_mask = (1UL << order) * PAGE_SIZE /
-                       sizeof(struct rt_hash_bucket);
-               while (rt_hash_mask & (rt_hash_mask - 1))
-                       rt_hash_mask--;
-               rt_hash_table = (struct rt_hash_bucket *)
-                       __get_free_pages(GFP_ATOMIC, order);
-       } while (rt_hash_table == NULL && --order > 0);
-
-       if (!rt_hash_table)
-               panic("Failed to allocate IP route cache hash table\n");
-
-       printk(KERN_INFO "IP: routing cache hash table of %u buckets, %ldKbytes\n",
-              rt_hash_mask,
-              (long) (rt_hash_mask * sizeof(struct rt_hash_bucket)) / 1024);
-
-       for (rt_hash_log = 0; (1 << rt_hash_log) != rt_hash_mask; rt_hash_log++)
-               /* NOTHING */;
-
-       rt_hash_mask--;
+       rt_hash_table = (struct rt_hash_bucket *)
+               alloc_large_system_hash("IP route cache",
+                                       sizeof(struct rt_hash_bucket),
+                                       rhash_entries,
+                                       (num_physpages >= 128 * 1024) ?
+                                               (27 - PAGE_SHIFT) :
+                                               (29 - PAGE_SHIFT),
+                                       HASH_HIGHMEM,
+                                       &rt_hash_log,
+                                       &rt_hash_mask,
+                                       0);
        memset(rt_hash_table, 0, (rt_hash_mask + 1) * sizeof(struct rt_hash_bucket));
        rt_hash_lock_init();
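
alloc_large_system_hash() (declared in <linux/bootmem.h>, hence the new include) replaces the removed open-coded sizing and allocation loop: it derives a power-of-two bucket count from available memory unless rhash_entries= was given on the command line, and reports the resulting geometry back through &rt_hash_log and &rt_hash_mask. A minimal sketch of the invariant the rest of the file relies on; example_bucket() is hypothetical:

/* Sketch only: how the reported geometry is used.  The bucket count is a
 * power of two, so hash values are folded with the mask instead of '%'. */
static struct rt_hash_bucket *example_bucket(unsigned int hash)
{
	/* rt_hash_mask == (1U << rt_hash_log) - 1 after the call above */
	return &rt_hash_table[hash & rt_hash_mask];
}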