[PATCH] for_each_possible_cpu: network codes
author     KAMEZAWA Hiroyuki <kamezawa.hiroyu@jp.fujitsu.com>
           Tue, 11 Apr 2006 05:52:50 +0000 (22:52 -0700)
committer  Linus Torvalds <torvalds@g5.osdl.org>
           Tue, 11 Apr 2006 13:18:31 +0000 (06:18 -0700)
for_each_cpu() actually iterates across all possible CPUs.  We've had mistakes
in the past where people were using for_each_cpu() where they should have been
iterating across only online or present CPUs.  This is inefficient and
possibly buggy.

We're renaming for_each_cpu() to for_each_possible_cpu() to avoid this in the
future.

This patch replaces for_each_cpu() with for_each_possible_cpu() under /net.
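
For reference, a minimal sketch (not part of this patch) of when each
iterator applies; the per-CPU variable "my_counter" and both helper
functions below are made up for illustration only:

#include <linux/percpu.h>
#include <linux/cpumask.h>

static DEFINE_PER_CPU(unsigned long, my_counter);

/*
 * Summing a per-CPU statistic: any possible CPU may have updated its
 * copy at some point (e.g. before being hot-unplugged), so walk the
 * possible map -- the pattern the converted /net code uses.
 */
static unsigned long my_counter_total(void)
{
	unsigned long sum = 0;
	int cpu;

	for_each_possible_cpu(cpu)
		sum += per_cpu(my_counter, cpu);
	return sum;
}

/*
 * Work that only makes sense on CPUs that are currently up, e.g.
 * sizing something per running CPU: here the online map is the right
 * scope, and walking every possible CPU would be the sort of waste
 * the rename is meant to make easy to spot.
 */
static unsigned int my_running_cpu_count(void)
{
	unsigned int n = 0;
	int cpu;

	for_each_online_cpu(cpu)
		n++;
	return n;	/* equivalent to num_online_cpus() */
}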

Signed-off-by: KAMEZAWA Hiroyuki <kamezawa.hiroyu@jp.fujitsu.com>
Acked-by: "David S. Miller" <davem@davemloft.net>
Signed-off-by: Andrew Morton <akpm@osdl.org>
Signed-off-by: Linus Torvalds <torvalds@osdl.org>
20 files changed:
net/bridge/netfilter/ebtables.c
net/core/dev.c
net/core/flow.c
net/core/neighbour.c
net/core/utils.c
net/ipv4/icmp.c
net/ipv4/ipcomp.c
net/ipv4/netfilter/arp_tables.c
net/ipv4/netfilter/ip_conntrack_core.c
net/ipv4/netfilter/ip_tables.c
net/ipv4/proc.c
net/ipv4/route.c
net/ipv6/icmp.c
net/ipv6/ipcomp6.c
net/ipv6/netfilter/ip6_tables.c
net/ipv6/proc.c
net/netfilter/nf_conntrack_core.c
net/netfilter/x_tables.c
net/sctp/proc.c
net/socket.c

diff --git a/net/bridge/netfilter/ebtables.c b/net/bridge/netfilter/ebtables.c
index 01eae97c53d9db212d1e52b8209a9067961dea67..66bd93252c4e0b02286d5018e473ae79750698ea 100644
--- a/net/bridge/netfilter/ebtables.c
+++ b/net/bridge/netfilter/ebtables.c
@@ -829,7 +829,7 @@ static int translate_table(struct ebt_replace *repl,
                                                * sizeof(struct ebt_chainstack));
                if (!newinfo->chainstack)
                        return -ENOMEM;
-               for_each_cpu(i) {
+               for_each_possible_cpu(i) {
                        newinfo->chainstack[i] =
                           vmalloc(udc_cnt * sizeof(struct ebt_chainstack));
                        if (!newinfo->chainstack[i]) {
@@ -901,7 +901,7 @@ static void get_counters(struct ebt_counter *oldcounters,
               sizeof(struct ebt_counter) * nentries);
 
        /* add other counters to those of cpu 0 */
-       for_each_cpu(cpu) {
+       for_each_possible_cpu(cpu) {
                if (cpu == 0)
                        continue;
                counter_base = COUNTER_BASE(oldcounters, nentries, cpu);
@@ -1036,7 +1036,7 @@ static int do_replace(void __user *user, unsigned int len)
 
        vfree(table->entries);
        if (table->chainstack) {
-               for_each_cpu(i)
+               for_each_possible_cpu(i)
                        vfree(table->chainstack[i]);
                vfree(table->chainstack);
        }
@@ -1054,7 +1054,7 @@ free_counterstmp:
        vfree(counterstmp);
        /* can be initialized in translate_table() */
        if (newinfo->chainstack) {
-               for_each_cpu(i)
+               for_each_possible_cpu(i)
                        vfree(newinfo->chainstack[i]);
                vfree(newinfo->chainstack);
        }
@@ -1201,7 +1201,7 @@ free_unlock:
        mutex_unlock(&ebt_mutex);
 free_chainstack:
        if (newinfo->chainstack) {
-               for_each_cpu(i)
+               for_each_possible_cpu(i)
                        vfree(newinfo->chainstack[i]);
                vfree(newinfo->chainstack);
        }
@@ -1224,7 +1224,7 @@ void ebt_unregister_table(struct ebt_table *table)
        mutex_unlock(&ebt_mutex);
        vfree(table->private->entries);
        if (table->private->chainstack) {
-               for_each_cpu(i)
+               for_each_possible_cpu(i)
                        vfree(table->private->chainstack[i]);
                vfree(table->private->chainstack);
        }
diff --git a/net/core/dev.c b/net/core/dev.c
index 2731570eba5b35a21c311dd587057c39805082f1..83231a27ae028caa5c0757d35c9d107220e8711e 100644
--- a/net/core/dev.c
+++ b/net/core/dev.c
@@ -3346,7 +3346,7 @@ static int __init net_dev_init(void)
         *      Initialise the packet receive queues.
         */
 
-       for_each_cpu(i) {
+       for_each_possible_cpu(i) {
                struct softnet_data *queue;
 
                queue = &per_cpu(softnet_data, i);
diff --git a/net/core/flow.c b/net/core/flow.c
index 885a2f655db08ea1c44810876611b805fb261da4..2191af5f26acbfe61f6e1633de7d40cfbd83c27c 100644
--- a/net/core/flow.c
+++ b/net/core/flow.c
@@ -79,7 +79,7 @@ static void flow_cache_new_hashrnd(unsigned long arg)
 {
        int i;
 
-       for_each_cpu(i)
+       for_each_possible_cpu(i)
                flow_hash_rnd_recalc(i) = 1;
 
        flow_hash_rnd_timer.expires = jiffies + FLOW_HASH_RND_PERIOD;
@@ -361,7 +361,7 @@ static int __init flow_cache_init(void)
        flow_hash_rnd_timer.expires = jiffies + FLOW_HASH_RND_PERIOD;
        add_timer(&flow_hash_rnd_timer);
 
-       for_each_cpu(i)
+       for_each_possible_cpu(i)
                flow_cache_cpu_prepare(i);
 
        hotcpu_notifier(flow_cache_cpu, 0);
diff --git a/net/core/neighbour.c b/net/core/neighbour.c
index 2ec8693fb778f581dd114838700131d810016e3d..4cf878efdb49eb9fc7d02b7b41f83601ab29d079 100644
--- a/net/core/neighbour.c
+++ b/net/core/neighbour.c
@@ -1627,7 +1627,7 @@ static int neightbl_fill_info(struct neigh_table *tbl, struct sk_buff *skb,
 
                memset(&ndst, 0, sizeof(ndst));
 
-               for_each_cpu(cpu) {
+               for_each_possible_cpu(cpu) {
                        struct neigh_statistics *st;
 
                        st = per_cpu_ptr(tbl->stats, cpu);
diff --git a/net/core/utils.c b/net/core/utils.c
index fdc4f38bc46ccfbcc86c4a36698489cfcd0ba08b..4f96f389243d7d6322288609f6477c74eab5ac09 100644
--- a/net/core/utils.c
+++ b/net/core/utils.c
@@ -121,7 +121,7 @@ void __init net_random_init(void)
 {
        int i;
 
-       for_each_cpu(i) {
+       for_each_possible_cpu(i) {
                struct nrnd_state *state = &per_cpu(net_rand_state,i);
                __net_srandom(state, i+jiffies);
        }
@@ -133,7 +133,7 @@ static int net_random_reseed(void)
        unsigned long seed[NR_CPUS];
 
        get_random_bytes(seed, sizeof(seed));
-       for_each_cpu(i) {
+       for_each_possible_cpu(i) {
                struct nrnd_state *state = &per_cpu(net_rand_state,i);
                __net_srandom(state, seed[i]);
        }
diff --git a/net/ipv4/icmp.c b/net/ipv4/icmp.c
index 9831fd2c73a0a222290f504e44d413627c5fe216..2a0455911ee0a9c9119472c01d4d4717f58e190b 100644
--- a/net/ipv4/icmp.c
+++ b/net/ipv4/icmp.c
@@ -1107,7 +1107,7 @@ void __init icmp_init(struct net_proto_family *ops)
        struct inet_sock *inet;
        int i;
 
-       for_each_cpu(i) {
+       for_each_possible_cpu(i) {
                int err;
 
                err = sock_create_kern(PF_INET, SOCK_RAW, IPPROTO_ICMP,
diff --git a/net/ipv4/ipcomp.c b/net/ipv4/ipcomp.c
index 0a1d86a0f63289844a7dafccf5a726644ae38f45..04a429465665cd8ff0c377eed4d1caf0d40a4a3a 100644
--- a/net/ipv4/ipcomp.c
+++ b/net/ipv4/ipcomp.c
@@ -290,7 +290,7 @@ static void ipcomp_free_scratches(void)
        if (!scratches)
                return;
 
-       for_each_cpu(i) {
+       for_each_possible_cpu(i) {
                void *scratch = *per_cpu_ptr(scratches, i);
                if (scratch)
                        vfree(scratch);
@@ -313,7 +313,7 @@ static void **ipcomp_alloc_scratches(void)
 
        ipcomp_scratches = scratches;
 
-       for_each_cpu(i) {
+       for_each_possible_cpu(i) {
                void *scratch = vmalloc(IPCOMP_SCRATCH_SIZE);
                if (!scratch)
                        return NULL;
@@ -344,7 +344,7 @@ static void ipcomp_free_tfms(struct crypto_tfm **tfms)
        if (!tfms)
                return;
 
-       for_each_cpu(cpu) {
+       for_each_possible_cpu(cpu) {
                struct crypto_tfm *tfm = *per_cpu_ptr(tfms, cpu);
                crypto_free_tfm(tfm);
        }
@@ -384,7 +384,7 @@ static struct crypto_tfm **ipcomp_alloc_tfms(const char *alg_name)
        if (!tfms)
                goto error;
 
-       for_each_cpu(cpu) {
+       for_each_possible_cpu(cpu) {
                struct crypto_tfm *tfm = crypto_alloc_tfm(alg_name, 0);
                if (!tfm)
                        goto error;
diff --git a/net/ipv4/netfilter/arp_tables.c b/net/ipv4/netfilter/arp_tables.c
index a44a5d73457da49a275d8c55a475a119e514db8f..c2d92f99a2b8b0b18bc1f17e1ec742a2c6051de5 100644
--- a/net/ipv4/netfilter/arp_tables.c
+++ b/net/ipv4/netfilter/arp_tables.c
@@ -646,7 +646,7 @@ static int translate_table(const char *name,
        }
 
        /* And one copy for every other CPU */
-       for_each_cpu(i) {
+       for_each_possible_cpu(i) {
                if (newinfo->entries[i] && newinfo->entries[i] != entry0)
                        memcpy(newinfo->entries[i], entry0, newinfo->size);
        }
@@ -696,7 +696,7 @@ static void get_counters(const struct xt_table_info *t,
                           counters,
                           &i);
 
-       for_each_cpu(cpu) {
+       for_each_possible_cpu(cpu) {
                if (cpu == curcpu)
                        continue;
                i = 0;
diff --git a/net/ipv4/netfilter/ip_conntrack_core.c b/net/ipv4/netfilter/ip_conntrack_core.c
index ceaabc18202b71e8b26c7bdbb23fa08fc0c9a780..979a2eac6f003101438ecd518b1c4d9c2c365bf8 100644
--- a/net/ipv4/netfilter/ip_conntrack_core.c
+++ b/net/ipv4/netfilter/ip_conntrack_core.c
@@ -133,7 +133,7 @@ static void ip_ct_event_cache_flush(void)
        struct ip_conntrack_ecache *ecache;
        int cpu;
 
-       for_each_cpu(cpu) {
+       for_each_possible_cpu(cpu) {
                ecache = &per_cpu(ip_conntrack_ecache, cpu);
                if (ecache->ct)
                        ip_conntrack_put(ecache->ct);
diff --git a/net/ipv4/netfilter/ip_tables.c b/net/ipv4/netfilter/ip_tables.c
index d5b8cdd361ce875bfe7c078f1f173db58153c85e..d25ac8ba6ebaedb0e0e1cbe658b80c94e0814b39 100644
--- a/net/ipv4/netfilter/ip_tables.c
+++ b/net/ipv4/netfilter/ip_tables.c
@@ -735,7 +735,7 @@ translate_table(const char *name,
        }
 
        /* And one copy for every other CPU */
-       for_each_cpu(i) {
+       for_each_possible_cpu(i) {
                if (newinfo->entries[i] && newinfo->entries[i] != entry0)
                        memcpy(newinfo->entries[i], entry0, newinfo->size);
        }
@@ -788,7 +788,7 @@ get_counters(const struct xt_table_info *t,
                          counters,
                          &i);
 
-       for_each_cpu(cpu) {
+       for_each_possible_cpu(cpu) {
                if (cpu == curcpu)
                        continue;
                i = 0;
diff --git a/net/ipv4/proc.c b/net/ipv4/proc.c
index 1b167c4bb3beb0254f446a6ab21ca41fd87a0455..d61e2a9d394d24d22d282deabba9fafeb088affa 100644
--- a/net/ipv4/proc.c
+++ b/net/ipv4/proc.c
@@ -49,7 +49,7 @@ static int fold_prot_inuse(struct proto *proto)
        int res = 0;
        int cpu;
 
-       for_each_cpu(cpu)
+       for_each_possible_cpu(cpu)
                res += proto->stats[cpu].inuse;
 
        return res;
@@ -91,7 +91,7 @@ fold_field(void *mib[], int offt)
        unsigned long res = 0;
        int i;
 
-       for_each_cpu(i) {
+       for_each_possible_cpu(i) {
                res += *(((unsigned long *) per_cpu_ptr(mib[0], i)) + offt);
                res += *(((unsigned long *) per_cpu_ptr(mib[1], i)) + offt);
        }
diff --git a/net/ipv4/route.c b/net/ipv4/route.c
index 94fcbc5e5a1b62cfd696a6751dfcd17816a74f4a..ff434821909f12f26a6d142d3c3ae843580deed3 100644
--- a/net/ipv4/route.c
+++ b/net/ipv4/route.c
@@ -3083,7 +3083,7 @@ static int ip_rt_acct_read(char *buffer, char **start, off_t offset,
                memcpy(dst, src, length);
 
                /* Add the other cpus in, one int at a time */
-               for_each_cpu(i) {
+               for_each_possible_cpu(i) {
                        unsigned int j;
 
                        src = ((u32 *) IP_RT_ACCT_CPU(i)) + offset;
diff --git a/net/ipv6/icmp.c b/net/ipv6/icmp.c
index 21eb725e885ffe85bfa72d1f35ad9993911cd68f..1044b6fce0d5d472b11b0eef3a2e261650869e5e 100644
--- a/net/ipv6/icmp.c
+++ b/net/ipv6/icmp.c
@@ -717,7 +717,7 @@ int __init icmpv6_init(struct net_proto_family *ops)
        struct sock *sk;
        int err, i, j;
 
-       for_each_cpu(i) {
+       for_each_possible_cpu(i) {
                err = sock_create_kern(PF_INET6, SOCK_RAW, IPPROTO_ICMPV6,
                                       &per_cpu(__icmpv6_socket, i));
                if (err < 0) {
@@ -763,7 +763,7 @@ void icmpv6_cleanup(void)
 {
        int i;
 
-       for_each_cpu(i) {
+       for_each_possible_cpu(i) {
                sock_release(per_cpu(__icmpv6_socket, i));
        }
        inet6_del_protocol(&icmpv6_protocol, IPPROTO_ICMPV6);
diff --git a/net/ipv6/ipcomp6.c b/net/ipv6/ipcomp6.c
index 00f3fadfcca7f4f736d1038d2919ba3e8d668296..05eb67def39f17e9c9b84d9270fdbe6fe1ee2a87 100644
--- a/net/ipv6/ipcomp6.c
+++ b/net/ipv6/ipcomp6.c
@@ -290,7 +290,7 @@ static void ipcomp6_free_scratches(void)
        if (!scratches)
                return;
 
-       for_each_cpu(i) {
+       for_each_possible_cpu(i) {
                void *scratch = *per_cpu_ptr(scratches, i);
 
                vfree(scratch);
@@ -313,7 +313,7 @@ static void **ipcomp6_alloc_scratches(void)
 
        ipcomp6_scratches = scratches;
 
-       for_each_cpu(i) {
+       for_each_possible_cpu(i) {
                void *scratch = vmalloc(IPCOMP_SCRATCH_SIZE);
                if (!scratch)
                        return NULL;
@@ -344,7 +344,7 @@ static void ipcomp6_free_tfms(struct crypto_tfm **tfms)
        if (!tfms)
                return;
 
-       for_each_cpu(cpu) {
+       for_each_possible_cpu(cpu) {
                struct crypto_tfm *tfm = *per_cpu_ptr(tfms, cpu);
                crypto_free_tfm(tfm);
        }
@@ -384,7 +384,7 @@ static struct crypto_tfm **ipcomp6_alloc_tfms(const char *alg_name)
        if (!tfms)
                goto error;
 
-       for_each_cpu(cpu) {
+       for_each_possible_cpu(cpu) {
                struct crypto_tfm *tfm = crypto_alloc_tfm(alg_name, 0);
                if (!tfm)
                        goto error;
diff --git a/net/ipv6/netfilter/ip6_tables.c b/net/ipv6/netfilter/ip6_tables.c
index 3ecf2db841f80802f2971faa34e592729339ee96..642b4b11464f1594c28cc99550fccd809d96d4df 100644
--- a/net/ipv6/netfilter/ip6_tables.c
+++ b/net/ipv6/netfilter/ip6_tables.c
@@ -788,7 +788,7 @@ translate_table(const char *name,
        }
 
        /* And one copy for every other CPU */
-       for_each_cpu(i) {
+       for_each_possible_cpu(i) {
                if (newinfo->entries[i] && newinfo->entries[i] != entry0)
                        memcpy(newinfo->entries[i], entry0, newinfo->size);
        }
@@ -841,7 +841,7 @@ get_counters(const struct xt_table_info *t,
                           counters,
                           &i);
 
-       for_each_cpu(cpu) {
+       for_each_possible_cpu(cpu) {
                if (cpu == curcpu)
                        continue;
                i = 0;
diff --git a/net/ipv6/proc.c b/net/ipv6/proc.c
index 4238b1ed886012a331b1c5dd16ddffa1ea2eaaae..779ddf77f4d41d0455d449ad2e4b4f8eeaf8f984 100644
--- a/net/ipv6/proc.c
+++ b/net/ipv6/proc.c
@@ -38,7 +38,7 @@ static int fold_prot_inuse(struct proto *proto)
        int res = 0;
        int cpu;
 
-       for_each_cpu(cpu)
+       for_each_possible_cpu(cpu)
                res += proto->stats[cpu].inuse;
 
        return res;
@@ -140,7 +140,7 @@ fold_field(void *mib[], int offt)
         unsigned long res = 0;
         int i;
  
-        for_each_cpu(i) {
+        for_each_possible_cpu(i) {
                 res += *(((unsigned long *)per_cpu_ptr(mib[0], i)) + offt);
                 res += *(((unsigned long *)per_cpu_ptr(mib[1], i)) + offt);
         }
diff --git a/net/netfilter/nf_conntrack_core.c b/net/netfilter/nf_conntrack_core.c
index 56389c83557c6f791c0c01dfaa5f3088e489898c..e581190fb6c374ff691679e780dffeec6c213cd3 100644
--- a/net/netfilter/nf_conntrack_core.c
+++ b/net/netfilter/nf_conntrack_core.c
@@ -146,7 +146,7 @@ static void nf_ct_event_cache_flush(void)
        struct nf_conntrack_ecache *ecache;
        int cpu;
 
-       for_each_cpu(cpu) {
+       for_each_possible_cpu(cpu) {
                ecache = &per_cpu(nf_conntrack_ecache, cpu);
                if (ecache->ct)
                        nf_ct_put(ecache->ct);
diff --git a/net/netfilter/x_tables.c b/net/netfilter/x_tables.c
index feb8a9e066b08939f4988f23d3001b859ef31b3b..00cf0a4f4d92ab3a54ca6b503a83d7312efd74cb 100644
--- a/net/netfilter/x_tables.c
+++ b/net/netfilter/x_tables.c
@@ -413,7 +413,7 @@ struct xt_table_info *xt_alloc_table_info(unsigned int size)
 
        newinfo->size = size;
 
-       for_each_cpu(cpu) {
+       for_each_possible_cpu(cpu) {
                if (size <= PAGE_SIZE)
                        newinfo->entries[cpu] = kmalloc_node(size,
                                                        GFP_KERNEL,
@@ -436,7 +436,7 @@ void xt_free_table_info(struct xt_table_info *info)
 {
        int cpu;
 
-       for_each_cpu(cpu) {
+       for_each_possible_cpu(cpu) {
                if (info->size <= PAGE_SIZE)
                        kfree(info->entries[cpu]);
                else
diff --git a/net/sctp/proc.c b/net/sctp/proc.c
index d47a52c303a81da44da5f8d8c478f77049e69483..5b3b0e0ae7e506391d22882ec0d7a91a1411800b 100644
--- a/net/sctp/proc.c
+++ b/net/sctp/proc.c
@@ -69,7 +69,7 @@ fold_field(void *mib[], int nr)
        unsigned long res = 0;
        int i;
 
-       for_each_cpu(i) {
+       for_each_possible_cpu(i) {
                res +=
                    *((unsigned long *) (((void *) per_cpu_ptr(mib[0], i)) +
                                         sizeof (unsigned long) * nr));
diff --git a/net/socket.c b/net/socket.c
index b807f360e02ccc336ad45f606feb92506b21af77..00cdfd2088dbd444b9ecb5a00b93be30ebec0fdc 100644
--- a/net/socket.c
+++ b/net/socket.c
@@ -2136,7 +2136,7 @@ void socket_seq_show(struct seq_file *seq)
        int cpu;
        int counter = 0;
 
-       for_each_cpu(cpu)
+       for_each_possible_cpu(cpu)
                counter += per_cpu(sockets_in_use, cpu);
 
        /* It can be negative, by the way. 8) */