[NetLabel]: make the CIPSOv4 cache spinlocks bottom half safe
author		Paul Moore <paul.moore@hp.com>
		Mon, 25 Sep 2006 22:52:37 +0000 (15:52 -0700)
committer	David S. Miller <davem@davemloft.net>
		Mon, 25 Sep 2006 22:52:37 +0000 (15:52 -0700)
The CIPSOv4 cache traversal routines are triggered both by userspace events
(cache invalidation due to DOI removal or an updated SELinux policy) and by
network packet processing events.  As a result, the existing CIPSOv4 cache
spinlocks are a problem because they are not bottom-half/softirq safe.  This
patch converts the CIPSOv4 cache spin_[un]lock() calls into spin_[un]lock_bh()
calls to address this problem.
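
For context, a minimal sketch of the deadlock the _bh variants prevent when a
lock is shared between process context and softirq context.  The lock and
function names below are hypothetical and only stand in for the CIPSOv4 cache
lock and its callers:

#include <linux/spinlock.h>

static DEFINE_SPINLOCK(example_lock);	/* hypothetical lock */

/* Process context, e.g. a userspace-triggered cache invalidation. */
static void example_invalidate(void)
{
	/*
	 * With plain spin_lock(), a softirq arriving on this CPU while
	 * the lock is held would spin on the same lock in example_rx()
	 * and deadlock the CPU.  spin_lock_bh() disables bottom halves
	 * locally for the duration of the critical section.
	 */
	spin_lock_bh(&example_lock);
	/* ... walk and free cache entries ... */
	spin_unlock_bh(&example_lock);
}

/* Softirq context, e.g. inbound packet processing. */
static void example_rx(void)
{
	spin_lock_bh(&example_lock);
	/* ... cache lookup ... */
	spin_unlock_bh(&example_lock);
}

Since these locks are never taken from hardirq context, spin_lock_bh() is
sufficient and cheaper than spin_lock_irqsave(), which would additionally
disable hardware interrupts.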

Signed-off-by: Paul Moore <paul.moore@hp.com>
Signed-off-by: David S. Miller <davem@davemloft.net>
net/ipv4/cipso_ipv4.c

index a3bae2ca8acc390596a2e7ddbab083abb55b8bfe..87e71563335d61fb0f551b6b62e4196a8cd1bea9 100644
@@ -259,7 +259,7 @@ void cipso_v4_cache_invalidate(void)
        u32 iter;
 
        for (iter = 0; iter < CIPSO_V4_CACHE_BUCKETS; iter++) {
-               spin_lock(&cipso_v4_cache[iter].lock);
+               spin_lock_bh(&cipso_v4_cache[iter].lock);
                list_for_each_entry_safe(entry,
                                         tmp_entry,
                                         &cipso_v4_cache[iter].list, list) {
@@ -267,7 +267,7 @@ void cipso_v4_cache_invalidate(void)
                        cipso_v4_cache_entry_free(entry);
                }
                cipso_v4_cache[iter].size = 0;
-               spin_unlock(&cipso_v4_cache[iter].lock);
+               spin_unlock_bh(&cipso_v4_cache[iter].lock);
        }
 
        return;
@@ -309,7 +309,7 @@ static int cipso_v4_cache_check(const unsigned char *key,
 
        hash = cipso_v4_map_cache_hash(key, key_len);
        bkt = hash & (CIPSO_V4_CACHE_BUCKETBITS - 1);
-       spin_lock(&cipso_v4_cache[bkt].lock);
+       spin_lock_bh(&cipso_v4_cache[bkt].lock);
        list_for_each_entry(entry, &cipso_v4_cache[bkt].list, list) {
                if (entry->hash == hash &&
                    entry->key_len == key_len &&
@@ -318,7 +318,7 @@ static int cipso_v4_cache_check(const unsigned char *key,
                        secattr->cache.free = entry->lsm_data.free;
                        secattr->cache.data = entry->lsm_data.data;
                        if (prev_entry == NULL) {
-                               spin_unlock(&cipso_v4_cache[bkt].lock);
+                               spin_unlock_bh(&cipso_v4_cache[bkt].lock);
                                return 0;
                        }
 
@@ -333,12 +333,12 @@ static int cipso_v4_cache_check(const unsigned char *key,
                                           &prev_entry->list);
                        }
 
-                       spin_unlock(&cipso_v4_cache[bkt].lock);
+                       spin_unlock_bh(&cipso_v4_cache[bkt].lock);
                        return 0;
                }
                prev_entry = entry;
        }
-       spin_unlock(&cipso_v4_cache[bkt].lock);
+       spin_unlock_bh(&cipso_v4_cache[bkt].lock);
 
        return -ENOENT;
 }
@@ -387,7 +387,7 @@ int cipso_v4_cache_add(const struct sk_buff *skb,
        entry->lsm_data.data = secattr->cache.data;
 
        bkt = entry->hash & (CIPSO_V4_CACHE_BUCKETBITS - 1);
-       spin_lock(&cipso_v4_cache[bkt].lock);
+       spin_lock_bh(&cipso_v4_cache[bkt].lock);
        if (cipso_v4_cache[bkt].size < cipso_v4_cache_bucketsize) {
                list_add(&entry->list, &cipso_v4_cache[bkt].list);
                cipso_v4_cache[bkt].size += 1;
@@ -398,7 +398,7 @@ int cipso_v4_cache_add(const struct sk_buff *skb,
                list_add(&entry->list, &cipso_v4_cache[bkt].list);
                cipso_v4_cache_entry_free(old_entry);
        }
-       spin_unlock(&cipso_v4_cache[bkt].lock);
+       spin_unlock_bh(&cipso_v4_cache[bkt].lock);
 
        return 0;