author	Paul Moore <paul.moore@hp.com>	2006-09-26 02:52:37 +0400
committer	David S. Miller <davem@davemloft.net>	2006-09-26 02:52:37 +0400
commit	609c92feea5652809319bb77f19d24a44615687d (patch)
tree	01c7523782233356d0a373f775f21fa52099cd23 /net/ipv4
parent	14a72f53fb1bb5d5c2bdd8cf172219519664729a (diff)
[NetLabel]: make the CIPSOv4 cache spinlocks bottom half safe
The CIPSOv4 cache traversal routines are triggered by both userspace events (cache invalidation due to DOI removal or updated SELinux policy) and network packet processing events. As a result there is a problem with the existing CIPSOv4 cache spinlocks: they are not bottom-half/softirq safe. This patch converts the CIPSOv4 cache spin_[un]lock() calls into spin_[un]lock_bh() calls to address this problem.

Signed-off-by: Paul Moore <paul.moore@hp.com>
Signed-off-by: David S. Miller <davem@davemloft.net>
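Why the _bh variants matter here: a lock taken in process context with plain spin_lock() can deadlock a CPU if a softirq fires while the lock is held and the softirq handler then spins on the same lock. spin_lock_bh() disables bottom-half processing on the local CPU for the duration of the critical section, so the softirq is deferred until the lock is released. The following minimal sketch is not part of the patch; my_cache_lock, my_cache_flush() and my_cache_rx() are hypothetical stand-ins for the CIPSOv4 cache lock, the userspace-triggered invalidation path, and the packet processing path:

/*
 * Minimal sketch, assuming a single lock shared between process
 * context and softirq context. All names here are hypothetical.
 */
#include <linux/spinlock.h>

static DEFINE_SPINLOCK(my_cache_lock);

/* Process context, e.g. cache invalidation on DOI removal. */
static void my_cache_flush(void)
{
	/*
	 * With plain spin_lock(), a softirq could interrupt this CPU
	 * while the lock is held; if the softirq handler then calls
	 * my_cache_rx() and spins on the same lock, the CPU deadlocks.
	 * spin_lock_bh() disables bottom halves locally until the
	 * matching spin_unlock_bh(), deferring the softirq.
	 */
	spin_lock_bh(&my_cache_lock);
	/* ... walk and free cache entries ... */
	spin_unlock_bh(&my_cache_lock);
}

/* Softirq context, e.g. network packet processing. */
static void my_cache_rx(void)
{
	spin_lock_bh(&my_cache_lock);
	/* ... cache lookup / insertion ... */
	spin_unlock_bh(&my_cache_lock);
}

Using the _bh variants on every path that takes the lock keeps the locking rules uniform, which is what the patch does across the cache invalidate, check, and add routines below.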
Diffstat (limited to 'net/ipv4')
-rw-r--r--	net/ipv4/cipso_ipv4.c	16
1 file changed, 8 insertions(+), 8 deletions(-)
diff --git a/net/ipv4/cipso_ipv4.c b/net/ipv4/cipso_ipv4.c
index a3bae2ca8acc..87e71563335d 100644
--- a/net/ipv4/cipso_ipv4.c
+++ b/net/ipv4/cipso_ipv4.c
@@ -259,7 +259,7 @@ void cipso_v4_cache_invalidate(void)
 	u32 iter;
 
 	for (iter = 0; iter < CIPSO_V4_CACHE_BUCKETS; iter++) {
-		spin_lock(&cipso_v4_cache[iter].lock);
+		spin_lock_bh(&cipso_v4_cache[iter].lock);
 		list_for_each_entry_safe(entry,
 					 tmp_entry,
 					 &cipso_v4_cache[iter].list, list) {
@@ -267,7 +267,7 @@ void cipso_v4_cache_invalidate(void)
 			cipso_v4_cache_entry_free(entry);
 		}
 		cipso_v4_cache[iter].size = 0;
-		spin_unlock(&cipso_v4_cache[iter].lock);
+		spin_unlock_bh(&cipso_v4_cache[iter].lock);
 	}
 
 	return;
@@ -309,7 +309,7 @@ static int cipso_v4_cache_check(const unsigned char *key,
 	hash = cipso_v4_map_cache_hash(key, key_len);
 	bkt = hash & (CIPSO_V4_CACHE_BUCKETBITS - 1);
-	spin_lock(&cipso_v4_cache[bkt].lock);
+	spin_lock_bh(&cipso_v4_cache[bkt].lock);
 	list_for_each_entry(entry, &cipso_v4_cache[bkt].list, list) {
 		if (entry->hash == hash &&
 		    entry->key_len == key_len &&
@@ -318,7 +318,7 @@ static int cipso_v4_cache_check(const unsigned char *key,
 			secattr->cache.free = entry->lsm_data.free;
 			secattr->cache.data = entry->lsm_data.data;
 			if (prev_entry == NULL) {
-				spin_unlock(&cipso_v4_cache[bkt].lock);
+				spin_unlock_bh(&cipso_v4_cache[bkt].lock);
 				return 0;
 			}
@@ -333,12 +333,12 @@ static int cipso_v4_cache_check(const unsigned char *key,
 					   &prev_entry->list);
 			}
-			spin_unlock(&cipso_v4_cache[bkt].lock);
+			spin_unlock_bh(&cipso_v4_cache[bkt].lock);
 			return 0;
 		}
 		prev_entry = entry;
 	}
-	spin_unlock(&cipso_v4_cache[bkt].lock);
+	spin_unlock_bh(&cipso_v4_cache[bkt].lock);
 	return -ENOENT;
 }
@@ -387,7 +387,7 @@ int cipso_v4_cache_add(const struct sk_buff *skb,
 	entry->lsm_data.data = secattr->cache.data;
 	bkt = entry->hash & (CIPSO_V4_CACHE_BUCKETBITS - 1);
-	spin_lock(&cipso_v4_cache[bkt].lock);
+	spin_lock_bh(&cipso_v4_cache[bkt].lock);
 	if (cipso_v4_cache[bkt].size < cipso_v4_cache_bucketsize) {
 		list_add(&entry->list, &cipso_v4_cache[bkt].list);
 		cipso_v4_cache[bkt].size += 1;
@@ -398,7 +398,7 @@ int cipso_v4_cache_add(const struct sk_buff *skb,
 		list_add(&entry->list, &cipso_v4_cache[bkt].list);
 		cipso_v4_cache_entry_free(old_entry);
 	}
-	spin_unlock(&cipso_v4_cache[bkt].lock);
+	spin_unlock_bh(&cipso_v4_cache[bkt].lock);
 	return 0;