From: Eric Sandeen
Date: Thu, 11 Oct 2007 07:42:10 +0000 (+1000)
Subject: [XFS] Unwrap mru_lock.
X-Git-Tag: v2.6.25-rc1~282^2~48
X-Git-Url: https://err.no/cgi-bin/gitweb.cgi?a=commitdiff_plain;h=ba74d0cba51dcaa99e4dc2e4fb62e6e13abbf703;p=linux-2.6

[XFS] Unwrap mru_lock.

Un-obfuscate mru_lock, remove mutex_lock->spin_lock macros, call
spin_lock directly, remove extraneous cookie holdover from old xfs
code.

SGI-PV: 970382
SGI-Modid: xfs-linux-melb:xfs-kern:29745a

Signed-off-by: Eric Sandeen
Signed-off-by: Donald Douwsma
Signed-off-by: Tim Shimmin
---

diff --git a/fs/xfs/xfs_mru_cache.c b/fs/xfs/xfs_mru_cache.c
index e0b358c1c5..dc64630e87 100644
--- a/fs/xfs/xfs_mru_cache.c
+++ b/fs/xfs/xfs_mru_cache.c
@@ -245,7 +245,7 @@ _xfs_mru_cache_clear_reap_list(
 		 */
 		list_move(&elem->list_node, &tmp);
 	}
-	mutex_spinunlock(&mru->lock, 0);
+	spin_unlock(&mru->lock);
 
 	list_for_each_entry_safe(elem, next, &tmp, list_node) {
 
@@ -259,7 +259,7 @@ _xfs_mru_cache_clear_reap_list(
 		kmem_zone_free(xfs_mru_elem_zone, elem);
 	}
 
-	mutex_spinlock(&mru->lock);
+	spin_lock(&mru->lock);
 }
 
 /*
@@ -280,7 +280,7 @@ _xfs_mru_cache_reap(
 	if (!mru || !mru->lists)
 		return;
 
-	mutex_spinlock(&mru->lock);
+	spin_lock(&mru->lock);
 	next = _xfs_mru_cache_migrate(mru, jiffies);
 	_xfs_mru_cache_clear_reap_list(mru);
 
@@ -294,7 +294,7 @@ _xfs_mru_cache_reap(
 		queue_delayed_work(xfs_mru_reap_wq, &mru->work, next);
 	}
 
-	mutex_spinunlock(&mru->lock, 0);
+	spin_unlock(&mru->lock);
 }
 
 int
@@ -398,17 +398,17 @@ xfs_mru_cache_flush(
 	if (!mru || !mru->lists)
 		return;
 
-	mutex_spinlock(&mru->lock);
+	spin_lock(&mru->lock);
 	if (mru->queued) {
-		mutex_spinunlock(&mru->lock, 0);
+		spin_unlock(&mru->lock);
 		cancel_rearming_delayed_workqueue(xfs_mru_reap_wq, &mru->work);
-		mutex_spinlock(&mru->lock);
+		spin_lock(&mru->lock);
 	}
 
 	_xfs_mru_cache_migrate(mru, jiffies + mru->grp_count * mru->grp_time);
 	_xfs_mru_cache_clear_reap_list(mru);
 
-	mutex_spinunlock(&mru->lock, 0);
+	spin_unlock(&mru->lock);
 }
 
 void
@@ -454,13 +454,13 @@ xfs_mru_cache_insert(
 	elem->key = key;
 	elem->value = value;
 
-	mutex_spinlock(&mru->lock);
+	spin_lock(&mru->lock);
 
 	radix_tree_insert(&mru->store, key, elem);
 	radix_tree_preload_end();
 	_xfs_mru_cache_list_insert(mru, elem);
 
-	mutex_spinunlock(&mru->lock, 0);
+	spin_unlock(&mru->lock);
 
 	return 0;
 }
@@ -483,14 +483,14 @@ xfs_mru_cache_remove(
 	if (!mru || !mru->lists)
 		return NULL;
 
-	mutex_spinlock(&mru->lock);
+	spin_lock(&mru->lock);
 	elem = radix_tree_delete(&mru->store, key);
 	if (elem) {
 		value = elem->value;
 		list_del(&elem->list_node);
 	}
-	mutex_spinunlock(&mru->lock, 0);
+	spin_unlock(&mru->lock);
 
 	if (elem)
 		kmem_zone_free(xfs_mru_elem_zone, elem);
 
@@ -540,14 +540,14 @@ xfs_mru_cache_lookup(
 	if (!mru || !mru->lists)
 		return NULL;
 
-	mutex_spinlock(&mru->lock);
+	spin_lock(&mru->lock);
 	elem = radix_tree_lookup(&mru->store, key);
 	if (elem) {
 		list_del(&elem->list_node);
 		_xfs_mru_cache_list_insert(mru, elem);
 	}
 	else
-		mutex_spinunlock(&mru->lock, 0);
+		spin_unlock(&mru->lock);
 
 	return elem ? elem->value : NULL;
 }
@@ -571,10 +571,10 @@ xfs_mru_cache_peek(
 	if (!mru || !mru->lists)
 		return NULL;
 
-	mutex_spinlock(&mru->lock);
+	spin_lock(&mru->lock);
 	elem = radix_tree_lookup(&mru->store, key);
 	if (!elem)
-		mutex_spinunlock(&mru->lock, 0);
+		spin_unlock(&mru->lock);
 
 	return elem ? elem->value : NULL;
 }
@@ -588,5 +588,5 @@ void
 xfs_mru_cache_done(
 	xfs_mru_cache_t	*mru)
 {
-	mutex_spinunlock(&mru->lock, 0);
+	spin_unlock(&mru->lock);
 }
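
For readers unfamiliar with the old XFS glue layer: mutex_spinlock() and
mutex_spinunlock() were thin compatibility wrappers around the generic
kernel spinlock API, carried over from IRIX, where acquiring a lock
returned an interrupt-level "cookie" that had to be handed back on
release. On Linux that cookie carries no information, which is the
"extraneous cookie holdover" this patch drops. The sketch below is
illustrative only: the wrapper definitions approximate the old XFS
spin.h compatibility header rather than quoting it, and old_style() /
new_style() are made-up helpers showing the calling pattern before and
after this change.

	#include <linux/spinlock.h>

	/*
	 * Approximation of the removed compatibility wrappers: the "lock"
	 * side fakes a cookie of 0 and the "unlock" side just discards it.
	 */
	#define mutex_spinlock(lock)		({ spin_lock(lock); 0; })
	#define mutex_spinunlock(lock, s)	do { spin_unlock(lock); (void)(s); } while (0)

	/* Old calling pattern: the cookie 's' is dead weight on Linux. */
	static void old_style(spinlock_t *lock)
	{
		unsigned long s;

		s = mutex_spinlock(lock);
		/* ... critical section ... */
		mutex_spinunlock(lock, s);
	}

	/* New calling pattern, as used throughout the diff above. */
	static void new_style(spinlock_t *lock)
	{
		spin_lock(lock);
		/* ... critical section ... */
		spin_unlock(lock);
	}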