return error;
}
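+/*
+ * Global bookkeeping for the access cache: an LRU list of inodes that
+ * hold cached access entries, protected by nfs_access_lru_lock, and a
+ * count of all cached entries, so the cache can be trimmed in
+ * least-recently-used order under memory pressure.
+ */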
+static DEFINE_SPINLOCK(nfs_access_lru_lock);
+static LIST_HEAD(nfs_access_lru_list);
+static atomic_long_t nfs_access_nr_entries;
+
static void nfs_access_free_entry(struct nfs_access_entry *entry)
{
put_rpccred(entry->cred);
kfree(entry);
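+ /* keep the counter decrement fully ordered w.r.t. freeing the entry */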
+ smp_mb__before_atomic_dec();
+ atomic_long_dec(&nfs_access_nr_entries);
+ smp_mb__after_atomic_dec();
}
static void __nfs_access_zap_cache(struct inode *inode)
while ((n = rb_first(root_node)) != NULL) {
entry = rb_entry(n, struct nfs_access_entry, rb_node);
rb_erase(n, root_node);
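+ /* unhook the entry from the per-inode LRU as well as the rbtree */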
+ list_del(&entry->lru);
n->rb_left = dispose;
dispose = n;
}
void nfs_access_zap_cache(struct inode *inode)
{
+ /* Remove the inode from the global LRU list */
+ if (test_and_clear_bit(NFS_INO_ACL_LRU_SET, &NFS_FLAGS(inode))) {
+ spin_lock(&nfs_access_lru_lock);
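+ /* list_del_init() leaves the list_head reusable for a later re-add */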
+ list_del_init(&NFS_I(inode)->access_cache_inode_lru);
+ spin_unlock(&nfs_access_lru_lock);
+ }
+
spin_lock(&inode->i_lock);
/* This will release the spinlock */
__nfs_access_zap_cache(inode);
res->jiffies = cache->jiffies;
res->cred = cache->cred;
res->mask = cache->mask;
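+ /* cache hit: move the entry to the most-recently-used end of the list */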
+ list_move_tail(&cache->lru, &nfsi->access_cache_entry_lru);
err = 0;
out:
spin_unlock(&inode->i_lock);
return err;
out_stale:
rb_erase(&cache->rb_node, &nfsi->access_cache);
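+ /* the stale entry leaves the LRU before nfs_access_free_entry() drops it */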
+ list_del(&cache->lru);
spin_unlock(&inode->i_lock);
nfs_access_free_entry(cache);
return -ENOENT;
static void nfs_access_add_rbtree(struct inode *inode, struct nfs_access_entry *set)
{
- struct rb_root *root_node = &NFS_I(inode)->access_cache;
+ struct nfs_inode *nfsi = NFS_I(inode);
+ struct rb_root *root_node = &nfsi->access_cache;
struct rb_node **p = &root_node->rb_node;
struct rb_node *parent = NULL;
struct nfs_access_entry *entry;
}
rb_link_node(&set->rb_node, parent, p);
rb_insert_color(&set->rb_node, root_node);
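+ /* new entries start out at the most-recently-used end */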
+ list_add_tail(&set->lru, &nfsi->access_cache_entry_lru);
spin_unlock(&inode->i_lock);
return;
found:
rb_replace_node(parent, &set->rb_node, root_node);
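+ /* the new entry joins the LRU; the one it replaces is unhooked and freed */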
+ list_add_tail(&set->lru, &nfsi->access_cache_entry_lru);
+ list_del(&entry->lru);
spin_unlock(&inode->i_lock);
nfs_access_free_entry(entry);
}
cache->mask = set->mask;
nfs_access_add_rbtree(inode, cache);
+
+ /* Update accounting */
+ smp_mb__before_atomic_inc();
+ atomic_long_inc(&nfs_access_nr_entries);
+ smp_mb__after_atomic_inc();
+
+ /* Add inode to global LRU list */
+ if (!test_and_set_bit(NFS_INO_ACL_LRU_SET, &NFS_FLAGS(inode))) {
+ spin_lock(&nfs_access_lru_lock);
+ list_add_tail(&NFS_I(inode)->access_cache_inode_lru, &nfs_access_lru_list);
+ spin_unlock(&nfs_access_lru_lock);
+ }
}
static int nfs_do_access(struct inode *inode, struct rpc_cred *cred, int mask)
struct nfs_access_entry {
struct rb_node rb_node;
+ struct list_head lru; /* linkage on the per-inode entry LRU */
unsigned long jiffies;
struct rpc_cred * cred;
int mask;
atomic_t data_updates;
struct rb_root access_cache;
+ struct list_head access_cache_entry_lru; /* this inode's entries, in LRU order */
+ struct list_head access_cache_inode_lru; /* linkage on the global nfs_access_lru_list */
#ifdef CONFIG_NFS_V3_ACL
struct posix_acl *acl_access;
struct posix_acl *acl_default;
#define NFS_INO_REVALIDATING (0) /* revalidating attrs */
#define NFS_INO_ADVISE_RDPLUS (1) /* advise readdirplus */
#define NFS_INO_STALE (2) /* possible stale inode */
+#define NFS_INO_ACL_LRU_SET (3) /* Inode is on the LRU list */
static inline struct nfs_inode *NFS_I(struct inode *inode)
{