linux/fs/coda/cache.c
Yoshihisa Abe b5ce1d83a6 Coda: add spin lock to protect accesses to struct coda_inode_info.
We mostly need it to protect cached user permissions. The c_flags field
is advisory; reading a stale value is harmless, and in the worst case we
hit a slow path where we have to make an extra upcall to the userspace
cache manager when revalidating a dentry or inode.

Signed-off-by: Yoshihisa Abe <yoshiabe@cs.cmu.edu>
Signed-off-by: Jan Harkes <jaharkes@cs.cmu.edu>
Signed-off-by: Linus Torvalds <torvalds@linux-foundation.org>
2010-10-25 08:02:40 -07:00
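
For context, the main consumer of this cache is the permission check in
fs/coda/dir.c. The sketch below is a loose reconstruction of that caller
(coda_permission, venus_access and coda_i2f are real names in the tree,
but the body here is illustrative rather than a verbatim copy):

static int coda_permission(struct inode *inode, int mask)
{
	int error;

	mask &= MAY_READ | MAY_WRITE | MAY_EXEC;
	if (!mask)
		return 0;

	/* fast path: mask, fsuid and epoch are compared under cii->c_lock */
	if (coda_cache_check(inode, mask))
		return 0;

	/* slow path: upcall to the userspace cache manager (Venus) */
	error = venus_access(inode->i_sb, coda_i2f(inode), mask);
	if (!error)
		coda_cache_enter(inode, mask);

	return error;
}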


/*
 * Cache operations for Coda.
 * For Linux 2.1: (C) 1997 Carnegie Mellon University
 * For Linux 2.3: (C) 2000 Carnegie Mellon University
 *
 * Carnegie Mellon encourages users of this code to contribute improvements
 * to the Coda project http://www.coda.cs.cmu.edu/ <coda@cs.cmu.edu>.
 */
#include <linux/types.h>
#include <linux/kernel.h>
#include <linux/time.h>
#include <linux/fs.h>
#include <linux/stat.h>
#include <linux/errno.h>
#include <asm/uaccess.h>
#include <linux/string.h>
#include <linux/list.h>
#include <linux/sched.h>
#include <linux/spinlock.h>

#include <linux/coda.h>
#include <linux/coda_linux.h>
#include <linux/coda_psdev.h>
#include <linux/coda_fs_i.h>
#include <linux/coda_cache.h>

static atomic_t permission_epoch = ATOMIC_INIT(0);

/* replace or extend an acl cache hit */
void coda_cache_enter(struct inode *inode, int mask)
{
	struct coda_inode_info *cii = ITOC(inode);

	spin_lock(&cii->c_lock);
	cii->c_cached_epoch = atomic_read(&permission_epoch);
	if (cii->c_uid != current_fsuid()) {
		cii->c_uid = current_fsuid();
		cii->c_cached_perm = mask;
	} else
		cii->c_cached_perm |= mask;
	spin_unlock(&cii->c_lock);
}
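
/*
 * Invalidation works by aging epochs rather than by clearing bits:
 * coda_cache_clear_inode() backdates the per-inode epoch so the next
 * coda_cache_check() on that inode misses, and coda_cache_clear_all()
 * bumps the global permission_epoch, lazily invalidating every cached ACL.
 */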

/* remove cached acl from an inode */
void coda_cache_clear_inode(struct inode *inode)
{
	struct coda_inode_info *cii = ITOC(inode);

	spin_lock(&cii->c_lock);
	cii->c_cached_epoch = atomic_read(&permission_epoch) - 1;
	spin_unlock(&cii->c_lock);
}

/* remove all acl caches */
void coda_cache_clear_all(struct super_block *sb)
{
	atomic_inc(&permission_epoch);
}

/* check if the mask has been matched against the acl already */
int coda_cache_check(struct inode *inode, int mask)
{
	struct coda_inode_info *cii = ITOC(inode);
	int hit;

	spin_lock(&cii->c_lock);
	hit = (mask & cii->c_cached_perm) == mask &&
	      cii->c_uid == current_fsuid() &&
	      cii->c_cached_epoch == atomic_read(&permission_epoch);
	spin_unlock(&cii->c_lock);

	return hit;
}

/* Purging dentries and children */
/* The following routines drop dentries which are not
   in use and flag dentries which are in use to be
   zapped later.

   The flags are detected by:
	- coda_dentry_revalidate (for lookups) if the flag is C_PURGE
	- coda_dentry_delete: to remove dentry from the cache when d_count
	  falls to zero
	- an inode method coda_revalidate (for attributes) if the
	  flag is C_VATTR
 */
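
/*
 * Note: in this era of the tree the d_subdirs list is still protected by
 * the global dcache_lock, which coda_flag_children() below takes while it
 * walks the directory's children.
 */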

/* this won't do any harm: just flag all children */
static void coda_flag_children(struct dentry *parent, int flag)
{
	struct list_head *child;
	struct dentry *de;

	spin_lock(&dcache_lock);
	list_for_each(child, &parent->d_subdirs) {
		de = list_entry(child, struct dentry, d_u.d_child);
		/* don't know what to do with negative dentries */
		if (!de->d_inode)
			continue;
		coda_flag_inode(de->d_inode, flag);
	}
	spin_unlock(&dcache_lock);
}
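
/*
 * d_find_alias() returns the directory's dentry with an extra reference,
 * so it has to be balanced with dput(). shrink_dcache_parent() then prunes
 * the unused children; busy dentries keep their flag and are zapped later
 * by the revalidation paths listed above.
 */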

void coda_flag_inode_children(struct inode *inode, int flag)
{
	struct dentry *alias_de;

	if (!inode || !S_ISDIR(inode->i_mode))
		return;

	alias_de = d_find_alias(inode);
	if (!alias_de)
		return;

	coda_flag_children(alias_de, flag);
	shrink_dcache_parent(alias_de);
	dput(alias_de);
}