Commit 1b1dcc1b authored by Jes Sorensen, committed by Ingo Molnar

[PATCH] mutex subsystem, semaphore to mutex: VFS, ->i_sem


This patch converts the inode semaphore to a mutex. I have tested it on
XFS and compile-tested as much as one reasonably can on an ia64. Anyway,
your luck with it might be different.
Modified-by: Ingo Molnar <mingo@elte.hu>

(finished the conversion)
Signed-off-by: Jes Sorensen <jes@sgi.com>
Signed-off-by: Ingo Molnar <mingo@elte.hu>
parent 794ee1ba
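
The conversion itself is mechanical: every down()/up() critical section on ->i_sem becomes a mutex_lock()/mutex_unlock() critical section on ->i_mutex, and the one-time sema_init(&inode->i_sem, 1) in inode_init_once() becomes mutex_init(&inode->i_mutex). A minimal sketch of the before/after pattern (the helper below is illustrative only, not part of the patch):

#include <linux/fs.h>		/* struct inode, i_size_write() */
#include <linux/mutex.h>	/* mutex_lock(), mutex_unlock() */

/* Illustrative only: what a typical ->i_sem user looks like after the
 * conversion.  The old calls are kept in comments for comparison. */
static void set_inode_size_locked(struct inode *inode, loff_t size)
{
	/* was: down(&inode->i_sem); */
	mutex_lock(&inode->i_mutex);

	/* i_size is one of the fields serialized by i_mutex */
	i_size_write(inode, size);

	/* was: up(&inode->i_sem); */
	mutex_unlock(&inode->i_mutex);
}

Unlike a semaphore, a mutex has a single owner and must be released by the task that acquired it, which is what makes the stricter debugging checks of the new mutex subsystem possible.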
@@ -547,13 +547,13 @@ static int hfs_file_release(struct inode *inode, struct file *file)
 	if (atomic_read(&file->f_count) != 0)
 		return 0;
 	if (atomic_dec_and_test(&HFS_I(inode)->opencnt)) {
-		down(&inode->i_sem);
+		mutex_lock(&inode->i_mutex);
 		hfs_file_truncate(inode);
 		//if (inode->i_flags & S_DEAD) {
 		//	hfs_delete_cat(inode->i_ino, HFSPLUS_SB(sb).hidden_dir, NULL);
 		//	hfs_delete_inode(inode);
 		//}
-		up(&inode->i_sem);
+		mutex_unlock(&inode->i_mutex);
 	}
 	return 0;
 }
......
@@ -29,7 +29,7 @@ int hfsplus_block_allocate(struct super_block *sb, u32 size, u32 offset, u32 *ma
 		return size;
 
 	dprint(DBG_BITMAP, "block_allocate: %u,%u,%u\n", size, offset, len);
-	down(&HFSPLUS_SB(sb).alloc_file->i_sem);
+	mutex_lock(&HFSPLUS_SB(sb).alloc_file->i_mutex);
 	mapping = HFSPLUS_SB(sb).alloc_file->i_mapping;
 	page = read_cache_page(mapping, offset / PAGE_CACHE_BITS,
 			       (filler_t *)mapping->a_ops->readpage, NULL);
@@ -143,7 +143,7 @@ done:
 	sb->s_dirt = 1;
 	dprint(DBG_BITMAP, "-> %u,%u\n", start, *max);
 out:
-	up(&HFSPLUS_SB(sb).alloc_file->i_sem);
+	mutex_unlock(&HFSPLUS_SB(sb).alloc_file->i_mutex);
 	return start;
 }
@@ -164,7 +164,7 @@ int hfsplus_block_free(struct super_block *sb, u32 offset, u32 count)
 	if ((offset + count) > HFSPLUS_SB(sb).total_blocks)
 		return -2;
 
-	down(&HFSPLUS_SB(sb).alloc_file->i_sem);
+	mutex_lock(&HFSPLUS_SB(sb).alloc_file->i_mutex);
 	mapping = HFSPLUS_SB(sb).alloc_file->i_mapping;
 	pnr = offset / PAGE_CACHE_BITS;
 	page = read_cache_page(mapping, pnr, (filler_t *)mapping->a_ops->readpage, NULL);
@@ -215,7 +215,7 @@ out:
 	kunmap(page);
 	HFSPLUS_SB(sb).free_blocks += len;
 	sb->s_dirt = 1;
-	up(&HFSPLUS_SB(sb).alloc_file->i_sem);
+	mutex_unlock(&HFSPLUS_SB(sb).alloc_file->i_mutex);
 	return 0;
 }
@@ -276,13 +276,13 @@ static int hfsplus_file_release(struct inode *inode, struct file *file)
 	if (atomic_read(&file->f_count) != 0)
 		return 0;
 	if (atomic_dec_and_test(&HFSPLUS_I(inode).opencnt)) {
-		down(&inode->i_sem);
+		mutex_lock(&inode->i_mutex);
 		hfsplus_file_truncate(inode);
 		if (inode->i_flags & S_DEAD) {
 			hfsplus_delete_cat(inode->i_ino, HFSPLUS_SB(sb).hidden_dir, NULL);
 			hfsplus_delete_inode(inode);
 		}
-		up(&inode->i_sem);
+		mutex_unlock(&inode->i_mutex);
 	}
 	return 0;
 }
......
@@ -32,19 +32,19 @@ static loff_t hpfs_dir_lseek(struct file *filp, loff_t off, int whence)
 	/*printk("dir lseek\n");*/
 	if (new_off == 0 || new_off == 1 || new_off == 11 || new_off == 12 || new_off == 13) goto ok;
-	down(&i->i_sem);
+	mutex_lock(&i->i_mutex);
 	pos = ((loff_t) hpfs_de_as_down_as_possible(s, hpfs_inode->i_dno) << 4) + 1;
 	while (pos != new_off) {
 		if (map_pos_dirent(i, &pos, &qbh)) hpfs_brelse4(&qbh);
 		else goto fail;
 		if (pos == 12) goto fail;
 	}
-	up(&i->i_sem);
+	mutex_unlock(&i->i_mutex);
 ok:
 	unlock_kernel();
 	return filp->f_pos = new_off;
 fail:
-	up(&i->i_sem);
+	mutex_unlock(&i->i_mutex);
 	/*printk("illegal lseek: %016llx\n", new_off);*/
 	unlock_kernel();
 	return -ESPIPE;
......
@@ -171,12 +171,12 @@ static struct dentry *hppfs_lookup(struct inode *ino, struct dentry *dentry,
 	err = -ENOMEM;
 	parent = HPPFS_I(ino)->proc_dentry;
-	down(&parent->d_inode->i_sem);
+	mutex_lock(&parent->d_inode->i_mutex);
 	proc_dentry = d_lookup(parent, &dentry->d_name);
 	if(proc_dentry == NULL){
 		proc_dentry = d_alloc(parent, &dentry->d_name);
 		if(proc_dentry == NULL){
-			up(&parent->d_inode->i_sem);
+			mutex_unlock(&parent->d_inode->i_mutex);
 			goto out;
 		}
 		new = (*parent->d_inode->i_op->lookup)(parent->d_inode,
@@ -186,7 +186,7 @@ static struct dentry *hppfs_lookup(struct inode *ino, struct dentry *dentry,
 			proc_dentry = new;
 		}
 	}
-	up(&parent->d_inode->i_sem);
+	mutex_unlock(&parent->d_inode->i_mutex);
 
 	if(IS_ERR(proc_dentry))
 		return(proc_dentry);
......
@@ -118,7 +118,7 @@ static int hugetlbfs_file_mmap(struct file *file, struct vm_area_struct *vma)
 	vma_len = (loff_t)(vma->vm_end - vma->vm_start);
 
-	down(&inode->i_sem);
+	mutex_lock(&inode->i_mutex);
 	file_accessed(file);
 	vma->vm_flags |= VM_HUGETLB | VM_RESERVED;
 	vma->vm_ops = &hugetlb_vm_ops;
@@ -133,7 +133,7 @@ static int hugetlbfs_file_mmap(struct file *file, struct vm_area_struct *vma)
 	if (inode->i_size < len)
 		inode->i_size = len;
 out:
-	up(&inode->i_sem);
+	mutex_unlock(&inode->i_mutex);
 	return ret;
 }
......
@@ -192,7 +192,7 @@ void inode_init_once(struct inode *inode)
 	INIT_HLIST_NODE(&inode->i_hash);
 	INIT_LIST_HEAD(&inode->i_dentry);
 	INIT_LIST_HEAD(&inode->i_devices);
-	sema_init(&inode->i_sem, 1);
+	mutex_init(&inode->i_mutex);
 	init_rwsem(&inode->i_alloc_sem);
 	INIT_RADIX_TREE(&inode->i_data.page_tree, GFP_ATOMIC);
 	rwlock_init(&inode->i_data.tree_lock);
......
@@ -1415,7 +1415,7 @@ jffs_file_write(struct file *filp, const char *buf, size_t count,
 	 * This will never trigger with sane page sizes. leave it in
 	 * anyway, since I'm thinking about how to merge larger writes
 	 * (the current idea is to poke a thread that does the actual
-	 * I/O and starts by doing a down(&inode->i_sem). then we
+	 * I/O and starts by doing a mutex_lock(&inode->i_mutex). then we
 	 * would need to get the page cache pages and have a list of
 	 * I/O requests and do write-merging here.
 	 * -- prumpf
......
@@ -58,7 +58,7 @@ struct jfs_inode_info {
 	/*
 	 * rdwrlock serializes xtree between reads & writes and synchronizes
 	 * changes to special inodes. It's use would be redundant on
-	 * directories since the i_sem taken in the VFS is sufficient.
+	 * directories since the i_mutex taken in the VFS is sufficient.
 	 */
 	struct rw_semaphore rdwrlock;
 	/*
@@ -68,7 +68,7 @@ struct jfs_inode_info {
 	 * inode is blocked in txBegin or TxBeginAnon
 	 */
 	struct semaphore commit_sem;
-	/* xattr_sem allows us to access the xattrs without taking i_sem */
+	/* xattr_sem allows us to access the xattrs without taking i_mutex */
 	struct rw_semaphore xattr_sem;
 	lid_t xtlid;		/* lid of xtree lock on directory */
#ifdef CONFIG_JFS_POSIX_ACL
......
@@ -74,7 +74,7 @@ int dcache_dir_close(struct inode *inode, struct file *file)
 
 loff_t dcache_dir_lseek(struct file *file, loff_t offset, int origin)
 {
-	down(&file->f_dentry->d_inode->i_sem);
+	mutex_lock(&file->f_dentry->d_inode->i_mutex);
 	switch (origin) {
 		case 1:
 			offset += file->f_pos;
@@ -82,7 +82,7 @@ loff_t dcache_dir_lseek(struct file *file, loff_t offset, int origin)
 			if (offset >= 0)
 				break;
 		default:
-			up(&file->f_dentry->d_inode->i_sem);
+			mutex_unlock(&file->f_dentry->d_inode->i_mutex);
 			return -EINVAL;
 	}
 	if (offset != file->f_pos) {
@@ -106,7 +106,7 @@ loff_t dcache_dir_lseek(struct file *file, loff_t offset, int origin)
 			spin_unlock(&dcache_lock);
 		}
 	}
-	up(&file->f_dentry->d_inode->i_sem);
+	mutex_unlock(&file->f_dentry->d_inode->i_mutex);
 	return offset;
 }
@@ -356,7 +356,7 @@ int simple_commit_write(struct file *file, struct page *page,
 	/*
 	 * No need to use i_size_read() here, the i_size
-	 * cannot change under us because we hold the i_sem.
+	 * cannot change under us because we hold the i_mutex.
 	 */
 	if (pos > inode->i_size)
 		i_size_write(inode, pos);
......
@@ -438,7 +438,7 @@ static struct dentry * real_lookup(struct dentry * parent, struct qstr * name, s
 	struct dentry * result;
 	struct inode *dir = parent->d_inode;
 
-	down(&dir->i_sem);
+	mutex_lock(&dir->i_mutex);
 	/*
 	 * First re-do the cached lookup just in case it was created
 	 * while we waited for the directory semaphore..
@@ -464,7 +464,7 @@ static struct dentry * real_lookup(struct dentry * parent, struct qstr * name, s
 		else
 			result = dentry;
 	}
-	up(&dir->i_sem);
+	mutex_unlock(&dir->i_mutex);
 	return result;
 }
@@ -472,7 +472,7 @@ static struct dentry * real_lookup(struct dentry * parent, struct qstr * name, s
 	 * Uhhuh! Nasty case: the cache was re-populated while
 	 * we waited on the semaphore. Need to revalidate.
 	 */
-	up(&dir->i_sem);
+	mutex_unlock(&dir->i_mutex);
 	if (result->d_op && result->d_op->d_revalidate) {
 		if (!result->d_op->d_revalidate(result, nd) && !d_invalidate(result)) {
 			dput(result);
@@ -1366,7 +1366,7 @@ struct dentry *lock_rename(struct dentry *p1, struct dentry *p2)
 	struct dentry *p;
 
 	if (p1 == p2) {
-		down(&p1->d_inode->i_sem);
+		mutex_lock(&p1->d_inode->i_mutex);
 		return NULL;
 	}
@@ -1374,30 +1374,30 @@ struct dentry *lock_rename(struct dentry *p1, struct dentry *p2)
 	for (p = p1; p->d_parent != p; p = p->d_parent) {
 		if (p->d_parent == p2) {
-			down(&p2->d_inode->i_sem);
-			down(&p1->d_inode->i_sem);
+			mutex_lock(&p2->d_inode->i_mutex);
+			mutex_lock(&p1->d_inode->i_mutex);
 			return p;
 		}
 	}
 
 	for (p = p2; p->d_parent != p; p = p->d_parent) {
 		if (p->d_parent == p1) {
-			down(&p1->d_inode->i_sem);
-			down(&p2->d_inode->i_sem);
+			mutex_lock(&p1->d_inode->i_mutex);
+			mutex_lock(&p2->d_inode->i_mutex);
 			return p;
 		}
 	}
 
-	down(&p1->d_inode->i_sem);
-	down(&p2->d_inode->i_sem);
+	mutex_lock(&p1->d_inode->i_mutex);
+	mutex_lock(&p2->d_inode->i_mutex);
 	return NULL;
 }
 
 void unlock_rename(struct dentry *p1, struct dentry *p2)
 {
-	up(&p1->d_inode->i_sem);
+	mutex_unlock(&p1->d_inode->i_mutex);
 	if (p1 != p2) {
-		up(&p2->d_inode->i_sem);
+		mutex_unlock(&p2->d_inode->i_mutex);
 		up(&p1->d_inode->i_sb->s_vfs_rename_sem);
 	}
 }
@@ -1563,14 +1563,14 @@ int open_namei(const char * pathname, int flag, int mode, struct nameidata *nd)
 	dir = nd->dentry;
 	nd->flags &= ~LOOKUP_PARENT;
-	down(&dir->d_inode->i_sem);
+	mutex_lock(&dir->d_inode->i_mutex);
 	path.dentry = lookup_hash(nd);
 	path.mnt = nd->mnt;
 
 do_last:
 	error = PTR_ERR(path.dentry);
 	if (IS_ERR(path.dentry)) {
-		up(&dir->d_inode->i_sem);
+		mutex_unlock(&dir->d_inode->i_mutex);
 		goto exit;
 	}
@@ -1579,7 +1579,7 @@ do_last:
 		if (!IS_POSIXACL(dir->d_inode))
 			mode &= ~current->fs->umask;
 		error = vfs_create(dir->d_inode, path.dentry, mode, nd);
-		up(&dir->d_inode->i_sem);
+		mutex_unlock(&dir->d_inode->i_mutex);
 		dput(nd->dentry);
 		nd->dentry = path.dentry;
 		if (error)
@@ -1593,7 +1593,7 @@ do_last:
 	/*
 	 * It already exists.
 	 */
-	up(&dir->d_inode->i_sem);
+	mutex_unlock(&dir->d_inode->i_mutex);
 
 	error = -EEXIST;
 	if (flag & O_EXCL)
@@ -1665,7 +1665,7 @@ do_link:
 		goto exit;
 	}
 	dir = nd->dentry;
-	down(&dir->d_inode->i_sem);
+	mutex_lock(&dir->d_inode->i_mutex);
 	path.dentry = lookup_hash(nd);
 	path.mnt = nd->mnt;
 	__putname(nd->last.name);
@@ -1680,13 +1680,13 @@ do_link:
  * Simple function to lookup and return a dentry and create it
  * if it doesn't exist. Is SMP-safe.
  *
- * Returns with nd->dentry->d_inode->i_sem locked.
+ * Returns with nd->dentry->d_inode->i_mutex locked.
  */
 struct dentry *lookup_create(struct nameidata *nd, int is_dir)
 {
 	struct dentry *dentry = ERR_PTR(-EEXIST);
 
-	down(&nd->dentry->d_inode->i_sem);
+	mutex_lock(&nd->dentry->d_inode->i_mutex);
 	/*
 	 * Yucky last component or no last component at all?
 	 * (foo/., foo/.., /////)
@@ -1784,7 +1784,7 @@ asmlinkage long sys_mknod(const char __user * filename, int mode, unsigned dev)
 		}
 		dput(dentry);
 	}
-	up(&nd.dentry->d_inode->i_sem);
+	mutex_unlock(&nd.dentry->d_inode->i_mutex);
 	path_release(&nd);
 out:
 	putname(tmp);
@@ -1836,7 +1836,7 @@ asmlinkage long sys_mkdir(const char __user * pathname, int mode)
 			error = vfs_mkdir(nd.dentry->d_inode, dentry, mode);
 			dput(dentry);
 		}
-		up(&nd.dentry->d_inode->i_sem);
+		mutex_unlock(&nd.dentry->d_inode->i_mutex);
 		path_release(&nd);
 out:
 		putname(tmp);
@@ -1885,7 +1885,7 @@ int vfs_rmdir(struct inode *dir, struct dentry *dentry)
 
 	DQUOT_INIT(dir);
 
-	down(&dentry->d_inode->i_sem);
+	mutex_lock(&dentry->d_inode->i_mutex);
 	dentry_unhash(dentry);
 	if (d_mountpoint(dentry))
 		error = -EBUSY;
@@ -1897,7 +1897,7 @@ int vfs_rmdir(struct inode *dir, struct dentry *dentry)
 			dentry->d_inode->i_flags |= S_DEAD;
 		}
 	}
-	up(&dentry->d_inode->i_sem);
+	mutex_unlock(&dentry->d_inode->i_mutex);
 	if (!error) {
 		d_delete(dentry);
 	}
@@ -1932,14 +1932,14 @@ asmlinkage long sys_rmdir(const char __user * pathname)
 			error = -EBUSY;
 			goto exit1;
 	}
-	down(&nd.dentry->d_inode->i_sem);
+	mutex_lock(&nd.dentry->d_inode->i_mutex);
 	dentry = lookup_hash(&nd);
 	error = PTR_ERR(dentry);
 	if (!IS_ERR(dentry)) {
 		error = vfs_rmdir(nd.dentry->d_inode, dentry);
 		dput(dentry);
 	}
-	up(&nd.dentry->d_inode->i_sem);
+	mutex_unlock(&nd.dentry->d_inode->i_mutex);
 exit1:
 	path_release(&nd);
 exit:
@@ -1959,7 +1959,7 @@ int vfs_unlink(struct inode *dir, struct dentry *dentry)
 
 	DQUOT_INIT(dir);
 
-	down(&dentry->d_inode->i_sem);
+	mutex_lock(&dentry->d_inode->i_mutex);
 	if (d_mountpoint(dentry))
 		error = -EBUSY;
 	else {
@@ -1967,7 +1967,7 @@ int vfs_unlink(struct inode *dir, struct dentry *dentry)
 		if (!error)
 			error = dir->i_op->unlink(dir, dentry);
 	}
-	up(&dentry->d_inode->i_sem);
+	mutex_unlock(&dentry->d_inode->i_mutex);
 
 	/* We don't d_delete() NFS sillyrenamed files--they still exist. */
 	if (!error && !(dentry->d_flags & DCACHE_NFSFS_RENAMED)) {
@@ -1979,7 +1979,7 @@ int vfs_unlink(struct inode *dir, struct dentry *dentry)
 /*
  * Make sure that the actual truncation of the file will occur outside its
- * directory's i_sem. Truncate can take a long time if there is a lot of
+ * directory's i_mutex. Truncate can take a long time if there is a lot of
  * writeout happening, and we don't want to prevent access to the directory
  * while waiting on the I/O.
 */
@@ -2001,7 +2001,7 @@ asmlinkage long sys_unlink(const char __user * pathname)
 	error = -EISDIR;
 	if (nd.last_type != LAST_NORM)
 		goto exit1;
-	down(&nd.dentry->d_inode->i_sem);
+	mutex_lock(&nd.dentry->d_inode->i_mutex);
 	dentry = lookup_hash(&nd);
 	error = PTR_ERR(dentry);
 	if (!IS_ERR(dentry)) {
@@ -2015,7 +2015,7 @@ asmlinkage long sys_unlink(const char __user * pathname)
 	exit2:
 		dput(dentry);
 	}
-	up(&nd.dentry->d_inode->i_sem);
+	mutex_unlock(&nd.dentry->d_inode->i_mutex);
 	if (inode)
 		iput(inode);	/* truncate the inode here */
 exit1:
@@ -2075,7 +2075,7 @@ asmlinkage long sys_symlink(const char __user * oldname, const char __user * new
 			error = vfs_symlink(nd.dentry->d_inode, dentry, from, S_IALLUGO);
 			dput(dentry);
 		}
-		up(&nd.dentry->d_inode->i_sem);
+		mutex_unlock(&nd.dentry->d_inode->i_mutex);
 		path_release(&nd);
 out:
 		putname(to);
@@ -2113,10 +2113,10 @@ int vfs_link(struct dentry *old_dentry, struct inode *dir, struct dentry *new_de
 	if (error)
 		return error;
 
-	down(&old_dentry->d_inode->i_sem);
+	mutex_lock(&old_dentry->d_inode->i_mutex);
 	DQUOT_INIT(dir);
 	error = dir->i_op->link(old_dentry, dir, new_dentry);
-	up(&old_dentry->d_inode->i_sem);
+	mutex_unlock(&old_dentry->d_inode->i_mutex);
 	if (!error)
 		fsnotify_create(dir, new_dentry->d_name.name);
 	return error;
@@ -2157,7 +2157,7 @@ asmlinkage long sys_link(const char __user * oldname, const char __user * newnam
 		error = vfs_link(old_nd.dentry, nd.dentry->d_inode, new_dentry);
 		dput(new_dentry);
 	}
-	up(&nd.dentry->d_inode->i_sem);
+	mutex_unlock(&nd.dentry->d_inode->i_mutex);
 out_release:
 	path_release(&nd);
 out:
@@ -2178,7 +2178,7 @@ exit:
  *	   sb->s_vfs_rename_sem. We might be more accurate, but that's another
  *	   story.
  *	c) we have to lock _three_ objects - parents and victim (if it exists).
- *	   And that - after we got ->i_sem on parents (until then we don't know
+ *	   And that - after we got ->i_mutex on parents (until then we don't know
  *	   whether the target exists). Solution: try to be smart with locking
  *	   order for inodes. We rely on the fact that tree topology may change
 *	   only under ->s_vfs_rename_sem _and_ that parent of the object we
@@ -2195,9 +2195,9 @@ exit:
 *	   stuff into VFS), but the former is not going away. Solution: the same
 *	   trick as in rmdir().
 *	e) conversion from fhandle to dentry may come in the wrong moment - when
- *	   we are removing the target. Solution: we will have to grab ->i_sem
+ *	   we are removing the target. Solution: we will have to grab ->i_mutex
 *	   in the fhandle_to_dentry code. [FIXME - current nfsfh.c relies on
- *	   ->i_sem on parents, which works but leads to some truely excessive
+ *	   ->i_mutex on parents, which works but leads to some truely excessive
 *	   locking].
 */
 static int vfs_rename_dir(struct inode *old_dir, struct dentry *old_dentry,
@@ -2222,7 +2222,7 @@ static int vfs_rename_dir(struct inode *old_dir, struct dentry *old_dentry,
 	target = new_dentry->d_inode;
 	if (target) {
-		down(&target->i_sem);
+		mutex_lock(&target->i_mutex);
 		dentry_unhash(new_dentry);
 	}
 	if (d_mountpoint(old_dentry)||d_mountpoint(new_dentry))
@@ -2232,7 +2232,7 @@ static int vfs_rename_dir(struct inode *old_dir, struct dentry *old_dentry,
 	if (target) {
 		if (!error)
 			target->i_flags |= S_DEAD;
-		up(&target->i_sem);
+		mutex_unlock(&target->i_mutex);
 		if (d_unhashed(new_dentry))
 			d_rehash(new_dentry);
 		dput(new_dentry);
@@ -2255,7 +2255,7 @@ static int vfs_rename_other(struct inode *old_dir, struct dentry *old_dentry,
 	dget(new_dentry);
 	target = new_dentry->d_inode;
 	if (target)
-		down(&target->i_sem);
+		mutex_lock(&target->i_mutex);
 	if (d_mountpoint(old_dentry)||d_mountpoint(new_dentry))
 		error = -EBUSY;
 	else
@@ -2266,7 +2266,7 @@ static int vfs_rename_other(struct inode *old_dir, struct dentry *old_dentry,
 		d_move(old_dentry, new_dentry);
 	}
 	if (target)
-		up(&target->i_sem);
+		mutex_unlock(&target->i_mutex);
 	dput(new_dentry);
 	return error;
 }
......
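
The only places in this patch where the conversion is not a one-for-one substitution are double-lock sites such as lock_rename() above, which must take two inode mutexes in a stable order (ancestor first; both orderings are serialized under ->s_vfs_rename_sem) to avoid AB-BA deadlocks. A reduced sketch of that ordering rule, assuming a hypothetical is_ancestor() helper (illustrative only, not part of the patch):

/* Illustrative: lock two dentries' inodes in the deadlock-safe order used
 * by lock_rename().  Assumes the caller already holds ->s_vfs_rename_sem,
 * which freezes the tree topology and keeps the ancestor test stable. */
static void lock_inode_pair(struct dentry *p1, struct dentry *p2)
{
	if (p1 == p2) {
		mutex_lock(&p1->d_inode->i_mutex);
		return;
	}
	if (is_ancestor(p2, p1)) {
		/* p2 is an ancestor of p1: take the ancestor's mutex first */
		mutex_lock(&p2->d_inode->i_mutex);
		mutex_lock(&p1->d_inode->i_mutex);
	} else {
		/* p1 is an ancestor of p2, or the two are unrelated */
		mutex_lock(&p1->d_inode->i_mutex);
		mutex_lock(&p2->d_inode->i_mutex);
	}
}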
@@ -814,7 +814,7 @@ static int graft_tree(struct vfsmount *mnt, struct nameidata *nd)
 		return -ENOTDIR;
 
 	err = -ENOENT;
-	down(&nd->dentry->d_inode->i_sem);
+	mutex_lock(&nd->dentry->d_inode->i_mutex);
 	if (IS_DEADDIR(nd->dentry->d_inode))
 		goto out_unlock;
@@ -826,7 +826,7 @@ static int graft_tree(struct vfsmount *mnt, struct nameidata *nd)
 	if (IS_ROOT(nd->dentry) || !d_unhashed(nd->dentry))
 		err = attach_recursive_mnt(mnt, nd, NULL);
 out_unlock:
-	up(&nd->dentry->d_inode->i_sem);
+	mutex_unlock(&nd->dentry->d_inode->i_mutex);
 	if (!err)
 		security_sb_post_addmount(mnt, nd);
 	return err;
@@ -962,7 +962,7 @@ static int do_move_mount(struct nameidata *nd, char *old_name)
 		goto out;
 
 	err = -ENOENT;
-	down(&nd->dentry->d_inode->i_sem);
+	mutex_lock(&nd->dentry->d_inode->i_mutex);
 	if (IS_DEADDIR(nd->dentry->d_inode))
 		goto out1;
@@ -1004,7 +1004,7 @@ static int do_move_mount(struct nameidata *nd, char *old_name)
 	list_del_init(&old_nd.mnt->mnt_expire);
 	spin_unlock(&vfsmount_lock);
 out1:
-	up(&nd->dentry->d_inode->i_sem);
+	mutex_unlock(&nd->dentry->d_inode->i_mutex);
 out:
 	up_write(&namespace_sem);
 	if (!err)
@@ -1573,7 +1573,7 @@ asmlinkage long sys_pivot_root(const char __user * new_root,
 	user_nd.dentry = dget(current->fs->root);
 	read_unlock(&current->fs->lock);
 	down_write(&namespace_sem);
-	down(&old_nd.dentry->d_inode->i_sem);
+	mutex_lock(&old_nd.dentry->d_inode->i_mutex);
 	error = -EINVAL;
 	if (IS_MNT_SHARED(old_nd.mnt) ||
 		IS_MNT_SHARED(new_nd.mnt->mnt_parent) ||
@@ -1626,7 +1626,7 @@ asmlinkage long sys_pivot_root(const char __user * new_root,
 	path_release(&root_parent);
 	path_release(&parent_nd);
 out2:
-	up(&old_nd.dentry->d_inode->i_sem);
+	mutex_unlock(&old_nd.dentry->d_inode->i_mutex);
 	up_write(&namespace_sem);
 	path_release(&user_nd);
 	path_release(&old_nd);
......
@@ -194,7 +194,7 @@ int nfs_readdir_filler(nfs_readdir_descriptor_t *desc, struct page *page)
 	spin_unlock(&inode->i_lock);
 	/* Ensure consistent page alignment of the data.
 	 * Note: assumes we have exclusive access to this mapping either
-	 * through inode->i_sem or some other mechanism.
+	 * through inode->i_mutex or some other mechanism.
 	 */
 	if (page->index == 0)
 		invalidate_inode_pages2_range(inode->i_mapping, PAGE_CACHE_SIZE, -1);
@@ -573,7 +573,7 @@ static int nfs_readdir(struct file *filp, void *dirent, filldir_t filldir)
 
 loff_t nfs_llseek_dir(struct file *filp, loff_t offset, int origin)
 {
-	down(&filp->f_dentry->d_inode->i_sem);
+	mutex_lock(&filp->f_dentry->d_inode->i_mutex);
 	switch (origin) {
 		case 1:
 			offset += filp->f_pos;
@@ -589,7 +589,7 @@ loff_t nfs_llseek_dir(struct file *filp, loff_t offset, int origin)
 		((struct nfs_open_context *)filp->private_data)->dir_cookie = 0;
 	}
 out:
-	up(&filp->f_dentry->d_inode->i_sem);
+	mutex_unlock(&filp->f_dentry->d_inode->i_mutex);
 	return offset;
 }
@@ -1001,7 +1001,7 @@ static int nfs_open_revalidate(struct dentry *dentry, struct nameidata *nd)
 		openflags &= ~(O_CREAT|O_TRUNC);
 
 	/*
-	 * Note: we're not holding inode->i_sem and so may be racing with
+	 * Note: we're not holding inode->i_mutex and so may be racing with
 	 * operations that change the directory. We therefore save the
 	 * change attribute *before* we do the RPC call.
 	 */
@@ -1051,7 +1051,7 @@ static struct dentry *nfs_readdir_lookup(nfs_readdir_descriptor_t *desc)
 		return dentry;
 	if (!desc->plus || !(entry->fattr->valid & NFS_ATTR_FATTR))
 		return NULL;
-	/* Note: caller is already holding the dir->i_sem! */
+	/* Note: caller is already holding the dir->i_mutex! */
 	dentry = d_alloc(parent, &name);
 	if (dentry == NULL)
 		return NULL;
......
@@ -121,9 +121,9 @@ out:
 static void
 nfsd4_sync_rec_dir(void)
 {
-	down(&rec_dir.dentry->d_inode->i_sem);
+	mutex_lock(&rec_dir.dentry->d_inode->i_mutex);
 	nfsd_sync_dir(rec_dir.dentry);
-	up(&rec_dir.dentry->d_inode->i_sem);
+	mutex_unlock(&rec_dir.dentry->d_inode->i_mutex);
 }
 
 int
@@ -143,7 +143,7 @@ nfsd4_create_clid_dir(struct nfs4_client *clp)
 	nfs4_save_user(&uid, &gid);
 
 	/* lock the parent */
-	down(&rec_dir.dentry->d_inode->i_sem);
+	mutex_lock(&rec_dir.dentry->d_inode->i_mutex);
 
 	dentry = lookup_one_len(dname, rec_dir.dentry, HEXDIR_LEN-1);
 	if (IS_ERR(dentry)) {
@@ -159,7 +159,7 @@ nfsd4_create_clid_dir(struct nfs4_client *clp)
 out_put:
 	dput(dentry);
 out_unlock:
-	up(&rec_dir.dentry->d_inode->i_sem);
+	mutex_unlock(&rec_dir.dentry->d_inode->i_mutex);
 	if (status == 0) {
 		clp->cl_firststate = 1;
 		nfsd4_sync_rec_dir();
@@ -259,9 +259,9 @@ nfsd4_remove_clid_file(struct dentry *dir, struct dentry *dentry)
 		printk("nfsd4: non-file found in client recovery directory\n");
 		return -EINVAL;
 	}
-	down(&dir->d_inode->i_sem);
+	mutex_lock(&dir->d_inode->i_mutex);
 	status = vfs_unlink(dir->d_inode, dentry);
-	up(&dir->d_inode->i_sem);
+	mutex_unlock(&dir->d_inode->i_mutex);
 	return status;
 }
@@ -274,9 +274,9 @@ nfsd4_clear_clid_dir(struct dentry *dir, struct dentry *dentry)
 	 * any regular files anyway, just in case the directory was created by
 	 * a kernel from the future.... */
 	nfsd4_list_rec_dir(dentry, nfsd4_remove_clid_file);
-	down(&dir->d_inode->i_sem);
+	mutex_lock(&dir->d_inode->i_mutex);
 	status = vfs_rmdir(dir->d_inode, dentry);
-	up(&dir->d_inode->i_sem);
+	mutex_unlock(&dir->d_inode->i_mutex);
 	return status;
 }
@@ -288,9 +288,9 @@ nfsd4_unlink_clid_dir(char *name, int namlen)
 	dprintk("NFSD: nfsd4_unlink_clid_dir. name %.*s\n", namlen, name);
 
-	down(&rec_dir.dentry->d_inode->i_sem);
+	mutex_lock(&rec_dir.dentry->d_inode->i_mutex);
 	dentry = lookup_one_len(name, rec_dir.dentry, namlen);
-	up(&rec_dir.dentry->d_inode->i_sem);
+	mutex_unlock(&rec_dir.dentry->d_inode->i_mutex);
 	if (IS_ERR(dentry)) {
 		status = PTR_ERR(dentry);
 		return status;
......
@@ -390,12 +390,12 @@ set_nfsv4_acl_one(struct dentry *dentry, struct posix_acl *pacl, char *key)
 	error = -EOPNOTSUPP;
 	if (inode->i_op && inode->i_op->setxattr) {
-		down(&inode->i_sem);
+		mutex_lock(&inode->i_mutex);
 		security_inode_setxattr(dentry, key, buf, len, 0);
 		error = inode->i_op->setxattr(dentry, key, buf, len, 0);
 		if (!error)
 			security_inode_post_setxattr(dentry, key, buf, len, 0);
-		up(&inode->i_sem);
+		mutex_unlock(&inode->i_mutex);
 	}
 out:
 	kfree(buf);
@@ -739,9 +739,9 @@ nfsd_sync(struct file *filp)
 	int err;
 	struct inode *inode = filp->f_dentry->d_inode;
 	dprintk("nfsd: sync file %s\n", filp->f_dentry->d_name.name);
-	down(&inode->i_sem);
+	mutex_lock(&inode->i_mutex);
 	err=nfsd_dosync(filp, filp->f_dentry, filp->f_op);
-	up(&inode->i_sem);
+	mutex_unlock(&inode->i_mutex);
 	return err;
 }
@@ -885,9 +885,9 @@ static void kill_suid(struct dentry *dentry)
 	struct iattr ia;
 	ia.ia_valid = ATTR_KILL_SUID | ATTR_KILL_SGID;
 
-	down(&dentry->d_inode->i_sem);
+	mutex_lock(&dentry->d_inode->i_mutex);
 	notify_change(dentry, &ia);
-	up(&dentry->d_inode->i_sem);
+	mutex_unlock(&dentry->d_inode->i_mutex);
 }
 
 static inline int
......
@@ -1532,7 +1532,7 @@ int ntfs_resident_attr_value_resize(MFT_RECORD *m, ATTR_RECORD *a,
 * NOTE to self: No changes in the attribute list are required to move from
 * a resident to a non-resident attribute.
 *
- * Locking: - The caller must hold i_sem on the inode.
+ * Locking: - The caller must hold i_mutex on the inode.
 */
 int ntfs_attr_make_non_resident(ntfs_inode *ni, const u32 data_size)
 {
@@ -1728,7 +1728,7 @@ int ntfs_attr_make_non_resident(ntfs_inode *ni, const u32 data_size)
 	/*
 	 * This needs to be last since the address space operations ->readpage
 	 * and ->writepage can run concurrently with us as they are not
-	 * serialized on i_sem. Note, we are not allowed to fail once we flip
+	 * serialized on i_mutex. Note, we are not allowed to fail once we flip
 	 * this switch, which is another reason to do this last.
 	 */
 	NInoSetNonResident(ni);
......
@@ -69,7 +69,7 @@ ntfschar I30[5] = { const_cpu_to_le16('$'), const_cpu_to_le16('I'),
 * work but we don't care for how quickly one can access them. This also fixes
 * the dcache aliasing issues.
 *
- * Locking: - Caller must hold i_sem on the directory.
+ * Locking: - Caller must hold i_mutex on the directory.
 *          - Each page cache page in the index allocation mapping must be
 *            locked whilst being accessed otherwise we may find a corrupt
 *            page due to it being under ->writepage at the moment which
@@ -1085,11 +1085,11 @@ static inline int ntfs_filldir(ntfs_volume *vol, loff_t fpos,
 * While this will return the names in random order this doesn't matter for
 * ->readdir but OTOH results in a faster ->readdir.
 *
- * VFS calls ->readdir without BKL but with i_sem held. This protects the VFS
+ * VFS calls ->readdir without BKL but with i_mutex held. This protects the VFS
 * parts (e.g. ->f_pos and ->i_size, and it also protects against directory
 * modifications).
 *
- * Locking: - Caller must hold i_sem on the directory.
+ * Locking: - Caller must hold i_mutex on the directory.
 *          - Each page cache page in the index allocation mapping must be
 *            locked whilst being accessed otherwise we may find a corrupt
 *            page due to it being under ->writepage at the moment which
@@ -1520,7 +1520,7 @@ static int ntfs_dir_open(struct inode *vi, struct file *filp)
 * Note: In the past @filp could be NULL so we ignore it as we don't need it
 * anyway.
 *
- * Locking: Caller must hold i_sem on the inode.
+ * Locking: Caller must hold i_mutex on the inode.
 *
 * TODO: We should probably also write all attribute/index inodes associated
 * with this inode but since we have no simple way of getting to them we ignore
......
@@ -106,7 +106,7 @@ static int ntfs_file_open(struct inode *vi, struct file *filp)
 * this is the case, the necessary zeroing will also have happened and that all
 * metadata is self-consistent.
 *
- * Locking: i_sem on the vfs inode corrseponsind to the ntfs inode @ni must be
+ * Locking: i_mutex on the vfs inode corrseponsind to the ntfs inode @ni must be
 *          held by the caller.
 */
 static int ntfs_attr_extend_initialized(ntfs_inode *ni, const s64 new_init_size,
@@ -473,7 +473,7 @@ static inline int ntfs_submit_bh_for_read(struct buffer_head *bh)
 * @bytes:	number of bytes to be written
 *
 * This is called for non-resident attributes from ntfs_file_buffered_write()
- * with i_sem held on the inode (@pages[0]->mapping->host). There are
+ * with i_mutex held on the inode (@pages[0]->mapping->host). There are
 * @nr_pages pages in @pages which are locked but not kmap()ped. The source
 * data has not yet been copied into the @pages.
 *
@@ -1637,7 +1637,7 @@ err_out:
 * @pos:	byte position in file at which the write begins
 * @bytes:	number of bytes to be written
 *
- * This is called from ntfs_file_buffered_write() with i_sem held on the inode
+ * This is called from ntfs_file_buffered_write() with i_mutex held on the inode
 * (@pages[0]->mapping->host). There are @nr_pages pages in @pages which are
 * locked but not kmap()ped. The source data has already been copied into the
 * @page. ntfs_prepare_pages_for_non_resident_write() has been called before
@@ -1814,7 +1814,7 @@ err_out:
 /**
 * ntfs_file_buffered_write -
 *
- * Locking: The vfs is holding ->i_sem on the inode.
+ * Locking: The vfs is holding ->i_mutex on the inode.
 */
 static ssize_t ntfs_file_buffered_write(struct kiocb *iocb,
 		const struct iovec *iov, unsigned long nr_segs,
@@ -2196,9 +2196,9 @@ static ssize_t ntfs_file_aio_write(struct kiocb *iocb, const char __user *buf,
 	BUG_ON(iocb->ki_pos != pos);
 
-	down(&inode->i_sem);
+	mutex_lock(&inode->i_mutex);
 	ret = ntfs_file_aio_write_nolock(iocb, &local_iov, 1, &iocb->ki_pos);
-	up(&inode->i_sem);
+	mutex_unlock(&inode->i_mutex);
 	if (ret > 0 && ((file->f_flags & O_SYNC) || IS_SYNC(inode))) {
 		int err = sync_page_range(inode, mapping, pos, ret);
 		if (err < 0)
@@ -2221,12 +2221,12 @@ static ssize_t ntfs_file_writev(struct file *file, const struct iovec *iov,
 	struct kiocb kiocb;
 	ssize_t ret;
 
-	down(&inode->i_sem);
+	mutex_lock(&inode->i_mutex);
 	init_sync_kiocb(&kiocb, file);
 	ret = ntfs_file_aio_write_nolock(&kiocb, iov, nr_segs, ppos);
 	if (ret == -EIOCBQUEUED)
 		ret = wait_on_sync_kiocb(&kiocb);
-	up(&inode->i_sem);
+	mutex_unlock(&inode->i_mutex);
 	if (ret > 0 && ((file->f_flags & O_SYNC) || IS_SYNC(inode))) {
 		int err = sync_page_range(inode, mapping, *ppos - ret, ret);
 		if (err < 0)
@@ -2269,7 +2269,7 @@ static ssize_t ntfs_file_write(struct file *file, const char __user *buf,
 * Note: In the past @filp could be NULL so we ignore it as we don't need it
 * anyway.
 *
- * Locking: Caller must hold i_sem on the inode.
+ * Locking: Caller must hold i_mutex on the inode.
 *
 * TODO: We should probably also write all attribute/index inodes associated
 * with this inode but since we have no simple way of getting to them we ignore
......
@@ -32,7 +32,7 @@
 * Allocate a new index context, initialize it with @idx_ni and return it.
 * Return NULL if allocation failed.
 *
- * Locking: Caller must hold i_sem on the index inode.
+ * Locking: Caller must hold i_mutex on the index inode.
 */
 ntfs_index_context *ntfs_index_ctx_get(ntfs_inode *idx_ni)
 {
@@ -50,7 +50,7 @@ ntfs_index_context *ntfs_index_ctx_get(ntfs_inode *idx_ni)
 *
 * Release the index context @ictx, releasing all associated resources.
 *
- * Locking: Caller must hold i_sem on the index inode.
+ * Locking: Caller must hold i_mutex on the index inode.
 */
 void ntfs_index_ctx_put(ntfs_index_context *ictx)
 {
@@ -106,7 +106,7 @@ void ntfs_index_ctx_put(ntfs_index_context *ictx)
 * or ntfs_index_entry_write() before the call to ntfs_index_ctx_put() to
 * ensure that the changes are written to disk.
 *
- * Locking: - Caller must hold i_sem on the index inode.
+ * Locking: - Caller must hold i_mutex on the index inode.
 *          - Each page cache page in the index allocation mapping must be
 *            locked whilst being accessed otherwise we may find a corrupt
 *            page due to it being under ->writepage at the moment which
......
@@ -2125,13 +2125,13 @@ void ntfs_put_inode(struct inode *vi)
 		ntfs_inode *ni = NTFS_I(vi);
 		if (NInoIndexAllocPresent(ni)) {
 			struct inode *bvi = NULL;
-			down(&vi->i_sem);
+			mutex_lock(&vi->i_mutex);
 			if (atomic_read(&vi->i_count) == 2) {
 				bvi = ni->itype.index.bmp_ino;
 				if (bvi)
 					ni->itype.index.bmp_ino = NULL;
 			}
-			up(&vi->i_sem);
+			mutex_unlock(&vi->i_mutex);
 			if (bvi)
 				iput(bvi);
 		}
@@ -2311,7 +2311,7 @@ static const char *es = " Leaving inconsistent metadata. Unmount and run "
 *
 * Returns 0 on success or -errno on error.
 *
- * Called with ->i_sem held. In all but one case ->i_alloc_sem is held for
+ * Called with ->i_mutex held. In all but one case ->i_alloc_sem is held for
 * writing. The only case in the kernel where ->i_alloc_sem is not held is
 * mm/filemap.c::generic_file_buffered_write() where vmtruncate() is called
 * with the current i_size as the offset. The analogous place in NTFS is in
@@ -2831,7 +2831,7 @@ void ntfs_truncate_vfs(struct inode *vi) {
 * We also abort all changes of user, group, and mode as we do not implement
 * the NTFS ACLs yet.
 *
- * Called with ->i_sem held. For the ATTR_SIZE (i.e. ->truncate) case, also
+ * Called with ->i_mutex held. For the ATTR_SIZE (i.e. ->truncate) case, also
 * called with ->i_alloc_sem held for writing.
 *
 * Basically this is a copy of generic notify_change() and inode_setattr()
......