alloc_sem
---------
-The alloc_sem is a per-filesystem semaphore, used primarily to ensure
+The alloc_sem is a per-filesystem mutex, used primarily to ensure
contiguous allocation of space on the medium. It is automatically
obtained during space allocations (jffs2_reserve_space()) and freed
upon write completion (jffs2_complete_reservation()). Note that
Ordering constraints: See f->sem.
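As a rough illustration (not part of this patch; ri, datalen and
alloclen are placeholder names mirroring the write paths converted
below), the usual shape of a write path is a reserve/complete bracket
around the node write:

        uint32_t alloclen;
        int ret;

        /* returns with c->alloc_sem held on success */
        ret = jffs2_reserve_space(c, sizeof(*ri) + datalen, &alloclen,
                                  ALLOC_NORMAL, JFFS2_SUMMARY_INODE_SIZE);
        if (ret)
                return ret;

        /* ... write the new node(s) into the space just reserved ... */

        jffs2_complete_reservation(c);  /* drops c->alloc_sem */
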
- File Semaphore f->sem
+ File Mutex f->sem
---------------------
-This is the JFFS2-internal equivalent of the inode semaphore i->i_sem.
+This is the JFFS2-internal equivalent of the inode mutex i->i_mutex.
It protects the contents of the jffs2_inode_info private inode data,
including the linked list of node fragments (but see the notes below on
erase_completion_lock), etc.
before calling the space allocation functions.
Instead of playing such games, we just have an extra internal
-semaphore, which is obtained by the garbage collection code and also
+mutex, which is obtained by the garbage collection code and also
by the normal file system code _after_ allocation of space.
Ordering constraints:
1. Never attempt to allocate space or lock alloc_sem with
any f->sem held.
- 2. Never attempt to lock two file semaphores in one thread.
+ 2. Never attempt to lock two file mutexes in one thread.
No ordering rules have been made for doing so.
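Rule 1 is what the "Locking pain" hunks in write.c further down in this
patch deal with: if more space turns out to be needed while f->sem is
already held, the code backs out of both locks and starts again.
Condensed (error handling elided, names as in those hunks):

        /* need more space, but f->sem is held: back out first (rule 1) */
        mutex_unlock(&f->sem);
        jffs2_complete_reservation(c);

        ret = jffs2_reserve_space(c, sizeof(*ri) + datalen, &dummy,
                                  alloc_mode, JFFS2_SUMMARY_INODE_SIZE);

        /* on success alloc_sem is held again; f->sem is retaken either way */
        mutex_lock(&f->sem);
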
Note that the per-inode list of physical nodes (f->nodes) is a special
case. Any changes to _valid_ nodes (i.e. ->flash_offset & 1 == 0) in
-the list are protected by the file semaphore f->sem. But the erase
-code may remove _obsolete_ nodes from the list while holding only the
+the list are protected by the file mutex f->sem. But the erase code
+may remove _obsolete_ nodes from the list while holding only the
erase_completion_lock. So you can walk the list only while holding the
erase_completion_lock, and can drop the lock temporarily mid-walk as
long as the pointer you're holding is to a _valid_ node, not an
erase_free_sem
--------------
-This semaphore is only used by the erase code which frees obsolete
-node references and the jffs2_garbage_collect_deletion_dirent()
-function. The latter function on NAND flash must read _obsolete_ nodes
-to determine whether the 'deletion dirent' under consideration can be
+This mutex is only used by the erase code which frees obsolete node
+references and the jffs2_garbage_collect_deletion_dirent() function.
+The latter function on NAND flash must read _obsolete_ nodes to
+determine whether the 'deletion dirent' under consideration can be
discarded or whether it is still required to show that an inode has
been unlinked. Because reading from the flash may sleep, the
erase_completion_lock cannot be held, so an alternative, more
void
__jffs2_dbg_fragtree_paranoia_check(struct jffs2_inode_info *f)
{
- down(&f->sem);
+ mutex_lock(&f->sem);
__jffs2_dbg_fragtree_paranoia_check_nolock(f);
- up(&f->sem);
+ mutex_unlock(&f->sem);
}
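The bulk of the patch is the same mechanical substitution shown in this
hunk; as a summary (not itself part of the patch), the mapping applied
throughout is:

        /*
         * down(&sem)               ->  mutex_lock(&mutex)
         * down_interruptible(&sem) ->  mutex_lock_interruptible(&mutex)
         * down_trylock(&sem)       ->  mutex_trylock(&mutex)  (sense inverted)
         * up(&sem)                 ->  mutex_unlock(&mutex)
         * init_MUTEX(&sem)         ->  mutex_init(&mutex)
         * init_MUTEX_LOCKED(&sem)  ->  mutex_init(&mutex); mutex_lock(&mutex)
         */
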
void
void
__jffs2_dbg_dump_fragtree(struct jffs2_inode_info *f)
{
- down(&f->sem);
+ mutex_lock(&f->sem);
jffs2_dbg_dump_fragtree_nolock(f);
- up(&f->sem);
+ mutex_unlock(&f->sem);
}
void
dir_f = JFFS2_INODE_INFO(dir_i);
c = JFFS2_SB_INFO(dir_i->i_sb);
- down(&dir_f->sem);
+ mutex_lock(&dir_f->sem);
/* NB: The 2.2 backport will need to explicitly check for '.' and '..' here */
for (fd_list = dir_f->dents; fd_list && fd_list->nhash <= target->d_name.hash; fd_list = fd_list->next) {
}
if (fd)
ino = fd->ino;
- up(&dir_f->sem);
+ mutex_unlock(&dir_f->sem);
if (ino) {
inode = jffs2_iget(dir_i->i_sb, ino);
if (IS_ERR(inode)) {
}
curofs=1;
- down(&f->sem);
+ mutex_lock(&f->sem);
for (fd = f->dents; fd; fd = fd->next) {
curofs++;
break;
offset++;
}
- up(&f->sem);
+ mutex_unlock(&f->sem);
out:
filp->f_pos = offset;
return 0;
ret = jffs2_do_link(c, dir_f, f->inocache->ino, type, dentry->d_name.name, dentry->d_name.len, now);
if (!ret) {
- down(&f->sem);
+ mutex_lock(&f->sem);
old_dentry->d_inode->i_nlink = ++f->inocache->nlink;
- up(&f->sem);
+ mutex_unlock(&f->sem);
d_instantiate(dentry, old_dentry->d_inode);
dir_i->i_mtime = dir_i->i_ctime = ITIME(now);
atomic_inc(&old_dentry->d_inode->i_count);
if (IS_ERR(fn)) {
/* Eeek. Wave bye bye */
- up(&f->sem);
+ mutex_unlock(&f->sem);
jffs2_complete_reservation(c);
jffs2_clear_inode(inode);
return PTR_ERR(fn);
f->target = kmalloc(targetlen + 1, GFP_KERNEL);
if (!f->target) {
printk(KERN_WARNING "Can't allocate %d bytes of memory\n", targetlen + 1);
- up(&f->sem);
+ mutex_unlock(&f->sem);
jffs2_complete_reservation(c);
jffs2_clear_inode(inode);
return -ENOMEM;
obsoleted by the first data write
*/
f->metadata = fn;
- up(&f->sem);
+ mutex_unlock(&f->sem);
jffs2_complete_reservation(c);
}
dir_f = JFFS2_INODE_INFO(dir_i);
- down(&dir_f->sem);
+ mutex_lock(&dir_f->sem);
rd->magic = cpu_to_je16(JFFS2_MAGIC_BITMASK);
rd->nodetype = cpu_to_je16(JFFS2_NODETYPE_DIRENT);
as if it were the final unlink() */
jffs2_complete_reservation(c);
jffs2_free_raw_dirent(rd);
- up(&dir_f->sem);
+ mutex_unlock(&dir_f->sem);
jffs2_clear_inode(inode);
return PTR_ERR(fd);
}
one if necessary. */
jffs2_add_fd_to_list(c, fd, &dir_f->dents);
- up(&dir_f->sem);
+ mutex_unlock(&dir_f->sem);
jffs2_complete_reservation(c);
d_instantiate(dentry, inode);
if (IS_ERR(fn)) {
/* Eeek. Wave bye bye */
- up(&f->sem);
+ mutex_unlock(&f->sem);
jffs2_complete_reservation(c);
jffs2_clear_inode(inode);
return PTR_ERR(fn);
obsoleted by the first data write
*/
f->metadata = fn;
- up(&f->sem);
+ mutex_unlock(&f->sem);
jffs2_complete_reservation(c);
}
dir_f = JFFS2_INODE_INFO(dir_i);
- down(&dir_f->sem);
+ mutex_lock(&dir_f->sem);
rd->magic = cpu_to_je16(JFFS2_MAGIC_BITMASK);
rd->nodetype = cpu_to_je16(JFFS2_NODETYPE_DIRENT);
as if it were the final unlink() */
jffs2_complete_reservation(c);
jffs2_free_raw_dirent(rd);
- up(&dir_f->sem);
+ mutex_unlock(&dir_f->sem);
jffs2_clear_inode(inode);
return PTR_ERR(fd);
}
one if necessary. */
jffs2_add_fd_to_list(c, fd, &dir_f->dents);
- up(&dir_f->sem);
+ mutex_unlock(&dir_f->sem);
jffs2_complete_reservation(c);
d_instantiate(dentry, inode);
if (IS_ERR(fn)) {
/* Eeek. Wave bye bye */
- up(&f->sem);
+ mutex_unlock(&f->sem);
jffs2_complete_reservation(c);
jffs2_clear_inode(inode);
return PTR_ERR(fn);
obsoleted by the first data write
*/
f->metadata = fn;
- up(&f->sem);
+ mutex_unlock(&f->sem);
jffs2_complete_reservation(c);
}
dir_f = JFFS2_INODE_INFO(dir_i);
- down(&dir_f->sem);
+ mutex_lock(&dir_f->sem);
rd->magic = cpu_to_je16(JFFS2_MAGIC_BITMASK);
rd->nodetype = cpu_to_je16(JFFS2_NODETYPE_DIRENT);
as if it were the final unlink() */
jffs2_complete_reservation(c);
jffs2_free_raw_dirent(rd);
- up(&dir_f->sem);
+ mutex_unlock(&dir_f->sem);
jffs2_clear_inode(inode);
return PTR_ERR(fd);
}
one if necessary. */
jffs2_add_fd_to_list(c, fd, &dir_f->dents);
- up(&dir_f->sem);
+ mutex_unlock(&dir_f->sem);
jffs2_complete_reservation(c);
d_instantiate(dentry, inode);
if (S_ISDIR(new_dentry->d_inode->i_mode)) {
struct jffs2_full_dirent *fd;
- down(&victim_f->sem);
+ mutex_lock(&victim_f->sem);
for (fd = victim_f->dents; fd; fd = fd->next) {
if (fd->ino) {
- up(&victim_f->sem);
+ mutex_unlock(&victim_f->sem);
return -ENOTEMPTY;
}
}
- up(&victim_f->sem);
+ mutex_unlock(&victim_f->sem);
}
}
/* Don't oops if the victim was a dirent pointing to an
inode which didn't exist. */
if (victim_f->inocache) {
- down(&victim_f->sem);
+ mutex_lock(&victim_f->sem);
victim_f->inocache->nlink--;
- up(&victim_f->sem);
+ mutex_unlock(&victim_f->sem);
}
}
if (ret) {
/* Oh shit. We really ought to make a single node which can do both atomically */
struct jffs2_inode_info *f = JFFS2_INODE_INFO(old_dentry->d_inode);
- down(&f->sem);
+ mutex_lock(&f->sem);
inc_nlink(old_dentry->d_inode);
if (f->inocache)
f->inocache->nlink++;
- up(&f->sem);
+ mutex_unlock(&f->sem);
printk(KERN_NOTICE "jffs2_rename(): Link succeeded, unlink failed (err %d). You now have a hard link\n", ret);
/* Might as well let the VFS know */
instr = kmalloc(sizeof(struct erase_info) + sizeof(struct erase_priv_struct), GFP_KERNEL);
if (!instr) {
printk(KERN_WARNING "kmalloc for struct erase_info in jffs2_erase_block failed. Refiling block for later\n");
- down(&c->erase_free_sem);
+ mutex_lock(&c->erase_free_sem);
spin_lock(&c->erase_completion_lock);
list_move(&jeb->list, &c->erase_pending_list);
c->erasing_size -= c->sector_size;
c->dirty_size += c->sector_size;
jeb->dirty_size = c->sector_size;
spin_unlock(&c->erase_completion_lock);
- up(&c->erase_free_sem);
+ mutex_unlock(&c->erase_free_sem);
return;
}
if (ret == -ENOMEM || ret == -EAGAIN) {
/* Erase failed immediately. Refile it on the list */
D1(printk(KERN_DEBUG "Erase at 0x%08x failed: %d. Refiling on erase_pending_list\n", jeb->offset, ret));
- down(&c->erase_free_sem);
+ mutex_lock(&c->erase_free_sem);
spin_lock(&c->erase_completion_lock);
list_move(&jeb->list, &c->erase_pending_list);
c->erasing_size -= c->sector_size;
c->dirty_size += c->sector_size;
jeb->dirty_size = c->sector_size;
spin_unlock(&c->erase_completion_lock);
- up(&c->erase_free_sem);
+ mutex_unlock(&c->erase_free_sem);
return;
}
{
struct jffs2_eraseblock *jeb;
- down(&c->erase_free_sem);
+ mutex_lock(&c->erase_free_sem);
spin_lock(&c->erase_completion_lock);
jeb = list_entry(c->erase_complete_list.next, struct jffs2_eraseblock, list);
list_del(&jeb->list);
spin_unlock(&c->erase_completion_lock);
- up(&c->erase_free_sem);
+ mutex_unlock(&c->erase_free_sem);
jffs2_mark_erased_block(c, jeb);
if (!--count) {
jffs2_free_jeb_node_refs(c, jeb);
list_add(&jeb->list, &c->erasing_list);
spin_unlock(&c->erase_completion_lock);
- up(&c->erase_free_sem);
+ mutex_unlock(&c->erase_free_sem);
jffs2_erase_block(c, jeb);
/* Be nice */
yield();
- down(&c->erase_free_sem);
+ mutex_lock(&c->erase_free_sem);
spin_lock(&c->erase_completion_lock);
}
spin_unlock(&c->erase_completion_lock);
- up(&c->erase_free_sem);
+ mutex_unlock(&c->erase_free_sem);
done:
D1(printk(KERN_DEBUG "jffs2_erase_pending_blocks completed\n"));
}
static void jffs2_erase_succeeded(struct jffs2_sb_info *c, struct jffs2_eraseblock *jeb)
{
D1(printk(KERN_DEBUG "Erase completed successfully at 0x%08x\n", jeb->offset));
- down(&c->erase_free_sem);
+ mutex_lock(&c->erase_free_sem);
spin_lock(&c->erase_completion_lock);
list_move_tail(&jeb->list, &c->erase_complete_list);
spin_unlock(&c->erase_completion_lock);
- up(&c->erase_free_sem);
+ mutex_unlock(&c->erase_free_sem);
/* Ensure that kupdated calls us again to mark them clean */
jffs2_erase_pending_trigger(c);
}
failed too many times. */
if (!jffs2_write_nand_badblock(c, jeb, bad_offset)) {
/* We'd like to give this block another try. */
- down(&c->erase_free_sem);
+ mutex_lock(&c->erase_free_sem);
spin_lock(&c->erase_completion_lock);
list_move(&jeb->list, &c->erase_pending_list);
c->erasing_size -= c->sector_size;
c->dirty_size += c->sector_size;
jeb->dirty_size = c->sector_size;
spin_unlock(&c->erase_completion_lock);
- up(&c->erase_free_sem);
+ mutex_unlock(&c->erase_free_sem);
return;
}
}
- down(&c->erase_free_sem);
+ mutex_lock(&c->erase_free_sem);
spin_lock(&c->erase_completion_lock);
c->erasing_size -= c->sector_size;
c->bad_size += c->sector_size;
list_move(&jeb->list, &c->bad_list);
c->nr_erasing_blocks--;
spin_unlock(&c->erase_completion_lock);
- up(&c->erase_free_sem);
+ mutex_unlock(&c->erase_free_sem);
wake_up(&c->erase_wait);
}
jffs2_link_node_ref(c, jeb, jeb->offset | REF_NORMAL, c->cleanmarker_size, NULL);
}
- down(&c->erase_free_sem);
+ mutex_lock(&c->erase_free_sem);
spin_lock(&c->erase_completion_lock);
c->erasing_size -= c->sector_size;
c->free_size += jeb->free_size;
c->nr_erasing_blocks--;
c->nr_free_blocks++;
spin_unlock(&c->erase_completion_lock);
- up(&c->erase_free_sem);
+ mutex_unlock(&c->erase_free_sem);
wake_up(&c->erase_wait);
return;
filebad:
- down(&c->erase_free_sem);
+ mutex_lock(&c->erase_free_sem);
spin_lock(&c->erase_completion_lock);
/* Stick it on a list (any list) so erase_failed can take it
right off again. Silly, but shouldn't happen often. */
list_add(&jeb->list, &c->erasing_list);
spin_unlock(&c->erase_completion_lock);
- up(&c->erase_free_sem);
+ mutex_unlock(&c->erase_free_sem);
jffs2_erase_failed(c, jeb, bad_offset);
return;
refile:
/* Stick it back on the list from whence it came and come back later */
jffs2_erase_pending_trigger(c);
- down(&c->erase_free_sem);
+ mutex_lock(&c->erase_free_sem);
spin_lock(&c->erase_completion_lock);
list_add(&jeb->list, &c->erase_complete_list);
spin_unlock(&c->erase_completion_lock);
- up(&c->erase_free_sem);
+ mutex_unlock(&c->erase_free_sem);
return;
}
struct jffs2_inode_info *f = JFFS2_INODE_INFO(pg->mapping->host);
int ret;
- down(&f->sem);
+ mutex_lock(&f->sem);
ret = jffs2_do_readpage_unlock(pg->mapping->host, pg);
- up(&f->sem);
+ mutex_unlock(&f->sem);
return ret;
}
if (ret)
goto out_page;
- down(&f->sem);
+ mutex_lock(&f->sem);
memset(&ri, 0, sizeof(ri));
ri.magic = cpu_to_je16(JFFS2_MAGIC_BITMASK);
if (IS_ERR(fn)) {
ret = PTR_ERR(fn);
jffs2_complete_reservation(c);
- up(&f->sem);
+ mutex_unlock(&f->sem);
goto out_page;
}
ret = jffs2_add_full_dnode_to_inode(c, f, fn);
jffs2_mark_node_obsolete(c, fn->raw);
jffs2_free_full_dnode(fn);
jffs2_complete_reservation(c);
- up(&f->sem);
+ mutex_unlock(&f->sem);
goto out_page;
}
jffs2_complete_reservation(c);
inode->i_size = pageofs;
- up(&f->sem);
+ mutex_unlock(&f->sem);
}
/*
* case of a short-copy.
*/
if (!PageUptodate(pg)) {
- down(&f->sem);
+ mutex_lock(&f->sem);
ret = jffs2_do_readpage_nolock(inode, pg);
- up(&f->sem);
+ mutex_unlock(&f->sem);
if (ret)
goto out_page;
}
mdata = (char *)&dev;
D1(printk(KERN_DEBUG "jffs2_setattr(): Writing %d bytes of kdev_t\n", mdatalen));
} else if (S_ISLNK(inode->i_mode)) {
- down(&f->sem);
+ mutex_lock(&f->sem);
mdatalen = f->metadata->size;
mdata = kmalloc(f->metadata->size, GFP_USER);
if (!mdata) {
- up(&f->sem);
+ mutex_unlock(&f->sem);
return -ENOMEM;
}
ret = jffs2_read_dnode(c, f, f->metadata, mdata, 0, mdatalen);
if (ret) {
- up(&f->sem);
+ mutex_unlock(&f->sem);
kfree(mdata);
return ret;
}
- up(&f->sem);
+ mutex_unlock(&f->sem);
D1(printk(KERN_DEBUG "jffs2_setattr(): Writing %d bytes of symlink target\n", mdatalen));
}
kfree(mdata);
return ret;
}
- down(&f->sem);
+ mutex_lock(&f->sem);
ivalid = iattr->ia_valid;
ri->magic = cpu_to_je16(JFFS2_MAGIC_BITMASK);
if (IS_ERR(new_metadata)) {
jffs2_complete_reservation(c);
jffs2_free_raw_inode(ri);
- up(&f->sem);
+ mutex_unlock(&f->sem);
return PTR_ERR(new_metadata);
}
/* It worked. Update the inode */
}
jffs2_free_raw_inode(ri);
- up(&f->sem);
+ mutex_unlock(&f->sem);
jffs2_complete_reservation(c);
/* We have to do the vmtruncate() without f->sem held, since
c = JFFS2_SB_INFO(inode->i_sb);
jffs2_init_inode_info(f);
- down(&f->sem);
+ mutex_lock(&f->sem);
ret = jffs2_do_read_inode(c, f, inode->i_ino, &latest_node);
if (ret) {
- up(&f->sem);
+ mutex_unlock(&f->sem);
iget_failed(inode);
return ERR_PTR(ret);
}
printk(KERN_WARNING "jffs2_read_inode(): Bogus imode %o for ino %lu\n", inode->i_mode, (unsigned long)inode->i_ino);
}
- up(&f->sem);
+ mutex_unlock(&f->sem);
D1(printk(KERN_DEBUG "jffs2_read_inode() returning\n"));
unlock_new_inode(inode);
error_io:
ret = -EIO;
error:
- up(&f->sem);
+ mutex_unlock(&f->sem);
jffs2_do_clear_inode(c, f);
iget_failed(inode);
return ERR_PTR(ret);
Flush the writebuffer, if necessary, else we lose it */
if (!(sb->s_flags & MS_RDONLY)) {
jffs2_stop_garbage_collect_thread(c);
- down(&c->alloc_sem);
+ mutex_lock(&c->alloc_sem);
jffs2_flush_wbuf_pad(c);
- up(&c->alloc_sem);
+ mutex_unlock(&c->alloc_sem);
}
if (!(*flags & MS_RDONLY))
f = JFFS2_INODE_INFO(inode);
jffs2_init_inode_info(f);
- down(&f->sem);
+ mutex_lock(&f->sem);
memset(ri, 0, sizeof(*ri));
/* Set OS-specific defaults for new inodes */
int ret = 0, inum, nlink;
int xattr = 0;
- if (down_interruptible(&c->alloc_sem))
+ if (mutex_lock_interruptible(&c->alloc_sem))
return -EINTR;
for (;;) {
c->unchecked_size);
jffs2_dbg_dump_block_lists_nolock(c);
spin_unlock(&c->erase_completion_lock);
- up(&c->alloc_sem);
+ mutex_unlock(&c->alloc_sem);
return -ENOSPC;
}
made no progress in this case, but that should be OK */
c->checked_ino--;
- up(&c->alloc_sem);
+ mutex_unlock(&c->alloc_sem);
sleep_on_spinunlock(&c->inocache_wq, &c->inocache_lock);
return 0;
printk(KERN_WARNING "Returned error for crccheck of ino #%u. Expect badness...\n", ic->ino);
jffs2_set_inocache_state(c, ic, INO_STATE_CHECKEDABSENT);
- up(&c->alloc_sem);
+ mutex_unlock(&c->alloc_sem);
return ret;
}
if (!jeb) {
D1 (printk(KERN_NOTICE "jffs2: Couldn't find erase block to garbage collect!\n"));
spin_unlock(&c->erase_completion_lock);
- up(&c->alloc_sem);
+ mutex_unlock(&c->alloc_sem);
return -EIO;
}
printk(KERN_DEBUG "Nextblock at %08x, used_size %08x, dirty_size %08x, wasted_size %08x, free_size %08x\n", c->nextblock->offset, c->nextblock->used_size, c->nextblock->dirty_size, c->nextblock->wasted_size, c->nextblock->free_size));
if (!jeb->used_size) {
- up(&c->alloc_sem);
+ mutex_unlock(&c->alloc_sem);
goto eraseit;
}
jeb->offset, jeb->free_size, jeb->dirty_size, jeb->used_size);
jeb->gc_node = raw;
spin_unlock(&c->erase_completion_lock);
- up(&c->alloc_sem);
+ mutex_unlock(&c->alloc_sem);
BUG();
}
}
/* Just mark it obsolete */
jffs2_mark_node_obsolete(c, raw);
}
- up(&c->alloc_sem);
+ mutex_unlock(&c->alloc_sem);
goto eraseit_lock;
}
*/
printk(KERN_CRIT "Inode #%u already in state %d in jffs2_garbage_collect_pass()!\n",
ic->ino, ic->state);
- up(&c->alloc_sem);
+ mutex_unlock(&c->alloc_sem);
spin_unlock(&c->inocache_lock);
BUG();
the alloc_sem() (for marking nodes invalid) so we must
drop the alloc_sem before sleeping. */
- up(&c->alloc_sem);
+ mutex_unlock(&c->alloc_sem);
D1(printk(KERN_DEBUG "jffs2_garbage_collect_pass() waiting for ino #%u in state %d\n",
ic->ino, ic->state));
sleep_on_spinunlock(&c->inocache_wq, &c->inocache_lock);
ret = -ENOSPC;
}
release_sem:
- up(&c->alloc_sem);
+ mutex_unlock(&c->alloc_sem);
eraseit_lock:
/* If we've finished this block, start it erasing */
uint32_t start = 0, end = 0, nrfrags = 0;
int ret = 0;
- down(&f->sem);
+ mutex_lock(&f->sem);
/* Now we have the lock for this inode. Check that it's still the one at the head
of the list. */
}
}
upnout:
- up(&f->sem);
+ mutex_unlock(&f->sem);
return ret;
}
/* Prevent the erase code from nicking the obsolete node refs while
we're looking at them. I really don't like this extra lock but
can't see any alternative. Suggestions on a postcard to... */
- down(&c->erase_free_sem);
+ mutex_lock(&c->erase_free_sem);
for (raw = f->inocache->nodes; raw != (void *)f->inocache; raw = raw->next_in_ino) {
/* OK. The name really does match. There really is still an older node on
the flash which our deletion dirent obsoletes. So we have to write out
a new deletion dirent to replace it */
- up(&c->erase_free_sem);
+ mutex_unlock(&c->erase_free_sem);
D1(printk(KERN_DEBUG "Deletion dirent at %08x still obsoletes real dirent \"%s\" at %08x for ino #%u\n",
ref_offset(fd->raw), fd->name, ref_offset(raw), je32_to_cpu(rd->ino)));
return jffs2_garbage_collect_dirent(c, jeb, f, fd);
}
- up(&c->erase_free_sem);
+ mutex_unlock(&c->erase_free_sem);
kfree(rd);
}
before letting GC proceed. Or we'd have to put ugliness
into the GC code so it didn't attempt to obtain the i_mutex
for the inode(s) which are already locked */
- struct semaphore sem;
+ struct mutex sem;
/* The highest (datanode) version number used for this ino */
uint32_t highest_version;
#include <linux/spinlock.h>
#include <linux/workqueue.h>
#include <linux/completion.h>
-#include <linux/semaphore.h>
+#include <linux/mutex.h>
#include <linux/timer.h>
#include <linux/wait.h>
#include <linux/list.h>
struct completion gc_thread_start; /* GC thread start completion */
struct completion gc_thread_exit; /* GC thread exit completion port */
- struct semaphore alloc_sem; /* Used to protect all the following
+ struct mutex alloc_sem; /* Used to protect all the following
fields, and also to protect against
out-of-order writing of nodes. And GC. */
uint32_t cleanmarker_size; /* Size of an _inline_ CLEANMARKER
/* Sem to allow jffs2_garbage_collect_deletion_dirent to
drop the erase_completion_lock while it's holding a pointer
to an obsoleted node. I don't like this. Alternatives welcomed. */
- struct semaphore erase_free_sem;
+ struct mutex erase_free_sem;
uint32_t wbuf_pagesize; /* 0 for NOR and other flashes with no wbuf */
minsize = PAD(minsize);
D1(printk(KERN_DEBUG "jffs2_reserve_space(): Requested 0x%x bytes\n", minsize));
- down(&c->alloc_sem);
+ mutex_lock(&c->alloc_sem);
D1(printk(KERN_DEBUG "jffs2_reserve_space(): alloc sem got\n"));
dirty, c->unchecked_size, c->sector_size));
spin_unlock(&c->erase_completion_lock);
- up(&c->alloc_sem);
+ mutex_unlock(&c->alloc_sem);
return -ENOSPC;
}
D1(printk(KERN_DEBUG "max. available size 0x%08x < blocksneeded * sector_size 0x%08x, returning -ENOSPC\n",
avail, blocksneeded * c->sector_size));
spin_unlock(&c->erase_completion_lock);
- up(&c->alloc_sem);
+ mutex_unlock(&c->alloc_sem);
return -ENOSPC;
}
- up(&c->alloc_sem);
+ mutex_unlock(&c->alloc_sem);
D1(printk(KERN_DEBUG "Triggering GC pass. nr_free_blocks %d, nr_erasing_blocks %d, free_size 0x%08x, dirty_size 0x%08x, wasted_size 0x%08x, used_size 0x%08x, erasing_size 0x%08x, bad_size 0x%08x (total 0x%08x of 0x%08x)\n",
c->nr_free_blocks, c->nr_erasing_blocks, c->free_size, c->dirty_size, c->wasted_size, c->used_size, c->erasing_size, c->bad_size,
if (signal_pending(current))
return -EINTR;
- down(&c->alloc_sem);
+ mutex_lock(&c->alloc_sem);
spin_lock(&c->erase_completion_lock);
}
if (!ret)
ret = jffs2_prealloc_raw_node_refs(c, c->nextblock, 1);
if (ret)
- up(&c->alloc_sem);
+ mutex_unlock(&c->alloc_sem);
return ret;
}
{
D1(printk(KERN_DEBUG "jffs2_complete_reservation()\n"));
jffs2_garbage_collect_trigger(c);
- up(&c->alloc_sem);
+ mutex_unlock(&c->alloc_sem);
}
static inline int on_list(struct list_head *obj, struct list_head *head)
any jffs2_raw_node_refs. So we don't need to stop erases from
happening, or protect against people holding an obsolete
jffs2_raw_node_ref without the erase_completion_lock. */
- down(&c->erase_free_sem);
+ mutex_lock(&c->erase_free_sem);
}
spin_lock(&c->erase_completion_lock);
}
out_erase_sem:
- up(&c->erase_free_sem);
+ mutex_unlock(&c->erase_free_sem);
}
int jffs2_thread_should_wake(struct jffs2_sb_info *c)
JFFS2_ERROR("failed to read from flash: error %d, %zd of %zd bytes read\n",
ret, retlen, sizeof(*latest_node));
/* FIXME: If this fails, there seems to be a memory leak. Find it. */
- up(&f->sem);
+ mutex_unlock(&f->sem);
jffs2_do_clear_inode(c, f);
return ret?ret:-EIO;
}
if (crc != je32_to_cpu(latest_node->node_crc)) {
JFFS2_ERROR("CRC failed for read_inode of inode %u at physical location 0x%x\n",
f->inocache->ino, ref_offset(rii.latest_ref));
- up(&f->sem);
+ mutex_unlock(&f->sem);
jffs2_do_clear_inode(c, f);
return -EIO;
}
f->target = kmalloc(je32_to_cpu(latest_node->csize) + 1, GFP_KERNEL);
if (!f->target) {
JFFS2_ERROR("can't allocate %d bytes of memory for the symlink target path cache\n", je32_to_cpu(latest_node->csize));
- up(&f->sem);
+ mutex_unlock(&f->sem);
jffs2_do_clear_inode(c, f);
return -ENOMEM;
}
ret = -EIO;
kfree(f->target);
f->target = NULL;
- up(&f->sem);
+ mutex_unlock(&f->sem);
jffs2_do_clear_inode(c, f);
return -ret;
}
if (f->metadata) {
JFFS2_ERROR("Argh. Special inode #%u with mode 0%o had metadata node\n",
f->inocache->ino, jemode_to_cpu(latest_node->mode));
- up(&f->sem);
+ mutex_unlock(&f->sem);
jffs2_do_clear_inode(c, f);
return -EIO;
}
if (!frag_first(&f->fragtree)) {
JFFS2_ERROR("Argh. Special inode #%u with mode 0%o has no fragments\n",
f->inocache->ino, jemode_to_cpu(latest_node->mode));
- up(&f->sem);
+ mutex_unlock(&f->sem);
jffs2_do_clear_inode(c, f);
return -EIO;
}
JFFS2_ERROR("Argh. Special inode #%u with mode 0x%x had more than one node\n",
f->inocache->ino, jemode_to_cpu(latest_node->mode));
/* FIXME: Deal with it - check crc32, check for duplicate node, check times and discard the older one */
- up(&f->sem);
+ mutex_unlock(&f->sem);
jffs2_do_clear_inode(c, f);
return -EIO;
}
if (!f)
return -ENOMEM;
- init_MUTEX_LOCKED(&f->sem);
+ mutex_init(&f->sem);
+ mutex_lock(&f->sem);
f->inocache = ic;
ret = jffs2_do_read_inode_internal(c, f, &n);
if (!ret) {
- up(&f->sem);
+ mutex_unlock(&f->sem);
jffs2_do_clear_inode(c, f);
}
kfree (f);
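There is no mutex counterpart to init_MUTEX_LOCKED(), hence the
two-line replacement in the hunk above; the equivalence, side by side:

        /* semaphore API: create the lock already held */
        init_MUTEX_LOCKED(&f->sem);

        /* mutex API: no locked initializer, so init and then lock explicitly */
        mutex_init(&f->sem);
        mutex_lock(&f->sem);
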
jffs2_clear_acl(f);
jffs2_xattr_delete_inode(c, f->inocache);
- down(&f->sem);
+ mutex_lock(&f->sem);
deleted = f->inocache && !f->inocache->nlink;
if (f->inocache && f->inocache->state != INO_STATE_CHECKING)
jffs2_del_ino_cache(c, f->inocache);
}
- up(&f->sem);
+ mutex_unlock(&f->sem);
}
{
struct jffs2_inode_info *ei = (struct jffs2_inode_info *) foo;
- init_MUTEX(&ei->sem);
+ mutex_init(&ei->sem);
inode_init_once(&ei->vfs_inode);
}
{
struct jffs2_sb_info *c = JFFS2_SB_INFO(sb);
- down(&c->alloc_sem);
+ mutex_lock(&c->alloc_sem);
jffs2_flush_wbuf_pad(c);
- up(&c->alloc_sem);
+ mutex_unlock(&c->alloc_sem);
return 0;
}
/* Initialize JFFS2 superblock locks, the further initialization will
* be done later */
- init_MUTEX(&c->alloc_sem);
- init_MUTEX(&c->erase_free_sem);
+ mutex_init(&c->alloc_sem);
+ mutex_init(&c->erase_free_sem);
init_waitqueue_head(&c->erase_wait);
init_waitqueue_head(&c->inocache_wq);
spin_lock_init(&c->erase_completion_lock);
D2(printk(KERN_DEBUG "jffs2: jffs2_put_super()\n"));
- down(&c->alloc_sem);
+ mutex_lock(&c->alloc_sem);
jffs2_flush_wbuf_pad(c);
- up(&c->alloc_sem);
+ mutex_unlock(&c->alloc_sem);
jffs2_sum_exit(c);
if (!jffs2_is_writebuffered(c))
return 0;
- if (!down_trylock(&c->alloc_sem)) {
- up(&c->alloc_sem);
+ if (mutex_trylock(&c->alloc_sem)) {
+ mutex_unlock(&c->alloc_sem);
printk(KERN_CRIT "jffs2_flush_wbuf() called with alloc_sem not locked!\n");
BUG();
}
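Note the inverted sense in the conversion above: down_trylock() returns
0 when it acquires the semaphore, while mutex_trylock() returns nonzero
when it acquires the mutex, so !down_trylock(...) becomes
mutex_trylock(...). In general terms:

        if (!down_trylock(&sem)) {      /* semaphore: 0 means acquired */
                /* got it */
                up(&sem);
        }

        if (mutex_trylock(&lock)) {     /* mutex: nonzero means acquired */
                /* got it */
                mutex_unlock(&lock);
        }
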
if (!c->wbuf)
return 0;
- down(&c->alloc_sem);
+ mutex_lock(&c->alloc_sem);
if (!jffs2_wbuf_pending_for_ino(c, ino)) {
D1(printk(KERN_DEBUG "Ino #%d not pending in wbuf. Returning\n", ino));
- up(&c->alloc_sem);
+ mutex_unlock(&c->alloc_sem);
return 0;
}
} else while (old_wbuf_len &&
old_wbuf_ofs == c->wbuf_ofs) {
- up(&c->alloc_sem);
+ mutex_unlock(&c->alloc_sem);
D1(printk(KERN_DEBUG "jffs2_flush_wbuf_gc() calls gc pass\n"));
ret = jffs2_garbage_collect_pass(c);
if (ret) {
/* GC failed. Flush it with padding instead */
- down(&c->alloc_sem);
+ mutex_lock(&c->alloc_sem);
down_write(&c->wbuf_sem);
ret = __jffs2_flush_wbuf(c, PAD_ACCOUNTING);
/* retry flushing wbuf in case jffs2_wbuf_recover
up_write(&c->wbuf_sem);
break;
}
- down(&c->alloc_sem);
+ mutex_lock(&c->alloc_sem);
}
D1(printk(KERN_DEBUG "jffs2_flush_wbuf_gc() ends...\n"));
- up(&c->alloc_sem);
+ mutex_unlock(&c->alloc_sem);
return ret;
}
JFFS2_SUMMARY_INODE_SIZE);
} else {
/* Locking pain */
- up(&f->sem);
+ mutex_unlock(&f->sem);
jffs2_complete_reservation(c);
ret = jffs2_reserve_space(c, sizeof(*ri) + datalen, &dummy,
alloc_mode, JFFS2_SUMMARY_INODE_SIZE);
- down(&f->sem);
+ mutex_lock(&f->sem);
}
if (!ret) {
JFFS2_SUMMARY_DIRENT_SIZE(namelen));
} else {
/* Locking pain */
- up(&f->sem);
+ mutex_unlock(&f->sem);
jffs2_complete_reservation(c);
ret = jffs2_reserve_space(c, sizeof(*rd) + namelen, &dummy,
alloc_mode, JFFS2_SUMMARY_DIRENT_SIZE(namelen));
- down(&f->sem);
+ mutex_lock(&f->sem);
}
if (!ret) {
D1(printk(KERN_DEBUG "jffs2_reserve_space returned %d\n", ret));
break;
}
- down(&f->sem);
+ mutex_lock(&f->sem);
datalen = min_t(uint32_t, writelen, PAGE_CACHE_SIZE - (offset & (PAGE_CACHE_SIZE-1)));
cdatalen = min_t(uint32_t, alloclen - sizeof(*ri), datalen);
if (IS_ERR(fn)) {
ret = PTR_ERR(fn);
- up(&f->sem);
+ mutex_unlock(&f->sem);
jffs2_complete_reservation(c);
if (!retried) {
/* Write error to be retried */
jffs2_mark_node_obsolete(c, fn->raw);
jffs2_free_full_dnode(fn);
- up(&f->sem);
+ mutex_unlock(&f->sem);
jffs2_complete_reservation(c);
break;
}
- up(&f->sem);
+ mutex_unlock(&f->sem);
jffs2_complete_reservation(c);
if (!datalen) {
printk(KERN_WARNING "Eep. We didn't actually write any data in jffs2_write_inode_range()\n");
JFFS2_SUMMARY_INODE_SIZE);
D1(printk(KERN_DEBUG "jffs2_do_create(): reserved 0x%x bytes\n", alloclen));
if (ret) {
- up(&f->sem);
+ mutex_unlock(&f->sem);
return ret;
}
if (IS_ERR(fn)) {
D1(printk(KERN_DEBUG "jffs2_write_dnode() failed\n"));
/* Eeek. Wave bye bye */
- up(&f->sem);
+ mutex_unlock(&f->sem);
jffs2_complete_reservation(c);
return PTR_ERR(fn);
}
*/
f->metadata = fn;
- up(&f->sem);
+ mutex_unlock(&f->sem);
jffs2_complete_reservation(c);
ret = jffs2_init_security(&f->vfs_inode, &dir_f->vfs_inode);
return -ENOMEM;
}
- down(&dir_f->sem);
+ mutex_lock(&dir_f->sem);
rd->magic = cpu_to_je16(JFFS2_MAGIC_BITMASK);
rd->nodetype = cpu_to_je16(JFFS2_NODETYPE_DIRENT);
/* dirent failed to write. Delete the inode normally
as if it were the final unlink() */
jffs2_complete_reservation(c);
- up(&dir_f->sem);
+ mutex_unlock(&dir_f->sem);
return PTR_ERR(fd);
}
jffs2_add_fd_to_list(c, fd, &dir_f->dents);
jffs2_complete_reservation(c);
- up(&dir_f->sem);
+ mutex_unlock(&dir_f->sem);
return 0;
}
return ret;
}
- down(&dir_f->sem);
+ mutex_lock(&dir_f->sem);
/* Build a deletion node */
rd->magic = cpu_to_je16(JFFS2_MAGIC_BITMASK);
if (IS_ERR(fd)) {
jffs2_complete_reservation(c);
- up(&dir_f->sem);
+ mutex_unlock(&dir_f->sem);
return PTR_ERR(fd);
}
/* File it. This will mark the old one obsolete. */
jffs2_add_fd_to_list(c, fd, &dir_f->dents);
- up(&dir_f->sem);
+ mutex_unlock(&dir_f->sem);
} else {
uint32_t nhash = full_name_hash(name, namelen);
fd = dir_f->dents;
/* We don't actually want to reserve any space, but we do
want to be holding the alloc_sem when we write to flash */
- down(&c->alloc_sem);
- down(&dir_f->sem);
+ mutex_lock(&c->alloc_sem);
+ mutex_lock(&dir_f->sem);
for (fd = dir_f->dents; fd; fd = fd->next) {
if (fd->nhash == nhash &&
break;
}
}
- up(&dir_f->sem);
+ mutex_unlock(&dir_f->sem);
}
/* dead_f is NULL if this was a rename not a real unlink */
pointing to an inode which didn't exist. */
if (dead_f && dead_f->inocache) {
- down(&dead_f->sem);
+ mutex_lock(&dead_f->sem);
if (S_ISDIR(OFNI_EDONI_2SFFJ(dead_f)->i_mode)) {
while (dead_f->dents) {
dead_f->inocache->nlink--;
/* NB: Caller must set inode nlink if appropriate */
- up(&dead_f->sem);
+ mutex_unlock(&dead_f->sem);
}
jffs2_complete_reservation(c);
return ret;
}
- down(&dir_f->sem);
+ mutex_lock(&dir_f->sem);
/* Build a deletion node */
rd->magic = cpu_to_je16(JFFS2_MAGIC_BITMASK);
if (IS_ERR(fd)) {
jffs2_complete_reservation(c);
- up(&dir_f->sem);
+ mutex_unlock(&dir_f->sem);
return PTR_ERR(fd);
}
jffs2_add_fd_to_list(c, fd, &dir_f->dents);
jffs2_complete_reservation(c);
- up(&dir_f->sem);
+ mutex_unlock(&dir_f->sem);
return 0;
}