}
spin_lock_init(&ctx->mmio_lock);
kref_init(&ctx->kref);
- init_rwsem(&ctx->state_sema);
+ mutex_init(&ctx->state_mutex);
init_MUTEX(&ctx->run_sema);
init_waitqueue_head(&ctx->ibox_wq);
init_waitqueue_head(&ctx->wbox_wq);
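The conversion is mechanical, because every rwsem primitive used here has exactly one mutex counterpart once the read/write distinction is dropped. For reference, the mapping this patch applies throughout (standard <linux/mutex.h> API):

/*
 * rwsem primitive              mutex replacement
 *
 * init_rwsem(&sem)             mutex_init(&mtx)
 * down_read(&sem)              mutex_lock(&mtx)
 * up_read(&sem)                mutex_unlock(&mtx)
 * down_write(&sem)             mutex_lock(&mtx)
 * up_write(&sem)               mutex_unlock(&mtx)
 * down_write_trylock(&sem)     mutex_trylock(&mtx)
 * downgrade_write(&sem)        (dropped; nothing to downgrade)
 */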
void destroy_spu_context(struct kref *kref)
{
struct spu_context *ctx;
ctx = container_of(kref, struct spu_context, kref);
- down_write(&ctx->state_sema);
+ mutex_lock(&ctx->state_mutex);
spu_deactivate(ctx);
- up_write(&ctx->state_sema);
+ mutex_unlock(&ctx->state_mutex);
spu_fini_csa(&ctx->csa);
if (ctx->gang)
spu_gang_remove_ctx(ctx->gang, ctx);
void spu_acquire(struct spu_context *ctx)
{
- down_read(&ctx->state_sema);
+ mutex_lock(&ctx->state_mutex);
}
void spu_release(struct spu_context *ctx)
{
- up_read(&ctx->state_sema);
+ mutex_unlock(&ctx->state_mutex);
}
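spu_acquire() and spu_release() are now plain lock/unlock wrappers. One semantic change is worth noting: under the rwsem, multiple readers could hold spu_acquire() concurrently; with a mutex, all acquirers serialize, which is also what makes the downgrade_write() dances below unnecessary. A minimal sketch of the usual caller bracket (the function here is hypothetical, not from the patch):

/* Hypothetical caller; sketches the acquire/release bracket only. */
static int example_check_state(struct spu_context *ctx)
{
	int saved;

	spu_acquire(ctx);		/* mutex_lock(&ctx->state_mutex) */
	saved = (ctx->state == SPU_STATE_SAVED);
	spu_release(ctx);		/* mutex_unlock(&ctx->state_mutex) */

	return saved;
}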
void spu_unmap_mappings(struct spu_context *ctx)

int spu_acquire_exclusive(struct spu_context *ctx)
{
int ret = 0;
- down_write(&ctx->state_sema);
+ mutex_lock(&ctx->state_mutex);
/* ctx is about to be freed, can't acquire any more */
if (!ctx->owner) {
		ret = -EINVAL;
		goto out;
	}

out:
if (ret)
- up_write(&ctx->state_sema);
+ mutex_unlock(&ctx->state_mutex);
return ret;
}
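spu_acquire_exclusive() keeps its error convention: it returns 0 with state_mutex held, or an error with the lock already dropped. A sketch of the expected pairing with spu_release_exclusive(), which appears in the spufs.h hunk below (the caller name is illustrative):

/* Illustrative caller, not part of the patch. */
static int example_exclusive_op(struct spu_context *ctx)
{
	int ret;

	ret = spu_acquire_exclusive(ctx);
	if (ret)
		return ret;		/* lock was released on the error path */

	/* ... userspace access excluded, state_mutex held ... */

	spu_release_exclusive(ctx);	/* now simply mutex_unlock() */
	return 0;
}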
int spu_acquire_runnable(struct spu_context *ctx)
{
int ret = 0;
- down_read(&ctx->state_sema);
+ mutex_lock(&ctx->state_mutex);
if (ctx->state == SPU_STATE_RUNNABLE) {
ctx->spu->prio = current->prio;
return 0;
}
- up_read(&ctx->state_sema);
- down_write(&ctx->state_sema);
/* ctx is about to be freed, can't acquire any more */
if (!ctx->owner) {
ret = -EINVAL;
goto out;
}
- downgrade_write(&ctx->state_sema);
/* On success, we return holding the lock */
-
return ret;
out:
/* Release here, to simplify calling code. */
- up_write(&ctx->state_sema);
+ mutex_unlock(&ctx->state_mutex);
return ret;
}
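The deleted lines show why the rwsem version was convoluted: take the read lock, upgrade to the write lock for activation, then downgrade_write() so the caller ended up holding a read lock either way. With a mutex there is only one lock state, so "return holding the lock" needs no downgrade, and callers are untouched. A sketch of the invariant a caller relies on (illustrative name):

/* Illustrative caller of spu_acquire_runnable(); not from the patch. */
static int example_run(struct spu_context *ctx)
{
	int ret = spu_acquire_runnable(ctx);

	if (ret)
		return ret;		/* error path already unlocked */

	/* context is runnable here and state_mutex is held */
	spu_release(ctx);
	return 0;
}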
void spu_acquire_saved(struct spu_context *ctx)
{
- down_read(&ctx->state_sema);
-
- if (ctx->state == SPU_STATE_SAVED)
- return;
-
- up_read(&ctx->state_sema);
- down_write(&ctx->state_sema);
-
+ mutex_lock(&ctx->state_mutex);
if (ctx->state == SPU_STATE_RUNNABLE)
spu_deactivate(ctx);
-
- downgrade_write(&ctx->state_sema);
}
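spu_acquire_saved() shrinks from a lock/upgrade/downgrade sequence to a single mutex_lock() plus an optional deactivate; either way it returns with the context saved and the lock held. A sketch of how a saved-state accessor would pair it with spu_release() (the accessor is hypothetical, and the field path assumes the usual spufs csa layout):

/* Hypothetical accessor; assumes the spufs csa layout for saved state. */
static u32 example_read_saved_status(struct spu_context *ctx)
{
	u32 status;

	spu_acquire_saved(ctx);		/* deactivates first if still runnable */
	status = ctx->csa.prob.spu_status_R;
	spu_release(ctx);

	return status;
}

The remaining hunks move to the scheduler (sched.c), beginning with the priority-wait path below.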
spu_add_wq(wq, &wait, prio);
if (!signal_pending(current)) {
- up_write(&ctx->state_sema);
+ mutex_unlock(&ctx->state_mutex);
pr_debug("%s: pid=%d prio=%d\n", __FUNCTION__,
current->pid, current->prio);
schedule();
- down_write(&ctx->state_sema);
+ mutex_lock(&ctx->state_mutex);
}
spu_del_wq(wq, &wait, prio);
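The wait path keeps the classic sleep-without-the-lock idiom, now spelled with the mutex: drop the lock before schedule(), retake it afterwards, and only then touch shared state again. A generic, self-contained sketch of the idiom (names are illustrative; the real code has already queued itself on the wait queue via spu_add_wq()):

#include <linux/mutex.h>
#include <linux/sched.h>

/* Generic sketch: never sleep while holding the lock that waiters need. */
static void sleep_unlocked(struct mutex *lock)
{
	if (!signal_pending(current)) {
		mutex_unlock(lock);	/* let the wake-up path take the lock */
		schedule();
		mutex_lock(lock);	/* reacquire before touching shared state */
	}
}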
void spu_yield(struct spu_context *ctx)
{
	struct spu *spu;
int need_yield = 0;
- if (down_write_trylock(&ctx->state_sema)) {
+ if (mutex_trylock(&ctx->state_mutex)) {
if ((spu = ctx->spu) != NULL) {
int best = sched_find_first_bit(spu_prio->bitmap);
			if (best < MAX_PRIO) {
				pr_debug("%s: yielding SPU %d NODE %d\n",
					 __FUNCTION__, spu->number, spu->node);
				spu_deactivate(ctx);
				ctx->state = SPU_STATE_SAVED;
				need_yield = 1;
			} else {
				spu->prio = MAX_PRIO;
}
}
- up_write(&ctx->state_sema);
+ mutex_unlock(&ctx->state_mutex);
}
if (unlikely(need_yield))
yield();
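In spu_yield(), down_write_trylock() maps directly onto mutex_trylock(): both return nonzero when the lock was taken and 0 on contention, so the surrounding control flow is unchanged. A small sketch of the opportunistic pattern (illustrative):

#include <linux/mutex.h>

/* Opportunistic locking: act only if the lock is free, never block here. */
static int try_adjust(struct mutex *lock)
{
	if (!mutex_trylock(lock))	/* returns 1 on success, 0 if contended */
		return 0;		/* somebody else holds it; skip quietly */

	/* ... inspect and adjust state ... */

	mutex_unlock(lock);
	return 1;
}

The final hunks update spufs.h: the include, the field declaration, and the spu_release_exclusive() helper.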
#define SPUFS_H
#include <linux/kref.h>
-#include <linux/rwsem.h>
+#include <linux/mutex.h>
#include <linux/spinlock.h>
#include <linux/fs.h>
u64 object_id; /* user space pointer for oprofile */
enum { SPU_STATE_RUNNABLE, SPU_STATE_SAVED } state;
- struct rw_semaphore state_sema;
+ struct mutex state_mutex;
struct semaphore run_sema;
struct mm_struct *owner;
static inline void spu_release_exclusive(struct spu_context *ctx)
{
- up_write(&ctx->state_sema);
+ mutex_unlock(&ctx->state_mutex);
}
int spu_activate(struct spu_context *ctx, u64 flags);