author      Christoph Hellwig <hch@lst.de>          2007-02-13 21:36:50 +0100
committer   Arnd Bergmann <arnd@klappe.arndb.de>    2007-02-13 21:52:37 +0100
commit      650f8b0291ecd0abdeadbd0ff3d70c3538e55405 (patch)
tree        3d3df208380ac7b2fafdd03b5fbcc01d2dedd934 /arch
parent      202557d29eae528f464652e92085f3b19b05a0a7 (diff)
[POWERPC] spufs: simplify state_mutex
The r/w semaphore to lock the spus was overkill and can be replaced with a mutex to make it faster, simpler and easier to debug. It also helps to allow making most of spufs interruptible in future patches.

Signed-off-by: Christoph Hellwig <hch@lst.de>
Signed-off-by: Arnd Bergmann <arnd.bergmann@de.ibm.com>
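A rough sketch of the conversion (not part of the patch; the struct is trimmed to the one relevant field and the helper names are made up for illustration): the read/write lock pair collapses into a single mutex that every acquire path takes, whether or not it intends to change the context state.

#include <linux/mutex.h>

struct spu_context_sketch {
        struct mutex state_mutex;       /* was: struct rw_semaphore state_sema */
};

/* was: down_read(&ctx->state_sema) for readers, down_write() for writers */
static void sketch_acquire(struct spu_context_sketch *ctx)
{
        mutex_lock(&ctx->state_mutex);
}

/* was: up_read()/up_write(), depending on how the lock was taken */
static void sketch_release(struct spu_context_sketch *ctx)
{
        mutex_unlock(&ctx->state_mutex);
}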
Diffstat (limited to 'arch')
-rw-r--r--  arch/powerpc/platforms/cell/spufs/context.c  | 33
-rw-r--r--  arch/powerpc/platforms/cell/spufs/sched.c    |  8
-rw-r--r--  arch/powerpc/platforms/cell/spufs/spufs.h    |  6
3 files changed, 17 insertions, 30 deletions
diff --git a/arch/powerpc/platforms/cell/spufs/context.c b/arch/powerpc/platforms/cell/spufs/context.c
index ccffc449763b..c9aab9b1cd8a 100644
--- a/arch/powerpc/platforms/cell/spufs/context.c
+++ b/arch/powerpc/platforms/cell/spufs/context.c
@@ -42,7 +42,7 @@ struct spu_context *alloc_spu_context(struct spu_gang *gang)
}
spin_lock_init(&ctx->mmio_lock);
kref_init(&ctx->kref);
- init_rwsem(&ctx->state_sema);
+ mutex_init(&ctx->state_mutex);
init_MUTEX(&ctx->run_sema);
init_waitqueue_head(&ctx->ibox_wq);
init_waitqueue_head(&ctx->wbox_wq);
@@ -65,9 +65,9 @@ void destroy_spu_context(struct kref *kref)
{
struct spu_context *ctx;
ctx = container_of(kref, struct spu_context, kref);
- down_write(&ctx->state_sema);
+ mutex_lock(&ctx->state_mutex);
spu_deactivate(ctx);
- up_write(&ctx->state_sema);
+ mutex_unlock(&ctx->state_mutex);
spu_fini_csa(&ctx->csa);
if (ctx->gang)
spu_gang_remove_ctx(ctx->gang, ctx);
@@ -98,12 +98,12 @@ void spu_forget(struct spu_context *ctx)
void spu_acquire(struct spu_context *ctx)
{
- down_read(&ctx->state_sema);
+ mutex_lock(&ctx->state_mutex);
}
void spu_release(struct spu_context *ctx)
{
- up_read(&ctx->state_sema);
+ mutex_unlock(&ctx->state_mutex);
}
void spu_unmap_mappings(struct spu_context *ctx)
@@ -128,7 +128,7 @@ int spu_acquire_exclusive(struct spu_context *ctx)
{
int ret = 0;
- down_write(&ctx->state_sema);
+ mutex_lock(&ctx->state_mutex);
/* ctx is about to be freed, can't acquire any more */
if (!ctx->owner) {
ret = -EINVAL;
@@ -146,7 +146,7 @@ int spu_acquire_exclusive(struct spu_context *ctx)
out:
if (ret)
- up_write(&ctx->state_sema);
+ mutex_unlock(&ctx->state_mutex);
return ret;
}
@@ -154,14 +154,12 @@ int spu_acquire_runnable(struct spu_context *ctx)
{
int ret = 0;
- down_read(&ctx->state_sema);
+ mutex_lock(&ctx->state_mutex);
if (ctx->state == SPU_STATE_RUNNABLE) {
ctx->spu->prio = current->prio;
return 0;
}
- up_read(&ctx->state_sema);
- down_write(&ctx->state_sema);
/* ctx is about to be freed, can't acquire any more */
if (!ctx->owner) {
ret = -EINVAL;
@@ -174,29 +172,18 @@ int spu_acquire_runnable(struct spu_context *ctx)
goto out;
}
- downgrade_write(&ctx->state_sema);
/* On success, we return holding the lock */
-
return ret;
out:
/* Release here, to simplify calling code. */
- up_write(&ctx->state_sema);
+ mutex_unlock(&ctx->state_mutex);
return ret;
}
void spu_acquire_saved(struct spu_context *ctx)
{
- down_read(&ctx->state_sema);
-
- if (ctx->state == SPU_STATE_SAVED)
- return;
-
- up_read(&ctx->state_sema);
- down_write(&ctx->state_sema);
-
+ mutex_lock(&ctx->state_mutex);
if (ctx->state == SPU_STATE_RUNNABLE)
spu_deactivate(ctx);
-
- downgrade_write(&ctx->state_sema);
}
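The biggest simplification is in spu_acquire_saved() and spu_acquire_runnable(): with the rw_semaphore, a caller that might need to change state had to take the read lock, drop it, retake it for write, re-check the state, and finally downgrade back to a read lock. The sketch below reconstructs that removed pattern from the deleted lines above (abbreviated; it assumes the spu_context definitions from spufs.h) to show what a single mutex_lock() now replaces.

/* Old pattern, reconstructed from the lines deleted above: */
void spu_acquire_saved_old(struct spu_context *ctx)
{
        down_read(&ctx->state_sema);
        if (ctx->state == SPU_STATE_SAVED)
                return;                         /* return holding the read lock */

        up_read(&ctx->state_sema);              /* drop it ... */
        down_write(&ctx->state_sema);           /* ... and retake it for write */
        if (ctx->state == SPU_STATE_RUNNABLE)   /* state may have changed in between */
                spu_deactivate(ctx);
        downgrade_write(&ctx->state_sema);      /* hand the caller a read lock again */
}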
diff --git a/arch/powerpc/platforms/cell/spufs/sched.c b/arch/powerpc/platforms/cell/spufs/sched.c
index 1d330f67f5ae..c61a34b14083 100644
--- a/arch/powerpc/platforms/cell/spufs/sched.c
+++ b/arch/powerpc/platforms/cell/spufs/sched.c
@@ -233,11 +233,11 @@ static void spu_prio_wait(struct spu_context *ctx, u64 flags)
spu_add_wq(wq, &wait, prio);
if (!signal_pending(current)) {
- up_write(&ctx->state_sema);
+ mutex_unlock(&ctx->state_mutex);
pr_debug("%s: pid=%d prio=%d\n", __FUNCTION__,
current->pid, current->prio);
schedule();
- down_write(&ctx->state_sema);
+ mutex_lock(&ctx->state_mutex);
}
spu_del_wq(wq, &wait, prio);
@@ -334,7 +334,7 @@ void spu_yield(struct spu_context *ctx)
struct spu *spu;
int need_yield = 0;
- if (down_write_trylock(&ctx->state_sema)) {
+ if (mutex_trylock(&ctx->state_mutex)) {
if ((spu = ctx->spu) != NULL) {
int best = sched_find_first_bit(spu_prio->bitmap);
if (best < MAX_PRIO) {
@@ -346,7 +346,7 @@ void spu_yield(struct spu_context *ctx)
spu->prio = MAX_PRIO;
}
}
- up_write(&ctx->state_sema);
+ mutex_unlock(&ctx->state_mutex);
}
if (unlikely(need_yield))
yield();
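In spu_yield() the trylock conversion relies on mutex_trylock() following the same return convention as down_write_trylock(): non-zero when the lock was taken, zero when it is contended, so the "skip the bookkeeping if somebody else holds the lock" shape is unchanged. A minimal sketch of that shape (the helper name is hypothetical, not from the patch):

#include <linux/mutex.h>

/* Hypothetical helper: only touch the shared state if the lock is free. */
static int sketch_try_bookkeeping(struct mutex *lock)
{
        if (!mutex_trylock(lock))
                return 0;               /* contended: skip, as spu_yield() does */
        /* ... inspect priorities and decide whether a yield is needed ... */
        mutex_unlock(lock);
        return 1;
}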
diff --git a/arch/powerpc/platforms/cell/spufs/spufs.h b/arch/powerpc/platforms/cell/spufs/spufs.h
index 9b44abe921cc..de2401afb226 100644
--- a/arch/powerpc/platforms/cell/spufs/spufs.h
+++ b/arch/powerpc/platforms/cell/spufs/spufs.h
@@ -23,7 +23,7 @@
#define SPUFS_H
#include <linux/kref.h>
-#include <linux/rwsem.h>
+#include <linux/mutex.h>
#include <linux/spinlock.h>
#include <linux/fs.h>
@@ -53,7 +53,7 @@ struct spu_context {
u64 object_id; /* user space pointer for oprofile */
enum { SPU_STATE_RUNNABLE, SPU_STATE_SAVED } state;
- struct rw_semaphore state_sema;
+ struct mutex state_mutex;
struct semaphore run_sema;
struct mm_struct *owner;
@@ -173,7 +173,7 @@ int spu_acquire_exclusive(struct spu_context *ctx);
static inline void spu_release_exclusive(struct spu_context *ctx)
{
- up_write(&ctx->state_sema);
+ mutex_unlock(&ctx->state_mutex);
}
int spu_activate(struct spu_context *ctx, u64 flags);