author     Arnd Bergmann <arnd@arndb.de>        2006-01-04 20:31:29 +0100
committer  Paul Mackerras <paulus@samba.org>    2006-01-09 15:44:45 +1100
commit     ce8ab8541203f6c7be5b2eeaa97f14f1d8d44e4f (patch)
tree       abdb994ac4ded9a90684f0cdc71ea58e6247ab43 /arch
parent     8837d9216f99048636fbb2c11347358e99e06181 (diff)
[PATCH] spufs: move spu_run call to its own file
The logic for sys_spu_run keeps growing, and it no longer
really belongs in file.c now that we have moved away from
regular file operations to our own syscall.

No functional change here.
Signed-off-by: Arnd Bergmann <arndb@de.ibm.com>
Signed-off-by: Paul Mackerras <paulus@samba.org>
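
For context on the syscall path mentioned above: sys_spu_run is what ultimately lands in the relocated spufs_run_spu() loop. Below is a minimal sketch of that path from user space, assuming a powerpc kernel with spufs mounted at /spu and __NR_spu_create/__NR_spu_run provided by <asm/unistd.h>; the context name, NPC value, and error handling are illustrative only and not part of this patch.

/*
 * Hedged sketch, not part of the patch: how user space reaches
 * spufs_run_spu() through sys_spu_run().  Assumes spufs is mounted
 * at /spu and that <asm/unistd.h> defines __NR_spu_create and
 * __NR_spu_run (powerpc only); names and values are illustrative.
 */
#define _GNU_SOURCE
#include <stdio.h>
#include <unistd.h>
#include <sys/syscall.h>
#include <asm/unistd.h>

int main(void)
{
	unsigned int npc = 0;		/* entry point in SPU local store */
	unsigned int status = 0;	/* SPU_STATUS_* bits on return */
	long ret;
	int ctx;

	/* spu_create() returns an fd referring to a spufs context directory. */
	ctx = syscall(__NR_spu_create, "/spu/example-ctx", 0, 0755);
	if (ctx < 0)
		return 1;

	/* ... load an SPU program into the context's "mem" file ... */

	/* Enters the kernel's sys_spu_run(), which calls spufs_run_spu(). */
	ret = syscall(__NR_spu_run, ctx, &npc, &status);
	printf("spu_run: ret=%ld npc=0x%x status=0x%x\n", ret, npc, status);
	return 0;
}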
Diffstat (limited to 'arch')
-rw-r--r--  arch/powerpc/platforms/cell/spufs/Makefile |   2
-rw-r--r--  arch/powerpc/platforms/cell/spufs/file.c   | 152
-rw-r--r--  arch/powerpc/platforms/cell/spufs/run.c    | 131
-rw-r--r--  arch/powerpc/platforms/cell/spufs/spufs.h  |  28
4 files changed, 160 insertions(+), 153 deletions(-)
diff --git a/arch/powerpc/platforms/cell/spufs/Makefile b/arch/powerpc/platforms/cell/spufs/Makefile
index 9bfaba8791e3..a7cddf40e3d9 100644
--- a/arch/powerpc/platforms/cell/spufs/Makefile
+++ b/arch/powerpc/platforms/cell/spufs/Makefile
@@ -1,6 +1,6 @@
 obj-$(CONFIG_SPU_FS) += spufs.o
 spufs-y += inode.o file.o context.o switch.o syscalls.o
-spufs-y += sched.o backing_ops.o hw_ops.o
+spufs-y += sched.o backing_ops.o hw_ops.o run.o
 
 # Rules to build switch.o with the help of SPU tool chain
 SPU_CROSS	:= spu-
diff --git a/arch/powerpc/platforms/cell/spufs/file.c b/arch/powerpc/platforms/cell/spufs/file.c
index e63426822fd5..dfa649c9b956 100644
--- a/arch/powerpc/platforms/cell/spufs/file.c
+++ b/arch/powerpc/platforms/cell/spufs/file.c
@@ -304,34 +304,6 @@ static struct file_operations spufs_mbox_stat_fops = {
 	.read	= spufs_mbox_stat_read,
 };
 
-/*
- * spufs_wait
- *	Same as wait_event_interruptible(), except that here
- *	we need to call spu_release(ctx) before sleeping, and
- *	then spu_acquire(ctx) when awoken.
- */
-
-#define spufs_wait(wq, condition)					\
-({									\
-	int __ret = 0;							\
-	DEFINE_WAIT(__wait);						\
-	for (;;) {							\
-		prepare_to_wait(&(wq), &__wait, TASK_INTERRUPTIBLE);	\
-		if (condition)						\
-			break;						\
-		if (!signal_pending(current)) {				\
-			spu_release(ctx);				\
-			schedule();					\
-			spu_acquire(ctx);				\
-			continue;					\
-		}							\
-		__ret = -ERESTARTSYS;					\
-		break;							\
-	}								\
-	finish_wait(&(wq), &__wait);					\
-	__ret;								\
-})
-
 /* low-level ibox access function */
 size_t spu_ibox_read(struct spu_context *ctx, u32 *data)
 {
@@ -529,130 +501,6 @@ static struct file_operations spufs_wbox_stat_fops = {
 	.read	= spufs_wbox_stat_read,
 };
 
-/* interrupt-level stop callback function. */
-void spufs_stop_callback(struct spu *spu)
-{
-	struct spu_context *ctx = spu->ctx;
-
-	wake_up_all(&ctx->stop_wq);
-}
-
-static inline int spu_stopped(struct spu_context *ctx, u32 * stat)
-{
-	struct spu *spu;
-	u64 pte_fault;
-
-	*stat = ctx->ops->status_read(ctx);
-	if (ctx->state != SPU_STATE_RUNNABLE)
-		return 1;
-	spu = ctx->spu;
-	pte_fault = spu->dsisr &
-	    (MFC_DSISR_PTE_NOT_FOUND | MFC_DSISR_ACCESS_DENIED);
-	return (!(*stat & 0x1) || pte_fault || spu->class_0_pending) ? 1 : 0;
-}
-
-static inline int spu_run_init(struct spu_context *ctx, u32 * npc,
-			       u32 * status)
-{
-	int ret;
-
-	if ((ret = spu_acquire_runnable(ctx)) != 0)
-		return ret;
-	ctx->ops->npc_write(ctx, *npc);
-	ctx->ops->runcntl_write(ctx, SPU_RUNCNTL_RUNNABLE);
-	return 0;
-}
-
-static inline int spu_run_fini(struct spu_context *ctx, u32 * npc,
-			       u32 * status)
-{
-	int ret = 0;
-
-	*status = ctx->ops->status_read(ctx);
-	*npc = ctx->ops->npc_read(ctx);
-	spu_release(ctx);
-
-	if (signal_pending(current))
-		ret = -ERESTARTSYS;
-	if (unlikely(current->ptrace & PT_PTRACED)) {
-		if ((*status & SPU_STATUS_STOPPED_BY_STOP)
-		    && (*status >> SPU_STOP_STATUS_SHIFT) == 0x3fff) {
-			force_sig(SIGTRAP, current);
-			ret = -ERESTARTSYS;
-		}
-	}
-	return ret;
-}
-
-static inline int spu_reacquire_runnable(struct spu_context *ctx, u32 *npc,
-					 u32 *status)
-{
-	int ret;
-
-	if ((ret = spu_run_fini(ctx, npc, status)) != 0)
-		return ret;
-	if (*status & (SPU_STATUS_STOPPED_BY_STOP |
-		       SPU_STATUS_STOPPED_BY_HALT)) {
-		return *status;
-	}
-	if ((ret = spu_run_init(ctx, npc, status)) != 0)
-		return ret;
-	return 0;
-}
-
-static inline int spu_process_events(struct spu_context *ctx)
-{
-	struct spu *spu = ctx->spu;
-	u64 pte_fault = MFC_DSISR_PTE_NOT_FOUND | MFC_DSISR_ACCESS_DENIED;
-	int ret = 0;
-
-	if (spu->dsisr & pte_fault)
-		ret = spu_irq_class_1_bottom(spu);
-	if (spu->class_0_pending)
-		ret = spu_irq_class_0_bottom(spu);
-	if (!ret && signal_pending(current))
-		ret = -ERESTARTSYS;
-	return ret;
-}
-
-long spufs_run_spu(struct file *file, struct spu_context *ctx,
-		   u32 * npc, u32 * status)
-{
-	int ret;
-
-	if (down_interruptible(&ctx->run_sema))
-		return -ERESTARTSYS;
-
-	ret = spu_run_init(ctx, npc, status);
-	if (ret)
-		goto out;
-
-	do {
-		ret = spufs_wait(ctx->stop_wq, spu_stopped(ctx, status));
-		if (unlikely(ret))
-			break;
-		if (unlikely(ctx->state != SPU_STATE_RUNNABLE)) {
-			ret = spu_reacquire_runnable(ctx, npc, status);
-			if (ret)
-				goto out;
-			continue;
-		}
-		ret = spu_process_events(ctx);
-
-	} while (!ret && !(*status & (SPU_STATUS_STOPPED_BY_STOP |
-				      SPU_STATUS_STOPPED_BY_HALT)));
-
-	ctx->ops->runcntl_stop(ctx);
-	ret = spu_run_fini(ctx, npc, status);
-	if (!ret)
-		ret = *status;
-	spu_yield(ctx);
-
-out:
-	up(&ctx->run_sema);
-	return ret;
-}
-
 static ssize_t spufs_signal1_read(struct file *file, char __user *buf,
 			size_t len, loff_t *pos)
 {
diff --git a/arch/powerpc/platforms/cell/spufs/run.c b/arch/powerpc/platforms/cell/spufs/run.c
new file mode 100644
index 000000000000..18ea8866c61a
--- /dev/null
+++ b/arch/powerpc/platforms/cell/spufs/run.c
@@ -0,0 +1,131 @@
+#include <linux/wait.h>
+#include <linux/ptrace.h>
+
+#include <asm/spu.h>
+
+#include "spufs.h"
+
+/* interrupt-level stop callback function. */
+void spufs_stop_callback(struct spu *spu)
+{
+	struct spu_context *ctx = spu->ctx;
+
+	wake_up_all(&ctx->stop_wq);
+}
+
+static inline int spu_stopped(struct spu_context *ctx, u32 * stat)
+{
+	struct spu *spu;
+	u64 pte_fault;
+
+	*stat = ctx->ops->status_read(ctx);
+	if (ctx->state != SPU_STATE_RUNNABLE)
+		return 1;
+	spu = ctx->spu;
+	pte_fault = spu->dsisr &
+	    (MFC_DSISR_PTE_NOT_FOUND | MFC_DSISR_ACCESS_DENIED);
+	return (!(*stat & 0x1) || pte_fault || spu->class_0_pending) ? 1 : 0;
+}
+
+static inline int spu_run_init(struct spu_context *ctx, u32 * npc,
+			       u32 * status)
+{
+	int ret;
+
+	if ((ret = spu_acquire_runnable(ctx)) != 0)
+		return ret;
+	ctx->ops->npc_write(ctx, *npc);
+	ctx->ops->runcntl_write(ctx, SPU_RUNCNTL_RUNNABLE);
+	return 0;
+}
+
+static inline int spu_run_fini(struct spu_context *ctx, u32 * npc,
+			       u32 * status)
+{
+	int ret = 0;
+
+	*status = ctx->ops->status_read(ctx);
+	*npc = ctx->ops->npc_read(ctx);
+	spu_release(ctx);
+
+	if (signal_pending(current))
+		ret = -ERESTARTSYS;
+	if (unlikely(current->ptrace & PT_PTRACED)) {
+		if ((*status & SPU_STATUS_STOPPED_BY_STOP)
+		    && (*status >> SPU_STOP_STATUS_SHIFT) == 0x3fff) {
+			force_sig(SIGTRAP, current);
+			ret = -ERESTARTSYS;
+		}
+	}
+	return ret;
+}
+
+static inline int spu_reacquire_runnable(struct spu_context *ctx, u32 *npc,
+					 u32 *status)
+{
+	int ret;
+
+	if ((ret = spu_run_fini(ctx, npc, status)) != 0)
+		return ret;
+	if (*status & (SPU_STATUS_STOPPED_BY_STOP |
+		       SPU_STATUS_STOPPED_BY_HALT)) {
+		return *status;
+	}
+	if ((ret = spu_run_init(ctx, npc, status)) != 0)
+		return ret;
+	return 0;
+}
+
+static inline int spu_process_events(struct spu_context *ctx)
+{
+	struct spu *spu = ctx->spu;
+	u64 pte_fault = MFC_DSISR_PTE_NOT_FOUND | MFC_DSISR_ACCESS_DENIED;
+	int ret = 0;
+
+	if (spu->dsisr & pte_fault)
+		ret = spu_irq_class_1_bottom(spu);
+	if (spu->class_0_pending)
+		ret = spu_irq_class_0_bottom(spu);
+	if (!ret && signal_pending(current))
+		ret = -ERESTARTSYS;
+	return ret;
+}
+
+long spufs_run_spu(struct file *file, struct spu_context *ctx,
+		   u32 * npc, u32 * status)
+{
+	int ret;
+
+	if (down_interruptible(&ctx->run_sema))
+		return -ERESTARTSYS;
+
+	ret = spu_run_init(ctx, npc, status);
+	if (ret)
+		goto out;
+
+	do {
+		ret = spufs_wait(ctx->stop_wq, spu_stopped(ctx, status));
+		if (unlikely(ret))
+			break;
+		if (unlikely(ctx->state != SPU_STATE_RUNNABLE)) {
+			ret = spu_reacquire_runnable(ctx, npc, status);
+			if (ret)
+				goto out;
+			continue;
+		}
+		ret = spu_process_events(ctx);
+
+	} while (!ret && !(*status & (SPU_STATUS_STOPPED_BY_STOP |
+				      SPU_STATUS_STOPPED_BY_HALT)));
+
+	ctx->ops->runcntl_stop(ctx);
+	ret = spu_run_fini(ctx, npc, status);
+	if (!ret)
+		ret = *status;
+	spu_yield(ctx);
+
+out:
+	up(&ctx->run_sema);
+	return ret;
+}
+
diff --git a/arch/powerpc/platforms/cell/spufs/spufs.h b/arch/powerpc/platforms/cell/spufs/spufs.h
index 48961ac584a1..c715ed0c4014 100644
--- a/arch/powerpc/platforms/cell/spufs/spufs.h
+++ b/arch/powerpc/platforms/cell/spufs/spufs.h
@@ -124,6 +124,34 @@ void spu_yield(struct spu_context *ctx);
 int __init spu_sched_init(void);
 void __exit spu_sched_exit(void);
 
+/*
+ * spufs_wait
+ *	Same as wait_event_interruptible(), except that here
+ *	we need to call spu_release(ctx) before sleeping, and
+ *	then spu_acquire(ctx) when awoken.
+ */
+
+#define spufs_wait(wq, condition)					\
+({									\
+	int __ret = 0;							\
+	DEFINE_WAIT(__wait);						\
+	for (;;) {							\
+		prepare_to_wait(&(wq), &__wait, TASK_INTERRUPTIBLE);	\
+		if (condition)						\
+			break;						\
+		if (!signal_pending(current)) {				\
+			spu_release(ctx);				\
+			schedule();					\
+			spu_acquire(ctx);				\
+			continue;					\
+		}							\
+		__ret = -ERESTARTSYS;					\
+		break;							\
+	}								\
+	finish_wait(&(wq), &__wait);					\
+	__ret;								\
+})
+
 size_t spu_wbox_write(struct spu_context *ctx, u32 data);
 size_t spu_ibox_read(struct spu_context *ctx, u32 *data);
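
One design note on the spufs_wait() macro that this patch promotes into spufs.h: it is deliberately non-hygienic and expects a variable named ctx (a struct spu_context *) to be in scope at the expansion site, because it calls spu_release(ctx)/spu_acquire(ctx) around schedule(). The sketch below is a hypothetical caller written only to illustrate that contract; the helper name and wake condition are not from the patch.

/*
 * Illustrative only -- not part of the patch.  A hypothetical caller
 * of spufs_wait(): the macro drops the context lock while sleeping
 * and re-takes it before re-checking the condition, so "ctx" must be
 * a locked struct spu_context * local at the expansion site.
 */
static int example_wait_for_stop(struct spu_context *ctx)
{
	int ret;

	spu_acquire(ctx);	/* hold the context lock, as spufs_run_spu() does */
	ret = spufs_wait(ctx->stop_wq,
			 ctx->ops->status_read(ctx) & SPU_STATUS_STOPPED_BY_STOP);
	/*
	 * 0 when the condition became true, -ERESTARTSYS on a signal;
	 * in both cases the context lock is held again at this point.
	 */
	spu_release(ctx);
	return ret;
}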