Diffstat (limited to 'arch/powerpc')
-rw-r--r--  arch/powerpc/boot/Makefile                  |  2
-rw-r--r--  arch/powerpc/include/asm/sections.h         |  3
-rw-r--r--  arch/powerpc/kernel/module_64.c             | 13
-rw-r--r--  arch/powerpc/platforms/cell/spufs/sched.c   | 29
4 files changed, 39 insertions(+), 8 deletions(-)
diff --git a/arch/powerpc/boot/Makefile b/arch/powerpc/boot/Makefile
index 14174aa24074..717a3bc1352e 100644
--- a/arch/powerpc/boot/Makefile
+++ b/arch/powerpc/boot/Makefile
@@ -49,7 +49,7 @@ zlib := inffast.c inflate.c inftrees.c
zlibheader := inffast.h inffixed.h inflate.h inftrees.h infutil.h
zliblinuxheader := zlib.h zconf.h zutil.h
-$(addprefix $(obj)/,$(zlib) gunzip_util.o main.o): \
+$(addprefix $(obj)/,$(zlib) cuboot-c2k.o gunzip_util.o main.o prpmc2800.o): \
$(addprefix $(obj)/,$(zliblinuxheader)) $(addprefix $(obj)/,$(zlibheader))
src-libfdt := fdt.c fdt_ro.c fdt_wip.c fdt_sw.c fdt_rw.c fdt_strerror.c
diff --git a/arch/powerpc/include/asm/sections.h b/arch/powerpc/include/asm/sections.h
index 916018e425c4..7710e9e6660f 100644
--- a/arch/powerpc/include/asm/sections.h
+++ b/arch/powerpc/include/asm/sections.h
@@ -16,6 +16,9 @@ static inline int in_kernel_text(unsigned long addr)
return 0;
}
+#undef dereference_function_descriptor
+void *dereference_function_descriptor(void *);
+
#endif
#endif /* __KERNEL__ */
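
This override is needed because the 64-bit powerpc ABI stores function descriptors in the .opd section: a function pointer refers to the descriptor, not to the code itself. For comparison, the generic definition being #undef'd here is essentially an identity mapping; a rough sketch of that fallback (in later kernels it lives in include/asm-generic/sections.h):

#ifndef dereference_function_descriptor
/* On most architectures a function pointer already points at code,
 * so "dereferencing the descriptor" is a no-op. */
#define dereference_function_descriptor(p) (p)
#endif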
diff --git a/arch/powerpc/kernel/module_64.c b/arch/powerpc/kernel/module_64.c
index ee6a2982d567..ad79de272ff3 100644
--- a/arch/powerpc/kernel/module_64.c
+++ b/arch/powerpc/kernel/module_64.c
@@ -21,8 +21,9 @@
#include <linux/err.h>
#include <linux/vmalloc.h>
#include <linux/bug.h>
+#include <linux/uaccess.h>
#include <asm/module.h>
-#include <asm/uaccess.h>
+#include <asm/sections.h>
#include <asm/firmware.h>
#include <asm/code-patching.h>
#include <linux/sort.h>
@@ -451,3 +452,13 @@ int apply_relocate_add(Elf64_Shdr *sechdrs,
return 0;
}
+
+void *dereference_function_descriptor(void *ptr)
+{
+ struct ppc64_opd_entry *desc = ptr;
+ void *p;
+
+ if (!probe_kernel_address(&desc->funcaddr, p))
+ ptr = p;
+ return ptr;
+}
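
The new helper resolves a ppc64 function descriptor to the underlying code address, using probe_kernel_address() so that a bad pointer simply falls through and the original value is returned instead of faulting. A hypothetical caller, sketching how symbol-printing code would use it (the function name below is illustrative, not part of this patch):

#include <linux/kallsyms.h>
#include <asm/sections.h>

/* Illustrative only: resolve the descriptor first so the symbol lookup
 * sees the text address rather than the .opd entry. */
static void show_entry_point(void *fn)
{
	void *addr = dereference_function_descriptor(fn);

	print_symbol("entry point at %s\n", (unsigned long)addr);
}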
diff --git a/arch/powerpc/platforms/cell/spufs/sched.c b/arch/powerpc/platforms/cell/spufs/sched.c
index 1c1b627ee843..67595bc380dc 100644
--- a/arch/powerpc/platforms/cell/spufs/sched.c
+++ b/arch/powerpc/platforms/cell/spufs/sched.c
@@ -643,9 +643,10 @@ static struct spu *find_victim(struct spu_context *ctx)
!(tmp->flags & SPU_CREATE_NOSCHED) &&
(!victim || tmp->prio > victim->prio)) {
victim = spu->ctx;
- get_spu_context(victim);
}
}
+ if (victim)
+ get_spu_context(victim);
mutex_unlock(&cbe_spu_info[node].list_mutex);
if (victim) {
@@ -727,17 +728,33 @@ static void spu_schedule(struct spu *spu, struct spu_context *ctx)
/* not a candidate for interruptible because it's called either
from the scheduler thread or from spu_deactivate */
mutex_lock(&ctx->state_mutex);
- __spu_schedule(spu, ctx);
+ if (ctx->state == SPU_STATE_SAVED)
+ __spu_schedule(spu, ctx);
spu_release(ctx);
}
-static void spu_unschedule(struct spu *spu, struct spu_context *ctx)
+/**
+ * spu_unschedule - remove a context from a spu, and possibly release it.
+ * @spu: The SPU to unschedule from
+ * @ctx: The context currently scheduled on the SPU
+ * @free_spu: Whether to free the SPU for other contexts
+ *
+ * Unbinds the context @ctx from the SPU @spu. If @free_spu is non-zero, the
+ * SPU is made available for other contexts (ie, may be returned by
+ * spu_get_idle). If this is zero, the caller is expected to schedule another
+ * context to this spu.
+ *
+ * Should be called with ctx->state_mutex held.
+ */
+static void spu_unschedule(struct spu *spu, struct spu_context *ctx,
+ int free_spu)
{
int node = spu->node;
mutex_lock(&cbe_spu_info[node].list_mutex);
cbe_spu_info[node].nr_active--;
- spu->alloc_state = SPU_FREE;
+ if (free_spu)
+ spu->alloc_state = SPU_FREE;
spu_unbind_context(spu, ctx);
ctx->stats.invol_ctx_switch++;
spu->stats.invol_ctx_switch++;
@@ -837,7 +854,7 @@ static int __spu_deactivate(struct spu_context *ctx, int force, int max_prio)
if (spu) {
new = grab_runnable_context(max_prio, spu->node);
if (new || force) {
- spu_unschedule(spu, ctx);
+ spu_unschedule(spu, ctx, new == NULL);
if (new) {
if (new->flags & SPU_CREATE_NOSCHED)
wake_up(&new->stop_wq);
@@ -910,7 +927,7 @@ static noinline void spusched_tick(struct spu_context *ctx)
new = grab_runnable_context(ctx->prio + 1, spu->node);
if (new) {
- spu_unschedule(spu, ctx);
+ spu_unschedule(spu, ctx, 0);
if (test_bit(SPU_SCHED_SPU_RUN, &ctx->sched_flags))
spu_add_to_rq(ctx);
} else {
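
The free_spu argument lets the scheduler distinguish the two unschedule cases described in the kernel-doc comment above. A condensed sketch of how a caller chooses it, as if written inside sched.c (the wrapper name is illustrative only, and the caller is assumed to hold ctx->state_mutex):

/* Illustrative only: "new" is a context that will be bound to this SPU
 * immediately afterwards, or NULL if there is no runnable replacement. */
static void unschedule_and_replace(struct spu *spu, struct spu_context *ctx,
				   struct spu_context *new)
{
	/* Keep the SPU reserved when it is about to be reused; only mark
	 * it SPU_FREE when nobody is waiting to take over. */
	spu_unschedule(spu, ctx, new == NULL);

	if (new)
		spu_schedule(spu, new);
}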