-rw-r--r--  arch/sh/include/asm/fpu.h     1
-rw-r--r--  arch/sh/kernel/process_32.c  10
2 files changed, 4 insertions(+), 7 deletions(-)
diff --git a/arch/sh/include/asm/fpu.h b/arch/sh/include/asm/fpu.h
index d7709c06fac4..fb6bbb9b1cc8 100644
--- a/arch/sh/include/asm/fpu.h
+++ b/arch/sh/include/asm/fpu.h
@@ -25,6 +25,7 @@ void fpu_state_restore(struct pt_regs *regs);
 #define save_fpu(tsk) do { } while (0)
 #define release_fpu(regs) do { } while (0)
 #define grab_fpu(regs) do { } while (0)
+#define fpu_state_restore(regs) do { } while (0)
 #endif
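
The hunk above adds a no-op stub so that kernels built without CONFIG_SH_FPU still compile every caller of fpu_state_restore(): the call simply expands to nothing, which is what allows the #ifdef guards in process_32.c below to be dropped. A minimal standalone sketch of that stub-macro pattern follows (this is not the actual kernel header; CONFIG_DEMO_FPU and demo_fpu_restore() are made-up names for illustration):

/*
 * Sketch of the "stub macro when the feature is compiled out" pattern:
 * callers never need their own #ifdef, because a no-op definition with
 * the same name exists in the disabled configuration.
 */
#include <stdio.h>

#ifdef CONFIG_DEMO_FPU
void demo_fpu_restore(int cpu)
{
	printf("restoring FPU state on cpu %d\n", cpu);
}
#else
/* Feature disabled: the call compiles away to nothing. */
#define demo_fpu_restore(cpu) do { } while (0)
#endif

int main(void)
{
	/* No #ifdef needed at the call site in either configuration. */
	demo_fpu_restore(0);
	return 0;
}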
diff --git a/arch/sh/kernel/process_32.c b/arch/sh/kernel/process_32.c
index d721f9297c09..d8af889366a4 100644
--- a/arch/sh/kernel/process_32.c
+++ b/arch/sh/kernel/process_32.c
@@ -300,13 +300,11 @@ __switch_to(struct task_struct *prev, struct task_struct *next)
 {
 	struct thread_struct *next_t = &next->thread;
 
-#if defined(CONFIG_SH_FPU)
 	unlazy_fpu(prev, task_pt_regs(prev));
 
 	/* we're going to use this soon, after a few expensive things */
 	if (next->fpu_counter > 5)
 		prefetch(&next_t->fpu.hard);
-#endif
 
 #ifdef CONFIG_MMU
 	/*
@@ -337,15 +335,13 @@ __switch_to(struct task_struct *prev, struct task_struct *next)
 #endif
 	}
 
-#if defined(CONFIG_SH_FPU)
-	/* If the task has used fpu the last 5 timeslices, just do a full
+	/*
+	 * If the task has used fpu the last 5 timeslices, just do a full
 	 * restore of the math state immediately to avoid the trap; the
 	 * chances of needing FPU soon are obviously high now
 	 */
-	if (next->fpu_counter > 5) {
+	if (next->fpu_counter > 5)
 		fpu_state_restore(task_pt_regs(next));
-	}
-#endif
 
 	return prev;
 }
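
For context, a hypothetical standalone sketch of the fpu_counter heuristic kept by the hunks above (this is not the kernel's __switch_to; struct demo_task, demo_fpu_state_restore() and demo_switch_to() are invented names): a task that used the FPU on more than five consecutive timeslices gets its FPU state restored eagerly at switch-in, instead of waiting for the first FPU instruction to fault into the lazy-FPU trap.

/*
 * Sketch of the eager-vs-lazy FPU restore decision at context switch.
 */
#include <stdio.h>

struct demo_task {
	const char *name;
	unsigned char fpu_counter;	/* consecutive timeslices using the FPU */
};

static void demo_fpu_state_restore(struct demo_task *t)
{
	printf("%s: eager FPU restore\n", t->name);
}

static void demo_switch_to(struct demo_task *next)
{
	/*
	 * Same threshold as the diff: more than 5 FPU-using timeslices in
	 * a row means the task will almost certainly touch the FPU again,
	 * so pay the restore cost now and skip the lazy-FPU trap later.
	 */
	if (next->fpu_counter > 5)
		demo_fpu_state_restore(next);
	else
		printf("%s: lazy FPU (restore on first use)\n", next->name);
}

int main(void)
{
	struct demo_task heavy = { "fpu-heavy", 8 };
	struct demo_task light = { "fpu-light", 1 };

	demo_switch_to(&heavy);
	demo_switch_to(&light);
	return 0;
}

Because fpu.h now supplies a no-op fpu_state_restore() when CONFIG_SH_FPU is disabled, the same call compiles cleanly in either configuration, which is why the surrounding #if defined(CONFIG_SH_FPU)/#endif pairs could be removed here.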